// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 - Virtual Open Systems
* Author: Antonios Motakis <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include "vfio_platform_private.h"
#define DRIVER_VERSION "0.10"
#define DRIVER_AUTHOR "Antonios Motakis <[email protected]>"
#define DRIVER_DESC "VFIO for AMBA devices - User Level meta-driver"
/* probing devices from the AMBA bus */
static struct resource *get_amba_resource(struct vfio_platform_device *vdev,
int i)
{
struct amba_device *adev = (struct amba_device *) vdev->opaque;
if (i == 0)
return &adev->res;
return NULL;
}
static int get_amba_irq(struct vfio_platform_device *vdev, int i)
{
struct amba_device *adev = (struct amba_device *) vdev->opaque;
int ret = 0;
if (i < AMBA_NR_IRQS)
ret = adev->irq[i];
/* zero is an unset IRQ for AMBA devices */
return ret ? ret : -ENXIO;
}
static int vfio_amba_init_dev(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
struct amba_device *adev = to_amba_device(core_vdev->dev);
int ret;
vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid);
if (!vdev->name)
return -ENOMEM;
vdev->opaque = (void *) adev;
vdev->flags = VFIO_DEVICE_FLAGS_AMBA;
vdev->get_resource = get_amba_resource;
vdev->get_irq = get_amba_irq;
vdev->reset_required = false;
ret = vfio_platform_init_common(vdev);
if (ret)
kfree(vdev->name);
return ret;
}
static const struct vfio_device_ops vfio_amba_ops;
static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
struct vfio_platform_device *vdev;
int ret;
vdev = vfio_alloc_device(vfio_platform_device, vdev, &adev->dev,
&vfio_amba_ops);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
ret = vfio_register_group_dev(&vdev->vdev);
if (ret)
goto out_put_vdev;
pm_runtime_enable(&adev->dev);
dev_set_drvdata(&adev->dev, vdev);
return 0;
out_put_vdev:
vfio_put_device(&vdev->vdev);
return ret;
}
static void vfio_amba_release_dev(struct vfio_device *core_vdev)
{
struct vfio_platform_device *vdev =
container_of(core_vdev, struct vfio_platform_device, vdev);
vfio_platform_release_common(vdev);
kfree(vdev->name);
}
static void vfio_amba_remove(struct amba_device *adev)
{
struct vfio_platform_device *vdev = dev_get_drvdata(&adev->dev);
vfio_unregister_group_dev(&vdev->vdev);
pm_runtime_disable(vdev->device);
vfio_put_device(&vdev->vdev);
}
static const struct vfio_device_ops vfio_amba_ops = {
.name = "vfio-amba",
.init = vfio_amba_init_dev,
.release = vfio_amba_release_dev,
.open_device = vfio_platform_open_device,
.close_device = vfio_platform_close_device,
.ioctl = vfio_platform_ioctl,
.read = vfio_platform_read,
.write = vfio_platform_write,
.mmap = vfio_platform_mmap,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static const struct amba_id pl330_ids[] = {
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver vfio_amba_driver = {
.probe = vfio_amba_probe,
.remove = vfio_amba_remove,
.id_table = pl330_ids,
.drv = {
.name = "vfio-amba",
.owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
module_amba_driver(vfio_amba_driver);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/platform/vfio_amba.c |
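A minimal userspace sketch of how such an AMBA device is typically reached through the legacy VFIO group interface; the group number and device name below are hypothetical, only standard uapi VFIO ioctls are used, and error handling is omitted for brevity.
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int main(void)
{
	int container, group, device;
	struct vfio_group_status gstatus = { .argsz = sizeof(gstatus) };
	struct vfio_device_info dinfo = { .argsz = sizeof(dinfo) };

	container = open("/dev/vfio/vfio", O_RDWR);
	group = open("/dev/vfio/26", O_RDWR);	/* hypothetical group number */

	ioctl(group, VFIO_GROUP_GET_STATUS, &gstatus);
	if (!(gstatus.flags & VFIO_GROUP_FLAGS_VIABLE))
		return 1;	/* all devices of the group must be bound to VFIO */

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* hypothetical device name as exposed in sysfs */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "fff51000.dma");

	ioctl(device, VFIO_DEVICE_GET_INFO, &dinfo);
	printf("AMBA device: %s, %u region(s), %u irq(s)\n",
	       (dinfo.flags & VFIO_DEVICE_FLAGS_AMBA) ? "yes" : "no",
	       dinfo.num_regions, dinfo.num_irqs);
	return 0;
}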
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO platform devices interrupt handling
*
* Copyright (C) 2013 - Virtual Open Systems
* Author: Antonios Motakis <[email protected]>
*/
#include <linux/eventfd.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/irq.h>
#include "vfio_platform_private.h"
static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
{
unsigned long flags;
spin_lock_irqsave(&irq_ctx->lock, flags);
if (!irq_ctx->masked) {
disable_irq_nosync(irq_ctx->hwirq);
irq_ctx->masked = true;
}
spin_unlock_irqrestore(&irq_ctx->lock, flags);
}
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
struct vfio_platform_irq *irq_ctx = opaque;
vfio_platform_mask(irq_ctx);
return 0;
}
static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags,
void *data)
{
if (start != 0 || count != 1)
return -EINVAL;
if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
if (fd >= 0)
return vfio_virqfd_enable((void *) &vdev->irqs[index],
vfio_platform_mask_handler,
NULL, NULL,
&vdev->irqs[index].mask, fd);
vfio_virqfd_disable(&vdev->irqs[index].mask);
return 0;
}
if (flags & VFIO_IRQ_SET_DATA_NONE) {
vfio_platform_mask(&vdev->irqs[index]);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t mask = *(uint8_t *)data;
if (mask)
vfio_platform_mask(&vdev->irqs[index]);
}
return 0;
}
static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
{
unsigned long flags;
spin_lock_irqsave(&irq_ctx->lock, flags);
if (irq_ctx->masked) {
enable_irq(irq_ctx->hwirq);
irq_ctx->masked = false;
}
spin_unlock_irqrestore(&irq_ctx->lock, flags);
}
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
struct vfio_platform_irq *irq_ctx = opaque;
vfio_platform_unmask(irq_ctx);
return 0;
}
static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags,
void *data)
{
if (start != 0 || count != 1)
return -EINVAL;
if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
if (fd >= 0)
return vfio_virqfd_enable((void *) &vdev->irqs[index],
vfio_platform_unmask_handler,
NULL, NULL,
&vdev->irqs[index].unmask,
fd);
vfio_virqfd_disable(&vdev->irqs[index].unmask);
return 0;
}
if (flags & VFIO_IRQ_SET_DATA_NONE) {
vfio_platform_unmask(&vdev->irqs[index]);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t unmask = *(uint8_t *)data;
if (unmask)
vfio_platform_unmask(&vdev->irqs[index]);
}
return 0;
}
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
unsigned long flags;
int ret = IRQ_NONE;
spin_lock_irqsave(&irq_ctx->lock, flags);
if (!irq_ctx->masked) {
ret = IRQ_HANDLED;
/* automask maskable interrupts */
disable_irq_nosync(irq_ctx->hwirq);
irq_ctx->masked = true;
}
spin_unlock_irqrestore(&irq_ctx->lock, flags);
if (ret == IRQ_HANDLED)
eventfd_signal(irq_ctx->trigger, 1);
return ret;
}
static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
eventfd_signal(irq_ctx->trigger, 1);
return IRQ_HANDLED;
}
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
int fd, irq_handler_t handler)
{
struct vfio_platform_irq *irq = &vdev->irqs[index];
struct eventfd_ctx *trigger;
int ret;
if (irq->trigger) {
irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
free_irq(irq->hwirq, irq);
kfree(irq->name);
eventfd_ctx_put(irq->trigger);
irq->trigger = NULL;
}
if (fd < 0) /* Disable only */
return 0;
irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
irq->hwirq, vdev->name);
if (!irq->name)
return -ENOMEM;
trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) {
kfree(irq->name);
return PTR_ERR(trigger);
}
irq->trigger = trigger;
irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
if (ret) {
kfree(irq->name);
eventfd_ctx_put(trigger);
irq->trigger = NULL;
return ret;
}
if (!irq->masked)
enable_irq(irq->hwirq);
return 0;
}
static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags,
void *data)
{
struct vfio_platform_irq *irq = &vdev->irqs[index];
irq_handler_t handler;
if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
handler = vfio_automasked_irq_handler;
else
handler = vfio_irq_handler;
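/* count == 0 with DATA_NONE means tear down any existing eventfd trigger */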
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
return vfio_set_trigger(vdev, index, -1, handler);
if (start != 0 || count != 1)
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
return vfio_set_trigger(vdev, index, fd, handler);
}
if (flags & VFIO_IRQ_SET_DATA_NONE) {
handler(irq->hwirq, irq);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t trigger = *(uint8_t *)data;
if (trigger)
handler(irq->hwirq, irq);
}
return 0;
}
int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
uint32_t flags, unsigned index, unsigned start,
unsigned count, void *data)
{
int (*func)(struct vfio_platform_device *vdev, unsigned index,
unsigned start, unsigned count, uint32_t flags,
void *data) = NULL;
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
func = vfio_platform_set_irq_mask;
break;
case VFIO_IRQ_SET_ACTION_UNMASK:
func = vfio_platform_set_irq_unmask;
break;
case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_platform_set_irq_trigger;
break;
}
if (!func)
return -ENOTTY;
return func(vdev, index, start, count, flags, data);
}
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
int cnt = 0, i;
while (vdev->get_irq(vdev, cnt) >= 0)
cnt++;
vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq),
GFP_KERNEL_ACCOUNT);
if (!vdev->irqs)
return -ENOMEM;
for (i = 0; i < cnt; i++) {
int hwirq = vdev->get_irq(vdev, i);
if (hwirq < 0)
goto err;
spin_lock_init(&vdev->irqs[i].lock);
vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
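/* a level-triggered line stays asserted until the device is serviced, so
 * advertise it as maskable and automasked: the handler masks it, userspace
 * must unmask it */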
if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
| VFIO_IRQ_INFO_AUTOMASKED;
vdev->irqs[i].count = 1;
vdev->irqs[i].hwirq = hwirq;
vdev->irqs[i].masked = false;
}
vdev->num_irqs = cnt;
return 0;
err:
kfree(vdev->irqs);
return -EINVAL;
}
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
int i;
for (i = 0; i < vdev->num_irqs; i++)
vfio_set_trigger(vdev, i, -1, NULL);
vdev->num_irqs = 0;
kfree(vdev->irqs);
}
| linux-master | drivers/vfio/platform/vfio_platform_irq.c |
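The trigger and mask/unmask paths above are driven from userspace through VFIO_DEVICE_SET_IRQS. The following sketch shows the assumed usage for a level-triggered (automasked) line on IRQ index 0; the device fd is obtained as in the earlier example, and error handling is omitted.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void wire_and_service_irq0(int device)
{
	struct vfio_irq_set *set;
	int32_t efd = eventfd(0, 0);
	uint64_t cnt;

	/* attach the eventfd as trigger: handled by vfio_platform_set_irq_trigger() */
	set = malloc(sizeof(*set) + sizeof(int32_t));
	set->argsz = sizeof(*set) + sizeof(int32_t);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = 0;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(int32_t));
	ioctl(device, VFIO_DEVICE_SET_IRQS, set);

	read(efd, &cnt, sizeof(cnt));	/* blocks until the IRQ fires */

	/* the automasked handler disabled the line; re-enable it after servicing */
	set->argsz = sizeof(*set);
	set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
	free(set);
}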
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO platform driver specialized for AMD xgbe reset
* reset code is inherited from AMD xgbe native driver
*
* Copyright (c) 2015 Linaro Ltd.
* www.linaro.org
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <uapi/linux/mdio.h>
#include <linux/delay.h>
#include "../vfio_platform_private.h"
#define DMA_MR 0x3000
#define MAC_VR 0x0110
#define DMA_ISR 0x3008
#define MAC_ISR 0x00b0
#define PCS_MMD_SELECT 0xff
#define MDIO_AN_INT 0x8002
#define MDIO_AN_INTMASK 0x8001
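/*
 * Indirect MDIO access, inherited from the native amd-xgbe driver: the
 * combined (mmd << 16 | reg) address is split so that its upper bits,
 * written through the PCS_MMD_SELECT register, select the active register
 * window, while the low eight bits (scaled by 4 for 32-bit registers)
 * index into that window.
 */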
static unsigned int xmdio_read(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg)
{
unsigned int mmd_address, value;
mmd_address = (mmd << 16) | ((reg) & 0xffff);
iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2));
value = ioread32(ioaddr + ((mmd_address & 0xff) << 2));
return value;
}
static void xmdio_write(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg, unsigned int value)
{
unsigned int mmd_address;
mmd_address = (mmd << 16) | ((reg) & 0xffff);
iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2));
iowrite32(value, ioaddr + ((mmd_address & 0xff) << 2));
}
static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
{
struct vfio_platform_region *xgmac_regs = &vdev->regions[0];
struct vfio_platform_region *xpcs_regs = &vdev->regions[1];
u32 dma_mr_value, pcs_value, value;
unsigned int count;
if (!xgmac_regs->ioaddr) {
xgmac_regs->ioaddr =
ioremap(xgmac_regs->addr, xgmac_regs->size);
if (!xgmac_regs->ioaddr)
return -ENOMEM;
}
if (!xpcs_regs->ioaddr) {
xpcs_regs->ioaddr =
ioremap(xpcs_regs->addr, xpcs_regs->size);
if (!xpcs_regs->ioaddr)
return -ENOMEM;
}
/* reset the PHY through MDIO */
pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1);
pcs_value |= MDIO_CTRL1_RESET;
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1, pcs_value);
count = 50;
do {
msleep(20);
pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS,
MDIO_CTRL1);
} while ((pcs_value & MDIO_CTRL1_RESET) && --count);
if (pcs_value & MDIO_CTRL1_RESET)
dev_warn(vdev->device, "%s: XGBE PHY reset timeout\n",
__func__);
/* disable auto-negotiation */
value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1);
value &= ~MDIO_AN_CTRL1_ENABLE;
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1, value);
/* disable AN IRQ */
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
/* clear AN IRQ */
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INT, 0);
/* MAC software reset */
dma_mr_value = ioread32(xgmac_regs->ioaddr + DMA_MR);
dma_mr_value |= 0x1;
iowrite32(dma_mr_value, xgmac_regs->ioaddr + DMA_MR);
usleep_range(10, 15);
count = 2000;
while (--count && (ioread32(xgmac_regs->ioaddr + DMA_MR) & 1))
usleep_range(500, 600);
if (!count)
dev_warn(vdev->device, "%s: MAC SW reset failed\n", __func__);
return 0;
}
module_vfio_reset_handler("amd,xgbe-seattle-v1a", vfio_platform_amdxgbe_reset);
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Auger <[email protected]>");
MODULE_DESCRIPTION("Reset support for AMD xgbe vfio platform device");
| linux-master | drivers/vfio/platform/reset/vfio_platform_amdxgbe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Broadcom
*/
/*
* This driver provides reset support for Broadcom FlexRM ring manager
* to VFIO platform.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "../vfio_platform_private.h"
/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
#define RING_VER_MAGIC 0x76303031
/* Per-Ring register offsets */
#define RING_VER 0x000
#define RING_CONTROL 0x034
#define RING_FLUSH_DONE 0x038
/* Register RING_CONTROL fields */
#define CONTROL_FLUSH_SHIFT 5
/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK 0x1
static int vfio_platform_bcmflexrm_shutdown(void __iomem *ring)
{
unsigned int timeout;
/* Disable/inactivate ring */
writel_relaxed(0x0, ring + RING_CONTROL);
/* Set ring flush state */
timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring + RING_CONTROL);
do {
if (readl_relaxed(ring + RING_FLUSH_DONE) &
FLUSH_DONE_MASK)
break;
mdelay(1);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
writel_relaxed(0x0, ring + RING_CONTROL);
do {
if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
return 0;
}
static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
{
void __iomem *ring;
int rc = 0, ret = 0, ring_num = 0;
struct vfio_platform_region *reg = &vdev->regions[0];
/* Map FlexRM ring registers if not mapped */
if (!reg->ioaddr) {
reg->ioaddr = ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
/* Discover and shutdown each FlexRM ring */
for (ring = reg->ioaddr;
ring < (reg->ioaddr + reg->size); ring += RING_REGS_SIZE) {
if (readl_relaxed(ring + RING_VER) == RING_VER_MAGIC) {
rc = vfio_platform_bcmflexrm_shutdown(ring);
if (rc) {
dev_warn(vdev->device,
"FlexRM ring%d shutdown error %d\n",
ring_num, rc);
ret |= rc;
}
ring_num++;
}
}
return ret;
}
module_vfio_reset_handler("brcm,iproc-flexrm-mbox",
vfio_platform_bcmflexrm_reset);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anup Patel <[email protected]>");
MODULE_DESCRIPTION("Reset support for Broadcom FlexRM VFIO platform device");
| linux-master | drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO platform driver specialized for Calxeda xgmac reset
* reset code is inherited from calxeda xgmac native driver
*
* Copyright 2010-2011 Calxeda, Inc.
* Copyright (c) 2015 Linaro Ltd.
* www.linaro.org
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include "../vfio_platform_private.h"
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Eric Auger <[email protected]>"
#define DRIVER_DESC "Reset support for Calxeda xgmac vfio platform device"
/* XGMAC Register definitions */
#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
/* DMA Control and Status Registers */
#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
/* DMA Control register defines */
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
/* Common MAC defines */
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
writel(value, ioaddr + XGMAC_DMA_CONTROL);
value = readl(ioaddr + XGMAC_CONTROL);
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
writel(value, ioaddr + XGMAC_CONTROL);
}
static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
{
struct vfio_platform_region *reg = &vdev->regions[0];
if (!reg->ioaddr) {
reg->ioaddr =
ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
/* disable IRQ */
writel(0, reg->ioaddr + XGMAC_DMA_INTR_ENA);
/* Disable the MAC core */
xgmac_mac_disable(reg->ioaddr);
return 0;
}
module_vfio_reset_handler("calxeda,hb-xgmac", vfio_platform_calxedaxgmac_reset);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c |
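The three reset modules above follow the same shape: map region 0 on demand, quiesce the device, and register the handler against a compatible string. A skeleton for a hypothetical new device, with placeholder names and a made-up register offset, would look like this:
#include <linux/io.h>
#include <linux/module.h>
#include "../vfio_platform_private.h"

#define FOO_CTRL	0x0	/* placeholder register offset */

static int vfio_platform_foo_reset(struct vfio_platform_device *vdev)
{
	struct vfio_platform_region *reg = &vdev->regions[0];

	if (!reg->ioaddr) {
		reg->ioaddr = ioremap(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}
	/* device-specific quiesce sequence: stop DMA, mask interrupts, ... */
	writel(0, reg->ioaddr + FOO_CTRL);
	return 0;
}
module_vfio_reset_handler("vendor,foo-device", vfio_platform_foo_reset);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Reset support skeleton for a hypothetical VFIO platform device");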
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2019
* Author(s): Harald Freudenberger <[email protected]>
* Ingo Franzki <[email protected]>
*
* Collection of CCA misc functions used by zcrypt and pkey
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <asm/zcrypt.h>
#include <asm/pkey.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_debug.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_ccamisc.h"
#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
/* Size of parameter block used for all cca requests/replies */
#define PARMBSIZE 512
/* Size of vardata block used for some of the cca requests/replies */
#define VARDATASIZE 4096
struct cca_info_list_entry {
struct list_head list;
u16 cardnr;
u16 domain;
struct cca_info info;
};
/* a list with cca_info_list_entry entries */
static LIST_HEAD(cca_info_list);
static DEFINE_SPINLOCK(cca_info_list_lock);
/*
* Simple check if the token is a valid CCA secure AES data key
* token. If keybitsize is given, the bitsize of the key is
* also checked. Returns 0 on success or errno value on failure.
*/
int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
const u8 *token, int keybitsize)
{
struct secaeskeytoken *t = (struct secaeskeytoken *)token;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (t->type != TOKTYPE_CCA_INTERNAL) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
__func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
return -EINVAL;
}
if (t->version != TOKVER_CCA_AES) {
if (dbg)
DBF("%s token check failed, version 0x%02x != 0x%02x\n",
__func__, (int)t->version, TOKVER_CCA_AES);
return -EINVAL;
}
if (keybitsize > 0 && t->bitsize != keybitsize) {
if (dbg)
DBF("%s token check failed, bitsize %d != %d\n",
__func__, (int)t->bitsize, keybitsize);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(cca_check_secaeskeytoken);
/*
* Simple check if the token is a valid CCA secure AES cipher key
* token. If keybitsize is given, the bitsize of the key is
* also checked. If checkcpacfexport is enabled, the key is also
* checked for the export flag to allow CPACF export.
* Returns 0 on success or errno value on failure.
*/
int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
const u8 *token, int keybitsize,
int checkcpacfexport)
{
struct cipherkeytoken *t = (struct cipherkeytoken *)token;
bool keybitsizeok = true;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (t->type != TOKTYPE_CCA_INTERNAL) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
__func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
return -EINVAL;
}
if (t->version != TOKVER_CCA_VLSC) {
if (dbg)
DBF("%s token check failed, version 0x%02x != 0x%02x\n",
__func__, (int)t->version, TOKVER_CCA_VLSC);
return -EINVAL;
}
if (t->algtype != 0x02) {
if (dbg)
DBF("%s token check failed, algtype 0x%02x != 0x02\n",
__func__, (int)t->algtype);
return -EINVAL;
}
if (t->keytype != 0x0001) {
if (dbg)
DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
__func__, (int)t->keytype);
return -EINVAL;
}
if (t->plfver != 0x00 && t->plfver != 0x01) {
if (dbg)
DBF("%s token check failed, unknown plfver 0x%02x\n",
__func__, (int)t->plfver);
return -EINVAL;
}
if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
if (dbg)
DBF("%s token check failed, unknown wpllen %d\n",
__func__, (int)t->wpllen);
return -EINVAL;
}
if (keybitsize > 0) {
switch (keybitsize) {
case 128:
if (t->wpllen != (t->plfver ? 640 : 512))
keybitsizeok = false;
break;
case 192:
if (t->wpllen != (t->plfver ? 640 : 576))
keybitsizeok = false;
break;
case 256:
if (t->wpllen != 640)
keybitsizeok = false;
break;
default:
keybitsizeok = false;
break;
}
if (!keybitsizeok) {
if (dbg)
DBF("%s token check failed, bitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
}
if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
if (dbg)
DBF("%s token check failed, XPRT_CPAC bit is 0\n",
__func__);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(cca_check_secaescipherkey);
/*
* Simple check if the token is a valid CCA secure ECC private
* key token. Returns 0 on success or errno value on failure.
*/
int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
const u8 *token, size_t keysize,
int checkcpacfexport)
{
struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
__func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA);
return -EINVAL;
}
if (t->len > keysize) {
if (dbg)
DBF("%s token check failed, len %d > keysize %zu\n",
__func__, (int)t->len, keysize);
return -EINVAL;
}
if (t->secid != 0x20) {
if (dbg)
DBF("%s token check failed, secid 0x%02x != 0x20\n",
__func__, (int)t->secid);
return -EINVAL;
}
if (checkcpacfexport && !(t->kutc & 0x01)) {
if (dbg)
DBF("%s token check failed, XPRTCPAC bit is 0\n",
__func__);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(cca_check_sececckeytoken);
/*
* Allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block and fill in values
* for the common fields. Returns 0 on success or errno value
* on failure.
*/
static int alloc_and_prep_cprbmem(size_t paramblen,
u8 **p_cprb_mem,
struct CPRBX **p_req_cprb,
struct CPRBX **p_rep_cprb)
{
u8 *cprbmem;
size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
struct CPRBX *preqcblk, *prepcblk;
/*
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
if (!cprbmem)
return -ENOMEM;
preqcblk = (struct CPRBX *)cprbmem;
prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen);
/* fill request cprb struct */
preqcblk->cprb_len = sizeof(struct CPRBX);
preqcblk->cprb_ver_id = 0x02;
memcpy(preqcblk->func_id, "T2", 2);
preqcblk->rpl_msgbl = cprbplusparamblen;
if (paramblen) {
preqcblk->req_parmb =
((u8 __user *)preqcblk) + sizeof(struct CPRBX);
preqcblk->rpl_parmb =
((u8 __user *)prepcblk) + sizeof(struct CPRBX);
}
*p_cprb_mem = cprbmem;
*p_req_cprb = preqcblk;
*p_rep_cprb = prepcblk;
return 0;
}
/*
* Free the cprb memory allocated with the function above.
* If the scrub value is not zero, the memory is filled
* with zeros before freeing (useful if there was some
* clear key material in there).
*/
static void free_cprbmem(void *mem, size_t paramblen, int scrub)
{
if (scrub)
memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
kfree(mem);
}
/*
* Helper function to prepare the xcrb struct
*/
static inline void prep_xcrb(struct ica_xcRB *pxcrb,
u16 cardnr,
struct CPRBX *preqcblk,
struct CPRBX *prepcblk)
{
memset(pxcrb, 0, sizeof(*pxcrb));
pxcrb->agent_ID = 0x4341; /* 'CA' */
pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
pxcrb->request_control_blk_length =
preqcblk->cprb_len + preqcblk->req_parml;
pxcrb->request_control_blk_addr = (void __user *)preqcblk;
pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
pxcrb->reply_control_blk_addr = (void __user *)prepcblk;
}
/*
* Generate (random) CCA AES DATA secure key.
*/
int cca_genseckey(u16 cardnr, u16 domain,
u32 keybitsize, u8 *seckey)
{
int i, rc, keysize;
int seckeysize;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct kgreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv1 {
u16 len;
char key_form[8];
char key_length[8];
char key_type1[8];
char key_type2[8];
} lv1;
struct lv2 {
u16 len;
struct keyid {
u16 len;
u16 attr;
u8 data[SECKEYBLOBSIZE];
} keyid[6];
} lv2;
} __packed * preqparm;
struct kgrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 keyblocklen;
struct {
u16 toklen;
u16 tokattr;
u8 tok[];
/* ... some more data ... */
} keyblock;
} lv3;
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with KG request */
preqparm = (struct kgreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "KG", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
memcpy(preqparm->lv1.key_form, "OP ", 8);
switch (keybitsize) {
case PKEY_SIZE_AES_128:
case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
keysize = 16;
memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
break;
case PKEY_SIZE_AES_192:
case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
keysize = 24;
memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
break;
case PKEY_SIZE_AES_256:
case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
keysize = 32;
memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
break;
default:
DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
rc = -EINVAL;
goto out;
}
memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
preqparm->lv2.len = sizeof(struct lv2);
for (i = 0; i < 6; i++) {
preqparm->lv2.keyid[i].len = sizeof(struct keyid);
preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
}
preqcblk->req_parml = sizeof(struct kgreqparm);
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct kgrepparm *)ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
__func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
/* check secure key token */
rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
prepparm->lv3.keyblock.tok, 8 * keysize);
if (rc) {
rc = -EIO;
goto out;
}
/* copy the generated secure key token */
memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(cca_genseckey);
/*
* Generate a CCA AES DATA secure key from a given clear key value.
*/
int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
const u8 *clrkey, u8 *seckey)
{
int rc, keysize, seckeysize;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct cmreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[8];
struct lv1 {
u16 len;
u8 clrkey[];
} lv1;
/* followed by struct lv2 */
} __packed * preqparm;
struct lv2 {
u16 len;
struct keyid {
u16 len;
u16 attr;
u8 data[SECKEYBLOBSIZE];
} keyid;
} __packed * plv2;
struct cmrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 keyblocklen;
struct {
u16 toklen;
u16 tokattr;
u8 tok[];
/* ... some more data ... */
} keyblock;
} lv3;
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with CM request */
preqparm = (struct cmreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "CM", 2);
memcpy(preqparm->rule_array, "AES ", 8);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
switch (keybitsize) {
case PKEY_SIZE_AES_128:
case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
keysize = 16;
break;
case PKEY_SIZE_AES_192:
case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
keysize = 24;
break;
case PKEY_SIZE_AES_256:
case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
keysize = 32;
break;
default:
DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
rc = -EINVAL;
goto out;
}
preqparm->lv1.len = sizeof(struct lv1) + keysize;
memcpy(preqparm->lv1.clrkey, clrkey, keysize);
plv2 = (struct lv2 *)(((u8 *)preqparm) + sizeof(*preqparm) + keysize);
plv2->len = sizeof(struct lv2);
plv2->keyid.len = sizeof(struct keyid);
plv2->keyid.attr = 0x30;
preqcblk->req_parml = sizeof(*preqparm) + keysize + sizeof(*plv2);
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct cmrepparm *)ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
__func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
/* check secure key token */
rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
prepparm->lv3.keyblock.tok, 8 * keysize);
if (rc) {
rc = -EIO;
goto out;
}
/* copy the generated secure key token */
if (seckey)
memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
free_cprbmem(mem, PARMBSIZE, 1);
return rc;
}
EXPORT_SYMBOL(cca_clr2seckey);
/*
* Derive a protected key from a CCA AES DATA secure key.
*/
int cca_sec2protkey(u16 cardnr, u16 domain,
const u8 *seckey, u8 *protkey, u32 *protkeylen,
u32 *protkeytype)
{
int rc;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct uskreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv1 {
u16 len;
u16 attr_len;
u16 attr_flags;
} lv1;
struct lv2 {
u16 len;
u16 attr_len;
u16 attr_flags;
u8 token[]; /* cca secure key token */
} lv2;
} __packed * preqparm;
struct uskrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct lv3 {
u16 len;
u16 attr_len;
u16 attr_flags;
struct cpacfkeyblock {
u8 version; /* version of this struct */
u8 flags[2];
u8 algo;
u8 form;
u8 pad1[3];
u16 len;
u8 key[64]; /* the key (len bytes) */
u16 keyattrlen;
u8 keyattr[32];
u8 pad2[1];
u8 vptype;
u8 vp[32]; /* verification pattern */
} ckb;
} lv3;
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with USK request */
preqparm = (struct uskreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "US", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
preqparm->lv1.attr_flags = 0x0001;
preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
preqparm->lv2.attr_len = sizeof(struct lv2)
- sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
preqparm->lv2.attr_flags = 0x0000;
memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE);
preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct uskrepparm *)ptr;
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
prepparm->lv3.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
__func__, (int)prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
switch (prepparm->lv3.ckb.len) {
case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
DEBUG_ERR("%s unknown/unsupported keylen %d\n",
__func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
if (protkeylen)
*protkeylen = prepparm->lv3.ckb.len;
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(cca_sec2protkey);
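/*
 * Usage sketch (illustration only, not part of this file): generate a random
 * AES DATA secure key and derive its protected key, roughly the way the pkey
 * module consumes these helpers. Card 2 / domain 55 are placeholder APQN
 * coordinates.
 */
static inline int example_aes_data_key(void)
{
	u8 seckey[SECKEYBLOBSIZE];
	u8 protkey[64];		/* covers the largest case handled above (32 + 32) */
	u32 protkeylen, protkeytype;
	int rc;

	rc = cca_genseckey(2, 55, PKEY_SIZE_AES_256, seckey);
	if (rc)
		return rc;
	return cca_sec2protkey(2, 55, seckey, protkey, &protkeylen, &protkeytype);
}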
/*
* AES cipher key skeleton created with CSNBKTB2 with these flags:
* INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
* NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
* used by cca_gencipherkey() and cca_clr2cipherkey().
*/
static const u8 aes_cipher_key_skeleton[] = {
0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
/*
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize)
{
int rc;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct gkreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[2 * 8];
struct {
u16 len;
u8 key_type_1[8];
u8 key_type_2[8];
u16 clear_key_bit_len;
u16 key_name_1_len;
u16 key_name_2_len;
u16 user_data_1_len;
u16 user_data_2_len;
/* u8 key_name_1[]; */
/* u8 key_name_2[]; */
/* u8 user_data_1[]; */
/* u8 user_data_2[]; */
} vud;
struct {
u16 len;
struct {
u16 len;
u16 flag;
/* u8 kek_id_1[]; */
} tlv1;
struct {
u16 len;
u16 flag;
/* u8 kek_id_2[]; */
} tlv2;
struct {
u16 len;
u16 flag;
u8 gen_key_id_1[SIZEOF_SKELETON];
} tlv3;
struct {
u16 len;
u16 flag;
/* u8 gen_key_id_1_label[]; */
} tlv4;
struct {
u16 len;
u16 flag;
/* u8 gen_key_id_2[]; */
} tlv5;
struct {
u16 len;
u16 flag;
/* u8 gen_key_id_2_label[]; */
} tlv6;
} kb;
} __packed * preqparm;
struct gkrepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct {
u16 len;
} vud;
struct {
u16 len;
struct {
u16 len;
u16 flag;
u8 gen_key[]; /* 120-136 bytes */
} tlv1;
} kb;
} __packed * prepparm;
struct cipherkeytoken *t;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
preqcblk->req_parml = sizeof(struct gkreqparm);
/* prepare request param block with GK request */
preqparm = (struct gkreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "GK", 2);
preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preqparm->rule_array, "AES OP ", 2 * 8);
/* prepare vud block */
preqparm->vud.len = sizeof(preqparm->vud);
switch (keybitsize) {
case 128:
case 192:
case 256:
break;
default:
DEBUG_ERR(
"%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
rc = -EINVAL;
goto out;
}
preqparm->vud.clear_key_bit_len = keybitsize;
memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
/* prepare kb block */
preqparm->kb.len = sizeof(preqparm->kb);
preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
preqparm->kb.tlv1.flag = 0x0030;
preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
preqparm->kb.tlv2.flag = 0x0030;
preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
preqparm->kb.tlv3.flag = 0x0030;
memcpy(preqparm->kb.tlv3.gen_key_id_1,
aes_cipher_key_skeleton, SIZEOF_SKELETON);
preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
preqparm->kb.tlv4.flag = 0x0030;
preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
preqparm->kb.tlv5.flag = 0x0030;
preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
preqparm->kb.tlv6.flag = 0x0030;
/* patch the skeleton key token export flags inside the kb block */
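/* (the high byte of keygenflags selects kmf1 bits to set, the low byte bits to clear) */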
if (keygenflags) {
t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1;
t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
}
/* prepare xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"%s cipher key generate failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct gkrepparm *)ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
goto out;
}
/* and some checks on the generated key */
rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
prepparm->kb.tlv1.gen_key,
keybitsize, 1);
if (rc) {
rc = -EIO;
goto out;
}
/* copy the generated vlsc key token */
t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key;
if (keybuf) {
if (*keybufsize >= t->len)
memcpy(keybuf, t, t->len);
else
rc = -EINVAL;
}
*keybufsize = t->len;
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(cca_gencipherkey);
/*
* Helper function that builds and sends a CSNBKPI2 CPRB.
*/
static int _ip_cprb_helper(u16 cardnr, u16 domain,
const char *rule_array_1,
const char *rule_array_2,
const char *rule_array_3,
const u8 *clr_key_value,
int clr_key_bit_size,
u8 *key_token,
int *key_token_size)
{
int rc, n;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct rule_array_block {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[];
} __packed * preq_ra_block;
struct vud_block {
u16 len;
struct {
u16 len;
u16 flag; /* 0x0064 */
u16 clr_key_bit_len;
} tlv1;
struct {
u16 len;
u16 flag; /* 0x0063 */
u8 clr_key[]; /* clear key value bytes */
} tlv2;
} __packed * preq_vud_block;
struct key_block {
u16 len;
struct {
u16 len;
u16 flag; /* 0x0030 */
u8 key_token[]; /* key skeleton */
} tlv1;
} __packed * preq_key_block;
struct iprepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct {
u16 len;
} vud;
struct {
u16 len;
struct {
u16 len;
u16 flag; /* 0x0030 */
u8 key_token[]; /* key token */
} tlv1;
} kb;
} __packed * prepparm;
struct cipherkeytoken *t;
int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
preqcblk->req_parml = 0;
/* prepare request param block with IP request */
preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb;
memcpy(preq_ra_block->subfunc_code, "IP", 2);
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preq_ra_block->rule_array, rule_array_1, 8);
memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
if (rule_array_3) {
preq_ra_block->rule_array_len += 8;
memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
preqcblk->req_parml += 8;
}
/* prepare vud block */
preq_vud_block = (struct vud_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = complete ? 0 : (clr_key_bit_size + 7) / 8;
preq_vud_block->len = sizeof(struct vud_block) + n;
preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
preq_vud_block->tlv1.flag = 0x0064;
preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
preq_vud_block->tlv2.flag = 0x0063;
if (!complete)
memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
preqcblk->req_parml += preq_vud_block->len;
/* prepare key block */
preq_key_block = (struct key_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = *key_token_size;
preq_key_block->len = sizeof(struct key_block) + n;
preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
preq_key_block->tlv1.flag = 0x0030;
memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
preqcblk->req_parml += preq_key_block->len;
/* prepare xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"%s CSNBKPI2 failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct iprepparm *)ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
goto out;
}
/* do not check the key here, it may be incomplete */
/* copy the vlsc key token back */
t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token;
memcpy(key_token, t, t->len);
*key_token_size = t->len;
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
/*
* Build a CCA AES CIPHER secure key from a given clear key value.
*/
int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
{
int rc;
u8 *token;
int tokensize;
u8 exorbuf[32];
struct cipherkeytoken *t;
/* fill exorbuf with random data */
get_random_bytes(exorbuf, sizeof(exorbuf));
/* allocate space for the key token to build */
token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
if (!token)
return -ENOMEM;
/* prepare the token with the key skeleton */
tokensize = SIZEOF_SKELETON;
memcpy(token, aes_cipher_key_skeleton, tokensize);
/* patch the skeleton key token export flags */
if (keygenflags) {
t = (struct cipherkeytoken *)token;
t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
}
/*
* Do the key import with the clear key value in 4 steps:
* 1/4 FIRST import with only random data
* 2/4 EXOR the clear key
* 3/4 EXOR the very same random data again
* 4/4 COMPLETE the secure cipher key import
*/
rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
exorbuf, keybitsize, token, &tokensize);
if (rc) {
DEBUG_ERR(
"%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
clrkey, keybitsize, token, &tokensize);
if (rc) {
DEBUG_ERR(
"%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
exorbuf, keybitsize, token, &tokensize);
if (rc) {
DEBUG_ERR(
"%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
NULL, keybitsize, token, &tokensize);
if (rc) {
DEBUG_ERR(
"%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
/* copy the generated key token */
if (keybuf) {
if (tokensize > *keybufsize)
rc = -EINVAL;
else
memcpy(keybuf, token, tokensize);
}
*keybufsize = tokensize;
out:
kfree(token);
return rc;
}
EXPORT_SYMBOL(cca_clr2cipherkey);
/*
* Derive a protected key from a CCA AES CIPHER secure key.
*/
int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct aureqparm {
u8 subfunc_code[2];
u16 rule_array_len;
u8 rule_array[8];
struct {
u16 len;
u16 tk_blob_len;
u16 tk_blob_tag;
u8 tk_blob[66];
} vud;
struct {
u16 len;
u16 cca_key_token_len;
u16 cca_key_token_flags;
u8 cca_key_token[]; /* 64 or more */
} kb;
} __packed * preqparm;
struct aurepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct {
u16 len;
u16 sublen;
u16 tag;
struct cpacfkeyblock {
u8 version; /* version of this struct */
u8 flags[2];
u8 algo;
u8 form;
u8 pad1[3];
u16 keylen;
u8 key[64]; /* the key (keylen bytes) */
u16 keyattrlen;
u8 keyattr[32];
u8 pad2[1];
u8 vptype;
u8 vp[32]; /* verification pattern */
} ckb;
} vud;
struct {
u16 len;
} kb;
} __packed * prepparm;
int keytoklen = ((struct cipherkeytoken *)ckey)->len;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
+ sizeof(preqparm->rule_array);
memcpy(preqparm->rule_array, "EXPT-SK ", 8);
/* vud, tk blob */
preqparm->vud.len = sizeof(preqparm->vud);
preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ 2 * sizeof(uint16_t);
preqparm->vud.tk_blob_tag = 0x00C2;
/* kb, cca token */
preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
/* now fill length of param block into cprb */
preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"%s unwrap secure key failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
DEBUG_WARN(
"%s unwrap secure key warning, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct aurepparm *)ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
prepparm->vud.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
__func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x02) {
DEBUG_ERR(
"%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
__func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
switch (prepparm->vud.ckb.keylen) {
case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
DEBUG_ERR("%s unknown/unsupported keylen %d\n",
__func__, prepparm->vud.ckb.keylen);
rc = -EIO;
goto out;
}
memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
if (protkeylen)
*protkeylen = prepparm->vud.ckb.keylen;
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(cca_cipher2protkey);
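/*
 * Usage sketch (illustration only): generate a random AES CIPHER (variable
 * length) secure key and derive its protected key. Card 2 / domain 55 are
 * placeholder APQN coordinates; keygenflags 0 keeps the skeleton's default
 * export flags.
 */
static inline int example_aes_cipher_key(void)
{
	u8 ckey[MAXCCAVLSCTOKENSIZE];
	size_t ckeylen = sizeof(ckey);
	u8 protkey[64];
	u32 protkeylen, protkeytype;
	int rc;

	rc = cca_gencipherkey(2, 55, 256, 0, ckey, &ckeylen);
	if (rc)
		return rc;
	return cca_cipher2protkey(2, 55, ckey, protkey, &protkeylen, &protkeytype);
}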
/*
* Derive protected key from CCA ECC secure private key.
*/
int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct aureqparm {
u8 subfunc_code[2];
u16 rule_array_len;
u8 rule_array[8];
struct {
u16 len;
u16 tk_blob_len;
u16 tk_blob_tag;
u8 tk_blob[66];
} vud;
struct {
u16 len;
u16 cca_key_token_len;
u16 cca_key_token_flags;
u8 cca_key_token[];
} kb;
} __packed * preqparm;
struct aurepparm {
u8 subfunc_code[2];
u16 rule_array_len;
struct {
u16 len;
u16 sublen;
u16 tag;
struct cpacfkeyblock {
u8 version; /* version of this struct */
u8 flags[2];
u8 algo;
u8 form;
u8 pad1[3];
u16 keylen;
u8 key[]; /* the key (keylen bytes) */
/* u16 keyattrlen; */
/* u8 keyattr[32]; */
/* u8 pad2[1]; */
/* u8 vptype; */
/* u8 vp[32]; verification pattern */
} ckb;
} vud;
/* followed by a key block */
} __packed * prepparm;
int keylen = ((struct eccprivkeytoken *)key)->len;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
+ sizeof(preqparm->rule_array);
memcpy(preqparm->rule_array, "EXPT-SK ", 8);
/* vud, tk blob */
preqparm->vud.len = sizeof(preqparm->vud);
preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ 2 * sizeof(uint16_t);
preqparm->vud.tk_blob_tag = 0x00C2;
/* kb, cca token */
preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
memcpy(preqparm->kb.cca_key_token, key, keylen);
/* now fill length of param block into cprb */
preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
"%s unwrap secure key failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
DEBUG_WARN(
"%s unwrap secure key warning, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct aurepparm *)ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
__func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x81) {
DEBUG_ERR(
"%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
__func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (prepparm->vud.ckb.keylen > *protkeylen) {
DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
__func__, prepparm->vud.ckb.keylen, *protkeylen);
rc = -EIO;
goto out;
}
memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
*protkeylen = prepparm->vud.ckb.keylen;
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_ECC;
out:
free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
EXPORT_SYMBOL(cca_ecc2protkey);
/*
* query cryptographic facility from CCA adapter
*/
int cca_query_crypto_facility(u16 cardnr, u16 domain,
const char *keyword,
u8 *rarray, size_t *rarraylen,
u8 *varray, size_t *varraylen)
{
int rc;
u16 len;
u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct fqreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
char rule_array[8];
struct lv1 {
u16 len;
u8 data[VARDATASIZE];
} lv1;
u16 dummylen;
} __packed * preqparm;
size_t parmbsize = sizeof(struct fqreqparm);
struct fqrepparm {
u8 subfunc_code[2];
u8 lvdata[];
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
if (rc)
return rc;
/* fill request cprb struct */
preqcblk->domain = domain;
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
preqparm->lv1.len = sizeof(preqparm->lv1);
preqparm->dummylen = sizeof(preqparm->dummylen);
preqcblk->req_parml = parmbsize;
/* fill xcrb struct */
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
__func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
prepcblk->rpl_parmb = (u8 __user *)ptr;
prepparm = (struct fqrepparm *)ptr;
ptr = prepparm->lvdata;
/* check and possibly copy reply rule array */
len = *((u16 *)ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
if (rarray && rarraylen && *rarraylen > 0) {
*rarraylen = (len > *rarraylen ? *rarraylen : len);
memcpy(rarray, ptr, *rarraylen);
}
ptr += len;
}
	/* check and possibly copy reply var array */
len = *((u16 *)ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
if (varray && varraylen && *varraylen > 0) {
*varraylen = (len > *varraylen ? *varraylen : len);
memcpy(varray, ptr, *varraylen);
}
ptr += len;
}
out:
free_cprbmem(mem, parmbsize, 0);
return rc;
}
EXPORT_SYMBOL(cca_query_crypto_facility);
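/*
 * Illustrative usage sketch (not part of this driver; identifiers below are
 * assumptions): fetch_cca_info() further down uses this function with the
 * "STATICSA" and "STATICSB" keywords. The keyword must fill the 8 byte rule
 * array, and *rarraylen / *varraylen are in/out parameters: on entry the
 * available buffer sizes, on exit the number of bytes actually copied.
 *
 *	u8 rarray[16 * 8], varray[512];
 *	size_t rlen = sizeof(rarray), vlen = sizeof(varray);
 *	int rc;
 *
 *	rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
 *				       rarray, &rlen, varray, &vlen);
 *	if (rc == 0)
 *		pr_debug("rule array %zu bytes, var array %zu bytes\n",
 *			 rlen, vlen);
 */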
static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci)
{
int rc = -ENOENT;
struct cca_info_list_entry *ptr;
spin_lock_bh(&cca_info_list_lock);
list_for_each_entry(ptr, &cca_info_list, list) {
if (ptr->cardnr == cardnr && ptr->domain == domain) {
memcpy(ci, &ptr->info, sizeof(*ci));
rc = 0;
break;
}
}
spin_unlock_bh(&cca_info_list_lock);
return rc;
}
static void cca_info_cache_update(u16 cardnr, u16 domain,
const struct cca_info *ci)
{
int found = 0;
struct cca_info_list_entry *ptr;
spin_lock_bh(&cca_info_list_lock);
list_for_each_entry(ptr, &cca_info_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
memcpy(&ptr->info, ci, sizeof(*ci));
found = 1;
break;
}
}
if (!found) {
ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
if (!ptr) {
spin_unlock_bh(&cca_info_list_lock);
return;
}
ptr->cardnr = cardnr;
ptr->domain = domain;
memcpy(&ptr->info, ci, sizeof(*ci));
list_add(&ptr->list, &cca_info_list);
}
spin_unlock_bh(&cca_info_list_lock);
}
static void cca_info_cache_scrub(u16 cardnr, u16 domain)
{
struct cca_info_list_entry *ptr;
spin_lock_bh(&cca_info_list_lock);
list_for_each_entry(ptr, &cca_info_list, list) {
if (ptr->cardnr == cardnr &&
ptr->domain == domain) {
list_del(&ptr->list);
kfree(ptr);
break;
}
}
spin_unlock_bh(&cca_info_list_lock);
}
static void __exit mkvp_cache_free(void)
{
struct cca_info_list_entry *ptr, *pnext;
spin_lock_bh(&cca_info_list_lock);
list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) {
list_del(&ptr->list);
kfree(ptr);
}
spin_unlock_bh(&cca_info_list_lock);
}
/*
* Fetch cca_info values via query_crypto_facility from adapter.
*/
static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
{
int rc, found = 0;
size_t rlen, vlen;
u8 *rarray, *varray, *pg;
struct zcrypt_device_status_ext devstat;
memset(ci, 0, sizeof(*ci));
/* get first info from zcrypt device driver about this apqn */
rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
if (rc)
return rc;
ci->hwtype = devstat.hwtype;
/* prep page for rule array and var array use */
pg = (u8 *)__get_free_page(GFP_KERNEL);
if (!pg)
return -ENOMEM;
rarray = pg;
varray = pg + PAGE_SIZE / 2;
rlen = vlen = PAGE_SIZE / 2;
/* QF for this card/domain */
rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) {
memcpy(ci->serial, rarray, 8);
ci->new_asym_mk_state = (char)rarray[4 * 8];
ci->cur_asym_mk_state = (char)rarray[5 * 8];
ci->old_asym_mk_state = (char)rarray[6 * 8];
if (ci->old_asym_mk_state == '2')
memcpy(ci->old_asym_mkvp, varray + 64, 16);
if (ci->cur_asym_mk_state == '2')
memcpy(ci->cur_asym_mkvp, varray + 84, 16);
if (ci->new_asym_mk_state == '3')
memcpy(ci->new_asym_mkvp, varray + 104, 16);
ci->new_aes_mk_state = (char)rarray[7 * 8];
ci->cur_aes_mk_state = (char)rarray[8 * 8];
ci->old_aes_mk_state = (char)rarray[9 * 8];
if (ci->old_aes_mk_state == '2')
memcpy(&ci->old_aes_mkvp, varray + 172, 8);
if (ci->cur_aes_mk_state == '2')
memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
if (ci->new_aes_mk_state == '3')
memcpy(&ci->new_aes_mkvp, varray + 196, 8);
found++;
}
if (!found)
goto out;
rlen = vlen = PAGE_SIZE / 2;
rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) {
ci->new_apka_mk_state = (char)rarray[10 * 8];
ci->cur_apka_mk_state = (char)rarray[11 * 8];
ci->old_apka_mk_state = (char)rarray[12 * 8];
if (ci->old_apka_mk_state == '2')
memcpy(&ci->old_apka_mkvp, varray + 208, 8);
if (ci->cur_apka_mk_state == '2')
memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
if (ci->new_apka_mk_state == '3')
memcpy(&ci->new_apka_mkvp, varray + 232, 8);
found++;
}
out:
free_page((unsigned long)pg);
return found == 2 ? 0 : -ENOENT;
}
/*
* Fetch cca information about a CCA queue.
*/
int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify)
{
int rc;
rc = cca_info_cache_fetch(card, dom, ci);
if (rc || verify) {
rc = fetch_cca_info(card, dom, ci);
if (rc == 0)
cca_info_cache_update(card, dom, ci);
}
return rc;
}
EXPORT_SYMBOL(cca_get_info);
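/*
 * Illustrative usage sketch (assumption, not taken from a real caller):
 * verify=0 is satisfied from the cache when possible, verify=1 forces a
 * fresh query_crypto_facility() round trip and refreshes the cache entry.
 *
 *	struct cca_info ci;
 *
 *	if (cca_get_info(card, dom, &ci, 0) == 0 &&
 *	    ci.cur_aes_mk_state == '2')
 *		pr_debug("%02x.%04x current AES mkvp 0x%016llx\n",
 *			 card, dom, (unsigned long long)ci.cur_aes_mkvp);
 */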
/*
* Search for a matching crypto card based on the
* Master Key Verification Pattern given.
*/
static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
int verify, int minhwtype)
{
struct zcrypt_device_status_ext *device_status;
u16 card, dom;
struct cca_info ci;
int i, rc, oi = -1;
/* mkvp must not be zero, minhwtype needs to be >= 0 */
if (mkvp == 0 || minhwtype < 0)
return -EINVAL;
/* fetch status of all crypto cards */
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
/* walk through all crypto cards */
for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
if (device_status[i].online &&
device_status[i].functions & 0x04) {
/* enabled CCA card, check current mkvp from cache */
if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
ci.hwtype >= minhwtype &&
ci.cur_aes_mk_state == '2' &&
ci.cur_aes_mkvp == mkvp) {
if (!verify)
break;
/* verify: refresh card info */
if (fetch_cca_info(card, dom, &ci) == 0) {
cca_info_cache_update(card, dom, &ci);
if (ci.hwtype >= minhwtype &&
ci.cur_aes_mk_state == '2' &&
ci.cur_aes_mkvp == mkvp)
break;
}
}
} else {
/* Card is offline and/or not a CCA card. */
/* del mkvp entry from cache if it exists */
cca_info_cache_scrub(card, dom);
}
}
if (i >= MAX_ZDEV_ENTRIES_EXT) {
/* nothing found, so this time without cache */
for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
if (!(device_status[i].online &&
device_status[i].functions & 0x04))
continue;
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
/* fresh fetch mkvp from adapter */
if (fetch_cca_info(card, dom, &ci) == 0) {
cca_info_cache_update(card, dom, &ci);
if (ci.hwtype >= minhwtype &&
ci.cur_aes_mk_state == '2' &&
ci.cur_aes_mkvp == mkvp)
break;
if (ci.hwtype >= minhwtype &&
ci.old_aes_mk_state == '2' &&
ci.old_aes_mkvp == mkvp &&
oi < 0)
oi = i;
}
}
if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
/* old mkvp matched, use this card then */
card = AP_QID_CARD(device_status[oi].qid);
dom = AP_QID_QUEUE(device_status[oi].qid);
}
}
if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
*pdomain = dom;
rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
} else {
rc = -ENODEV;
}
kvfree(device_status);
return rc;
}
/*
* Search for a matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key token.
*/
int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
{
u64 mkvp;
int minhwtype = 0;
const struct keytoken_header *hdr = (struct keytoken_header *)key;
if (hdr->type != TOKTYPE_CCA_INTERNAL)
return -EINVAL;
switch (hdr->version) {
case TOKVER_CCA_AES:
mkvp = ((struct secaeskeytoken *)key)->mkvp;
break;
case TOKVER_CCA_VLSC:
mkvp = ((struct cipherkeytoken *)key)->mkvp0;
minhwtype = AP_DEVICE_TYPE_CEX6;
break;
default:
return -EINVAL;
}
return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
}
EXPORT_SYMBOL(cca_findcard);
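/*
 * Illustrative usage sketch (assumption; 'seckey' stands for the caller's
 * CCA secure key token buffer): a negative return code means no matching
 * APQN was found, 0 means the current AES master key matched, and 1 means
 * only the old master key register matched.
 *
 *	u16 cardnr, domain;
 *	int rc;
 *
 *	rc = cca_findcard(seckey, &cardnr, &domain, 0);
 *	if (rc < 0)
 *		return rc;
 *	pr_debug("using APQN %02x.%04x (rc=%d)\n", cardnr, domain, rc);
 */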
int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
int verify)
{
struct zcrypt_device_status_ext *device_status;
u32 *_apqns = NULL, _nr_apqns = 0;
int i, card, dom, curmatch, oldmatch, rc = 0;
struct cca_info ci;
/* fetch status of all crypto cards */
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
/* allocate 1k space for up to 256 apqns */
_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!_apqns) {
kvfree(device_status);
return -ENOMEM;
}
	/* walk through all the crypto apqns */
for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
/* check online state */
if (!device_status[i].online)
continue;
/* check for cca functions */
if (!(device_status[i].functions & 0x04))
continue;
/* check cardnr */
if (cardnr != 0xFFFF && card != cardnr)
continue;
/* check domain */
if (domain != 0xFFFF && dom != domain)
continue;
/* get cca info on this apqn */
if (cca_get_info(card, dom, &ci, verify))
continue;
/* current master key needs to be valid */
if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
continue;
if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
continue;
/* check min hardware type */
if (minhwtype > 0 && minhwtype > ci.hwtype)
continue;
if (cur_mkvp || old_mkvp) {
/* check mkvps */
curmatch = oldmatch = 0;
if (mktype == AES_MK_SET) {
if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
curmatch = 1;
if (old_mkvp && ci.old_aes_mk_state == '2' &&
old_mkvp == ci.old_aes_mkvp)
oldmatch = 1;
} else {
if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
curmatch = 1;
if (old_mkvp && ci.old_apka_mk_state == '2' &&
old_mkvp == ci.old_apka_mkvp)
oldmatch = 1;
}
if (curmatch + oldmatch < 1)
continue;
}
		/* apqn passed all filtering criteria, add to the array */
if (_nr_apqns < 256)
_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
/* nothing found ? */
if (!_nr_apqns) {
kfree(_apqns);
rc = -ENODEV;
} else {
		/* no re-allocation, simply return the _apqns array */
*apqns = _apqns;
*nr_apqns = _nr_apqns;
rc = 0;
}
kvfree(device_status);
return rc;
}
EXPORT_SYMBOL(cca_findcard2);
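/*
 * Illustrative usage sketch (assumption; 'mkvp' stands for the master key
 * verification pattern to match): 0xFFFF acts as a wildcard for card and
 * domain. On success ownership of the allocated apqn array passes to the
 * caller, which releases it with kfree(); each entry carries the card id
 * in the upper and the domain id in the lower 16 bits.
 *
 *	u32 *apqns, nr_apqns;
 *	int rc;
 *
 *	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
 *			   0, AES_MK_SET, mkvp, 0, 0);
 *	if (rc == 0)
 *		kfree(apqns);
 */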
void __exit zcrypt_ccamisc_exit(void)
{
mkvp_cache_free();
}
| linux-master | drivers/s390/crypto/zcrypt_ccamisc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016, 2023
* Author(s): Martin Schwidefsky <[email protected]>
*
* Adjunct processor bus, queue related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include "ap_bus.h"
#include "ap_debug.h"
static void __ap_flush_queue(struct ap_queue *aq);
/*
* some AP queue helper functions
*/
static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
}
static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
}
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
* @aq: The AP queue
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
* value it waits a while and tests the AP queue if interrupts
* have been switched on using ap_test_queue().
*/
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
union ap_qirq_ctrl qirqctrl = { .value = 0 };
struct ap_queue_status status;
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
if (status.async)
return -EPERM;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid));
return -EOPNOTSUPP;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
default:
return -EBUSY;
}
}
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @msglen: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
int special)
{
if (special)
qid |= 0x400000UL;
return ap_nqap(qid, psmid, msg, msglen);
}
/* State machine definitions and helpers */
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
return AP_SM_WAIT_NONE;
}
/**
* ap_sm_recv(): Receive pending reply messages from an AP queue but do
* not change the state of the device.
* @aq: pointer to the AP queue
*
 * Returns the AP queue status as delivered by the DQAP invocation.
*/
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
bool found = false;
size_t reslen;
unsigned long resgr0 = 0;
int parts = 0;
/*
* DQAP loop until response code and resgr0 indicate that
* the msg is totally received. As we use the very same buffer
* the msg is overwritten with each invocation. That's intended
* and the receiver of the msg is informed with a msg rc code
* of EMSGSIZE in such a case.
*/
do {
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->msg, aq->reply->bufsize,
&aq->reply->len, &reslen, &resgr0);
parts++;
} while (status.response_code == 0xFF && resgr0 != 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (!status.queue_empty && !aq->queue_count)
aq->queue_count++;
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
list_for_each_entry(ap_msg, &aq->pendingq, list) {
if (ap_msg->psmid != aq->reply->psmid)
continue;
list_del_init(&ap_msg->list);
aq->pendingq_count--;
if (parts > 1) {
ap_msg->rc = -EMSGSIZE;
ap_msg->receive(aq, ap_msg, NULL);
} else {
ap_msg->receive(aq, ap_msg, aq->reply);
}
found = true;
break;
}
if (!found) {
AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
__func__, aq->reply->psmid,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
}
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
/* The card shouldn't forget requests but who knows. */
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
aq->pendingq_count = 0;
break;
default:
break;
}
return status;
}
/**
* ap_sm_read(): Receive pending reply messages from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
return AP_SM_WAIT_NONE;
status = ap_sm_recv(aq);
if (status.async)
return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
aq->sm_state = AP_SM_STATE_WORKING;
return AP_SM_WAIT_AGAIN;
}
aq->sm_state = AP_SM_STATE_IDLE;
return AP_SM_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
return aq->interrupt ?
AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
aq->sm_state = AP_SM_STATE_IDLE;
return AP_SM_WAIT_NONE;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
ap_qid_t qid = aq->qid;
if (aq->requestq_count <= 0)
return AP_SM_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
if (status.async)
return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count = max_t(int, 1, aq->queue_count + 1);
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
aq->requestq_count--;
aq->pendingq_count++;
if (aq->queue_count < aq->card->queue_depth) {
aq->sm_state = AP_SM_STATE_WORKING;
return AP_SM_WAIT_AGAIN;
}
fallthrough;
case AP_RESPONSE_Q_FULL:
aq->sm_state = AP_SM_STATE_QUEUE_FULL;
return aq->interrupt ?
AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
return AP_SM_WAIT_LOW_TIMEOUT;
case AP_RESPONSE_INVALID_DOMAIN:
AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
fallthrough;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(aq, ap_msg, NULL);
return AP_SM_WAIT_AGAIN;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/**
* ap_sm_read_write(): Send and receive messages to/from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
return min(ap_sm_read(aq), ap_sm_write(aq));
}
/**
* ap_sm_reset(): Reset an AP queue.
* @aq: The AP queue
*
* Submit the Reset command to an AP queue.
*/
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
status = ap_rapq(aq->qid, aq->rapq_fbit);
if (status.async)
return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->interrupt = false;
aq->rapq_fbit = 0;
return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/**
* ap_sm_reset_wait(): Test queue for completion of the reset operation
* @aq: pointer to the AP queue
*
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
*/
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
void *lsi_ptr;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
lsi_ptr = ap_airq_ptr();
if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
else
aq->sm_state = (aq->queue_count > 0) ?
AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
return AP_SM_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
return AP_SM_WAIT_LOW_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/**
* ap_sm_setirq_wait(): Test queue for completion of the irq enablement
* @aq: pointer to the AP queue
*
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
*/
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
if (status.irq_enabled == 1) {
/* Irqs are now enabled */
aq->interrupt = true;
aq->sm_state = (aq->queue_count > 0) ?
AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_SM_WAIT_AGAIN;
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/**
* ap_sm_assoc_wait(): Test queue for completion of a pending
* association request.
* @aq: pointer to the AP queue
*/
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_tapq_gr2 info;
status = ap_test_queue(aq->qid, 1, &info);
/* handle asynchronous error on this queue */
if (status.async && status.response_code) {
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
if (status.response_code > AP_RESPONSE_BUSY) {
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
/* check bs bits */
switch (info.bs) {
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
__func__, AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
/* association still pending */
return AP_SM_WAIT_LOW_TIMEOUT;
default:
/* reset from 'outside' happened or no idea at all */
aq->assoc_idx = ASSOC_IDX_INVALID;
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
__func__, info.bs,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
[AP_SM_STATE_RESET_START] = {
[AP_SM_EVENT_POLL] = ap_sm_reset,
[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_SM_STATE_RESET_WAIT] = {
[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_SM_STATE_SETIRQ_WAIT] = {
[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_SM_STATE_IDLE] = {
[AP_SM_EVENT_POLL] = ap_sm_write,
[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_SM_STATE_WORKING] = {
[AP_SM_EVENT_POLL] = ap_sm_read_write,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_SM_STATE_QUEUE_FULL] = {
[AP_SM_EVENT_POLL] = ap_sm_read,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_SM_STATE_ASSOC_WAIT] = {
[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
};
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
if (aq->config && !aq->chkstop &&
aq->dev_state > AP_DEV_STATE_UNINITIATED)
return ap_jumptable[aq->sm_state][event](aq);
else
return AP_SM_WAIT_NONE;
}
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
enum ap_sm_wait wait;
while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
;
return wait;
}
/*
* AP queue related attributes.
*/
static ssize_t request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
bool valid = false;
u64 req_cnt;
spin_lock_bh(&aq->lock);
if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
req_cnt = aq->total_request_count;
valid = true;
}
spin_unlock_bh(&aq->lock);
if (valid)
return sysfs_emit(buf, "%llu\n", req_cnt);
else
return sysfs_emit(buf, "-\n");
}
static ssize_t request_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
spin_lock_bh(&aq->lock);
aq->total_request_count = 0;
spin_unlock_bh(&aq->lock);
return count;
}
static DEVICE_ATTR_RW(request_count);
static ssize_t requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int reqq_cnt = 0;
spin_lock_bh(&aq->lock);
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
static ssize_t pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int penq_cnt = 0;
spin_lock_bh(&aq->lock);
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
static ssize_t reset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
switch (aq->sm_state) {
case AP_SM_STATE_RESET_START:
case AP_SM_STATE_RESET_WAIT:
rc = sysfs_emit(buf, "Reset in progress.\n");
break;
case AP_SM_STATE_WORKING:
case AP_SM_STATE_QUEUE_FULL:
rc = sysfs_emit(buf, "Reset Timer armed.\n");
break;
default:
rc = sysfs_emit(buf, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
}
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
aq->sm_state = AP_SM_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
__func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return count;
}
static DEVICE_ATTR_RW(reset);
static ssize_t interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
else if (aq->interrupt)
rc = sysfs_emit(buf, "Interrupts enabled.\n");
else
rc = sysfs_emit(buf, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR_RO(interrupt);
static ssize_t config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc;
spin_lock_bh(&aq->lock);
rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR_RO(config);
static ssize_t chkstop_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc;
spin_lock_bh(&aq->lock);
rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR_RO(chkstop);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
struct ap_tapq_gr2 info;
status = ap_test_queue(aq->qid, 1, &info);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
return sysfs_emit(buf, "0x%08X\n", info.fac);
}
static DEVICE_ATTR_RO(ap_functions);
#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
/* queue device state */
switch (aq->dev_state) {
case AP_DEV_STATE_UNINITIATED:
rc = sysfs_emit(buf, "UNINITIATED\n");
break;
case AP_DEV_STATE_OPERATING:
rc = sysfs_emit(buf, "OPERATING");
break;
case AP_DEV_STATE_SHUTDOWN:
rc = sysfs_emit(buf, "SHUTDOWN");
break;
case AP_DEV_STATE_ERROR:
rc = sysfs_emit(buf, "ERROR");
break;
default:
rc = sysfs_emit(buf, "UNKNOWN");
}
/* state machine state */
if (aq->dev_state) {
switch (aq->sm_state) {
case AP_SM_STATE_RESET_START:
rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
break;
case AP_SM_STATE_RESET_WAIT:
rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
break;
case AP_SM_STATE_SETIRQ_WAIT:
rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
break;
case AP_SM_STATE_IDLE:
rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
break;
case AP_SM_STATE_WORKING:
rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
break;
case AP_SM_STATE_QUEUE_FULL:
rc += sysfs_emit_at(buf, rc, " [FULL]\n");
break;
case AP_SM_STATE_ASSOC_WAIT:
rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
break;
default:
rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
}
}
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR_RO(states);
static ssize_t last_err_rc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc;
spin_lock_bh(&aq->lock);
rc = aq->last_err_rc;
spin_unlock_bh(&aq->lock);
switch (rc) {
case AP_RESPONSE_NORMAL:
return sysfs_emit(buf, "NORMAL\n");
case AP_RESPONSE_Q_NOT_AVAIL:
return sysfs_emit(buf, "Q_NOT_AVAIL\n");
case AP_RESPONSE_RESET_IN_PROGRESS:
return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
case AP_RESPONSE_DECONFIGURED:
return sysfs_emit(buf, "DECONFIGURED\n");
case AP_RESPONSE_CHECKSTOPPED:
return sysfs_emit(buf, "CHECKSTOPPED\n");
case AP_RESPONSE_BUSY:
return sysfs_emit(buf, "BUSY\n");
case AP_RESPONSE_INVALID_ADDRESS:
return sysfs_emit(buf, "INVALID_ADDRESS\n");
case AP_RESPONSE_OTHERWISE_CHANGED:
return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
case AP_RESPONSE_Q_FULL:
return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
case AP_RESPONSE_INDEX_TOO_BIG:
return sysfs_emit(buf, "INDEX_TOO_BIG\n");
case AP_RESPONSE_NO_FIRST_PART:
return sysfs_emit(buf, "NO_FIRST_PART\n");
case AP_RESPONSE_MESSAGE_TOO_BIG:
return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
case AP_RESPONSE_REQ_FAC_NOT_INST:
return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
default:
return sysfs_emit(buf, "response code %d\n", rc);
}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
&dev_attr_config.attr,
&dev_attr_chkstop.attr,
&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
&dev_attr_states.attr,
&dev_attr_last_err_rc.attr,
#endif
NULL
};
static struct attribute_group ap_queue_dev_attr_group = {
.attrs = ap_queue_dev_attrs
};
static const struct attribute_group *ap_queue_dev_attr_groups[] = {
&ap_queue_dev_attr_group,
NULL
};
static struct device_type ap_queue_type = {
.name = "ap_queue",
.groups = ap_queue_dev_attr_groups,
};
static ssize_t se_bind_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
struct ap_tapq_gr2 info;
if (!ap_q_supports_bind(aq))
return sysfs_emit(buf, "-\n");
status = ap_test_queue(aq->qid, 1, &info);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
switch (info.bs) {
case AP_BS_Q_USABLE:
case AP_BS_Q_USABLE_NO_SECURE_KEY:
return sysfs_emit(buf, "bound\n");
default:
return sysfs_emit(buf, "unbound\n");
}
}
static ssize_t se_bind_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
bool value;
int rc;
if (!ap_q_supports_bind(aq))
return -EINVAL;
/* only 0 (unbind) and 1 (bind) allowed */
rc = kstrtobool(buf, &value);
if (rc)
return rc;
if (value) {
/* bind, do BAPQ */
spin_lock_bh(&aq->lock);
if (aq->sm_state < AP_SM_STATE_IDLE) {
spin_unlock_bh(&aq->lock);
return -EBUSY;
}
status = ap_bapq(aq->qid);
spin_unlock_bh(&aq->lock);
if (status.response_code) {
AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid));
return -EIO;
}
} else {
/* unbind, set F bit arg and trigger RAPQ */
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
aq->rapq_fbit = 1;
aq->assoc_idx = ASSOC_IDX_INVALID;
aq->sm_state = AP_SM_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
return count;
}
static DEVICE_ATTR_RW(se_bind);
static ssize_t se_associate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
struct ap_tapq_gr2 info;
if (!ap_q_supports_assoc(aq))
return sysfs_emit(buf, "-\n");
status = ap_test_queue(aq->qid, 1, &info);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
switch (info.bs) {
case AP_BS_Q_USABLE:
if (aq->assoc_idx == ASSOC_IDX_INVALID) {
AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
return -EIO;
}
return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
case AP_BS_Q_USABLE_NO_SECURE_KEY:
if (aq->assoc_idx != ASSOC_IDX_INVALID)
return sysfs_emit(buf, "association pending\n");
fallthrough;
default:
return sysfs_emit(buf, "unassociated\n");
}
}
static ssize_t se_associate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
unsigned int value;
int rc;
if (!ap_q_supports_assoc(aq))
return -EINVAL;
/* association index needs to be >= 0 */
rc = kstrtouint(buf, 0, &value);
if (rc)
return rc;
if (value >= ASSOC_IDX_INVALID)
return -EINVAL;
spin_lock_bh(&aq->lock);
/* sm should be in idle state */
if (aq->sm_state != AP_SM_STATE_IDLE) {
spin_unlock_bh(&aq->lock);
return -EBUSY;
}
/* already associated or association pending ? */
if (aq->assoc_idx != ASSOC_IDX_INVALID) {
spin_unlock_bh(&aq->lock);
return -EINVAL;
}
/* trigger the asynchronous association request */
status = ap_aapq(aq->qid, value);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
aq->assoc_idx = value;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
break;
default:
spin_unlock_bh(&aq->lock);
AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
return count;
}
static DEVICE_ATTR_RW(se_associate);
static struct attribute *ap_queue_dev_sb_attrs[] = {
&dev_attr_se_bind.attr,
&dev_attr_se_associate.attr,
NULL
};
static struct attribute_group ap_queue_dev_sb_attr_group = {
.attrs = ap_queue_dev_sb_attrs
};
static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
&ap_queue_dev_sb_attr_group,
NULL
};
static void ap_queue_device_release(struct device *dev)
{
struct ap_queue *aq = to_ap_queue(dev);
spin_lock_bh(&ap_queues_lock);
hash_del(&aq->hnode);
spin_unlock_bh(&ap_queues_lock);
kfree(aq);
}
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
struct ap_queue *aq;
aq = kzalloc(sizeof(*aq), GFP_KERNEL);
if (!aq)
return NULL;
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
// add optional SE secure binding attributes group
if (ap_sb_available() && is_prot_virt_guest())
aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
aq->interrupt = false;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
timer_setup(&aq->timeout, ap_request_timeout, 0);
return aq;
}
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
aq->reply = reply;
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
/**
* ap_queue_message(): Queue a request to an AP device.
* @aq: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
int rc = 0;
/* msg needs to have a valid receive-callback */
BUG_ON(!ap_msg->receive);
spin_lock_bh(&aq->lock);
	/* only allow queueing new messages if the device state is ok */
if (aq->dev_state == AP_DEV_STATE_OPERATING) {
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
atomic64_inc(&aq->card->total_request_count);
} else {
rc = -ENODEV;
}
	/* Send/receive as many requests from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
return rc;
}
EXPORT_SYMBOL(ap_queue_message);
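/*
 * Illustrative usage sketch (assumption, condensed from the zcrypt message
 * type modules; identifiers are placeholders): the caller prepares an
 * ap_message with a unique psmid and a receive() callback before queueing
 * it. The callback runs from ap_sm_recv() once the matching reply arrives;
 * a NULL reply pointer together with a negative ap_msg->rc signals failure
 * or a flushed queue.
 *
 *	static void example_receive(struct ap_queue *aq,
 *				    struct ap_message *msg,
 *				    struct ap_message *reply)
 *	{
 *		...	complete the waiting requester here
 *	}
 *
 *	ap_msg->psmid = unique_psmid;
 *	ap_msg->receive = example_receive;
 *	rc = ap_queue_message(aq, ap_msg);
 */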
/**
* ap_cancel_message(): Cancel a crypto request.
* @aq: The AP device that has the message queued
* @ap_msg: The message that is to be removed
*
* Cancel a crypto request. This is done by removing the request
* from the device pending or request queue. Note that the
* request stays on the AP queue. When it finishes the message
* reply will be discarded because the psmid can't be found.
*/
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
struct ap_message *tmp;
spin_lock_bh(&aq->lock);
if (!list_empty(&ap_msg->list)) {
list_for_each_entry(tmp, &aq->pendingq, list)
if (tmp->psmid == ap_msg->psmid) {
aq->pendingq_count--;
goto found;
}
aq->requestq_count--;
found:
list_del_init(&ap_msg->list);
}
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/**
* __ap_flush_queue(): Flush requests.
* @aq: Pointer to the AP queue
*
* Flush all requests from the request/pending queue of an AP device.
*/
static void __ap_flush_queue(struct ap_queue *aq)
{
struct ap_message *ap_msg, *next;
list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
aq->queue_count = 0;
}
void ap_flush_queue(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
void ap_queue_prepare_remove(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
/* flush queue */
__ap_flush_queue(aq);
/* move queue device state to SHUTDOWN in progress */
aq->dev_state = AP_DEV_STATE_SHUTDOWN;
spin_unlock_bh(&aq->lock);
del_timer_sync(&aq->timeout);
}
void ap_queue_remove(struct ap_queue *aq)
{
/*
* all messages have been flushed and the device state
* is SHUTDOWN. Now reset with zero which also clears
* the irq registration and move the device state
* to the initial value AP_DEV_STATE_UNINITIATED.
*/
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid, 0);
aq->dev_state = AP_DEV_STATE_UNINITIATED;
spin_unlock_bh(&aq->lock);
}
void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
aq->last_err_rc = 0;
aq->assoc_idx = ASSOC_IDX_INVALID;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
| linux-master | drivers/s390/crypto/ap_queue.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
* Cornelia Huck <[email protected]>
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* MSGTYPE restruct: Holger Dengler <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
* Device attributes common for all crypto queue devices.
*/
static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
struct ap_queue *aq = to_ap_queue(dev);
int online = aq->config && zq->online ? 1 : 0;
return sysfs_emit(buf, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
struct ap_queue *aq = to_ap_queue(dev);
struct zcrypt_card *zc = zq->zcard;
int online;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
if (online && (!aq->config || !aq->card->config))
return -ENODEV;
if (online && !zc->online)
return -EINVAL;
zq->online = online;
ZCRYPT_DBF_INFO("%s queue=%02x.%04x online=%d\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), online);
ap_send_online_uevent(&aq->ap_dev, online);
if (!online)
ap_flush_queue(zq->queue);
return count;
}
static DEVICE_ATTR_RW(online);
static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", atomic_read(&zq->load));
}
static DEVICE_ATTR_RO(load);
static struct attribute *zcrypt_queue_attrs[] = {
&dev_attr_online.attr,
&dev_attr_load.attr,
NULL,
};
static const struct attribute_group zcrypt_queue_attr_group = {
.attrs = zcrypt_queue_attrs,
};
bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
{
if (!!zq->online != !!online) {
zq->online = online;
if (!online)
ap_flush_queue(zq->queue);
return true;
}
return false;
}
struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size)
{
struct zcrypt_queue *zq;
zq = kzalloc(sizeof(*zq), GFP_KERNEL);
if (!zq)
return NULL;
zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);
if (!zq->reply.msg)
goto out_free;
zq->reply.bufsize = reply_buf_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
out_free:
kfree(zq);
return NULL;
}
EXPORT_SYMBOL(zcrypt_queue_alloc);
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
kfree(zq->reply.msg);
kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
static void zcrypt_queue_release(struct kref *kref)
{
struct zcrypt_queue *zq =
container_of(kref, struct zcrypt_queue, refcount);
zcrypt_queue_free(zq);
}
void zcrypt_queue_get(struct zcrypt_queue *zq)
{
kref_get(&zq->refcount);
}
EXPORT_SYMBOL(zcrypt_queue_get);
int zcrypt_queue_put(struct zcrypt_queue *zq)
{
return kref_put(&zq->refcount, zcrypt_queue_release);
}
EXPORT_SYMBOL(zcrypt_queue_put);
/**
* zcrypt_queue_register() - Register a crypto queue device.
* @zq: Pointer to a crypto queue device
*
* Register a crypto queue device. Returns 0 if successful.
*/
int zcrypt_queue_register(struct zcrypt_queue *zq)
{
struct zcrypt_card *zc;
int rc;
spin_lock(&zcrypt_list_lock);
zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
zcrypt_card_get(zc);
zq->zcard = zc;
zq->online = 1; /* New devices are online by default. */
ZCRYPT_DBF_INFO("%s queue=%02x.%04x register online=1\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid));
list_add_tail(&zq->list, &zc->zqueues);
spin_unlock(&zcrypt_list_lock);
rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
if (rc)
goto out;
if (zq->ops->rng) {
rc = zcrypt_rng_device_add();
if (rc)
goto out_unregister;
}
return 0;
out_unregister:
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
out:
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
spin_unlock(&zcrypt_list_lock);
zcrypt_card_put(zc);
return rc;
}
EXPORT_SYMBOL(zcrypt_queue_register);
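/*
 * Illustrative lifecycle sketch (assumption, condensed from the zcrypt card
 * drivers; 'aq' and 'msgtype_ops' are placeholders): a driver allocates the
 * queue with a reply buffer, wires it to the underlying AP queue and to a
 * message type ops table, and registers it; on probe failure it frees the
 * queue again, on remove it unregisters it (which drops the reference).
 *
 *	zq = zcrypt_queue_alloc(reply_bufsize);
 *	if (!zq)
 *		return -ENOMEM;
 *	zq->queue = aq;
 *	zq->ops = msgtype_ops;
 *	rc = zcrypt_queue_register(zq);
 *	if (rc)
 *		zcrypt_queue_free(zq);
 *	...
 *	zcrypt_queue_unregister(zq);
 */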
/**
* zcrypt_queue_unregister(): Unregister a crypto queue device.
* @zq: Pointer to crypto queue device
*
* Unregister a crypto queue device.
*/
void zcrypt_queue_unregister(struct zcrypt_queue *zq)
{
struct zcrypt_card *zc;
ZCRYPT_DBF_INFO("%s queue=%02x.%04x unregister\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid));
zc = zq->zcard;
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
spin_unlock(&zcrypt_list_lock);
if (zq->ops->rng)
zcrypt_rng_device_remove();
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
zcrypt_card_put(zc);
zcrypt_queue_put(zq);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
| linux-master | drivers/s390/crypto/zcrypt_queue.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2023
* Author(s): Cornelia Huck <[email protected]>
* Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* Felix Beck <[email protected]>
* Holger Dengler <[email protected]>
* Harald Freudenberger <[email protected]>
*
* Adjunct processor bus.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>
#include <linux/mod_devicetable.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include "ap_bus.h"
#include "ap_debug.h"
/*
 * Module parameters; note, though, that this file itself isn't modular.
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, 0440);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);
static int ap_thread_flag;
module_param_named(poll_thread, ap_thread_flag, int, 0440);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
static char *apm_str;
module_param_named(apmask, apm_str, charp, 0440);
MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
static int ap_useirq = 1;
module_param_named(useirq, ap_useirq, int, 0440);
MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");
atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
EXPORT_SYMBOL(ap_max_msg_size);
static struct device *ap_root_device;
/* Hashtable of all queue devices on the AP bus */
DEFINE_HASHTABLE(ap_queues, 8);
/* lock used for the ap_queues hashtable */
DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
/* # of bus scans since init */
static atomic64_t ap_scan_bus_count;
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
/* completion for initial APQN bindings complete */
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
static struct ap_config_info *ap_qci_info;
static struct ap_config_info *ap_qci_info_old;
/*
* AP bus related debug feature things.
*/
debug_info_t *ap_dbf_info;
/*
* Workqueue timer for bus rescan.
*/
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static void ap_scan_bus(struct work_struct *);
static DECLARE_WORK(ap_scan_work, ap_scan_bus);
/*
* Tasklet & timer for AP request polling and interrupts
*/
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;
/*
* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
*/
static unsigned long poll_high_timeout = 250000UL;
/*
* Some state machine states only require a low frequency polling.
* We use 25 Hz frequency for these.
*/
static unsigned long poll_low_timeout = 40000000UL;
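/*
 * Quick check on the two timeouts above: frequency = 1e9 / period in ns,
 * so 250000 ns corresponds to 1000000000 / 250000 = 4000 Hz (4 kHz) and
 * 40000000 ns corresponds to 1000000000 / 40000000 = 25 Hz.
 */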
/* Maximum domain id, if not given via qci */
static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
struct tpi_info *tpi_info);
static bool ap_irq_flag;
static struct airq_struct ap_airq = {
.handler = ap_interrupt_handler,
.isc = AP_ISC,
};
/**
* ap_airq_ptr() - Get the address of the adapter interrupt indicator
*
* Returns the address of the local-summary-indicator of the adapter
* interrupt handler for AP, or NULL if adapter interrupts are not
* available.
*/
void *ap_airq_ptr(void)
{
if (ap_irq_flag)
return ap_airq.lsi_ptr;
return NULL;
}
/**
* ap_interrupts_available(): Test if AP interrupts are available.
*
* Returns 1 if AP interrupts are available.
*/
static int ap_interrupts_available(void)
{
return test_facility(65);
}
/**
* ap_qci_available(): Test if AP configuration
* information can be queried via QCI subfunction.
*
* Returns 1 if subfunction PQAP(QCI) is available.
*/
static int ap_qci_available(void)
{
return test_facility(12);
}
/**
* ap_apft_available(): Test if AP facilities test (APFT)
* facility is available.
*
* Returns 1 if APFT is available.
*/
static int ap_apft_available(void)
{
return test_facility(15);
}
/*
* ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
*
* Returns 1 if the QACT subfunction is available.
*/
static inline int ap_qact_available(void)
{
if (ap_qci_info)
return ap_qci_info->qact;
return 0;
}
/*
* ap_sb_available(): Test if the AP secure binding facility is available.
*
* Returns 1 if secure binding facility is available.
*/
int ap_sb_available(void)
{
if (ap_qci_info)
return ap_qci_info->apsb;
return 0;
}
/*
* ap_is_se_guest(): Check for SE guest with AP pass-through support.
*/
bool ap_is_se_guest(void)
{
return is_prot_virt_guest() && ap_sb_available();
}
EXPORT_SYMBOL(ap_is_se_guest);
/*
* ap_fetch_qci_info(): Fetch cryptographic config info
*
* Returns the ap configuration info fetched via PQAP(QCI).
* On success 0 is returned, on failure a negative errno
* is returned, e.g. if the PQAP(QCI) instruction is not
* available, the return value will be -EOPNOTSUPP.
*/
static inline int ap_fetch_qci_info(struct ap_config_info *info)
{
if (!ap_qci_available())
return -EOPNOTSUPP;
if (!info)
return -EINVAL;
return ap_qci(info);
}
/**
* ap_init_qci_info(): Allocate and query qci config info.
* Does also update the static variables ap_max_domain_id
* and ap_max_adapter_id if this info is available.
*/
static void __init ap_init_qci_info(void)
{
if (!ap_qci_available()) {
AP_DBF_INFO("%s QCI not supported\n", __func__);
return;
}
ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
if (!ap_qci_info)
return;
ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
if (!ap_qci_info_old) {
kfree(ap_qci_info);
ap_qci_info = NULL;
return;
}
if (ap_fetch_qci_info(ap_qci_info) != 0) {
kfree(ap_qci_info);
kfree(ap_qci_info_old);
ap_qci_info = NULL;
ap_qci_info_old = NULL;
return;
}
AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
if (ap_qci_info->apxa) {
if (ap_qci_info->na) {
ap_max_adapter_id = ap_qci_info->na;
AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
__func__, ap_max_adapter_id);
}
if (ap_qci_info->nd) {
ap_max_domain_id = ap_qci_info->nd;
AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
__func__, ap_max_domain_id);
}
}
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
}
/*
 * ap_test_config(): helper function to extract the nr-th bit
 * from the unsigned int array field.
*/
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}
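/*
 * Worked example for the index arithmetic above (values chosen for
 * illustration): for nr = 70 the word index is 70 >> 5 = 2 and the bit
 * index within that word is 70 & 0x1f = 6, so the call evaluates
 * ap_test_bit(field + 2, 6).
 */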
/*
* ap_test_config_card_id(): Test, whether an AP card ID is configured.
*
* Returns 0 if the card is not configured
* 1 if the card is configured or
* if the configuration information is not available
*/
static inline int ap_test_config_card_id(unsigned int id)
{
if (id > ap_max_adapter_id)
return 0;
if (ap_qci_info)
return ap_test_config(ap_qci_info->apm, id);
return 1;
}
/*
* ap_test_config_usage_domain(): Test, whether an AP usage domain
* is configured.
*
* Returns 0 if the usage domain is not configured
* 1 if the usage domain is configured or
* if the configuration information is not available
*/
int ap_test_config_usage_domain(unsigned int domain)
{
if (domain > ap_max_domain_id)
return 0;
if (ap_qci_info)
return ap_test_config(ap_qci_info->aqm, domain);
return 1;
}
EXPORT_SYMBOL(ap_test_config_usage_domain);
/*
* ap_test_config_ctrl_domain(): Test, whether an AP control domain
* is configured.
* @domain AP control domain ID
*
* Returns 1 if the control domain is configured
* 0 in all other cases
*/
int ap_test_config_ctrl_domain(unsigned int domain)
{
if (!ap_qci_info || domain > ap_max_domain_id)
return 0;
return ap_test_config(ap_qci_info->adm, domain);
}
EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/*
* ap_queue_info(): Check and get AP queue info.
 * Returns: 1 if APQN exists and info is filled,
 *	    0 if APQN seems to exist but there is no info
 *	      available (e.g. caused by an async pending error),
 *	   -1 if the APQN is invalid, on TAPQ error or on an AP queue
 *	      status which indicates there is no APQN.
*/
static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
{
struct ap_queue_status status;
struct ap_tapq_gr2 tapq_info;
tapq_info.value = 0;
	/* make sure we don't run into a specification exception */
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
AP_QID_QUEUE(qid) > ap_max_domain_id)
return -1;
/* call TAPQ on this APQN */
status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
/* handle pending async error with return 'no info available' */
if (status.async)
return 0;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_BUSY:
/*
		 * According to the architecture, in all these cases the
		 * info should be filled. A value with all bits zero is not
		 * possible, as at least one of the mode bits is set.
*/
if (WARN_ON_ONCE(!tapq_info.value))
return 0;
*q_type = tapq_info.at;
*q_fac = tapq_info.fac;
*q_depth = tapq_info.qd;
*q_ml = tapq_info.ml;
*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
return 1;
default:
/*
		 * A response code which indicates that there is no info available.
*/
return -1;
}
}
void ap_wait(enum ap_sm_wait wait)
{
ktime_t hr_time;
switch (wait) {
case AP_SM_WAIT_AGAIN:
case AP_SM_WAIT_INTERRUPT:
if (ap_irq_flag)
break;
if (ap_poll_kthread) {
wake_up(&ap_poll_wait);
break;
}
fallthrough;
case AP_SM_WAIT_LOW_TIMEOUT:
case AP_SM_WAIT_HIGH_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
hr_time =
wait == AP_SM_WAIT_LOW_TIMEOUT ?
poll_low_timeout : poll_high_timeout;
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
spin_unlock_bh(&ap_poll_timer_lock);
break;
case AP_SM_WAIT_NONE:
default:
break;
}
}
/**
* ap_request_timeout(): Handling of request timeouts
* @t: timer making this callback
*
* Handles request timeouts.
*/
void ap_request_timeout(struct timer_list *t)
{
struct ap_queue *aq = from_timer(aq, t, timeout);
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
spin_unlock_bh(&aq->lock);
}
/**
* ap_poll_timeout(): AP receive polling for finished AP requests.
* @unused: Unused pointer.
*
* Schedules the AP tasklet using a high resolution timer.
*/
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
tasklet_schedule(&ap_tasklet);
return HRTIMER_NORESTART;
}
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
* @tpi_info: ignored
*/
static void ap_interrupt_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
inc_irq_stat(IRQIO_APB);
tasklet_schedule(&ap_tasklet);
}
/**
* ap_tasklet_fn(): Tasklet to poll all AP devices.
* @dummy: Unused variable
*
* Poll all AP devices on the bus.
*/
static void ap_tasklet_fn(unsigned long dummy)
{
int bkt;
struct ap_queue *aq;
enum ap_sm_wait wait = AP_SM_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
* be received. Doing this at the beginning of the tasklet ensures
* that no requests on any AP get lost.
*/
if (ap_irq_flag)
xchg(ap_airq.lsi_ptr, 0);
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
spin_lock_bh(&aq->lock);
wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
spin_unlock_bh(&ap_queues_lock);
ap_wait(wait);
}
static int ap_pending_requests(void)
{
int bkt;
struct ap_queue *aq;
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
if (aq->queue_count == 0)
continue;
spin_unlock_bh(&ap_queues_lock);
return 1;
}
spin_unlock_bh(&ap_queues_lock);
return 0;
}
/**
* ap_poll_thread(): Thread that polls for finished requests.
* @data: Unused pointer
*
* AP bus poll thread. The purpose of this thread is to poll for
* finished requests in a loop if there is a "free" cpu - that is
* a cpu that doesn't have anything better to do. The polling stops
* as soon as there is another task or if all messages have been
* delivered.
*/
static int ap_poll_thread(void *data)
{
DECLARE_WAITQUEUE(wait, current);
set_user_nice(current, MAX_NICE);
set_freezable();
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
if (!ap_pending_requests()) {
schedule();
try_to_freeze();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&ap_poll_wait, &wait);
if (need_resched()) {
schedule();
try_to_freeze();
continue;
}
ap_tasklet_fn(0);
}
return 0;
}
static int ap_poll_thread_start(void)
{
int rc;
if (ap_irq_flag || ap_poll_kthread)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
if (rc)
ap_poll_kthread = NULL;
mutex_unlock(&ap_poll_thread_mutex);
return rc;
}
static void ap_poll_thread_stop(void)
{
if (!ap_poll_kthread)
return;
mutex_lock(&ap_poll_thread_mutex);
kthread_stop(ap_poll_kthread);
ap_poll_kthread = NULL;
mutex_unlock(&ap_poll_thread_mutex);
}
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
/**
* ap_bus_match()
* @dev: Pointer to device
* @drv: Pointer to device_driver
*
* AP bus match function. Checks whether the device type of the given
* AP card or queue device is supported by the driver.
*/
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/*
* Compare device type of the device with the list of
* supported types of the device_driver.
*/
for (id = ap_drv->ids; id->match_flags; id++) {
if (is_card_dev(dev) &&
id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
id->dev_type == to_ap_dev(dev)->device_type)
return 1;
if (is_queue_dev(dev) &&
id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
id->dev_type == to_ap_dev(dev)->device_type)
return 1;
}
return 0;
}
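/*
* Illustrative sketch (not part of this driver): an AP device driver
* advertises the device types it supports via an ap_device_id table,
* which ap_bus_match() walks for every card and queue device, e.g.:
*
*   static const struct ap_device_id my_card_ids[] = {
*           { .dev_type = AP_DEVICE_TYPE_CEX4,
*             .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
*           { }
*   };
*
* The table is terminated by an all-zero entry (match_flags == 0).
*/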
/**
* ap_uevent(): Uevent function for AP devices.
* @dev: Pointer to device
* @env: Pointer to kobj_uevent_env
*
* For card devices it sets up the DEV_TYPE and MODALIAS environment
* variables; for card and queue devices it adds a MODE variable
* reflecting the crypto mode (accel, cca or ep11).
*/
static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int rc = 0;
const struct ap_device *ap_dev = to_ap_dev(dev);
/* Uevents from ap bus core don't need extensions to the env */
if (dev == ap_root_device)
return 0;
if (is_card_dev(dev)) {
struct ap_card *ac = to_ap_card(&ap_dev->device);
/* Set up DEV_TYPE environment variable. */
rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
if (rc)
return rc;
/* Add MODALIAS= */
rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
if (rc)
return rc;
/* Add MODE=<accel|cca|ep11> */
if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
rc = add_uevent_var(env, "MODE=accel");
else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
rc = add_uevent_var(env, "MODE=cca");
else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
} else {
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Add MODE=<accel|cca|ep11> */
if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
rc = add_uevent_var(env, "MODE=accel");
else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
rc = add_uevent_var(env, "MODE=cca");
else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
}
return 0;
}
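/*
* Illustrative example (hypothetical device): for a card device of
* hardware type 0x0E operating in CCA mode, the code above would add
*   DEV_TYPE=000E
*   MODALIAS=ap:t0E
*   MODE=cca
* to the uevent environment; queue devices only get the MODE variable.
*/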
static void ap_send_init_scan_done_uevent(void)
{
char *envp[] = { "INITSCAN=done", NULL };
kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
static void ap_send_bindings_complete_uevent(void)
{
char buf[32];
char *envp[] = { "BINDINGS=complete", buf, NULL };
snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
atomic64_inc_return(&ap_bindings_complete_count));
kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
char buf[16];
char *envp[] = { buf, NULL };
snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_config_uevent);
void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
char buf[16];
char *envp[] = { buf, NULL };
snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_online_uevent);
static void ap_send_mask_changed_uevent(unsigned long *newapm,
unsigned long *newaqm)
{
char buf[100];
char *envp[] = { buf, NULL };
if (newapm)
snprintf(buf, sizeof(buf),
"APMASK=0x%016lx%016lx%016lx%016lx\n",
newapm[0], newapm[1], newapm[2], newapm[3]);
else
snprintf(buf, sizeof(buf),
"AQMASK=0x%016lx%016lx%016lx%016lx\n",
newaqm[0], newaqm[1], newaqm[2], newaqm[3]);
kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
/*
* calc # of bound APQNs
*/
struct __ap_calc_ctrs {
unsigned int apqns;
unsigned int bound;
};
static int __ap_calc_helper(struct device *dev, void *arg)
{
struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
if (is_queue_dev(dev)) {
pctrs->apqns++;
if (dev->driver)
pctrs->bound++;
}
return 0;
}
static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
{
struct __ap_calc_ctrs ctrs;
memset(&ctrs, 0, sizeof(ctrs));
bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
*apqns = ctrs.apqns;
*bound = ctrs.bound;
}
/*
* After the initial AP bus scan, check whether all existing APQNs are
* bound to device drivers.
*/
static void ap_check_bindings_complete(void)
{
unsigned int apqns, bound;
if (atomic64_read(&ap_scan_bus_count) >= 1) {
ap_calc_bound_apqns(&apqns, &bound);
if (bound == apqns) {
if (!completion_done(&ap_init_apqn_bindings_complete)) {
complete_all(&ap_init_apqn_bindings_complete);
AP_DBF_INFO("%s complete\n", __func__);
}
ap_send_bindings_complete_uevent();
}
}
}
/*
* Interface to wait until the AP bus has done one initial bus scan
* and all detected APQNs have been bound to device drivers.
* If both conditions are not yet fulfilled, this function blocks
* on a condition with wait_for_completion_interruptible_timeout().
* If both conditions are fulfilled (before the timeout hits),
* the return value is 0. If the timeout (in jiffies) hits instead,
* -ETIME is returned. On failures, negative return values are
* returned to the caller.
*/
int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
{
long l;
if (completion_done(&ap_init_apqn_bindings_complete))
return 0;
if (timeout)
l = wait_for_completion_interruptible_timeout(
&ap_init_apqn_bindings_complete, timeout);
else
l = wait_for_completion_interruptible(
&ap_init_apqn_bindings_complete);
if (l < 0)
return l == -ERESTARTSYS ? -EINTR : l;
else if (l == 0 && timeout)
return -ETIME;
return 0;
}
EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
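/*
* Illustrative usage sketch (hypothetical caller): a consumer that must
* not issue requests before all initially detected APQNs are bound
* could wait with a timeout like this:
*
*   if (ap_wait_init_apqn_bindings_complete(msecs_to_jiffies(60 * 1000)))
*           return -ENODEV;
*
* where the error handling shown is purely an example.
*/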
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
if (is_queue_dev(dev) &&
AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
device_unregister(dev);
return 0;
}
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
int rc, card, queue, devres, drvres;
if (is_queue_dev(dev)) {
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
mutex_lock(&ap_perms_mutex);
devres = test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm);
mutex_unlock(&ap_perms_mutex);
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
__func__, card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
__func__, card, queue);
}
}
return 0;
}
static void ap_bus_revise_bindings(void)
{
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
/**
* ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
* default host driver or not.
* @card: the APID of the adapter card to check
* @queue: the APQI of the queue to check
*
* Note: the ap_perms_mutex must be locked by the caller of this function.
*
* Return: 1 if the APQN is reserved for the host (default) drivers, 0 if not,
* or -EINVAL if @card or @queue is out of range.
*/
int ap_owned_by_def_drv(int card, int queue)
{
int rc = 0;
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
/**
* ap_apqn_in_matrix_owned_by_def_drv: indicates whether any APQN contained in
* a set is reserved for the host drivers
* or not.
* @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
* @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
*
* Note: the ap_perms_mutex must be locked by the caller of this function.
*
* Return: an int specifying whether at least one APQN of the set is reserved
* for the host (1) or none is (0)
*/
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
for (card = 0; !rc && card < AP_DEVICES; card++)
if (test_bit_inv(card, apm) &&
test_bit_inv(card, ap_perms.apm))
for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
if (test_bit_inv(queue, aqm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
int card, queue, devres, drvres, rc = -ENODEV;
if (!get_device(dev))
return rc;
if (is_queue_dev(dev)) {
/*
* If the apqn is marked as reserved/used by ap bus and
* default drivers, only probe with drivers with the default
* flag set. If it is not marked, only probe with drivers
* with the default flag not set.
*/
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
mutex_lock(&ap_perms_mutex);
devres = test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm);
mutex_unlock(&ap_perms_mutex);
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
goto out;
}
/* Add queue/card to list of active queues/cards */
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
hash_add(ap_queues, &to_ap_queue(dev)->hnode,
to_ap_queue(dev)->qid);
spin_unlock_bh(&ap_queues_lock);
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (rc) {
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
} else {
ap_check_bindings_complete();
}
out:
if (rc)
put_device(dev);
return rc;
}
static void ap_device_remove(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
/* prepare ap queue device removal */
if (is_queue_dev(dev))
ap_queue_prepare_remove(to_ap_queue(dev));
/* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
/* now do the ap queue device remove */
if (is_queue_dev(dev))
ap_queue_remove(to_ap_queue(dev));
/* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
put_device(dev);
}
struct ap_queue *ap_get_qdev(ap_qid_t qid)
{
int bkt;
struct ap_queue *aq;
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
if (aq->qid == qid) {
get_device(&aq->ap_dev.device);
spin_unlock_bh(&ap_queues_lock);
return aq;
}
}
spin_unlock_bh(&ap_queues_lock);
return NULL;
}
EXPORT_SYMBOL(ap_get_qdev);
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
struct device_driver *drv = &ap_drv->driver;
drv->bus = &ap_bus_type;
drv->owner = owner;
drv->name = name;
return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);
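/*
* Illustrative registration sketch (hypothetical driver): a driver fills
* a struct ap_driver and registers it on the AP bus, e.g.:
*
*   static struct ap_driver my_ap_driver = {
*           .probe = my_probe,
*           .remove = my_remove,
*           .ids = my_card_ids,
*           .flags = AP_DRIVER_FLAG_DEFAULT,
*   };
*
*   rc = ap_driver_register(&my_ap_driver, THIS_MODULE, "my-ap-drv");
*
* The names my_* are placeholders, not real symbols.
*/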
void ap_driver_unregister(struct ap_driver *ap_drv)
{
driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
void ap_bus_force_rescan(void)
{
/* process an asynchronous bus rescan */
del_timer(&ap_config_timer);
queue_work(system_long_wq, &ap_scan_work);
flush_work(&ap_scan_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
* A config change has happened, force an ap bus rescan.
*/
void ap_bus_cfg_chg(void)
{
AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
}
/*
* hex2bitmap() - parse hex mask string and set bitmap.
* Valid strings are of the form "0x012345678" with at least one
* valid hex digit. The rest of the bitmap to the right is padded
* with 0. No spaces are allowed within the string; the leading 0x
* may be omitted.
* On success the bitmap contains exactly the bits given by the hex
* string (both in big endian order).
*/
static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
int i, n, b;
/* bits needs to be a multiple of 8 */
if (bits & 0x07)
return -EINVAL;
if (str[0] == '0' && str[1] == 'x')
str++;
if (*str == 'x')
str++;
for (i = 0; isxdigit(*str) && i < bits; str++) {
b = hex_to_bin(*str);
for (n = 0; n < 4; n++)
if (b & (0x08 >> n))
set_bit_inv(i + n, bitmap);
i += 4;
}
if (*str == '\n')
str++;
if (*str)
return -EINVAL;
return 0;
}
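/*
* Illustrative example (assumed input): hex2bitmap("0x81", bitmap, 8)
* sets bits 0 and 7 of the bitmap - the leftmost hex digit maps to the
* lowest (inverted) bit numbers and the remainder is padded with 0.
*/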
/*
* modify_bitmap() - parse bitmask argument and modify an existing
* bit mask accordingly. A concatenation (done with ',') of these
* terms is recognized:
* +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
* <bitnr> may be any valid number (hex, decimal or octal) in the range
* 0...bits-1; the leading + or - is required. Here are some examples:
* +0-15,+32,-128,-0xFF
* -0-255,+1-16,+0x128
* +1,+2,+3,+4,-5,-7-10
* Returns the new bitmap after all changes have been applied. Every
* positive value in the string will set a bit and every negative value
* in the string will clear a bit. As a bit may be touched more than once,
* the last 'operation' wins:
* +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
* cleared again. All other bits are unmodified.
*/
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
int a, i, z;
char *np, sign;
/* bits needs to be a multiple of 8 */
if (bits & 0x07)
return -EINVAL;
while (*str) {
sign = *str++;
if (sign != '+' && sign != '-')
return -EINVAL;
a = z = simple_strtoul(str, &np, 0);
if (str == np || a >= bits)
return -EINVAL;
str = np;
if (*str == '-') {
z = simple_strtoul(++str, &np, 0);
if (str == np || a > z || z >= bits)
return -EINVAL;
str = np;
}
for (i = a; i <= z; i++)
if (sign == '+')
set_bit_inv(i, bitmap);
else
clear_bit_inv(i, bitmap);
while (*str == ',' || *str == '\n')
str++;
}
return 0;
}
static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
unsigned long *newmap)
{
unsigned long size;
int rc;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
if (*str == '+' || *str == '-') {
memcpy(newmap, bitmap, size);
rc = modify_bitmap(str, newmap, bits);
} else {
memset(newmap, 0, size);
rc = hex2bitmap(str, newmap, bits);
}
return rc;
}
int ap_parse_mask_str(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock)
{
unsigned long *newmap, size;
int rc;
/* bits needs to be a multiple of 8 */
if (bits & 0x07)
return -EINVAL;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
newmap = kmalloc(size, GFP_KERNEL);
if (!newmap)
return -ENOMEM;
if (mutex_lock_interruptible(lock)) {
kfree(newmap);
return -ERESTARTSYS;
}
rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
if (rc == 0)
memcpy(bitmap, newmap, size);
mutex_unlock(lock);
kfree(newmap);
return rc;
}
EXPORT_SYMBOL(ap_parse_mask_str);
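/*
* Illustrative usage sketch (hypothetical caller): parse a mask string,
* e.g. received via a sysfs store function, into an adapter bitmap:
*
*   DECLARE_BITMAP(map, AP_DEVICES);
*
*   rc = ap_parse_mask_str("0xff", map, AP_DEVICES, &ap_perms_mutex);
*
* On success, bits 0-7 of map are set and all remaining bits are clear.
* ap_perms_mutex is the lock used within this file; an external caller
* would pass a mutex protecting its own target bitmap.
*/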
/*
* AP bus attributes.
*/
static ssize_t ap_domain_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_domain_index);
}
static ssize_t ap_domain_store(const struct bus_type *bus,
const char *buf, size_t count)
{
int domain;
if (sscanf(buf, "%i\n", &domain) != 1 ||
domain < 0 || domain > ap_max_domain_id ||
!test_bit_inv(domain, ap_perms.aqm))
return -EINVAL;
spin_lock_bh(&ap_domain_lock);
ap_domain_index = domain;
spin_unlock_bh(&ap_domain_lock);
AP_DBF_INFO("%s stored new default domain=%d\n",
__func__, domain);
return count;
}
static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_qci_info->adm[0], ap_qci_info->adm[1],
ap_qci_info->adm[2], ap_qci_info->adm[3],
ap_qci_info->adm[4], ap_qci_info->adm[5],
ap_qci_info->adm[6], ap_qci_info->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_qci_info->aqm[0], ap_qci_info->aqm[1],
ap_qci_info->aqm[2], ap_qci_info->aqm[3],
ap_qci_info->aqm[4], ap_qci_info->aqm[5],
ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
{
if (!ap_qci_info) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_qci_info->apm[0], ap_qci_info->apm[1],
ap_qci_info->apm[2], ap_qci_info->apm[3],
ap_qci_info->apm[4], ap_qci_info->apm[5],
ap_qci_info->apm[6], ap_qci_info->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_irq_flag ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_config_time);
}
static ssize_t config_time_store(const struct bus_type *bus,
const char *buf, size_t count)
{
int time;
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
ap_config_time = time;
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
return count;
}
static BUS_ATTR_RW(config_time);
static ssize_t poll_thread_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_poll_kthread ? 1 : 0);
}
static ssize_t poll_thread_store(const struct bus_type *bus,
const char *buf, size_t count)
{
bool value;
int rc;
rc = kstrtobool(buf, &value);
if (rc)
return rc;
if (value) {
rc = ap_poll_thread_start();
if (rc)
count = rc;
} else {
ap_poll_thread_stop();
}
return count;
}
static BUS_ATTR_RW(poll_thread);
static ssize_t poll_timeout_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%lu\n", poll_high_timeout);
}
static ssize_t poll_timeout_store(const struct bus_type *bus, const char *buf,
size_t count)
{
unsigned long value;
ktime_t hr_time;
int rc;
rc = kstrtoul(buf, 0, &value);
if (rc)
return rc;
/* 120 seconds = maximum poll interval */
if (value > 120000000000UL)
return -EINVAL;
poll_high_timeout = value;
hr_time = poll_high_timeout;
spin_lock_bh(&ap_poll_timer_lock);
hrtimer_cancel(&ap_poll_timer);
hrtimer_set_expires(&ap_poll_timer, hr_time);
hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
spin_unlock_bh(&ap_poll_timer_lock);
return count;
}
static BUS_ATTR_RW(poll_timeout);
static ssize_t ap_max_domain_id_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
static ssize_t ap_max_adapter_id_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%d\n", ap_max_adapter_id);
}
static BUS_ATTR_RO(ap_max_adapter_id);
static ssize_t apmask_show(const struct bus_type *bus, char *buf)
{
int rc;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.apm[0], ap_perms.apm[1],
ap_perms.apm[2], ap_perms.apm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
}
static int __verify_card_reservations(struct device_driver *drv, void *data)
{
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newapm = (unsigned long *)data;
/*
* increase the driver's module refcounter to be sure it is not
* going away when we invoke the callback function.
*/
if (!try_module_get(drv->owner))
return 0;
if (ap_drv->in_use) {
rc = ap_drv->in_use(newapm, ap_perms.aqm);
if (rc)
rc = -EBUSY;
}
/* release the driver's module */
module_put(drv->owner);
return rc;
}
static int apmask_commit(unsigned long *newapm)
{
int rc;
unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];
/*
* Check if any bits in the apmask have been set which will
* result in queues being removed from non-default drivers
*/
if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
__verify_card_reservations);
if (rc)
return rc;
}
memcpy(ap_perms.apm, newapm, APMASKSIZE);
return 0;
}
static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
int rc, changes = 0;
DECLARE_BITMAP(newapm, AP_DEVICES);
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
if (rc)
goto done;
changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
if (changes)
rc = apmask_commit(newapm);
done:
mutex_unlock(&ap_perms_mutex);
if (rc)
return rc;
if (changes) {
ap_bus_revise_bindings();
ap_send_mask_changed_uevent(newapm, NULL);
}
return count;
}
static BUS_ATTR_RW(apmask);
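/*
* Illustrative sysfs usage (hypothetical shell session): the apmask
* attribute accepts either an absolute hex mask or relative +/- terms
* as parsed by ap_parse_bitmap_str(), e.g.
*
*   echo 0xffff > /sys/bus/ap/apmask        reserves adapters 0-15
*   echo +0x20,-5 > /sys/bus/ap/apmask      sets bit 32, clears bit 5
*/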
static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
{
int rc;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.aqm[0], ap_perms.aqm[1],
ap_perms.aqm[2], ap_perms.aqm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
}
static int __verify_queue_reservations(struct device_driver *drv, void *data)
{
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newaqm = (unsigned long *)data;
/*
* increase the driver's module refcounter to be sure it is not
* going away when we invoke the callback function.
*/
if (!try_module_get(drv->owner))
return 0;
if (ap_drv->in_use) {
rc = ap_drv->in_use(ap_perms.apm, newaqm);
if (rc)
rc = -EBUSY;
}
/* release the driver's module */
module_put(drv->owner);
return rc;
}
static int aqmask_commit(unsigned long *newaqm)
{
int rc;
unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];
/*
* Check if any bits in the aqmask have been set which will
* result in queues being removed from non-default drivers
*/
if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
__verify_queue_reservations);
if (rc)
return rc;
}
memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
return 0;
}
static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
int rc, changes = 0;
DECLARE_BITMAP(newaqm, AP_DOMAINS);
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
if (rc)
goto done;
changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE);
if (changes)
rc = aqmask_commit(newaqm);
done:
mutex_unlock(&ap_perms_mutex);
if (rc)
return rc;
if (changes) {
ap_bus_revise_bindings();
ap_send_mask_changed_uevent(NULL, newaqm);
}
return count;
}
static BUS_ATTR_RW(aqmask);
static ssize_t scans_show(const struct bus_type *bus, char *buf)
{
return sysfs_emit(buf, "%llu\n", atomic64_read(&ap_scan_bus_count));
}
static ssize_t scans_store(const struct bus_type *bus, const char *buf,
size_t count)
{
AP_DBF_INFO("%s force AP bus rescan\n", __func__);
ap_bus_force_rescan();
return count;
}
static BUS_ATTR_RW(scans);
static ssize_t bindings_show(const struct bus_type *bus, char *buf)
{
int rc;
unsigned int apqns, n;
ap_calc_bound_apqns(&apqns, &n);
if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
rc = sysfs_emit(buf, "%u/%u (complete)\n", n, apqns);
else
rc = sysfs_emit(buf, "%u/%u\n", n, apqns);
return rc;
}
static BUS_ATTR_RO(bindings);
static ssize_t features_show(const struct bus_type *bus, char *buf)
{
int n = 0;
if (!ap_qci_info) /* QCI not supported */
return sysfs_emit(buf, "-\n");
if (ap_qci_info->apsc)
n += sysfs_emit_at(buf, n, "APSC ");
if (ap_qci_info->apxa)
n += sysfs_emit_at(buf, n, "APXA ");
if (ap_qci_info->qact)
n += sysfs_emit_at(buf, n, "QACT ");
if (ap_qci_info->rc8a)
n += sysfs_emit_at(buf, n, "RC8A ");
if (ap_qci_info->apsb)
n += sysfs_emit_at(buf, n, "APSB ");
sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");
return n;
}
static BUS_ATTR_RO(features);
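/*
* Illustrative example (hypothetical machine): reading
* /sys/bus/ap/features on a system offering APSC, APXA and QACT would
* yield "APSC APXA QACT"; without QCI support a single "-" is shown.
*/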
static struct attribute *ap_bus_attrs[] = {
&bus_attr_ap_domain.attr,
&bus_attr_ap_control_domain_mask.attr,
&bus_attr_ap_usage_domain_mask.attr,
&bus_attr_ap_adapter_mask.attr,
&bus_attr_config_time.attr,
&bus_attr_poll_thread.attr,
&bus_attr_ap_interrupts.attr,
&bus_attr_poll_timeout.attr,
&bus_attr_ap_max_domain_id.attr,
&bus_attr_ap_max_adapter_id.attr,
&bus_attr_apmask.attr,
&bus_attr_aqmask.attr,
&bus_attr_scans.attr,
&bus_attr_bindings.attr,
&bus_attr_features.attr,
NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
static struct bus_type ap_bus_type = {
.name = "ap",
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
.uevent = &ap_uevent,
.probe = ap_device_probe,
.remove = ap_device_remove,
};
/**
* ap_select_domain(): Select an AP domain if possible and if we
* haven't already done so.
*/
static void ap_select_domain(void)
{
struct ap_queue_status status;
int card, dom;
/*
* Choose the default domain. Either the one specified with
* the "domain=" parameter or the first domain with at least
* one valid APQN.
*/
spin_lock_bh(&ap_domain_lock);
if (ap_domain_index >= 0) {
/* Domain has already been selected. */
goto out;
}
for (dom = 0; dom <= ap_max_domain_id; dom++) {
if (!ap_test_config_usage_domain(dom) ||
!test_bit_inv(dom, ap_perms.aqm))
continue;
for (card = 0; card <= ap_max_adapter_id; card++) {
if (!ap_test_config_card_id(card) ||
!test_bit_inv(card, ap_perms.apm))
continue;
status = ap_test_queue(AP_MKQID(card, dom),
ap_apft_available(),
NULL);
if (status.response_code == AP_RESPONSE_NORMAL)
break;
}
if (card <= ap_max_adapter_id)
break;
}
if (dom <= ap_max_domain_id) {
ap_domain_index = dom;
AP_DBF_INFO("%s new default domain is %d\n",
__func__, ap_domain_index);
}
out:
spin_unlock_bh(&ap_domain_lock);
}
/*
* This function checks the type and returns either 0 for not
* supported or the highest compatible type value (which may
* include the input type value).
*/
static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
int comp_type = 0;
/* < CEX4 is not supported */
if (rawtype < AP_DEVICE_TYPE_CEX4) {
AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
__func__, AP_QID_CARD(qid),
AP_QID_QUEUE(qid), rawtype);
return 0;
}
/* up to CEX8 known and fully supported */
if (rawtype <= AP_DEVICE_TYPE_CEX8)
return rawtype;
/*
* unknown new type > CEX8, check for compatibility
* to the highest known and supported type which is
* currently CEX8 with the help of the QACT function.
*/
if (ap_qact_available()) {
struct ap_queue_status status;
union ap_qact_ap_info apinfo = {0};
apinfo.mode = (func >> 26) & 0x07;
apinfo.cat = AP_DEVICE_TYPE_CEX8;
status = ap_qact(qid, 0, &apinfo);
if (status.response_code == AP_RESPONSE_NORMAL &&
apinfo.cat >= AP_DEVICE_TYPE_CEX4 &&
apinfo.cat <= AP_DEVICE_TYPE_CEX8)
comp_type = apinfo.cat;
}
if (!comp_type)
AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
__func__, AP_QID_CARD(qid),
AP_QID_QUEUE(qid), rawtype);
else if (comp_type != rawtype)
AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
__func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
rawtype, comp_type);
return comp_type;
}
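/*
* Illustrative example (hypothetical future hardware): if TAPQ reports
* a raw type greater than AP_DEVICE_TYPE_CEX8 and QACT confirms
* compatibility with CEX8, the adapter is driven as a CEX8 device;
* if QACT is unavailable or reports no compatible type, the adapter
* is ignored.
*/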
/*
* Helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
static int __match_card_device_with_id(struct device *dev, const void *data)
{
return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
}
/*
* Helper function to be used with bus_find_dev
* matches for the queue device with a given qid
*/
static int __match_queue_device_with_qid(struct device *dev, const void *data)
{
return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
}
/*
* Helper function to be used with bus_find_dev
* matches any queue device with given queue id
*/
static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
{
return is_queue_dev(dev) &&
AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
}
/* Helper function for notify_config_changed */
static int __drv_notify_config_changed(struct device_driver *drv, void *data)
{
struct ap_driver *ap_drv = to_ap_drv(drv);
if (try_module_get(drv->owner)) {
if (ap_drv->on_config_changed)
ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
module_put(drv->owner);
}
return 0;
}
/* Notify all drivers about a QCI config change */
static inline void notify_config_changed(void)
{
bus_for_each_drv(&ap_bus_type, NULL, NULL,
__drv_notify_config_changed);
}
/* Helper function for notify_scan_complete */
static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
{
struct ap_driver *ap_drv = to_ap_drv(drv);
if (try_module_get(drv->owner)) {
if (ap_drv->on_scan_complete)
ap_drv->on_scan_complete(ap_qci_info,
ap_qci_info_old);
module_put(drv->owner);
}
return 0;
}
/* Notify all drivers about bus scan complete */
static inline void notify_scan_complete(void)
{
bus_for_each_drv(&ap_bus_type, NULL, NULL,
__drv_notify_scan_complete);
}
/*
* Helper function for ap_scan_bus().
* Remove card device and associated queue devices.
*/
static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
bus_for_each_dev(&ap_bus_type, NULL,
(void *)(long)ac->id,
__ap_queue_devices_with_id_unregister);
device_unregister(&ac->ap_dev.device);
}
/*
* Helper function for ap_scan_bus().
* Does the scan bus job for all the domains within
* a valid adapter given by an ap_card ptr.
*/
static inline void ap_scan_domains(struct ap_card *ac)
{
int rc, dom, depth, type, ml;
bool decfg, chkstop;
struct ap_queue *aq;
struct device *dev;
unsigned int func;
ap_qid_t qid;
/*
* Go through the configuration for the domains and compare them
* to the existing queue devices. Also take care of the config
* and error state for the queue devices.
*/
for (dom = 0; dom <= ap_max_domain_id; dom++) {
qid = AP_MKQID(ac->id, dom);
dev = bus_find_device(&ap_bus_type, NULL,
(void *)(long)qid,
__match_queue_device_with_qid);
aq = dev ? to_ap_queue(dev) : NULL;
if (!ap_test_config_usage_domain(dom)) {
if (dev) {
AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
__func__, ac->id, dom);
device_unregister(dev);
}
goto put_dev_and_continue;
}
/* domain is valid, get info from this APQN */
rc = ap_queue_info(qid, &type, &func, &depth,
&ml, &decfg, &chkstop);
switch (rc) {
case -1:
if (dev) {
AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
__func__, ac->id, dom);
device_unregister(dev);
}
fallthrough;
case 0:
goto put_dev_and_continue;
default:
break;
}
/* if no queue device exists, create a new one */
if (!aq) {
aq = ap_queue_create(qid, ac->ap_dev.device_type);
if (!aq) {
AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
__func__, ac->id, dom);
continue;
}
aq->card = ac;
aq->config = !decfg;
aq->chkstop = chkstop;
dev = &aq->ap_dev.device;
dev->bus = &ap_bus_type;
dev->parent = &ac->ap_dev.device;
dev_set_name(dev, "%02x.%04x", ac->id, dom);
/* register queue device */
rc = device_register(dev);
if (rc) {
AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
__func__, ac->id, dom);
goto put_dev_and_continue;
}
/* get it and thus adjust reference counter */
get_device(dev);
if (decfg)
AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
__func__, ac->id, dom);
else if (chkstop)
AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
__func__, ac->id, dom);
else
AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
__func__, ac->id, dom);
goto put_dev_and_continue;
}
/* handle state changes on already existing queue device */
spin_lock_bh(&aq->lock);
/* checkstop state */
if (chkstop && !aq->chkstop) {
/* checkstop on */
aq->chkstop = true;
if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
__func__, ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
} else if (!chkstop && aq->chkstop) {
/* checkstop off */
aq->chkstop = false;
if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
}
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
__func__, ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
if (decfg && aq->config) {
/* config off this queue device */
aq->config = false;
if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
__func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
} else if (!decfg && !aq->config) {
/* config on this queue device */
aq->config = true;
if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
}
spin_unlock_bh(&aq->lock);
AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
__func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
/* handle other error states */
if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
spin_unlock_bh(&aq->lock);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
/* re-init (with reset) the queue device */
ap_queue_init_state(aq);
AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
__func__, ac->id, dom);
goto put_dev_and_continue;
}
spin_unlock_bh(&aq->lock);
put_dev_and_continue:
put_device(dev);
}
}
/*
* Helper function for ap_scan_bus().
* Does the scan bus job for the given adapter id.
*/
static inline void ap_scan_adapter(int ap)
{
int rc, dom, depth, type, comp_type, ml;
bool decfg, chkstop;
struct ap_card *ac;
struct device *dev;
unsigned int func;
ap_qid_t qid;
/* Is there currently a card device for this adapter? */
dev = bus_find_device(&ap_bus_type, NULL,
(void *)(long)ap,
__match_card_device_with_id);
ac = dev ? to_ap_card(dev) : NULL;
/* Adapter not in configuration? */
if (!ap_test_config_card_id(ap)) {
if (ac) {
AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
__func__, ap);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
}
return;
}
/*
* Adapter ap is valid in the current configuration. So do some checks:
* If no card device exists, build one. If a card device exists, check
* whether type or functions have changed. For all this we need to find a valid
* APQN first.
*/
for (dom = 0; dom <= ap_max_domain_id; dom++)
if (ap_test_config_usage_domain(dom)) {
qid = AP_MKQID(ap, dom);
if (ap_queue_info(qid, &type, &func, &depth,
&ml, &decfg, &chkstop) > 0)
break;
}
if (dom > ap_max_domain_id) {
/* Could not find one valid APQN for this adapter */
if (ac) {
AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
__func__, ap);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
__func__, ap);
}
return;
}
if (!type) {
/* No adapter type info available, an unusable adapter */
if (ac) {
AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
__func__, ap);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
__func__, ap);
}
return;
}
if (ac) {
/* Check APQN against existing card device for changes */
if (ac->raw_hwtype != type) {
AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
__func__, ap, type);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
} else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
(func & TAPQ_CARD_FUNC_CMP_MASK)) {
AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
__func__, ap, func);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
} else {
/* handle checkstop state change */
if (chkstop && !ac->chkstop) {
/* checkstop on */
ac->chkstop = true;
AP_DBF_INFO("%s(%d) card dev checkstop on\n",
__func__, ap);
} else if (!chkstop && ac->chkstop) {
/* checkstop off */
ac->chkstop = false;
AP_DBF_INFO("%s(%d) card dev checkstop off\n",
__func__, ap);
}
/* handle config state change */
if (decfg && ac->config) {
ac->config = false;
AP_DBF_INFO("%s(%d) card dev config off\n",
__func__, ap);
ap_send_config_uevent(&ac->ap_dev, ac->config);
} else if (!decfg && !ac->config) {
ac->config = true;
AP_DBF_INFO("%s(%d) card dev config on\n",
__func__, ap);
ap_send_config_uevent(&ac->ap_dev, ac->config);
}
}
}
if (!ac) {
/* Build a new card device */
comp_type = ap_get_compatible_type(qid, type, func);
if (!comp_type) {
AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
__func__, ap, type);
return;
}
ac = ap_card_create(ap, depth, type, comp_type, func, ml);
if (!ac) {
AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
__func__, ap);
return;
}
ac->config = !decfg;
ac->chkstop = chkstop;
dev = &ac->ap_dev.device;
dev->bus = &ap_bus_type;
dev->parent = ap_root_device;
dev_set_name(dev, "card%02x", ap);
/* maybe enlarge ap_max_msg_size to support this card */
if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
atomic_set(&ap_max_msg_size, ac->maxmsgsize);
AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
__func__, ap,
atomic_read(&ap_max_msg_size));
}
/* Register the new card device with AP bus */
rc = device_register(dev);
if (rc) {
AP_DBF_WARN("%s(%d) device_register() failed\n",
__func__, ap);
put_device(dev);
return;
}
/* get it and thus adjust reference counter */
get_device(dev);
if (decfg)
AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
__func__, ap, type, func);
else if (chkstop)
AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
__func__, ap, type, func);
else
AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
__func__, ap, type, func);
}
/* Verify the domains and the queue devices for this card */
ap_scan_domains(ac);
/* release the card device */
put_device(&ac->ap_dev.device);
}
/**
* ap_get_configuration - get the host AP configuration
*
* Stores the host AP configuration information returned from the previous call
* to Query Configuration Information (QCI), then retrieves and stores the
* current AP configuration returned from QCI.
*
* Return: true if the host AP configuration changed between calls to QCI;
* otherwise, return false.
*/
static bool ap_get_configuration(void)
{
if (!ap_qci_info) /* QCI not supported */
return false;
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
return memcmp(ap_qci_info, ap_qci_info_old,
sizeof(struct ap_config_info)) != 0;
}
/**
* ap_scan_bus(): Scan the AP bus for new devices
* Runs periodically via the AP config timer, which re-queues the scan
* work on the system_long_wq workqueue every ap_config_time seconds.
* @unused: Unused pointer.
*/
static void ap_scan_bus(struct work_struct *unused)
{
int ap, config_changed = 0;
/* config change notify */
config_changed = ap_get_configuration();
if (config_changed)
notify_config_changed();
ap_select_domain();
AP_DBF_DBG("%s running\n", __func__);
/* loop over all possible adapters */
for (ap = 0; ap <= ap_max_adapter_id; ap++)
ap_scan_adapter(ap);
/* scan complete notify */
if (config_changed)
notify_scan_complete();
/* check if there is at least one queue available with default domain */
if (ap_domain_index >= 0) {
struct device *dev =
bus_find_device(&ap_bus_type, NULL,
(void *)(long)ap_domain_index,
__match_queue_device_with_queue_id);
if (dev)
put_device(dev);
else
AP_DBF_INFO("%s no queue device with default domain %d available\n",
__func__, ap_domain_index);
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
AP_DBF_DBG("%s init scan complete\n", __func__);
ap_send_init_scan_done_uevent();
ap_check_bindings_complete();
}
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
static void ap_config_timeout(struct timer_list *unused)
{
queue_work(system_long_wq, &ap_scan_work);
}
static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
return 0;
}
static void __init ap_perms_init(void)
{
/* all resources usable if no kernel parameter string given */
memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
/* apm kernel parameter string */
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
&ap_perms_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
&ap_perms_mutex);
}
}
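/*
* Illustrative example (assumed parameter names as documented for the
* AP bus): the apm/aqm masks above can be seeded from the kernel
* command line, e.g.
*
*   ap.apmask=0xffff ap.aqmask=0x40
*
* which would leave only adapters 0-15 and domain 1 usable by the
* host's default drivers.
*/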
/**
* ap_module_init(): The module initialization code.
*
* Initializes the module.
*/
static int __init ap_module_init(void)
{
int rc;
rc = ap_debug_init();
if (rc)
return rc;
if (!ap_instructions_available()) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
}
/* init ap_queue hashtable */
hash_init(ap_queues);
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
/* Get AP configuration data if available */
ap_init_qci_info();
/* check default domain setting */
if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
(ap_domain_index >= 0 &&
!test_bit_inv(ap_domain_index, ap_perms.aqm))) {
pr_warn("%d is not a valid cryptographic domain\n",
ap_domain_index);
ap_domain_index = -1;
}
/* enable interrupts if available */
if (ap_interrupts_available() && ap_useirq) {
rc = register_adapter_interrupt(&ap_airq);
ap_irq_flag = (rc == 0);
}
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
goto out;
/* Create /sys/devices/ap. */
ap_root_device = root_device_register("ap");
rc = PTR_ERR_OR_ZERO(ap_root_device);
if (rc)
goto out_bus;
ap_root_device->bus = &ap_bus_type;
/* Setup the AP bus rescan timer. */
timer_setup(&ap_config_timer, ap_config_timeout, 0);
/*
* Setup the high resolution poll timer.
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
if (MACHINE_IS_VM)
poll_high_timeout = 1500000;
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
/* Start the low priority AP bus poll thread. */
if (ap_thread_flag) {
rc = ap_poll_thread_start();
if (rc)
goto out_work;
}
queue_work(system_long_wq, &ap_scan_work);
return 0;
out_work:
hrtimer_cancel(&ap_poll_timer);
root_device_unregister(ap_root_device);
out_bus:
bus_unregister(&ap_bus_type);
out:
if (ap_irq_flag)
unregister_adapter_interrupt(&ap_airq);
kfree(ap_qci_info);
return rc;
}
device_initcall(ap_module_init);
| linux-master | drivers/s390/crypto/ap_bus.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Adjunct processor matrix VFIO device driver callbacks.
*
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <[email protected]>
* Halil Pasic <[email protected]>
* Pierre Morel <[email protected]>
*/
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>
#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"
#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
/**
* get_update_locks_for_kvm: Acquire the locks required to dynamically update a
* KVM guest's APCB in the proper order.
*
* @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
*
* The proper locking order is:
* 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
* guest's APCB.
* 2. kvm->lock: required to update a guest's APCB
* 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
*
* Note: If @kvm is NULL, the KVM lock will not be taken.
*/
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
mutex_lock(&matrix_dev->guests_lock);
if (kvm)
mutex_lock(&kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
}
/**
* release_update_locks_for_kvm: Release the locks used to dynamically update a
* KVM guest's APCB in the proper order.
*
* @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
*
* The proper unlocking order is:
* 1. matrix_dev->mdevs_lock
* 2. kvm->lock
* 3. matrix_dev->guests_lock
*
* Note: If @kvm is NULL, the KVM lock will not be released.
*/
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
mutex_unlock(&matrix_dev->mdevs_lock);
if (kvm)
mutex_unlock(&kvm->lock);
mutex_unlock(&matrix_dev->guests_lock);
}
/**
* get_update_locks_for_mdev: Acquire the locks required to dynamically update a
* KVM guest's APCB in the proper order.
*
* @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
* configuration data to use to update a KVM guest's APCB.
*
* The proper locking order is:
* 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
* guest's APCB.
* 2. matrix_mdev->kvm->lock: required to update a guest's APCB
* 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
*
* Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
* lock will not be taken.
*/
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
mutex_lock(&matrix_dev->guests_lock);
if (matrix_mdev && matrix_mdev->kvm)
mutex_lock(&matrix_mdev->kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
}
/**
* release_update_locks_for_mdev: Release the locks used to dynamically update a
* KVM guest's APCB in the proper order.
*
* @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
* configuration data to use to update a KVM guest's APCB.
*
* The proper unlocking order is:
* 1. matrix_dev->mdevs_lock
* 2. matrix_mdev->kvm->lock
* 3. matrix_dev->guests_lock
*
* Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
* lock will not be released.
*/
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
mutex_unlock(&matrix_dev->mdevs_lock);
if (matrix_mdev && matrix_mdev->kvm)
mutex_unlock(&matrix_mdev->kvm->lock);
mutex_unlock(&matrix_dev->guests_lock);
}
/**
* get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
* acquire the locks required to update the APCB of
* the KVM guest to which the mdev is attached.
*
* @apqn: the APQN of a queue device.
*
* The proper locking order is:
* 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
* guest's APCB.
* 2. matrix_mdev->kvm->lock: required to update a guest's APCB
* 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
*
* Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
* will not be taken.
*
* Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
* is not assigned to an ap_matrix_mdev.
*/
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
struct ap_matrix_mdev *matrix_mdev;
mutex_lock(&matrix_dev->guests_lock);
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
if (matrix_mdev->kvm)
mutex_lock(&matrix_mdev->kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
return matrix_mdev;
}
}
mutex_lock(&matrix_dev->mdevs_lock);
return NULL;
}
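/*
* Illustrative usage sketch (hypothetical caller): the lock helpers
* above are used in matching pairs, e.g. when handling a queue device
* for a given APQN:
*
*   matrix_mdev = get_update_locks_by_apqn(apqn);
*   ... update the matrix mdev and, if attached, the guest's APCB ...
*   release_update_locks_for_mdev(matrix_mdev);
*/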
/**
* get_update_locks_for_queue: get the locks required to update the APCB of the
* KVM guest to which the matrix mdev linked to a
* vfio_ap_queue object is attached.
*
* @q: a pointer to a vfio_ap_queue object.
*
* The proper locking order is:
* 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
* KVM guest's APCB.
* 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
* 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
*
* Note: if @queue is not linked to an ap_matrix_mdev object, the KVM lock
* will not be taken.
*/
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
mutex_lock(&matrix_dev->guests_lock);
if (q->matrix_mdev && q->matrix_mdev->kvm)
mutex_lock(&q->matrix_mdev->kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
}
/**
* vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
* hash table of queues assigned to a matrix mdev
* @matrix_mdev: the matrix mdev
* @apqn: The APQN of a queue device
*
* Return: the pointer to the vfio_ap_queue struct representing the queue or
* NULL if the queue is not assigned to @matrix_mdev
*/
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
struct ap_matrix_mdev *matrix_mdev,
int apqn)
{
struct vfio_ap_queue *q;
hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
apqn) {
if (q && q->apqn == apqn)
return q;
}
return NULL;
}
/**
* vfio_ap_wait_for_irqclear - wait for the IR bit to clear or give up after 5 tries
* @apqn: The AP Queue number
*
* Checks the IRQ bit for the status of this APQN using ap_tapq.
* Returns if the ap_tapq function succeeded and the bit is clear.
* Returns if the ap_tapq function failed with an invalid, deconfigured or
* checkstopped AP.
* Otherwise retries up to 5 times after waiting 20ms.
*/
static void vfio_ap_wait_for_irqclear(int apqn)
{
struct ap_queue_status status;
int retry = 5;
do {
status = ap_tapq(apqn, NULL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
if (!status.irq_enabled)
return;
fallthrough;
case AP_RESPONSE_BUSY:
msleep(20);
break;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
status.response_code, apqn);
return;
}
} while (--retry);
WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
__func__, status.response_code, apqn);
}
/**
* vfio_ap_free_aqic_resources - free vfio_ap_queue resources
* @q: The vfio_ap_queue
*
* Unregisters the ISC in the GIB when the saved ISC is not invalid.
* Unpins the guest's page holding the NIB when it exists.
* Resets the saved_iova and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
if (!q)
return;
if (q->saved_isc != VFIO_AP_ISC_INVALID &&
!WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
q->saved_isc = VFIO_AP_ISC_INVALID;
}
if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
q->saved_iova = 0;
}
}
/**
* vfio_ap_irq_disable - disables and clears an ap_queue interrupt
* @q: The vfio_ap_queue
*
* Uses ap_aqic to disable the interruption. In case of success, of a reset
* in progress or of an already processed IRQ disable command, it calls
* vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
* and calls vfio_ap_free_aqic_resources() to free the resources associated
* with the AP interrupt handling.
*
* In case the AP is busy or a reset is in progress, it
* retries after 20ms, up to 5 times.
*
* Returns if ap_aqic function failed with invalid, deconfigured or
* checkstopped AP.
*
* Return: &struct ap_queue_status
*/
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
union ap_qirq_ctrl aqic_gisa = { .value = 0 };
struct ap_queue_status status;
int retries = 5;
do {
status = ap_aqic(q->apqn, aqic_gisa, 0);
switch (status.response_code) {
case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_NORMAL:
vfio_ap_wait_for_irqclear(q->apqn);
goto end_free;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
msleep(20);
break;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
default:
/* All remaining cases mean the AP is not operational */
WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
status.response_code);
goto end_free;
}
} while (retries--);
WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
status.response_code);
end_free:
vfio_ap_free_aqic_resources(q);
return status;
}
/**
* vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
*
* @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
* @nib: the location for storing the nib address.
*
* When the PQAP(AQIC) instruction is executed, general register 2 contains the
* address of the notification indicator byte (nib) used for IRQ notification.
* This function parses and validates the nib from gr2.
*
* Return: returns zero if the nib address is valid; otherwise, returns
* -EINVAL.
*/
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
if (!*nib)
return -EINVAL;
if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
return 0;
}
static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
{
int ret;
/*
* The nib has to be located in shared storage since guest and
* host access it. vfio_pin_pages() will do a pin shared and
* if that fails (possibly because it's not a shared page) it
* calls export. We try to do a second pin shared here so that
* the UV gives us an error code if we try to pin a non-shared
* page.
*
* If the page is already pinned shared, the UV will return success.
*/
ret = uv_pin_shared(addr);
if (ret) {
/* vfio_pin_pages() likely exported the page so let's re-import */
gmap_convert_to_secure(gmap, addr);
}
return ret;
}
/**
* vfio_ap_irq_enable - Enable Interruption for a APQN
*
* @q: the vfio_ap_queue holding AQIC parameters
* @isc: the guest ISC to register with the GIB interface
* @vcpu: the vcpu object containing the registers specifying the parameters
* passed to the PQAP(AQIC) instruction.
*
* Pin the NIB saved in *q
* Register the guest ISC to GIB interface and retrieve the
* host ISC to issue the host side PQAP/AQIC
*
* Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
* vfio_pin_pages() failed.
*
* Otherwise return the ap_queue_status returned by the ap_aqic(),
* all retry handling will be done by the guest.
*
* Return: &struct ap_queue_status
*/
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
union ap_qirq_ctrl aqic_gisa = { .value = 0 };
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
struct page *h_page;
int nisc;
struct kvm *kvm;
phys_addr_t h_nib;
dma_addr_t nib;
int ret;
/* Verify that the notification indicator byte address is valid */
if (vfio_ap_validate_nib(vcpu, &nib)) {
VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
__func__, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
case 1:
break;
default:
VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
"nib=%pad, apqn=%#04x\n",
__func__, ret, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
kvm = q->matrix_mdev->kvm;
gisa = kvm->arch.gisa_int.origin;
h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
/* NIB in non-shared storage is a rc 6 for PV guests */
if (kvm_s390_pv_cpu_is_protected(vcpu) &&
ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
nisc = kvm_s390_gisc_register(kvm, isc);
if (nisc < 0) {
VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
__func__, nisc, isc, q->apqn);
status.response_code = AP_RESPONSE_INVALID_GISA;
return status;
}
aqic_gisa.isc = nisc;
aqic_gisa.ir = 1;
aqic_gisa.gisa = virt_to_phys(gisa) >> 4;
status = ap_aqic(q->apqn, aqic_gisa, h_nib);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
/* Clear any previously saved IRQ configuration */
vfio_ap_free_aqic_resources(q);
q->saved_iova = nib;
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
status.response_code);
vfio_ap_irq_disable(q);
break;
}
if (status.response_code != AP_RESPONSE_NORMAL) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
"zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
"gisa=%#x, isc=%#x, apqn=%#04x\n",
__func__, status.response_code,
aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
q->apqn);
}
return status;
}
/**
* vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
* of big endian elements that can be passed by
* value to an s390dbf sprintf event function to
* format a UUID string.
*
* @guid: the object containing the little endian guid
* @uuid: a six-element array of long values that can be passed by value as
* arguments for a formatting string specifying a UUID.
*
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string remains available
 * as long as the debug feature exists. Since a mediated device can be removed
 * at any time, its name cannot be used because %s passes the reference to the
 * string in memory and the reference will go stale once the device is removed.
*
* The s390dbf string formatting function allows a maximum of 9 arguments for a
* message to be displayed in the 'sprintf' view. In order to use the bytes
* comprising the mediated device's UUID to display the mediated device name,
* they will have to be converted into an array whose elements can be passed by
* value to sprintf. For example:
*
* guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
* mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
* array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
*/
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
/*
* The input guid is ordered in little endian, so it needs to be
* reordered for displaying a UUID as a string. This specifies the
* guid indices in proper order.
*/
uuid[0] = le32_to_cpup((__le32 *)guid);
uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
uuid[3] = *((__u16 *)&guid->b[8]);
uuid[4] = *((__u16 *)&guid->b[10]);
uuid[5] = *((__u32 *)&guid->b[12]);
}
/**
* handle_pqap - PQAP instruction callback
*
* @vcpu: The vcpu on which we received the PQAP instruction
*
* Get the general register contents to initialize internal variables.
* REG[0]: APQN
* REG[1]: IR and ISC
* REG[2]: NIB
*
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0) : in case of success
 * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RCs.
* We take the matrix_dev lock to ensure serialization on queues and
* mediated device access.
*
* Return: 0 if we could handle the request inside KVM.
* Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
*/
static int handle_pqap(struct kvm_vcpu *vcpu)
{
uint64_t status;
uint16_t apqn;
unsigned long uuid[6];
struct vfio_ap_queue *q;
struct ap_queue_status qstatus = {
.response_code = AP_RESPONSE_Q_NOT_AVAIL, };
struct ap_matrix_mdev *matrix_mdev;
apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
/* If we do not use the AIV facility just go to userland */
if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
__func__, apqn, vcpu->arch.sie_block->eca);
return -EOPNOTSUPP;
}
mutex_lock(&matrix_dev->mdevs_lock);
if (!vcpu->kvm->arch.crypto.pqap_hook) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
__func__, apqn);
goto out_unlock;
}
matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
struct ap_matrix_mdev, pqap_hook);
/* If there is no guest using the mdev, there is nothing to do */
if (!matrix_mdev->kvm) {
vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
__func__, uuid[0], uuid[1], uuid[2],
uuid[3], uuid[4], uuid[5], apqn);
goto out_unlock;
}
q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q) {
VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
__func__, AP_QID_CARD(apqn),
AP_QID_QUEUE(apqn));
goto out_unlock;
}
status = vcpu->run->s.regs.gprs[1];
/* If the IR bit (bit 16) is set, enable the interrupt; otherwise, disable it */
if ((status >> (63 - 16)) & 0x01)
qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
else
qstatus = vfio_ap_irq_disable(q);
out_unlock:
memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu->run->s.regs.gprs[1] >>= 32;
mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
}
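/*
 * Initialize the maximum adapter, domain and control domain indexes of
 * @matrix from the host AP configuration info. Without the APXA facility,
 * the architected defaults apply: 63 for adapters, 15 for domains and
 * control domains.
 */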
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
matrix->apm_max = info->apxa ? info->na : 63;
matrix->aqm_max = info->apxa ? info->nd : 15;
matrix->adm_max = info->apxa ? info->nd : 15;
}
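/*
 * If the mdev is in use by a KVM guest, plug the shadow APCB masks (adapters,
 * domains and control domains) into the guest's CRYCB so the filtered AP
 * configuration takes effect for the guest.
 */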
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
if (matrix_mdev->kvm)
kvm_arch_crypto_set_masks(matrix_mdev->kvm,
matrix_mdev->shadow_apcb.apm,
matrix_mdev->shadow_apcb.aqm,
matrix_mdev->shadow_apcb.adm);
}
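/*
 * Filter the control domains assigned to the mdev against those available in
 * the host's AP configuration. Returns true if the shadow ADM was changed by
 * the filtering.
 */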
static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
(unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
AP_DOMAINS);
}
/*
* vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
* to ensure no queue devices are passed through to
* the guest that are not bound to the vfio_ap
* device driver.
*
 * @apm: the mask of adapters (APIDs) to examine for filtering.
 * @aqm: the mask of domains (APQIs) to examine for filtering.
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 * driver, its APID will be filtered from the guest's APCB. The matrix
 * structure precludes filtering an individual APQN, so its APID will be
 * filtered.
*
* Return: a boolean value indicating whether the KVM guest's APCB was changed
* by the filtering or not.
*/
static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
struct ap_matrix_mdev *matrix_mdev)
{
unsigned long apid, apqi, apqn;
DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
struct vfio_ap_queue *q;
bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
/*
* Copy the adapters, domains and control domains to the shadow_apcb
* from the matrix mdev, but only those that are assigned to the host's
* AP configuration.
*/
bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
(unsigned long *)matrix_dev->info.apm, AP_DEVICES);
bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
(unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
for_each_set_bit_inv(apid, apm, AP_DEVICES) {
for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
/*
* If the APQN is not bound to the vfio_ap device
* driver, then we can't assign it to the guest's
* AP configuration. The AP architecture won't
* allow filtering of a single APQN, so let's filter
* the APID since an adapter represents a physical
* hardware device.
*/
apqn = AP_MKQID(apid, apqi);
q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q || q->reset_status.response_code) {
clear_bit_inv(apid,
matrix_mdev->shadow_apcb.apm);
break;
}
}
}
return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
AP_DEVICES) ||
!bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
AP_DOMAINS);
}
static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
matrix_mdev->mdev = to_mdev_device(vdev->dev);
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
hash_init(matrix_mdev->qtable.queues);
return 0;
}
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
int ret;
matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
&vfio_ap_matrix_dev_ops);
if (IS_ERR(matrix_mdev))
return PTR_ERR(matrix_mdev);
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_put_vdev;
matrix_mdev->req_trigger = NULL;
dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
err_put_vdev:
vfio_put_device(&matrix_mdev->vdev);
return ret;
}
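/*
 * Link a queue to the mdev: save a reference to the mdev in the queue and add
 * the queue to the mdev's queue hashtable, keyed by APQN.
 */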
static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
struct vfio_ap_queue *q)
{
if (q) {
q->matrix_mdev = matrix_mdev;
hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}
}
static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
struct vfio_ap_queue *q;
q = vfio_ap_find_queue(apqn);
vfio_ap_mdev_link_queue(matrix_mdev, q);
}
static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
hash_del(&q->mdev_qnode);
}
static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
q->matrix_mdev = NULL;
}
static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
struct vfio_ap_queue *q;
unsigned long apid, apqi;
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
AP_DOMAINS) {
q = vfio_ap_mdev_get_queue(matrix_mdev,
AP_MKQID(apid, apqi));
if (q)
q->matrix_mdev = NULL;
}
}
}
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&matrix_mdev->vdev);
mutex_lock(&matrix_dev->guests_lock);
mutex_lock(&matrix_dev->mdevs_lock);
vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);
vfio_put_device(&matrix_mdev->vdev);
}
#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
"already assigned to %s"
static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
unsigned long *apm,
unsigned long *aqm)
{
unsigned long apid, apqi;
const struct device *dev = mdev_dev(matrix_mdev->mdev);
const char *mdev_name = dev_name(dev);
for_each_set_bit_inv(apid, apm, AP_DEVICES)
for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
/**
* vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
* @mdev_apm: mask indicating the APIDs of the APQNs to be verified
* @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
* Verifies that each APQN derived from the Cartesian product of a bitmap of
* AP adapter IDs and AP queue indexes is not configured for any matrix
* mediated device. AP queue sharing is not allowed.
*
* Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
unsigned long *mdev_aqm)
{
struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
/*
* If the input apm and aqm are fields of the matrix_mdev
* object, then move on to the next matrix_mdev.
*/
if (mdev_apm == matrix_mdev->matrix.apm &&
mdev_aqm == matrix_mdev->matrix.aqm)
continue;
memset(apm, 0, sizeof(apm));
memset(aqm, 0, sizeof(aqm));
/*
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
AP_DEVICES))
continue;
if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
AP_DOMAINS))
continue;
vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
return -EADDRINUSE;
}
return 0;
}
/**
* vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
* not reserved for the default zcrypt driver and
* are not assigned to another mdev.
*
* @matrix_mdev: the mdev to which the APQNs being validated are assigned.
*
 * Return: One of the following values:
 * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver, or if the ap_apqn_in_matrix_owned_by_def_drv()
 *   check itself fails.
 * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another
 *   mdev.
 * o A zero indicating validation succeeded.
*/
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm))
return -EADDRNOTAVAIL;
return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm);
}
static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid)
{
unsigned long apqi;
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
vfio_ap_mdev_link_apqn(matrix_mdev,
AP_MKQID(apid, apqi));
}
/**
* assign_adapter_store - parses the APID from @buf and sets the
* corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_adapter attribute
* @buf: a buffer containing the AP adapter number (APID) to
* be assigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
*
* 1. -EINVAL
* The APID is not a valid number
*
* 2. -ENODEV
* The APID exceeds the maximum value configured for the system
*
* 3. -EADDRNOTAVAIL
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is not bound to the vfio_ap device
* driver; or, if no APQIs have yet been assigned, the APID is not
* contained in an APQN bound to the vfio_ap device driver.
*
* 4. -EADDRINUSE
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
*
* 5. -EAGAIN
* A lock required to validate the mdev's AP configuration could not
* be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long apid;
DECLARE_BITMAP(apm_delta, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&ap_perms_mutex);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
goto done;
if (apid > matrix_mdev->matrix.apm_max) {
ret = -ENODEV;
goto done;
}
if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
ret = count;
goto done;
}
set_bit_inv(apid, matrix_mdev->matrix.apm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
if (ret) {
clear_bit_inv(apid, matrix_mdev->matrix.apm);
goto done;
}
vfio_ap_mdev_link_adapter(matrix_mdev, apid);
memset(apm_delta, 0, sizeof(apm_delta));
set_bit_inv(apid, apm_delta);
if (vfio_ap_mdev_filter_matrix(apm_delta,
matrix_mdev->matrix.aqm, matrix_mdev))
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid, unsigned long apqi)
{
struct vfio_ap_queue *q = NULL;
q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
/* If the queue is assigned to the matrix mdev, unlink it. */
if (q)
vfio_ap_unlink_queue_fr_mdev(q);
return q;
}
/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with an
 * unassigned adapter from the matrix mdev to which the adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qtable: table for storing the queues associated with the unassigned adapter.
*/
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid,
struct ap_queue_table *qtable)
{
unsigned long apqi;
struct vfio_ap_queue *q;
for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
if (q && qtable) {
if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
hash_add(qtable->queues, &q->mdev_qnode,
q->apqn);
}
}
}
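/*
 * Hot unplug an adapter: unlink all of its queues from the mdev, remove its
 * APID from the guest's APCB if it was plugged in, then reset the queues that
 * were in use by the guest and clear their link back to the mdev.
 */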
static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid)
{
int loop_cursor;
struct vfio_ap_queue *q;
struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
hash_init(qtable->queues);
vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
vfio_ap_mdev_reset_queues(qtable);
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
vfio_ap_unlink_mdev_fr_queue(q);
hash_del(&q->mdev_qnode);
}
kfree(qtable);
}
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_adapter attribute
* @buf: a buffer containing the adapter number (APID) to be unassigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APID is not a number
 * -ENODEV if the APID exceeds the maximum value configured for the
* system
*/
static ssize_t unassign_adapter_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long apid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
goto done;
if (apid > matrix_mdev->matrix.apm_max) {
ret = -ENODEV;
goto done;
}
if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
ret = count;
goto done;
}
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
unsigned long apqi)
{
unsigned long apid;
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
vfio_ap_mdev_link_apqn(matrix_mdev,
AP_MKQID(apid, apqi));
}
/**
* assign_domain_store - parses the APQI from @buf and sets the
* corresponding bit in the mediated matrix device's AQM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_domain attribute
* @buf: a buffer containing the AP queue index (APQI) of the domain to
* be assigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the APQI is valid; otherwise returns
* one of the following errors:
*
* 1. -EINVAL
* The APQI is not a valid number
*
* 2. -ENODEV
* The APQI exceeds the maximum value configured for the system
*
* 3. -EADDRNOTAVAIL
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is not bound to the vfio_ap device
* driver; or, if no APIDs have yet been assigned, the APQI is not
* contained in an APQN bound to the vfio_ap device driver.
*
* 4. -EADDRINUSE
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
*
* 5. -EAGAIN
* The lock required to validate the mdev's AP configuration could not
* be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long apqi;
DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&ap_perms_mutex);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
ret = count;
goto done;
}
set_bit_inv(apqi, matrix_mdev->matrix.aqm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
if (ret) {
clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
goto done;
}
vfio_ap_mdev_link_domain(matrix_mdev, apqi);
memset(aqm_delta, 0, sizeof(aqm_delta));
set_bit_inv(apqi, aqm_delta);
if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
matrix_mdev))
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
unsigned long apqi,
struct ap_queue_table *qtable)
{
unsigned long apid;
struct vfio_ap_queue *q;
for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
if (q && qtable) {
if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
hash_add(qtable->queues, &q->mdev_qnode,
q->apqn);
}
}
}
static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
unsigned long apqi)
{
int loop_cursor;
struct vfio_ap_queue *q;
struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
hash_init(qtable->queues);
vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
vfio_ap_mdev_reset_queues(qtable);
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
vfio_ap_unlink_mdev_fr_queue(q);
hash_del(&q->mdev_qnode);
}
kfree(qtable);
}
/**
* unassign_domain_store - parses the APQI from @buf and clears the
* corresponding bit in the mediated matrix device's AQM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_domain attribute
* @buf: a buffer containing the AP queue index (APQI) of the domain to
* be unassigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the APQI is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APQI is not a number
* -ENODEV if the APQI exceeds the maximum value configured for the system
*/
static ssize_t unassign_domain_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long apqi;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
ret = count;
goto done;
}
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
/**
* assign_control_domain_store - parses the domain ID from @buf and sets
* the corresponding bit in the mediated matrix device's ADM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_control_domain attribute
* @buf: a buffer containing the domain ID to be assigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
*/
static ssize_t assign_control_domain_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long id;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
goto done;
if (id > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
ret = count;
goto done;
}
/*
 * Set the bit in the ADM (bitmask) corresponding to the AP control
 * domain number (id). The bits in the mask, from most significant to
 * least significant, correspond to IDs 0 up to one less than the
 * number of control domains that can be assigned.
 */
set_bit_inv(id, matrix_mdev->matrix.adm);
if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
/**
* unassign_control_domain_store - parses the domain ID from @buf and
* clears the corresponding bit in the mediated matrix device's ADM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_control_domain attribute
* @buf: a buffer containing the domain ID to be unassigned
* @count: the number of bytes in @buf
*
* Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
*/
static ssize_t unassign_control_domain_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned long domid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
goto done;
if (domid > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
ret = count;
goto done;
}
clear_bit_inv(domid, matrix_mdev->matrix.adm);
if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
static ssize_t control_domains_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
unsigned long id;
int nchars = 0;
int n;
char *bufpos = buf;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
mutex_lock(&matrix_dev->mdevs_lock);
for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
n = sprintf(bufpos, "%04lx\n", id);
bufpos += n;
nchars += n;
}
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
unsigned long napm_bits = matrix->apm_max + 1;
unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
int n;
apid1 = find_first_bit_inv(matrix->apm, napm_bits);
apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
for_each_set_bit_inv(apqi, matrix->aqm,
naqm_bits) {
n = sprintf(bufpos, "%02lx.%04lx\n", apid,
apqi);
bufpos += n;
nchars += n;
}
}
} else if (apid1 < napm_bits) {
for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
n = sprintf(bufpos, "%02lx.\n", apid);
bufpos += n;
nchars += n;
}
} else if (apqi1 < naqm_bits) {
for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
n = sprintf(bufpos, ".%04lx\n", apqi);
bufpos += n;
nchars += n;
}
}
return nchars;
}
static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
ssize_t nchars;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->mdevs_lock);
nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
static ssize_t guest_matrix_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t nchars;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
mutex_lock(&matrix_dev->mdevs_lock);
nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
&dev_attr_assign_domain.attr,
&dev_attr_unassign_domain.attr,
&dev_attr_assign_control_domain.attr,
&dev_attr_unassign_control_domain.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
&dev_attr_guest_matrix.attr,
NULL,
};
static struct attribute_group vfio_ap_mdev_attr_group = {
.attrs = vfio_ap_mdev_attrs
};
static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
&vfio_ap_mdev_attr_group,
NULL
};
/**
* vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
* to manage AP resources for the guest whose state is represented by @kvm
*
* @matrix_mdev: a mediated matrix device
* @kvm: reference to KVM instance
*
* Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
*/
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
struct kvm *kvm)
{
struct ap_matrix_mdev *m;
if (kvm->arch.crypto.crycbd) {
down_write(&kvm->arch.crypto.pqap_hook_rwsem);
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
get_update_locks_for_kvm(kvm);
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
if (m != matrix_mdev && m->kvm == kvm) {
release_update_locks_for_kvm(kvm);
return -EPERM;
}
}
kvm_get_kvm(kvm);
matrix_mdev->kvm = kvm;
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
release_update_locks_for_kvm(kvm);
}
return 0;
}
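/*
 * Disable interrupts for any queue whose pinned notification indicator byte
 * (NIB) falls within the IOVA range being unmapped.
 */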
static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
struct ap_queue_table *qtable = &matrix_mdev->qtable;
struct vfio_ap_queue *q;
int loop_cursor;
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
if (q->saved_iova >= iova && q->saved_iova < iova + length)
vfio_ap_irq_disable(q);
}
}
static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
u64 length)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
mutex_lock(&matrix_dev->mdevs_lock);
unmap_iova(matrix_mdev, iova, length);
mutex_unlock(&matrix_dev->mdevs_lock);
}
/**
* vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
* by @matrix_mdev.
*
* @matrix_mdev: a matrix mediated device
*/
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
struct kvm *kvm = matrix_mdev->kvm;
if (kvm && kvm->arch.crypto.crycbd) {
down_write(&kvm->arch.crypto.pqap_hook_rwsem);
kvm->arch.crypto.pqap_hook = NULL;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
get_update_locks_for_kvm(kvm);
kvm_arch_crypto_clear_masks(kvm);
vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
release_update_locks_for_kvm(kvm);
}
}
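/*
 * Look up the vfio_ap_queue struct for an APQN. A queue is only returned if
 * the corresponding AP queue device exists and is bound to the vfio_ap device
 * driver; the temporary reference taken on the device is dropped before
 * returning.
 */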
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
struct ap_queue *queue;
struct vfio_ap_queue *q = NULL;
queue = ap_get_qdev(apqn);
if (!queue)
return NULL;
if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
q = dev_get_drvdata(&queue->ap_dev.device);
put_device(&queue->ap_dev.device);
return q;
}
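/*
 * Map the TAPQ response code for a queue being reset to a return value:
 * 0 if the reset can be considered complete, -EBUSY if the reset is still in
 * progress, -EAGAIN if the reset has to be re-issued, or -EIO for an
 * unexpected response code.
 */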
static int apq_status_check(int apqn, struct ap_queue_status *status)
{
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_DECONFIGURED:
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
return -EBUSY;
case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
case AP_RESPONSE_ASSOC_FAILED:
/*
* These asynchronous response codes indicate a PQAP(AAPQ)
* instruction to associate a secret with the guest failed. All
* subsequent AP instructions will end with the asynchronous
* response code until the AP queue is reset; so, let's return
* a value indicating a reset needs to be performed again.
*/
return -EAGAIN;
default:
WARN(true,
"failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
status->response_code);
return -EIO;
}
}
#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"
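/*
 * Work function scheduled by vfio_ap_mdev_reset_queue(): poll the queue with
 * TAPQ every AP_RESET_INTERVAL until the reset can be verified, re-issuing
 * ZAPQ when the previous attempt indicated the reset must be repeated.
 */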
static void apq_reset_check(struct work_struct *reset_work)
{
int ret = -EBUSY, elapsed = 0;
struct ap_queue_status status;
struct vfio_ap_queue *q;
q = container_of(reset_work, struct vfio_ap_queue, reset_work);
memcpy(&status, &q->reset_status, sizeof(status));
while (true) {
msleep(AP_RESET_INTERVAL);
elapsed += AP_RESET_INTERVAL;
status = ap_tapq(q->apqn, NULL);
ret = apq_status_check(q->apqn, &status);
if (ret == -EIO)
return;
if (ret == -EBUSY) {
pr_notice_ratelimited(WAIT_MSG, elapsed,
AP_QID_CARD(q->apqn),
AP_QID_QUEUE(q->apqn),
status.response_code,
status.queue_empty,
status.irq_enabled);
} else {
if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
q->reset_status.response_code == AP_RESPONSE_BUSY ||
q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
ret == -EAGAIN) {
status = ap_zapq(q->apqn, 0);
memcpy(&q->reset_status, &status, sizeof(status));
continue;
}
/*
* When an AP adapter is deconfigured, the
* associated queues are reset, so let's set the
* status response code to 0 so the queue may be
* passed through (i.e., not filtered)
*/
if (status.response_code == AP_RESPONSE_DECONFIGURED)
q->reset_status.response_code = 0;
if (q->saved_isc != VFIO_AP_ISC_INVALID)
vfio_ap_free_aqic_resources(q);
break;
}
}
}
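/*
 * Reset a queue with ZAPQ. If the reset could not be verified synchronously
 * (reset or state change still in progress, or queue busy), schedule
 * apq_reset_check() on a work queue to verify its completion.
 */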
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
struct ap_queue_status status;
if (!q)
return;
status = ap_zapq(q->apqn, 0);
memcpy(&q->reset_status, &status, sizeof(status));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
/*
* Let's verify whether the ZAPQ completed successfully on a work queue.
*/
queue_work(system_long_wq, &q->reset_work);
break;
case AP_RESPONSE_DECONFIGURED:
/*
* When an AP adapter is deconfigured, the associated
* queues are reset, so let's set the status response code to 0
* so the queue may be passed through (i.e., not filtered).
*/
q->reset_status.response_code = 0;
vfio_ap_free_aqic_resources(q);
break;
default:
WARN(true,
"PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
}
}
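/*
 * Reset all queues in @qtable and wait for the outstanding reset work to
 * finish. Returns 0 if every queue was successfully reset; otherwise -EIO.
 */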
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
int ret = 0, loop_cursor;
struct vfio_ap_queue *q;
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
vfio_ap_mdev_reset_queue(q);
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
flush_work(&q->reset_work);
if (q->reset_status.response_code)
ret = -EIO;
}
return ret;
}
static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
if (!vdev->kvm)
return -EINVAL;
return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
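/*
 * Relay a request to release the device to userspace by signaling the eventfd
 * registered for VFIO_AP_REQ_IRQ_INDEX; if none is registered, log a notice
 * that the request will block until the device is released by the user.
 */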
static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
{
struct device *dev = vdev->dev;
struct ap_matrix_mdev *matrix_mdev;
matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
if (matrix_mdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(matrix_mdev->req_trigger, 1);
} else if (count == 0) {
dev_notice(dev,
"No device request registered, blocked until released by user\n");
}
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
unsigned long minsz;
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
info.num_regions = 0;
info.num_irqs = VFIO_AP_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
static ssize_t vfio_ap_get_irq_info(unsigned long arg)
{
unsigned long minsz;
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
return -EINVAL;
switch (info.index) {
case VFIO_AP_REQ_IRQ_INDEX:
info.count = 1;
info.flags = VFIO_IRQ_INFO_EVENTFD;
break;
default:
return -EINVAL;
}
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
{
int ret;
size_t data_size;
unsigned long minsz;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(irq_set, (void __user *)arg, minsz))
return -EFAULT;
ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
&data_size);
if (ret)
return ret;
if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
return -EINVAL;
return 0;
}
static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
s32 fd;
void __user *data;
unsigned long minsz;
struct eventfd_ctx *req_trigger;
minsz = offsetofend(struct vfio_irq_set, count);
data = (void __user *)(arg + minsz);
if (get_user(fd, (s32 __user *)data))
return -EFAULT;
if (fd == -1) {
if (matrix_mdev->req_trigger)
eventfd_ctx_put(matrix_mdev->req_trigger);
matrix_mdev->req_trigger = NULL;
} else if (fd >= 0) {
req_trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(req_trigger))
return PTR_ERR(req_trigger);
if (matrix_mdev->req_trigger)
eventfd_ctx_put(matrix_mdev->req_trigger);
matrix_mdev->req_trigger = req_trigger;
} else {
return -EINVAL;
}
return 0;
}
static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
int ret;
struct vfio_irq_set irq_set;
ret = vfio_ap_irq_set_init(&irq_set, arg);
if (ret)
return ret;
switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_EVENTFD:
switch (irq_set.index) {
case VFIO_AP_REQ_IRQ_INDEX:
return vfio_ap_set_request_irq(matrix_mdev, arg);
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd, unsigned long arg)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break;
case VFIO_DEVICE_GET_IRQ_INFO:
ret = vfio_ap_get_irq_info(arg);
break;
case VFIO_DEVICE_SET_IRQS:
ret = vfio_ap_set_irqs(matrix_mdev, arg);
break;
default:
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
struct ap_matrix_mdev *matrix_mdev;
unsigned long apid = AP_QID_CARD(q->apqn);
unsigned long apqi = AP_QID_QUEUE(q->apqn);
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
test_bit_inv(apqi, matrix_mdev->matrix.aqm))
return matrix_mdev;
}
return NULL;
}
static ssize_t status_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
ssize_t nchars = 0;
struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev;
struct ap_device *apdev = to_ap_dev(dev);
mutex_lock(&matrix_dev->mdevs_lock);
q = dev_get_drvdata(&apdev->device);
matrix_mdev = vfio_ap_mdev_for_queue(q);
if (matrix_mdev) {
if (matrix_mdev->kvm)
nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
AP_QUEUE_IN_USE);
else
nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
AP_QUEUE_ASSIGNED);
} else {
nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
AP_QUEUE_UNASSIGNED);
}
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(status);
static struct attribute *vfio_queue_attrs[] = {
&dev_attr_status.attr,
NULL,
};
static const struct attribute_group vfio_queue_attr_group = {
.attrs = vfio_queue_attrs,
};
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.init = vfio_ap_mdev_init_dev,
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
.dma_unmap = vfio_ap_mdev_dma_unmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
.detach_ioas = vfio_iommufd_emulated_detach_ioas,
.request = vfio_ap_mdev_request
};
static struct mdev_driver vfio_ap_matrix_driver = {
.device_api = VFIO_DEVICE_API_AP_STRING,
.max_instances = MAX_ZDEV_ENTRIES_EXT,
.driver = {
.name = "vfio_ap_mdev",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
.dev_groups = vfio_ap_mdev_attr_groups,
},
.probe = vfio_ap_mdev_probe,
.remove = vfio_ap_mdev_remove,
};
int vfio_ap_mdev_register(void)
{
int ret;
ret = mdev_register_driver(&vfio_ap_matrix_driver);
if (ret)
return ret;
matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
&vfio_ap_matrix_driver,
matrix_dev->mdev_types, 1);
if (ret)
goto err_driver;
return 0;
err_driver:
mdev_unregister_driver(&vfio_ap_matrix_driver);
return ret;
}
void vfio_ap_mdev_unregister(void)
{
mdev_unregister_parent(&matrix_dev->parent);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
int ret;
struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev;
ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
if (ret)
return ret;
q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q) {
ret = -ENOMEM;
goto err_remove_group;
}
q->apqn = to_ap_queue(&apdev->device)->qid;
q->saved_isc = VFIO_AP_ISC_INVALID;
memset(&q->reset_status, 0, sizeof(q->reset_status));
INIT_WORK(&q->reset_work, apq_reset_check);
matrix_mdev = get_update_locks_by_apqn(q->apqn);
if (matrix_mdev) {
vfio_ap_mdev_link_queue(matrix_mdev, q);
if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev))
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
dev_set_drvdata(&apdev->device, q);
release_update_locks_for_mdev(matrix_mdev);
return 0;
err_remove_group:
sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
return ret;
}
void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
unsigned long apid, apqi;
struct vfio_ap_queue *q;
struct ap_matrix_mdev *matrix_mdev;
sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
q = dev_get_drvdata(&apdev->device);
get_update_locks_for_queue(q);
matrix_mdev = q->matrix_mdev;
if (matrix_mdev) {
vfio_ap_unlink_queue_fr_mdev(q);
apid = AP_QID_CARD(q->apqn);
apqi = AP_QID_QUEUE(q->apqn);
/*
* If the queue is assigned to the guest's APCB, then remove
 * the adapter's APID from the APCB and hot unplug it from the guest.
*/
if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
}
vfio_ap_mdev_reset_queue(q);
flush_work(&q->reset_work);
dev_set_drvdata(&apdev->device, NULL);
kfree(q);
release_update_locks_for_mdev(matrix_mdev);
}
/**
* vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
* assigned to a mediated device under the control
* of the vfio_ap device driver.
*
* @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
* @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
*
* Return:
* * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
* assigned to a mediated device under the control of the vfio_ap
* device driver.
* * Otherwise, return 0.
*/
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
int ret;
mutex_lock(&matrix_dev->guests_lock);
mutex_lock(&matrix_dev->mdevs_lock);
ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);
return ret;
}
/**
* vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
* domains that have been removed from the host's
* AP configuration from a guest.
*
* @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
* @aprem: the adapters that have been removed from the host's AP configuration
* @aqrem: the domains that have been removed from the host's AP configuration
* @cdrem: the control domains that have been removed from the host's AP
* configuration.
*/
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
unsigned long *aprem,
unsigned long *aqrem,
unsigned long *cdrem)
{
int do_hotplug = 0;
if (!bitmap_empty(aprem, AP_DEVICES)) {
do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
matrix_mdev->shadow_apcb.apm,
aprem, AP_DEVICES);
}
if (!bitmap_empty(aqrem, AP_DOMAINS)) {
do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
matrix_mdev->shadow_apcb.aqm,
aqrem, AP_DOMAINS);
}
if (!bitmap_empty(cdrem, AP_DOMAINS))
do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
matrix_mdev->shadow_apcb.adm,
cdrem, AP_DOMAINS);
if (do_hotplug)
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
/**
* vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
* domains and control domains that have been removed
* from the host AP configuration and unplugs them
* from those guests.
*
* @ap_remove: bitmap specifying which adapters have been removed from the host
* config.
* @aq_remove: bitmap specifying which domains have been removed from the host
* config.
* @cd_remove: bitmap specifying which control domains have been removed from
* the host config.
*/
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
unsigned long *aq_remove,
unsigned long *cd_remove)
{
struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(aprem, AP_DEVICES);
DECLARE_BITMAP(aqrem, AP_DOMAINS);
DECLARE_BITMAP(cdrem, AP_DOMAINS);
int do_remove = 0;
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
mutex_lock(&matrix_mdev->kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
do_remove |= bitmap_and(aprem, ap_remove,
matrix_mdev->matrix.apm,
AP_DEVICES);
do_remove |= bitmap_and(aqrem, aq_remove,
matrix_mdev->matrix.aqm,
AP_DOMAINS);
do_remove |= bitmap_andnot(cdrem, cd_remove,
matrix_mdev->matrix.adm,
AP_DOMAINS);
if (do_remove)
vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
cdrem);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_mdev->kvm->lock);
}
}
/**
* vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
* control domains from the host AP configuration
* by unplugging them from the guests that are
* using them.
* @cur_config_info: the current host AP configuration information
* @prev_config_info: the previous host AP configuration information
*/
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
struct ap_config_info *prev_config_info)
{
int do_remove;
DECLARE_BITMAP(aprem, AP_DEVICES);
DECLARE_BITMAP(aqrem, AP_DOMAINS);
DECLARE_BITMAP(cdrem, AP_DOMAINS);
do_remove = bitmap_andnot(aprem,
(unsigned long *)prev_config_info->apm,
(unsigned long *)cur_config_info->apm,
AP_DEVICES);
do_remove |= bitmap_andnot(aqrem,
(unsigned long *)prev_config_info->aqm,
(unsigned long *)cur_config_info->aqm,
AP_DOMAINS);
do_remove |= bitmap_andnot(cdrem,
(unsigned long *)prev_config_info->adm,
(unsigned long *)cur_config_info->adm,
AP_DOMAINS);
if (do_remove)
vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}
/**
* vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
* are older than AP type 10 (CEX4).
* @apm: a bitmap of the APIDs to examine
* @aqm: a bitmap of the APQIs of the queues to query for the AP type.
*/
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
bool apid_cleared;
struct ap_queue_status status;
unsigned long apid, apqi;
struct ap_tapq_gr2 info;
for_each_set_bit_inv(apid, apm, AP_DEVICES) {
apid_cleared = false;
for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
switch (status.response_code) {
/*
* According to the architecture in each case
* below, the queue's info should be filled.
*/
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_BUSY:
/*
* The vfio_ap device driver only
* supports CEX4 and newer adapters, so
* remove the APID if the adapter is
* older than a CEX4.
*/
if (info.at < AP_DEVICE_TYPE_CEX4) {
clear_bit_inv(apid, apm);
apid_cleared = true;
}
break;
default:
/*
* If we don't know the adapter type,
* clear its APID since it can't be
* determined whether the vfio_ap
* device driver supports it.
*/
clear_bit_inv(apid, apm);
apid_cleared = true;
break;
}
/*
* If we've already cleared the APID from the apm, there
 * is no need to continue examining the remaining AP
* queues to determine the type of the adapter.
*/
if (apid_cleared)
break;
}
}
}
/**
* vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
* control domains that have been added to the host's
* AP configuration for each matrix mdev to which they
* are assigned.
*
* @apm_add: a bitmap specifying the adapters that have been added to the AP
* configuration.
* @aqm_add: a bitmap specifying the domains that have been added to the AP
* configuration.
* @adm_add: a bitmap specifying the control domains that have been added to the
* AP configuration.
*/
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
unsigned long *adm_add)
{
struct ap_matrix_mdev *matrix_mdev;
if (list_empty(&matrix_dev->mdev_list))
return;
vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
bitmap_and(matrix_mdev->apm_add,
matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
bitmap_and(matrix_mdev->aqm_add,
matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
bitmap_and(matrix_mdev->adm_add,
matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
}
}
/**
* vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
* control domains to the host AP configuration
* by updating the bitmaps that specify what adapters,
* domains and control domains have been added so they
* can be hot plugged into the guest when the AP bus
* scan completes (see vfio_ap_on_scan_complete
* function).
* @cur_config_info: the current AP configuration information
* @prev_config_info: the previous AP configuration information
*/
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
struct ap_config_info *prev_config_info)
{
bool do_add;
DECLARE_BITMAP(apm_add, AP_DEVICES);
DECLARE_BITMAP(aqm_add, AP_DOMAINS);
DECLARE_BITMAP(adm_add, AP_DOMAINS);
do_add = bitmap_andnot(apm_add,
(unsigned long *)cur_config_info->apm,
(unsigned long *)prev_config_info->apm,
AP_DEVICES);
do_add |= bitmap_andnot(aqm_add,
(unsigned long *)cur_config_info->aqm,
(unsigned long *)prev_config_info->aqm,
AP_DOMAINS);
do_add |= bitmap_andnot(adm_add,
(unsigned long *)cur_config_info->adm,
(unsigned long *)prev_config_info->adm,
AP_DOMAINS);
if (do_add)
vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}
/**
* vfio_ap_on_cfg_changed - handles notification of changes to the host AP
* configuration.
*
* @cur_cfg_info: the current host AP configuration
* @prev_cfg_info: the previous host AP configuration
*/
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
struct ap_config_info *prev_cfg_info)
{
if (!cur_cfg_info || !prev_cfg_info)
return;
mutex_lock(&matrix_dev->guests_lock);
vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
mutex_unlock(&matrix_dev->guests_lock);
}
static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
{
bool do_hotplug = false;
int filter_domains = 0;
int filter_adapters = 0;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
mutex_lock(&matrix_mdev->kvm->lock);
mutex_lock(&matrix_dev->mdevs_lock);
filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
matrix_mdev->apm_add, AP_DEVICES);
filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
matrix_mdev->aqm_add, AP_DOMAINS);
if (filter_adapters && filter_domains)
do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
else if (filter_adapters)
do_hotplug |=
vfio_ap_mdev_filter_matrix(apm,
matrix_mdev->shadow_apcb.aqm,
matrix_mdev);
else
do_hotplug |=
vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
aqm, matrix_mdev);
if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
AP_DOMAINS))
do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
if (do_hotplug)
vfio_ap_mdev_update_guest_apcb(matrix_mdev);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_mdev->kvm->lock);
}
void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info)
{
struct ap_matrix_mdev *matrix_mdev;
mutex_lock(&matrix_dev->guests_lock);
list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
continue;
vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
}
mutex_unlock(&matrix_dev->guests_lock);
}
| linux-master | drivers/s390/crypto/vfio_ap_ops.c |
linux-master | drivers/s390/crypto/zcrypt_cex2a.c |
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* MSGTYPE restruct: Holger Dengler <[email protected]>
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_msgtype50.h"
/* >= CEX3A: 4096 bits */
#define CEX3A_MAX_MOD_SIZE 512
/* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */
#define CEX3A_MAX_RESPONSE_SIZE 0x210
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
/*
 * The type 50 message family is associated with CEXxA cards.
*
* The four members of the family are described below.
*
* Note that all unsigned char arrays are right-justified and left-padded
* with zeroes.
*
* Note that all reserved fields must be zeroes.
*/
struct type50_hdr {
unsigned char reserved1;
unsigned char msg_type_code; /* 0x50 */
unsigned short msg_len;
unsigned char reserved2;
unsigned char ignored;
unsigned short reserved3;
} __packed;
#define TYPE50_TYPE_CODE 0x50
#define TYPE50_MEB1_FMT 0x0001
#define TYPE50_MEB2_FMT 0x0002
#define TYPE50_MEB3_FMT 0x0003
#define TYPE50_CRB1_FMT 0x0011
#define TYPE50_CRB2_FMT 0x0012
#define TYPE50_CRB3_FMT 0x0013
/* Mod-Exp, with a small modulus */
struct type50_meb1_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0001 */
unsigned char reserved[6];
unsigned char exponent[128];
unsigned char modulus[128];
unsigned char message[128];
} __packed;
/* Mod-Exp, with a large modulus */
struct type50_meb2_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0002 */
unsigned char reserved[6];
unsigned char exponent[256];
unsigned char modulus[256];
unsigned char message[256];
} __packed;
/* Mod-Exp, with a larger modulus */
struct type50_meb3_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0003 */
unsigned char reserved[6];
unsigned char exponent[512];
unsigned char modulus[512];
unsigned char message[512];
} __packed;
/* CRT, with a small modulus */
struct type50_crb1_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0011 */
unsigned char reserved[6];
unsigned char p[64];
unsigned char q[64];
unsigned char dp[64];
unsigned char dq[64];
unsigned char u[64];
unsigned char message[128];
} __packed;
/* CRT, with a large modulus */
struct type50_crb2_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0012 */
unsigned char reserved[6];
unsigned char p[128];
unsigned char q[128];
unsigned char dp[128];
unsigned char dq[128];
unsigned char u[128];
unsigned char message[256];
} __packed;
/* CRT, with a larger modulus */
struct type50_crb3_msg {
struct type50_hdr header;
unsigned short keyblock_type; /* 0x0013 */
unsigned char reserved[6];
unsigned char p[256];
unsigned char q[256];
unsigned char dp[256];
unsigned char dq[256];
unsigned char u[256];
unsigned char message[512];
} __packed;
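/*
 * Editor's sketch, not part of the driver: the conversion helpers further
 * down compute "field + sizeof(field) - len" so that each operand ends up
 * right-justified in its fixed-size field, with the leading bytes left as
 * the zeroes written by the initial memset. The helper name below is
 * invented purely to spell that convention out.
 */
static inline void __maybe_unused
place_right_justified(unsigned char *field, size_t field_size,
		      const unsigned char *val, size_t len)
{
	size_t i;

	/* value occupies the last 'len' bytes, leading pad bytes stay zero */
	for (i = 0; i < len; i++)
		field[field_size - len + i] = val[i];
}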
/*
 * The type 80 response family is associated with CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
* with zeroes.
*
* Note that all reserved fields must be zeroes.
*/
#define TYPE80_RSP_CODE 0x80
struct type80_hdr {
unsigned char reserved1;
unsigned char type; /* 0x80 */
unsigned short len;
unsigned char code; /* 0x00 */
unsigned char reserved2[3];
unsigned char reserved3[8];
} __packed;
int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
{
if (!mex->inputdatalength)
return -EINVAL;
if (mex->inputdatalength <= 128) /* 1024 bit */
*fcode = MEX_1K;
else if (mex->inputdatalength <= 256) /* 2048 bit */
*fcode = MEX_2K;
else /* 4096 bit */
*fcode = MEX_4K;
return 0;
}
int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
{
if (!crt->inputdatalength)
return -EINVAL;
if (crt->inputdatalength <= 128) /* 1024 bit */
*fcode = CRT_1K;
else if (crt->inputdatalength <= 256) /* 2048 bit */
*fcode = CRT_2K;
else /* 4096 bit */
*fcode = CRT_4K;
return 0;
}
/*
* Convert a ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
* @ap_msg: crypto request pointer
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
unsigned char *mod, *exp, *inp;
int mod_len;
mod_len = mex->inputdatalength;
if (mod_len <= 128) {
struct type50_meb1_msg *meb1 = ap_msg->msg;
memset(meb1, 0, sizeof(*meb1));
ap_msg->len = sizeof(*meb1);
meb1->header.msg_type_code = TYPE50_TYPE_CODE;
meb1->header.msg_len = sizeof(*meb1);
meb1->keyblock_type = TYPE50_MEB1_FMT;
mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
inp = meb1->message + sizeof(meb1->message) - mod_len;
} else if (mod_len <= 256) {
struct type50_meb2_msg *meb2 = ap_msg->msg;
memset(meb2, 0, sizeof(*meb2));
ap_msg->len = sizeof(*meb2);
meb2->header.msg_type_code = TYPE50_TYPE_CODE;
meb2->header.msg_len = sizeof(*meb2);
meb2->keyblock_type = TYPE50_MEB2_FMT;
mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
} else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->msg;
memset(meb3, 0, sizeof(*meb3));
ap_msg->len = sizeof(*meb3);
meb3->header.msg_type_code = TYPE50_TYPE_CODE;
meb3->header.msg_len = sizeof(*meb3);
meb3->keyblock_type = TYPE50_MEB3_FMT;
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
} else {
return -EINVAL;
}
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
copy_from_user(inp, mex->inputdata, mod_len))
return -EFAULT;
return 0;
}
/*
* Convert a ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
* @ap_msg: crypto request pointer
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
int mod_len, short_len;
unsigned char *p, *q, *dp, *dq, *u, *inp;
mod_len = crt->inputdatalength;
short_len = (mod_len + 1) / 2;
/*
* CEX2A and CEX3A w/o FW update can handle requests up to
* 256 byte modulus (2k keys).
* CEX3A with FW update and newer CEXxA cards are able to handle
* 512 byte modulus (4k keys).
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->msg;
memset(crb1, 0, sizeof(*crb1));
ap_msg->len = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
p = crb1->p + sizeof(crb1->p) - short_len;
q = crb1->q + sizeof(crb1->q) - short_len;
dp = crb1->dp + sizeof(crb1->dp) - short_len;
dq = crb1->dq + sizeof(crb1->dq) - short_len;
u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->msg;
memset(crb2, 0, sizeof(*crb2));
ap_msg->len = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
p = crb2->p + sizeof(crb2->p) - short_len;
q = crb2->q + sizeof(crb2->q) - short_len;
dp = crb2->dp + sizeof(crb2->dp) - short_len;
dq = crb2->dq + sizeof(crb2->dq) - short_len;
u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->msg;
memset(crb3, 0, sizeof(*crb3));
ap_msg->len = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
p = crb3->p + sizeof(crb3->p) - short_len;
q = crb3->q + sizeof(crb3->q) - short_len;
dp = crb3->dp + sizeof(crb3->dp) - short_len;
dq = crb3->dq + sizeof(crb3->dq) - short_len;
u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
} else {
return -EINVAL;
}
/*
 * correct the offset of p, bp and mult_inv according to zcrypt.h
* block size right aligned (skip the first byte)
*/
if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(q, crt->nq_prime, short_len) ||
copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(dq, crt->bq_key, short_len) ||
copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
return 0;
}
/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EFAULT.
*/
static int convert_type80(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type80_hdr *t80h = reply->msg;
unsigned char *data;
if (t80h->len < sizeof(*t80h) + outputdatalength) {
		/* The result is too short, the CEXxA card may not do that. */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), t80h->code);
ZCRYPT_DBF_ERR("%s dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), t80h->code);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
data = reply->msg + t80h->len - outputdatalength;
if (copy_to_user(outputdata, data, outputdatalength))
return -EFAULT;
return 0;
}
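/*
 * Editor's note, illustrative sketch only: a type 80 reply carries the
 * result right-aligned at the end of the message, which is why
 * convert_type80() above reads from
 * "reply->msg + t80h->len - outputdatalength". The helper name below is
 * made up just to spell out that arithmetic.
 */
static inline size_t __maybe_unused
type80_result_offset(const struct type80_hdr *t80h,
		     unsigned int outputdatalength)
{
	/* e.g. len == 0x210 and outputdatalength == 512 -> offset 0x10 */
	return (size_t)t80h->len - outputdatalength;
}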
static int convert_response(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
unsigned char rtype = ((unsigned char *)reply->msg)[1];
switch (rtype) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return convert_error(zq, reply);
case TYPE80_RSP_CODE:
return convert_type80(zq, reply,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)rtype);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), (int)rtype);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
* @aq: pointer to the AP device
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
static void zcrypt_msgtype50_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct type80_hdr *t80h;
int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
memcpy(msg->msg, reply->msg, len);
msg->len = len;
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
msg->len = sizeof(error_reply);
}
out:
complete((struct completion *)msg->private);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex,
struct ap_message *ap_msg)
{
struct completion work;
int rc;
ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
if (rc)
goto out;
init_completion(&work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
out:
ap_msg->private = NULL;
if (rc)
ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer
*/
static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt,
struct ap_message *ap_msg)
{
struct completion work;
int rc;
ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
if (rc)
goto out;
init_completion(&work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response(zq, ap_msg,
crt->outputdata,
crt->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
out:
ap_msg->private = NULL;
if (rc)
ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
.rsa_modexpo = zcrypt_msgtype50_modexpo,
.rsa_modexpo_crt = zcrypt_msgtype50_modexpo_crt,
.owner = THIS_MODULE,
.name = MSGTYPE50_NAME,
.variant = MSGTYPE50_VARIANT_DEFAULT,
};
void __init zcrypt_msgtype50_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
}
void __exit zcrypt_msgtype50_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
}
| linux-master | drivers/s390/crypto/zcrypt_msgtype50.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2019
* Author(s): Harald Freudenberger <[email protected]>
*
* Collection of EP11 misc functions used by zcrypt and pkey
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <asm/zcrypt.h>
#include <asm/pkey.h>
#include <crypto/aes.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_debug.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_ep11misc.h"
#include "zcrypt_ccamisc.h"
#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
#define EP11_PINBLOB_V1_BYTES 56
/* default iv used here */
static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
/* ep11 card info cache */
struct card_list_entry {
struct list_head list;
u16 cardnr;
struct ep11_card_info info;
};
static LIST_HEAD(card_list);
static DEFINE_SPINLOCK(card_list_lock);
static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
{
int rc = -ENOENT;
struct card_list_entry *ptr;
spin_lock_bh(&card_list_lock);
list_for_each_entry(ptr, &card_list, list) {
if (ptr->cardnr == cardnr) {
memcpy(ci, &ptr->info, sizeof(*ci));
rc = 0;
break;
}
}
spin_unlock_bh(&card_list_lock);
return rc;
}
static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
{
int found = 0;
struct card_list_entry *ptr;
spin_lock_bh(&card_list_lock);
list_for_each_entry(ptr, &card_list, list) {
if (ptr->cardnr == cardnr) {
memcpy(&ptr->info, ci, sizeof(*ci));
found = 1;
break;
}
}
if (!found) {
ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
if (!ptr) {
spin_unlock_bh(&card_list_lock);
return;
}
ptr->cardnr = cardnr;
memcpy(&ptr->info, ci, sizeof(*ci));
list_add(&ptr->list, &card_list);
}
spin_unlock_bh(&card_list_lock);
}
static void card_cache_scrub(u16 cardnr)
{
struct card_list_entry *ptr;
spin_lock_bh(&card_list_lock);
list_for_each_entry(ptr, &card_list, list) {
if (ptr->cardnr == cardnr) {
list_del(&ptr->list);
kfree(ptr);
break;
}
}
spin_unlock_bh(&card_list_lock);
}
static void __exit card_cache_free(void)
{
struct card_list_entry *ptr, *pnext;
spin_lock_bh(&card_list_lock);
list_for_each_entry_safe(ptr, pnext, &card_list, list) {
list_del(&ptr->list);
kfree(ptr);
}
spin_unlock_bh(&card_list_lock);
}
static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
u8 **kbpl, size_t *kbplsize)
{
struct ep11kblob_header *hdr = NULL;
size_t hdrsize, plsize = 0;
int rc = -EINVAL;
u8 *pl = NULL;
if (kblen < sizeof(struct ep11kblob_header))
goto out;
hdr = (struct ep11kblob_header *)kb;
switch (kbver) {
case TOKVER_EP11_AES:
/* header overlays the payload */
hdrsize = 0;
break;
case TOKVER_EP11_ECC_WITH_HEADER:
case TOKVER_EP11_AES_WITH_HEADER:
/* payload starts after the header */
hdrsize = sizeof(struct ep11kblob_header);
break;
default:
goto out;
}
plsize = kblen - hdrsize;
pl = (u8 *)kb + hdrsize;
if (kbhdr)
*kbhdr = hdr;
if (kbhdrsize)
*kbhdrsize = hdrsize;
if (kbpl)
*kbpl = pl;
if (kbplsize)
*kbplsize = plsize;
rc = 0;
out:
return rc;
}
static int ep11_kb_decode(const u8 *kb, size_t kblen,
struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
struct ep11keyblob **kbpl, size_t *kbplsize)
{
struct ep11kblob_header *tmph, *hdr = NULL;
size_t hdrsize = 0, plsize = 0;
struct ep11keyblob *pl = NULL;
int rc = -EINVAL;
u8 *tmpp;
if (kblen < sizeof(struct ep11kblob_header))
goto out;
tmph = (struct ep11kblob_header *)kb;
if (tmph->type != TOKTYPE_NON_CCA &&
tmph->len > kblen)
goto out;
if (ep11_kb_split(kb, kblen, tmph->version,
&hdr, &hdrsize, &tmpp, &plsize))
goto out;
if (plsize < sizeof(struct ep11keyblob))
goto out;
if (!is_ep11_keyblob(tmpp))
goto out;
pl = (struct ep11keyblob *)tmpp;
plsize = hdr->len - hdrsize;
if (kbhdr)
*kbhdr = hdr;
if (kbhdrsize)
*kbhdrsize = hdrsize;
if (kbpl)
*kbpl = pl;
if (kbplsize)
*kbplsize = plsize;
rc = 0;
out:
return rc;
}
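/*
 * Editor's sketch, not part of the driver: ep11_kb_split() above handles
 * two blob layouts - for TOKVER_EP11_AES the ep11kblob_header overlays
 * the start of the payload (header size 0), while for the *_WITH_HEADER
 * token versions the payload starts after a real header. The helper name
 * below is invented only to make that distinction explicit.
 */
static inline size_t __maybe_unused ep11_kb_payload_offset(u32 kbver)
{
	return kbver == TOKVER_EP11_AES ?
		0 : sizeof(struct ep11kblob_header);
}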
/*
* For valid ep11 keyblobs, returns a reference to the wrappingkey verification
* pattern. Otherwise NULL.
*/
const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
{
struct ep11keyblob *kb;
if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL))
return NULL;
return kb->wkvp;
}
EXPORT_SYMBOL(ep11_kb_wkvp);
/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
*/
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
DBF("%s key check failed, keylen %zu < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (hdr->type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
__func__, (int)hdr->type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (hdr->hver != 0x00) {
if (dbg)
DBF("%s key check failed, header version 0x%02x != 0x00\n",
__func__, (int)hdr->hver);
return -EINVAL;
}
if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
__func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER);
return -EINVAL;
}
if (hdr->len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
__func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
__func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
if (dbg)
DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
__func__);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
/*
* Simple check if the key blob is a valid EP11 ECC key blob with header.
*/
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
DBF("%s key check failed, keylen %zu < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (hdr->type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
__func__, (int)hdr->type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (hdr->hver != 0x00) {
if (dbg)
DBF("%s key check failed, header version 0x%02x != 0x00\n",
__func__, (int)hdr->hver);
return -EINVAL;
}
if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
__func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
return -EINVAL;
}
if (hdr->len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
__func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
__func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
if (dbg)
DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
__func__);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
/*
* Simple check if the key blob is a valid EP11 AES key blob with
* the header in the session field (old style EP11 AES key).
*/
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
struct ep11keyblob *kb = (struct ep11keyblob *)key;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*kb)) {
DBF("%s key check failed, keylen %zu < %zu\n",
__func__, keylen, sizeof(*kb));
return -EINVAL;
}
if (kb->head.type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
__func__, (int)kb->head.type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (kb->head.version != TOKVER_EP11_AES) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
__func__, (int)kb->head.version, TOKVER_EP11_AES);
return -EINVAL;
}
if (kb->head.len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
__func__, (int)kb->head.len, keylen);
return -EINVAL;
}
if (kb->head.len < sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
__func__, (int)kb->head.len, sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
__func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
if (dbg)
DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
__func__);
return -EINVAL;
}
#undef DBF
return 0;
}
EXPORT_SYMBOL(ep11_check_aes_key);
/*
* Allocate and prepare ep11 cprb plus additional payload.
*/
static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
{
size_t len = sizeof(struct ep11_cprb) + payload_len;
struct ep11_cprb *cprb;
cprb = kzalloc(len, GFP_KERNEL);
if (!cprb)
return NULL;
cprb->cprb_len = sizeof(struct ep11_cprb);
cprb->cprb_ver_id = 0x04;
memcpy(cprb->func_id, "T4", 2);
cprb->ret_code = 0xFFFFFFFF;
cprb->payload_len = payload_len;
return cprb;
}
/*
* Some helper functions related to ASN1 encoding.
* Limited to length info <= 2 byte.
*/
#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
{
ptr[0] = tag;
if (valuelen > 255) {
ptr[1] = 0x82;
*((u16 *)(ptr + 2)) = valuelen;
memcpy(ptr + 4, pvalue, valuelen);
return 4 + valuelen;
}
if (valuelen > 127) {
ptr[1] = 0x81;
ptr[2] = (u8)valuelen;
memcpy(ptr + 3, pvalue, valuelen);
return 3 + valuelen;
}
ptr[1] = (u8)valuelen;
memcpy(ptr + 2, pvalue, valuelen);
return 2 + valuelen;
}
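/*
 * Editor's sketch, not driver code: worked examples for the ad-hoc TLV
 * encoding above. asn1tag_write() emits the tag, a 1/2/3 byte length
 * field and the value, and ASN1TAGLEN() predicts the total size:
 *   value len  10 -> 04 0a <10 bytes>        = 12  = ASN1TAGLEN(10)
 *   value len 200 -> 04 81 c8 <200 bytes>    = 203 = ASN1TAGLEN(200)
 *   value len 300 -> 04 82 01 2c <300 bytes> = 304 = ASN1TAGLEN(300)
 * The function below uses an invented name and exists only to show the
 * call pattern; buf must hold at least 12 bytes.
 */
static int __maybe_unused asn1tag_write_example(u8 *buf)
{
	static const u8 val[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

	/* writes 04 0a 01 02 ... 0a into buf and returns 12 */
	return asn1tag_write(buf, 0x04, val, sizeof(val));
}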
/* EP11 payload > 127 bytes starts with this struct */
struct pl_head {
u8 tag;
u8 lenfmt;
u16 len;
u8 func_tag;
u8 func_len;
u32 func;
u8 dom_tag;
u8 dom_len;
u32 dom;
} __packed;
/* prep ep11 payload head helper function */
static inline void prep_head(struct pl_head *h,
size_t pl_size, int api, int func)
{
h->tag = 0x30;
h->lenfmt = 0x82;
h->len = pl_size - 4;
h->func_tag = 0x04;
h->func_len = sizeof(u32);
h->func = (api << 16) + func;
h->dom_tag = 0x04;
h->dom_len = sizeof(u32);
}
/* prep urb helper function */
static inline void prep_urb(struct ep11_urb *u,
struct ep11_target_dev *t, int nt,
struct ep11_cprb *req, size_t req_len,
struct ep11_cprb *rep, size_t rep_len)
{
u->targets = (u8 __user *)t;
u->targets_num = nt;
u->req = (u8 __user *)req;
u->req_len = req_len;
u->resp = (u8 __user *)rep;
u->resp_len = rep_len;
}
/* Check ep11 reply payload, return 0 or suggested errno value. */
static int check_reply_pl(const u8 *pl, const char *func)
{
int len;
u32 ret;
/* start tag */
if (*pl++ != 0x30) {
DEBUG_ERR("%s reply start tag mismatch\n", func);
return -EIO;
}
/* payload length format */
if (*pl < 127) {
len = *pl;
pl++;
} else if (*pl == 0x81) {
pl++;
len = *pl;
pl++;
} else if (*pl == 0x82) {
pl++;
len = *((u16 *)pl);
pl += 2;
} else {
DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
func, *pl);
return -EIO;
}
/* len should cover at least 3 fields with 32 bit value each */
if (len < 3 * 6) {
DEBUG_ERR("%s reply length %d too small\n", func, len);
return -EIO;
}
/* function tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
DEBUG_ERR("%s function tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* dom tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
DEBUG_ERR("%s dom tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* return value tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
DEBUG_ERR("%s return value tag or length mismatch\n", func);
return -EIO;
}
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
return -EIO;
}
return 0;
}
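/*
 * Editor's sketch, not part of the driver: the minimal payload shape that
 * check_reply_pl() above accepts - a 0x30 sequence tag, a short-form
 * length and three 0x04-tagged 32 bit values (function, domain, return
 * code), the last of which must be zero. Array and function name are
 * invented for illustration only.
 */
static int __maybe_unused check_reply_pl_example(void)
{
	static const u8 ok_pl[] = {
		0x30, 0x12,				/* sequence, 18 bytes */
		0x04, 0x04, 0x00, 0x00, 0x00, 0x00,	/* function */
		0x04, 0x04, 0x00, 0x00, 0x00, 0x00,	/* domain */
		0x04, 0x04, 0x00, 0x00, 0x00, 0x00,	/* return code 0 */
	};

	return check_reply_pl(ok_pl, __func__);	/* -> 0 */
}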
/*
* Helper function which does an ep11 query with given query type.
*/
static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
size_t buflen, u8 *buf)
{
struct ep11_info_req_pl {
struct pl_head head;
u8 query_type_tag;
u8 query_type_len;
u32 query_type;
u8 query_subtype_tag;
u8 query_subtype_len;
u32 query_subtype;
} __packed * req_pl;
struct ep11_info_rep_pl {
struct pl_head head;
u8 rc_tag;
u8 rc_len;
u32 rc;
u8 data_tag;
u8 data_lenfmt;
u16 data_len;
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
int api = EP11_API_V1, rc = -ENOMEM;
/* request cprb and payload */
req = alloc_cprb(sizeof(struct ep11_info_req_pl));
if (!req)
goto out;
req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
req_pl->query_type_tag = 0x04;
req_pl->query_type_len = sizeof(u32);
req_pl->query_type = query_type;
req_pl->query_subtype_tag = 0x04;
req_pl->query_subtype_len = sizeof(u32);
/* reply cprb and payload */
rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
if (!rep)
goto out;
rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = cardnr;
target.dom_id = domain;
prep_urb(urb, &target, 1,
req, sizeof(*req) + sizeof(*req_pl),
rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
goto out;
}
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
DEBUG_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > buflen) {
DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
__func__);
rc = -ENOSPC;
goto out;
}
memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
out:
kfree(req);
kfree(rep);
kfree(urb);
return rc;
}
/*
* Provide information about an EP11 card.
*/
int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
{
int rc;
struct ep11_module_query_info {
u32 API_ord_nr;
u32 firmware_id;
u8 FW_major_vers;
u8 FW_minor_vers;
u8 CSP_major_vers;
u8 CSP_minor_vers;
u8 fwid[32];
u8 xcp_config_hash[32];
u8 CSP_config_hash[32];
u8 serial[16];
u8 module_date_time[16];
u64 op_mode;
u32 PKCS11_flags;
u32 ext_flags;
u32 domains;
u32 sym_state_bytes;
u32 digest_state_bytes;
u32 pin_blob_bytes;
u32 SPKI_bytes;
u32 priv_key_blob_bytes;
u32 sym_blob_bytes;
u32 max_payload_bytes;
u32 CP_profile_bytes;
u32 max_CP_index;
} __packed * pmqi = NULL;
rc = card_cache_fetch(card, info);
if (rc || verify) {
pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
if (!pmqi)
return -ENOMEM;
rc = ep11_query_info(card, AUTOSEL_DOM,
0x01 /* module info query */,
sizeof(*pmqi), (u8 *)pmqi);
if (rc) {
if (rc == -ENODEV)
card_cache_scrub(card);
goto out;
}
memset(info, 0, sizeof(*info));
info->API_ord_nr = pmqi->API_ord_nr;
info->FW_version =
(pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
memcpy(info->serial, pmqi->serial, sizeof(info->serial));
info->op_mode = pmqi->op_mode;
card_cache_update(card, info);
}
out:
kfree(pmqi);
return rc;
}
EXPORT_SYMBOL(ep11_get_card_info);
/*
* Provide information about a domain within an EP11 card.
*/
int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
{
int rc;
struct ep11_domain_query_info {
u32 dom_index;
u8 cur_WK_VP[32];
u8 new_WK_VP[32];
u32 dom_flags;
u64 op_mode;
} __packed * p_dom_info;
p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
if (!p_dom_info)
return -ENOMEM;
rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
sizeof(*p_dom_info), (u8 *)p_dom_info);
if (rc)
goto out;
memset(info, 0, sizeof(*info));
info->cur_wk_state = '0';
info->new_wk_state = '0';
if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
info->cur_wk_state = '1';
memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
}
if (p_dom_info->dom_flags & 0x04 || /* new wk present */
p_dom_info->dom_flags & 0x08 /* new wk committed */) {
info->new_wk_state =
p_dom_info->dom_flags & 0x08 ? '2' : '1';
memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
}
}
info->op_mode = p_dom_info->op_mode;
out:
kfree(p_dom_info);
return rc;
}
EXPORT_SYMBOL(ep11_get_domain_info);
/*
* Default EP11 AES key generate attributes, used when no keygenflags given:
* XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
*/
#define KEY_ATTR_DEFAULTS 0x00200c00
static int _ep11_genaeskey(u16 card, u16 domain,
u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize)
{
struct keygen_req_pl {
struct pl_head head;
u8 var_tag;
u8 var_len;
u32 var;
u8 keybytes_tag;
u8 keybytes_len;
u32 keybytes;
u8 mech_tag;
u8 mech_len;
u32 mech;
u8 attr_tag;
u8 attr_len;
u32 attr_header;
u32 attr_bool_mask;
u32 attr_bool_bits;
u32 attr_val_len_type;
u32 attr_val_len_value;
/* followed by empty pin tag or empty pinblob tag */
} __packed * req_pl;
struct keygen_rep_pl {
struct pl_head head;
u8 rc_tag;
u8 rc_len;
u32 rc;
u8 data_tag;
u8 data_lenfmt;
u16 data_len;
u8 data[512];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
int api, rc = -ENOMEM;
u8 *p;
switch (keybitsize) {
case 128:
case 192:
case 256:
break;
default:
DEBUG_ERR(
"%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
rc = -EINVAL;
goto out;
}
/* request cprb and payload */
api = (!keygenflags || keygenflags & 0x00200000) ?
EP11_API_V4 : EP11_API_V1;
if (ap_is_se_guest()) {
/*
* genkey within SE environment requires API ordinal 6
* with empty pinblob
*/
api = EP11_API_V6;
pinblob_size = EP11_PINBLOB_V1_BYTES;
}
req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size);
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, req_pl_size, api, 21); /* GenerateKey */
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
req_pl->keybytes_tag = 0x04;
req_pl->keybytes_len = sizeof(u32);
req_pl->keybytes = keybitsize / 8;
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32);
req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
req_pl->attr_tag = 0x04;
req_pl->attr_len = 5 * sizeof(u32);
req_pl->attr_header = 0x10010000;
req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
req_pl->attr_val_len_value = keybitsize / 8;
p = ((u8 *)req_pl) + sizeof(*req_pl);
/* pin tag */
*p++ = 0x04;
*p++ = pinblob_size;
/* reply cprb and payload */
rep = alloc_cprb(sizeof(struct keygen_rep_pl));
if (!rep)
goto out;
rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
target.dom_id = domain;
prep_urb(urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
goto out;
}
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
DEBUG_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
__func__);
rc = -ENOSPC;
goto out;
}
/* copy key blob */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
out:
kfree(req);
kfree(rep);
kfree(urb);
return rc;
}
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize, u32 keybufver)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
u8 *pl;
int rc;
switch (keybufver) {
case TOKVER_EP11_AES:
case TOKVER_EP11_AES_WITH_HEADER:
break;
default:
return -EINVAL;
}
rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
&hdr, &hdr_size, &pl, &pl_size);
if (rc)
return rc;
rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
pl, &pl_size);
if (rc)
return rc;
*keybufsize = hdr_size + pl_size;
/* update header information */
hdr->type = TOKTYPE_NON_CCA;
hdr->len = *keybufsize;
hdr->version = keybufver;
hdr->bitlen = keybitsize;
return 0;
}
EXPORT_SYMBOL(ep11_genaeskey);
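/*
 * Editor's sketch, hypothetical caller, not part of the driver: keybuf and
 * keybufsize form an in/out pair - the caller passes the buffer size and
 * gets back the actual blob length, including the ep11kblob_header for the
 * *_WITH_HEADER token version. MAXEP11AESKEYBLOBSIZE is assumed to be a
 * sufficient upper bound as used elsewhere in this file; card and domain
 * are placeholders supplied by the caller.
 */
static int __maybe_unused ep11_genaeskey_example(u16 card, u16 domain)
{
	u8 keybuf[MAXEP11AESKEYBLOBSIZE];
	size_t keybufsize = sizeof(keybuf);
	int rc;

	rc = ep11_genaeskey(card, domain, 256, 0 /* default key attrs */,
			    keybuf, &keybufsize, TOKVER_EP11_AES_WITH_HEADER);
	if (rc)
		return rc;
	/* keybufsize now holds the header + payload length */
	return 0;
}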
static int ep11_cryptsingle(u16 card, u16 domain,
u16 mode, u32 mech, const u8 *iv,
const u8 *key, size_t keysize,
const u8 *inbuf, size_t inbufsize,
u8 *outbuf, size_t *outbufsize)
{
struct crypt_req_pl {
struct pl_head head;
u8 var_tag;
u8 var_len;
u32 var;
u8 mech_tag;
u8 mech_len;
u32 mech;
/*
* maybe followed by iv data
* followed by key tag + key blob
* followed by plaintext tag + plaintext
*/
} __packed * req_pl;
struct crypt_rep_pl {
struct pl_head head;
u8 rc_tag;
u8 rc_len;
u32 rc;
u8 data_tag;
u8 data_lenfmt;
/* data follows */
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
size_t req_pl_size, rep_pl_size;
int n, api = EP11_API_V1, rc = -ENOMEM;
u8 *p;
/* the simple asn1 coding used has length limits */
if (keysize > 0xFFFF || inbufsize > 0xFFFF)
return -EINVAL;
/* request cprb and payload */
req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
/* mech is mech + mech params (iv here) */
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
}
/* key and input data */
p += asn1tag_write(p, 0x04, key, keysize);
p += asn1tag_write(p, 0x04, inbuf, inbufsize);
/* reply cprb and payload, assume out data size <= in data size + 32 */
rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
rep = alloc_cprb(rep_pl_size);
if (!rep)
goto out;
rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
target.dom_id = domain;
prep_urb(urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + rep_pl_size);
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
goto out;
}
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04) {
DEBUG_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
p = ((u8 *)rep_pl) + sizeof(*rep_pl);
if (rep_pl->data_lenfmt <= 127) {
n = rep_pl->data_lenfmt;
} else if (rep_pl->data_lenfmt == 0x81) {
n = *p++;
} else if (rep_pl->data_lenfmt == 0x82) {
n = *((u16 *)p);
p += 2;
} else {
DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
__func__, rep_pl->data_lenfmt);
rc = -EIO;
goto out;
}
if (n > *outbufsize) {
DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
__func__, n, *outbufsize);
rc = -ENOSPC;
goto out;
}
memcpy(outbuf, p, n);
*outbufsize = n;
out:
kfree(req);
kfree(rep);
kfree(urb);
return rc;
}
static int _ep11_unwrapkey(u16 card, u16 domain,
const u8 *kek, size_t keksize,
const u8 *enckey, size_t enckeysize,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize)
{
struct uw_req_pl {
struct pl_head head;
u8 attr_tag;
u8 attr_len;
u32 attr_header;
u32 attr_bool_mask;
u32 attr_bool_bits;
u32 attr_key_type;
u32 attr_key_type_value;
u32 attr_val_len;
u32 attr_val_len_value;
u8 mech_tag;
u8 mech_len;
u32 mech;
/*
* maybe followed by iv data
* followed by kek tag + kek blob
* followed by empty mac tag
* followed by empty pin tag or empty pinblob tag
	 * followed by encrypted key tag + bytes
*/
} __packed * req_pl;
struct uw_rep_pl {
struct pl_head head;
u8 rc_tag;
u8 rc_len;
u32 rc;
u8 data_tag;
u8 data_lenfmt;
u16 data_len;
u8 data[512];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
int api, rc = -ENOMEM;
u8 *p;
/* request cprb and payload */
api = (!keygenflags || keygenflags & 0x00200000) ?
EP11_API_V4 : EP11_API_V1;
if (ap_is_se_guest()) {
/*
* unwrap within SE environment requires API ordinal 6
* with empty pinblob
*/
api = EP11_API_V6;
pinblob_size = EP11_PINBLOB_V1_BYTES;
}
req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keksize) + ASN1TAGLEN(0)
+ ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize);
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
req_pl->attr_tag = 0x04;
req_pl->attr_len = 7 * sizeof(u32);
req_pl->attr_header = 0x10020000;
req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
req_pl->attr_val_len_value = keybitsize / 8;
/* mech is mech + mech params (iv here) */
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
}
/* kek */
p += asn1tag_write(p, 0x04, kek, keksize);
/* empty mac key tag */
*p++ = 0x04;
*p++ = 0;
/* pin tag */
*p++ = 0x04;
*p++ = pinblob_size;
p += pinblob_size;
/* encrypted key value tag and bytes */
p += asn1tag_write(p, 0x04, enckey, enckeysize);
/* reply cprb and payload */
rep = alloc_cprb(sizeof(struct uw_rep_pl));
if (!rep)
goto out;
rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
target.dom_id = domain;
prep_urb(urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
goto out;
}
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
DEBUG_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
__func__);
rc = -ENOSPC;
goto out;
}
/* copy key blob */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
out:
kfree(req);
kfree(rep);
kfree(urb);
return rc;
}
static int ep11_unwrapkey(u16 card, u16 domain,
const u8 *kek, size_t keksize,
const u8 *enckey, size_t enckeysize,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize,
u8 keybufver)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
u8 *pl;
int rc;
rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
&hdr, &hdr_size, &pl, &pl_size);
if (rc)
return rc;
rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
mech, iv, keybitsize, keygenflags,
pl, &pl_size);
if (rc)
return rc;
*keybufsize = hdr_size + pl_size;
/* update header information */
hdr = (struct ep11kblob_header *)keybuf;
hdr->type = TOKTYPE_NON_CCA;
hdr->len = *keybufsize;
hdr->version = keybufver;
hdr->bitlen = keybitsize;
return 0;
}
static int _ep11_wrapkey(u16 card, u16 domain,
const u8 *key, size_t keysize,
u32 mech, const u8 *iv,
u8 *databuf, size_t *datasize)
{
struct wk_req_pl {
struct pl_head head;
u8 var_tag;
u8 var_len;
u32 var;
u8 mech_tag;
u8 mech_len;
u32 mech;
/*
* followed by iv data
* followed by key tag + key blob
* followed by dummy kek param
* followed by dummy mac param
*/
} __packed * req_pl;
struct wk_rep_pl {
struct pl_head head;
u8 rc_tag;
u8 rc_len;
u32 rc;
u8 data_tag;
u8 data_lenfmt;
u16 data_len;
u8 data[1024];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
size_t req_pl_size;
int api, rc = -ENOMEM;
u8 *p;
/* request cprb and payload */
req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + 4;
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
if (!mech || mech == 0x80060001)
req->flags |= 0x20; /* CPACF_WRAP needs special bit */
req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req));
api = (!mech || mech == 0x80060001) ? /* CKM_IBM_CPACF_WRAP */
EP11_API_V4 : EP11_API_V1;
prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
/* mech is mech + mech params (iv here) */
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
}
/* key blob */
p += asn1tag_write(p, 0x04, key, keysize);
/* empty kek tag */
*p++ = 0x04;
*p++ = 0;
/* empty mac tag */
*p++ = 0x04;
*p++ = 0;
/* reply cprb and payload */
rep = alloc_cprb(sizeof(struct wk_rep_pl));
if (!rep)
goto out;
rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
target.dom_id = domain;
prep_urb(urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
goto out;
}
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
DEBUG_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *datasize) {
DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
__func__);
rc = -ENOSPC;
goto out;
}
/* copy the data from the cprb to the data buffer */
memcpy(databuf, rep_pl->data, rep_pl->data_len);
*datasize = rep_pl->data_len;
out:
kfree(req);
kfree(rep);
kfree(urb);
return rc;
}
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
u32 keytype)
{
int rc;
u8 encbuf[64], *kek = NULL;
size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
clrkeylen = keybitsize / 8;
} else {
DEBUG_ERR(
"%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
/* allocate memory for the temp kek */
keklen = MAXEP11AESKEYBLOBSIZE;
kek = kmalloc(keklen, GFP_ATOMIC);
if (!kek) {
rc = -ENOMEM;
goto out;
}
/* Step 1: generate AES 256 bit random kek key */
rc = _ep11_genaeskey(card, domain, 256,
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
kek, &keklen);
if (rc) {
DEBUG_ERR(
"%s generate kek key failed, rc=%d\n",
__func__, rc);
goto out;
}
/* Step 2: encrypt clear key value with the kek key */
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
clrkey, clrkeylen, encbuf, &encbuflen);
if (rc) {
DEBUG_ERR(
"%s encrypting key value with kek key failed, rc=%d\n",
__func__, rc);
goto out;
}
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
keybitsize, 0, keybuf, keybufsize, keytype);
if (rc) {
DEBUG_ERR(
"%s importing key value as new key failed,, rc=%d\n",
__func__, rc);
goto out;
}
out:
kfree(kek);
return rc;
}
EXPORT_SYMBOL(ep11_clr2keyblob);
int ep11_kblob2protkey(u16 card, u16 dom,
const u8 *keyblob, size_t keybloblen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct ep11kblob_header *hdr;
struct ep11keyblob *key;
size_t wkbuflen, keylen;
struct wk_info {
u16 version;
u8 res1[16];
u32 pkeytype;
u32 pkeybitsize;
u64 pkeysize;
u8 res2[8];
u8 pkey[];
} __packed * wki;
u8 *wkbuf = NULL;
int rc = -EIO;
if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen))
return -EINVAL;
if (hdr->version == TOKVER_EP11_AES) {
/* wipe overlayed header */
memset(hdr, 0, sizeof(*hdr));
}
/* !!! hdr is no longer a valid header !!! */
/* alloc temp working buffer */
wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
if (!wkbuf)
return -ENOMEM;
/* ep11 secure key -> protected key + info */
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
0, def_iv, wkbuf, &wkbuflen);
if (rc) {
DEBUG_ERR(
"%s rewrapping ep11 key to pkey failed, rc=%d\n",
__func__, rc);
goto out;
}
wki = (struct wk_info *)wkbuf;
/* check struct version and pkey type */
if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
__func__, (int)wki->version, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
/* check protected key type field */
switch (wki->pkeytype) {
case 1: /* AES */
switch (wki->pkeysize) {
case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
__func__, (int)wki->pkeysize);
rc = -EIO;
goto out;
}
break;
case 3: /* EC-P */
case 4: /* EC-ED */
case 5: /* EC-BP */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_ECC;
break;
case 2: /* TDES */
default:
DEBUG_ERR("%s unknown/unsupported key type %d\n",
__func__, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
__func__, wki->pkeysize, *protkeylen);
rc = -EINVAL;
goto out;
}
memcpy(protkey, wki->pkey, wki->pkeysize);
*protkeylen = wki->pkeysize;
out:
kfree(wkbuf);
return rc;
}
EXPORT_SYMBOL(ep11_kblob2protkey);
int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int minapi, const u8 *wkvp)
{
struct zcrypt_device_status_ext *device_status;
u32 *_apqns = NULL, _nr_apqns = 0;
int i, card, dom, rc = -ENOMEM;
struct ep11_domain_info edi;
struct ep11_card_info eci;
/* fetch status of all crypto cards */
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
/* allocate 1k space for up to 256 apqns */
_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!_apqns) {
kvfree(device_status);
return -ENOMEM;
}
	/* walk through all the crypto apqns */
for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
/* check online state */
if (!device_status[i].online)
continue;
/* check for ep11 functions */
if (!(device_status[i].functions & 0x01))
continue;
/* check cardnr */
if (cardnr != 0xFFFF && card != cardnr)
continue;
/* check domain */
if (domain != 0xFFFF && dom != domain)
continue;
/* check min hardware type */
if (minhwtype && device_status[i].hwtype < minhwtype)
continue;
/* check min api version if given */
if (minapi > 0) {
if (ep11_get_card_info(card, &eci, 0))
continue;
if (minapi > eci.API_ord_nr)
continue;
}
/* check wkvp if given */
if (wkvp) {
if (ep11_get_domain_info(card, dom, &edi))
continue;
if (edi.cur_wk_state != '1')
continue;
if (memcmp(wkvp, edi.cur_wkvp, 16))
continue;
}
		/* apqn passed all filtering criteria, add to the array */
if (_nr_apqns < 256)
_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
	/* nothing found? */
if (!_nr_apqns) {
kfree(_apqns);
rc = -ENODEV;
} else {
		/* no re-allocation, simply return the _apqns array */
*apqns = _apqns;
*nr_apqns = _nr_apqns;
rc = 0;
}
kvfree(device_status);
return rc;
}
EXPORT_SYMBOL(ep11_findcard2);
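/*
 * Editor's sketch, not part of the driver: each entry of the apqns array
 * returned by ep11_findcard2() packs card and domain as
 * (card << 16) | domain. The two helpers below use invented names and
 * only spell out the decode side.
 */
static inline u16 __maybe_unused ep11_apqn_card(u32 apqn)
{
	return (u16)(apqn >> 16);
}

static inline u16 __maybe_unused ep11_apqn_dom(u32 apqn)
{
	return (u16)(apqn & 0xffff);
}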
void __exit zcrypt_ep11misc_exit(void)
{
card_cache_free();
}
| linux-master | drivers/s390/crypto/zcrypt_ep11misc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* VFIO based AP device driver
*
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <[email protected]>
* Pierre Morel <[email protected]>
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/facility.h>
#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
MODULE_LICENSE("GPL v2");
struct ap_matrix_dev *matrix_dev;
debug_info_t *vfio_ap_dbf_info;
/*
 * Only type 10 adapters (CEX4 and later) are supported
 * by the AP matrix device driver.
 */
static struct ap_device_id ap_queue_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX7,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX8,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
	{ /* end of list */ },
};
static struct ap_driver vfio_ap_drv = {
.probe = vfio_ap_mdev_probe_queue,
.remove = vfio_ap_mdev_remove_queue,
.in_use = vfio_ap_mdev_resource_in_use,
.on_config_changed = vfio_ap_on_cfg_changed,
.on_scan_complete = vfio_ap_on_scan_complete,
.ids = ap_queue_ids,
};
static void vfio_ap_matrix_dev_release(struct device *dev)
{
struct ap_matrix_dev *matrix_dev;
matrix_dev = container_of(dev, struct ap_matrix_dev, device);
kfree(matrix_dev);
}
static struct bus_type matrix_bus = {
.name = "matrix",
};
static struct device_driver matrix_driver = {
.name = "vfio_ap",
.bus = &matrix_bus,
.suppress_bind_attrs = true,
};
static int vfio_ap_matrix_dev_create(void)
{
int ret;
struct device *root_device;
root_device = root_device_register(VFIO_AP_ROOT_NAME);
if (IS_ERR(root_device))
return PTR_ERR(root_device);
ret = bus_register(&matrix_bus);
if (ret)
goto bus_register_err;
matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
if (!matrix_dev) {
ret = -ENOMEM;
goto matrix_alloc_err;
}
/* Fill in config info via PQAP(QCI), if available */
if (test_facility(12)) {
ret = ap_qci(&matrix_dev->info);
if (ret)
goto matrix_alloc_err;
}
mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
mutex_init(&matrix_dev->guests_lock);
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
matrix_dev->device.bus = &matrix_bus;
matrix_dev->device.release = vfio_ap_matrix_dev_release;
matrix_dev->vfio_ap_drv = &vfio_ap_drv;
ret = device_register(&matrix_dev->device);
if (ret)
goto matrix_reg_err;
ret = driver_register(&matrix_driver);
if (ret)
goto matrix_drv_err;
return 0;
matrix_drv_err:
device_del(&matrix_dev->device);
matrix_reg_err:
put_device(&matrix_dev->device);
matrix_alloc_err:
bus_unregister(&matrix_bus);
bus_register_err:
root_device_unregister(root_device);
return ret;
}
static void vfio_ap_matrix_dev_destroy(void)
{
struct device *root_device = matrix_dev->device.parent;
driver_unregister(&matrix_driver);
device_unregister(&matrix_dev->device);
bus_unregister(&matrix_bus);
root_device_unregister(root_device);
}
static int __init vfio_ap_dbf_info_init(void)
{
vfio_ap_dbf_info = debug_register("vfio_ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
if (!vfio_ap_dbf_info)
return -ENOENT;
debug_register_view(vfio_ap_dbf_info, &debug_sprintf_view);
debug_set_level(vfio_ap_dbf_info, DBF_WARN);
return 0;
}
static int __init vfio_ap_init(void)
{
int ret;
ret = vfio_ap_dbf_info_init();
if (ret)
return ret;
/* If there are no AP instructions, there is nothing to pass through. */
if (!ap_instructions_available())
return -ENODEV;
ret = vfio_ap_matrix_dev_create();
if (ret)
return ret;
ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
if (ret) {
vfio_ap_matrix_dev_destroy();
return ret;
}
ret = vfio_ap_mdev_register();
if (ret) {
ap_driver_unregister(&vfio_ap_drv);
vfio_ap_matrix_dev_destroy();
return ret;
}
return 0;
}
static void __exit vfio_ap_exit(void)
{
vfio_ap_mdev_unregister();
ap_driver_unregister(&vfio_ap_drv);
vfio_ap_matrix_dev_destroy();
debug_unregister(vfio_ap_dbf_info);
}
module_init(vfio_ap_init);
module_exit(vfio_ap_exit);
| linux-master | drivers/s390/crypto/vfio_ap_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <[email protected]>
*
* Adjunct processor bus, card related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "ap_bus.h"
/*
* AP card related attributes.
*/
static ssize_t hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR_RO(hwtype);
static ssize_t raw_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR_RO(raw_hwtype);
static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR_RO(depth);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR_RO(ap_functions);
static ssize_t request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
u64 req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_queues_lock);
req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_queues_lock);
return sysfs_emit(buf, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int bkt;
struct ap_queue *aq;
struct ap_card *ac = to_ap_card(dev);
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode)
if (ac == aq->card)
aq->total_request_count = 0;
spin_unlock_bh(&ap_queues_lock);
atomic64_set(&ac->total_request_count, 0);
return count;
}
static DEVICE_ATTR_RW(request_count);
static ssize_t requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int bkt;
struct ap_queue *aq;
unsigned int reqq_cnt;
struct ap_card *ac = to_ap_card(dev);
reqq_cnt = 0;
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode)
if (ac == aq->card)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_queues_lock);
return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
static ssize_t pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int bkt;
struct ap_queue *aq;
unsigned int penq_cnt;
struct ap_card *ac = to_ap_card(dev);
penq_cnt = 0;
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode)
if (ac == aq->card)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_queues_lock);
return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%d\n", ac->config ? 1 : 0);
}
static ssize_t config_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc = 0, cfg;
struct ap_card *ac = to_ap_card(dev);
if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
return -EINVAL;
if (cfg && !ac->config)
rc = sclp_ap_configure(ac->id);
else if (!cfg && ac->config)
rc = sclp_ap_deconfigure(ac->id);
if (rc)
return rc;
ac->config = cfg ? true : false;
ap_send_config_uevent(&ac->ap_dev, ac->config);
return count;
}
static DEVICE_ATTR_RW(config);
static ssize_t chkstop_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%d\n", ac->chkstop ? 1 : 0);
}
static DEVICE_ATTR_RO(chkstop);
static ssize_t max_msg_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return sysfs_emit(buf, "%u\n", ac->maxmsgsize);
}
static DEVICE_ATTR_RO(max_msg_size);
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_ap_functions.attr,
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
&dev_attr_config.attr,
&dev_attr_chkstop.attr,
&dev_attr_max_msg_size.attr,
NULL
};
static struct attribute_group ap_card_dev_attr_group = {
.attrs = ap_card_dev_attrs
};
static const struct attribute_group *ap_card_dev_attr_groups[] = {
&ap_card_dev_attr_group,
NULL
};
static struct device_type ap_card_type = {
.name = "ap_card",
.groups = ap_card_dev_attr_groups,
};
static void ap_card_device_release(struct device *dev)
{
struct ap_card *ac = to_ap_card(dev);
kfree(ac);
}
struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
int comp_type, unsigned int functions, int ml)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = comp_type;
ac->raw_hwtype = raw_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
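	/*
	 * A non-zero ml is scaled by AP_TAPQ_ML_FIELD_CHUNK_SIZE to get the
	 * maximum message size; ml == 0 falls back to AP_DEFAULT_MAX_MSG_SIZE.
	 */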
ac->maxmsgsize = ml > 0 ?
ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE;
return ac;
}
| linux-master | drivers/s390/crypto/ap_card.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012, 2022
* Author(s): Holger Dengler <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
/* Waiting time for requests to be processed.
* Currently there are some types of request which are not deterministic.
* But the maximum time limit managed by the stomper code is set to 60sec.
* Hence we have to wait at least that time period.
*/
#define CEX4_CLEANUP_TIME (900 * HZ)
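/* 900 * HZ jiffies = 900 seconds, i.e. 15 minutes, well above the 60 sec limit mentioned above */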
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \
"Copyright IBM Corp. 2022");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex4_card_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX7,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX8,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
static struct ap_device_id zcrypt_cex4_queue_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX7,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX8,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
/*
* CCA card additional device attributes
*/
static ssize_t cca_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
memset(&ci, 0, sizeof(ci));
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
return sysfs_emit(buf, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
static struct attribute *cca_card_attrs[] = {
&dev_attr_cca_serialnr.attr,
NULL,
};
static const struct attribute_group cca_card_attr_grp = {
.attrs = cca_card_attrs,
};
/*
* CCA queue additional device attributes
*/
static ssize_t cca_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
int n = 0;
struct cca_info ci;
static const char * const cao_state[] = { "invalid", "valid" };
static const char * const new_state[] = { "empty", "partial", "full" };
memset(&ci, 0, sizeof(ci));
cca_get_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
&ci, zq->online);
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n",
new_state[ci.new_aes_mk_state - '1'],
ci.new_aes_mkvp);
else
n += sysfs_emit_at(buf, n, "AES NEW: - -\n");
if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
cao_state[ci.cur_aes_mk_state - '1'],
ci.cur_aes_mkvp);
else
n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
cao_state[ci.old_aes_mk_state - '1'],
ci.old_aes_mkvp);
else
n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
new_state[ci.new_apka_mk_state - '1'],
ci.new_apka_mkvp);
else
n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
cao_state[ci.cur_apka_mk_state - '1'],
ci.cur_apka_mkvp);
else
n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
cao_state[ci.old_apka_mk_state - '1'],
ci.old_apka_mkvp);
else
n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3')
n += sysfs_emit_at(buf, n, "ASYM NEW: %s 0x%016llx%016llx\n",
new_state[ci.new_asym_mk_state - '1'],
*((u64 *)(ci.new_asym_mkvp)),
*((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
else
n += sysfs_emit_at(buf, n, "ASYM NEW: - -\n");
if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2')
n += sysfs_emit_at(buf, n, "ASYM CUR: %s 0x%016llx%016llx\n",
cao_state[ci.cur_asym_mk_state - '1'],
*((u64 *)(ci.cur_asym_mkvp)),
*((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
else
n += sysfs_emit_at(buf, n, "ASYM CUR: - -\n");
if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2')
n += sysfs_emit_at(buf, n, "ASYM OLD: %s 0x%016llx%016llx\n",
cao_state[ci.old_asym_mk_state - '1'],
*((u64 *)(ci.old_asym_mkvp)),
*((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
else
n += sysfs_emit_at(buf, n, "ASYM OLD: - -\n");
return n;
}
static struct device_attribute dev_attr_cca_mkvps =
__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
static struct attribute *cca_queue_attrs[] = {
&dev_attr_cca_mkvps.attr,
NULL,
};
static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
/*
* EP11 card additional device attributes
*/
static ssize_t ep11_api_ordinalnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
memset(&ci, 0, sizeof(ci));
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.API_ord_nr > 0)
return sysfs_emit(buf, "%u\n", ci.API_ord_nr);
else
return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_api_ordinalnr =
__ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
static ssize_t ep11_fw_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
memset(&ci, 0, sizeof(ci));
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.FW_version > 0)
return sysfs_emit(buf, "%d.%d\n",
(int)(ci.FW_version >> 8),
(int)(ci.FW_version & 0xFF));
else
return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_fw_version =
__ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
static ssize_t ep11_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
memset(&ci, 0, sizeof(ci));
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.serial[0])
return sysfs_emit(buf, "%16.16s\n", ci.serial);
else
return sysfs_emit(buf, "\n");
}
static struct device_attribute dev_attr_ep11_serialnr =
__ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
static const struct {
int mode_bit;
const char *mode_txt;
} ep11_op_modes[] = {
{ 0, "FIPS2009" },
{ 1, "BSI2009" },
{ 2, "FIPS2011" },
{ 3, "BSI2011" },
{ 6, "BSICC2017" },
{ 0, NULL }
};
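/*
 * NULL terminated mapping of EP11 operation mode bits to their names.
 * The show functions below test each entry with (1ULL << mode_bit)
 * against the op_mode mask reported for the card or domain.
 */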
static ssize_t ep11_card_op_modes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
int i, n = 0;
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
memset(&ci, 0, sizeof(ci));
ep11_get_card_info(ac->id, &ci, zc->online);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += sysfs_emit_at(buf, n, "%s",
ep11_op_modes[i].mode_txt);
}
}
n += sysfs_emit_at(buf, n, "\n");
return n;
}
static struct device_attribute dev_attr_ep11_card_op_modes =
__ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
static struct attribute *ep11_card_attrs[] = {
&dev_attr_ep11_api_ordinalnr.attr,
&dev_attr_ep11_fw_version.attr,
&dev_attr_ep11_serialnr.attr,
&dev_attr_ep11_card_op_modes.attr,
NULL,
};
static const struct attribute_group ep11_card_attr_grp = {
.attrs = ep11_card_attrs,
};
/*
* EP11 queue additional device attributes
*/
static ssize_t ep11_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
int n = 0;
struct ep11_domain_info di;
static const char * const cwk_state[] = { "invalid", "valid" };
static const char * const nwk_state[] = { "empty", "uncommitted",
"committed" };
memset(&di, 0, sizeof(di));
if (zq->online)
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
&di);
if (di.cur_wk_state == '0') {
n = sysfs_emit(buf, "WK CUR: %s -\n",
cwk_state[di.cur_wk_state - '0']);
} else if (di.cur_wk_state == '1') {
n = sysfs_emit(buf, "WK CUR: %s 0x",
cwk_state[di.cur_wk_state - '0']);
bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
n += 2 * sizeof(di.cur_wkvp);
n += sysfs_emit_at(buf, n, "\n");
} else {
n = sysfs_emit(buf, "WK CUR: - -\n");
}
if (di.new_wk_state == '0') {
n += sysfs_emit_at(buf, n, "WK NEW: %s -\n",
nwk_state[di.new_wk_state - '0']);
} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
n += sysfs_emit_at(buf, n, "WK NEW: %s 0x",
nwk_state[di.new_wk_state - '0']);
bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
n += 2 * sizeof(di.new_wkvp);
n += sysfs_emit_at(buf, n, "\n");
} else {
n += sysfs_emit_at(buf, n, "WK NEW: - -\n");
}
return n;
}
static struct device_attribute dev_attr_ep11_mkvps =
__ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
static ssize_t ep11_queue_op_modes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = dev_get_drvdata(dev);
int i, n = 0;
struct ep11_domain_info di;
memset(&di, 0, sizeof(di));
if (zq->online)
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
&di);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += sysfs_emit_at(buf, n, "%s",
ep11_op_modes[i].mode_txt);
}
}
n += sysfs_emit_at(buf, n, "\n");
return n;
}
static struct device_attribute dev_attr_ep11_queue_op_modes =
__ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
static struct attribute *ep11_queue_attrs[] = {
&dev_attr_ep11_mkvps.attr,
&dev_attr_ep11_queue_op_modes.attr,
NULL,
};
static const struct attribute_group ep11_queue_attr_grp = {
.attrs = ep11_queue_attrs,
};
/*
* Probe function for CEX[45678] card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
{
/*
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[NUM_OPS] = {
14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[NUM_OPS] = {
8, 9, 20, 18, 66, 458, 0, 0};
static const int CEX6A_SPEED_IDX[NUM_OPS] = {
6, 9, 20, 17, 65, 438, 0, 0};
static const int CEX7A_SPEED_IDX[NUM_OPS] = {
6, 8, 17, 15, 54, 362, 0, 0};
static const int CEX8A_SPEED_IDX[NUM_OPS] = {
6, 8, 17, 15, 54, 362, 0, 0};
static const int CEX4C_SPEED_IDX[NUM_OPS] = {
59, 69, 308, 83, 278, 2204, 209, 40};
	static const int CEX5C_SPEED_IDX[NUM_OPS] = {
24, 31, 50, 37, 90, 479, 27, 10};
static const int CEX6C_SPEED_IDX[NUM_OPS] = {
16, 20, 32, 27, 77, 455, 24, 9};
static const int CEX7C_SPEED_IDX[NUM_OPS] = {
14, 16, 26, 23, 64, 376, 23, 8};
static const int CEX8C_SPEED_IDX[NUM_OPS] = {
14, 16, 26, 23, 64, 376, 23, 8};
static const int CEX4P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 50};
static const int CEX5P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 10};
static const int CEX6P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 9};
static const int CEX7P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 8};
static const int CEX8P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 8};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
int rc = 0;
zc = zcrypt_card_alloc();
if (!zc)
return -ENOMEM;
zc->card = ac;
dev_set_drvdata(&ap_dev->device, zc);
if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4A";
zc->user_space_type = ZCRYPT_CEX4;
zc->speed_rating = CEX4A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
zc->speed_rating = CEX5A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6A";
zc->user_space_type = ZCRYPT_CEX6;
zc->speed_rating = CEX6A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
zc->type_string = "CEX7A";
zc->speed_rating = CEX7A_SPEED_IDX;
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
} else {
zc->type_string = "CEX8A";
zc->speed_rating = CEX8A_SPEED_IDX;
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_4K;
} else {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_2K;
}
} else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4C";
zc->speed_rating = CEX4C_SPEED_IDX;
/* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
zc->speed_rating = CEX5C_SPEED_IDX;
/* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6C";
zc->speed_rating = CEX6C_SPEED_IDX;
/* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
zc->type_string = "CEX7C";
zc->speed_rating = CEX7C_SPEED_IDX;
/* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
} else {
zc->type_string = "CEX8C";
zc->speed_rating = CEX8C_SPEED_IDX;
/* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4P";
zc->user_space_type = ZCRYPT_CEX4;
zc->speed_rating = CEX4P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
zc->speed_rating = CEX5P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6P";
zc->user_space_type = ZCRYPT_CEX6;
zc->speed_rating = CEX6P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
zc->type_string = "CEX7P";
zc->speed_rating = CEX7P_SPEED_IDX;
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
} else {
zc->type_string = "CEX8P";
zc->speed_rating = CEX8P_SPEED_IDX;
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
} else {
zcrypt_card_free(zc);
return -ENODEV;
}
zc->online = 1;
rc = zcrypt_card_register(zc);
if (rc) {
zcrypt_card_free(zc);
return rc;
}
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
zcrypt_card_free(zc);
}
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
zcrypt_card_free(zc);
}
}
return rc;
}
/*
* This is called to remove the CEX[45678] card driver
* information if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
struct ap_card *ac = to_ap_card(&ap_dev->device);
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex4_card_driver = {
.probe = zcrypt_cex4_card_probe,
.remove = zcrypt_cex4_card_remove,
.ids = zcrypt_cex4_card_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
/*
* Probe function for CEX[45678] queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq;
int rc;
if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_EP11);
} else {
return -ENODEV;
}
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME;
dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
if (rc) {
zcrypt_queue_free(zq);
return rc;
}
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
zcrypt_queue_free(zq);
}
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
zcrypt_queue_free(zq);
}
}
return rc;
}
/*
* This is called to remove the CEX[45678] queue driver
* information if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
{
struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
zcrypt_queue_unregister(zq);
}
static struct ap_driver zcrypt_cex4_queue_driver = {
.probe = zcrypt_cex4_queue_probe,
.remove = zcrypt_cex4_queue_remove,
.ids = zcrypt_cex4_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
int __init zcrypt_cex4_init(void)
{
int rc;
rc = ap_driver_register(&zcrypt_cex4_card_driver,
THIS_MODULE, "cex4card");
if (rc)
return rc;
rc = ap_driver_register(&zcrypt_cex4_queue_driver,
THIS_MODULE, "cex4queue");
if (rc)
ap_driver_unregister(&zcrypt_cex4_card_driver);
return rc;
}
void __exit zcrypt_cex4_exit(void)
{
ap_driver_unregister(&zcrypt_cex4_queue_driver);
ap_driver_unregister(&zcrypt_cex4_card_driver);
}
module_init(zcrypt_cex4_init);
module_exit(zcrypt_cex4_exit);
| linux-master | drivers/s390/crypto/zcrypt_cex4.c |
| linux-master | drivers/s390/crypto/zcrypt_cex2c.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* MSGTYPE restruct: Holger Dengler <[email protected]>
*/
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_cca_key.h"
#define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
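/* round a length up to the next multiple of 4, e.g. CEIL4(5) == 8, CEIL4(8) == 8 */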
#define CEIL4(x) ((((x) + 3) / 4) * 4)
struct response_type {
struct completion work;
int type;
};
#define CEXXC_RESPONSE_TYPE_ICA 0
#define CEXXC_RESPONSE_TYPE_XCRB 1
#define CEXXC_RESPONSE_TYPE_EP11 2
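/*
 * Per-request state passed via ap_msg->private: the receive callbacks
 * below copy the reply into the request buffer and then complete(work),
 * which wakes the submitter waiting in wait_for_completion_interruptible().
 */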
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
struct function_and_rules_block {
unsigned char function_code[2];
unsigned short ulen;
unsigned char only_rule[8];
} __packed;
/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
* Everything about this interface is ascii/big-endian, since the
* device does *not* have 'Intel inside'.
*
* The CPRBX is followed immediately by the parm block.
* The parm block contains:
* - function code ('PD' 0x5044 or 'PK' 0x504B)
* - rule block (one of:)
* + 0x000A 'PKCS-1.2' (MCL2 'PD')
* + 0x000A 'ZERO-PAD' (MCL2 'PK')
* + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
* + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
* - VUD block
*/
static const struct CPRBX static_cprbx = {
.cprb_len = 0x00DC,
.cprb_ver_id = 0x02,
.func_id = {0x54, 0x32},
};
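/*
 * Classify a CCA request type (two ASCII characters packed into a
 * halfword, e.g. 0x5044 == "PD", 0x504B == "PK") as LOW, MEDIUM or HIGH.
 */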
int speed_idx_cca(int req_type)
{
switch (req_type) {
case 0x4142:
case 0x4149:
case 0x414D:
case 0x4341:
case 0x4344:
case 0x4354:
case 0x4358:
case 0x444B:
case 0x4558:
case 0x4643:
case 0x4651:
case 0x4C47:
case 0x4C4B:
case 0x4C51:
case 0x4F48:
case 0x504F:
case 0x5053:
case 0x5058:
case 0x5343:
case 0x5344:
case 0x5345:
case 0x5350:
return LOW;
case 0x414B:
case 0x4345:
case 0x4349:
case 0x434D:
case 0x4847:
case 0x4849:
case 0x484D:
case 0x4850:
case 0x4851:
case 0x4954:
case 0x4958:
case 0x4B43:
case 0x4B44:
case 0x4B45:
case 0x4B47:
case 0x4B48:
case 0x4B49:
case 0x4B4E:
case 0x4B50:
case 0x4B52:
case 0x4B54:
case 0x4B58:
case 0x4D50:
case 0x4D53:
case 0x4D56:
case 0x4D58:
case 0x5044:
case 0x5045:
case 0x5046:
case 0x5047:
case 0x5049:
case 0x504B:
case 0x504D:
case 0x5254:
case 0x5347:
case 0x5349:
case 0x534B:
case 0x534D:
case 0x5356:
case 0x5358:
case 0x5443:
case 0x544B:
case 0x5647:
return HIGH;
default:
return MEDIUM;
}
}
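/* Same classification for EP11 requests, keyed by the numeric EP11 function id. */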
int speed_idx_ep11(int req_type)
{
switch (req_type) {
case 1:
case 2:
case 36:
case 37:
case 38:
case 39:
case 40:
return LOW;
case 17:
case 18:
case 19:
case 20:
case 21:
case 22:
case 26:
case 30:
case 31:
case 32:
case 33:
case 34:
case 35:
return HIGH;
default:
return MEDIUM;
}
}
/*
 * Convert an ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
* Returns 0 on success or negative errno value.
*/
static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A',},
.function_code = {'P', 'K'},
};
static struct function_and_rules_block static_pke_fnr = {
.function_code = {'P', 'K'},
.ulen = 10,
.only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
};
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
char text[];
} __packed * msg = ap_msg->msg;
int size;
/*
	 * The inputdatalength was a selection criterion in the dispatching
* function zcrypt_rsa_modexpo(). However, make sure the following
* copy_from_user() never exceeds the allocated buffer space.
*/
if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
return -EINVAL;
/* VUD.ciphertext */
msg->length = mex->inputdatalength + 2;
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
return -EFAULT;
/* Set up key which is located after the variable length text. */
size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength);
if (size < 0)
return size;
size += sizeof(*msg) + mex->inputdatalength;
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1;
msg->fr = static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
ap_msg->len = size;
return 0;
}
/*
 * Convert an ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
* Returns 0 on success or negative errno value.
*/
static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A',},
.function_code = {'P', 'D'},
};
static struct function_and_rules_block static_pkd_fnr = {
.function_code = {'P', 'D'},
.ulen = 10,
.only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
};
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
char text[];
} __packed * msg = ap_msg->msg;
int size;
/*
	 * The inputdatalength was a selection criterion in the dispatching
* function zcrypt_rsa_crt(). However, make sure the following
* copy_from_user() never exceeds the allocated buffer space.
*/
if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
return -EINVAL;
/* VUD.ciphertext */
msg->length = crt->inputdatalength + 2;
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
return -EFAULT;
/* Set up key which is located after the variable length text. */
size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength);
if (size < 0)
return size;
size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
size - sizeof(msg->hdr) - sizeof(msg->cprbx);
msg->fr = static_pkd_fnr;
ap_msg->len = size;
return 0;
}
/*
 * Convert an XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
* Returns 0 on success or -EFAULT, -EINVAL.
*/
struct type86_fmt2_msg {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
} __packed;
static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
struct ica_xcRB *xcrb,
unsigned int *fcode,
unsigned short **dom)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
};
struct {
struct type6_hdr hdr;
union {
struct CPRBX cprbx;
DECLARE_FLEX_ARRAY(u8, userdata);
};
} __packed * msg = ap_msg->msg;
int rcblen = CEIL4(xcrb->request_control_blk_length);
int req_sumlen, resp_sumlen;
char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
char *function_code;
if (CEIL4(xcrb->request_control_blk_length) <
xcrb->request_control_blk_length)
return -EINVAL; /* overflow after alignment*/
/* length checks */
ap_msg->len = sizeof(struct type6_hdr) +
CEIL4(xcrb->request_control_blk_length) +
xcrb->request_data_length;
if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
/*
* Overflow check
* sum must be greater (or equal) than the largest operand
*/
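	/* (a wrapped unsigned sum would be smaller than its largest operand) */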
req_sumlen = CEIL4(xcrb->request_control_blk_length) +
xcrb->request_data_length;
if ((CEIL4(xcrb->request_control_blk_length) <=
xcrb->request_data_length) ?
req_sumlen < xcrb->request_data_length :
req_sumlen < CEIL4(xcrb->request_control_blk_length)) {
return -EINVAL;
}
if (CEIL4(xcrb->reply_control_blk_length) <
xcrb->reply_control_blk_length)
return -EINVAL; /* overflow after alignment*/
/*
* Overflow check
* sum must be greater (or equal) than the largest operand
*/
resp_sumlen = CEIL4(xcrb->reply_control_blk_length) +
xcrb->reply_data_length;
if ((CEIL4(xcrb->reply_control_blk_length) <=
xcrb->reply_data_length) ?
resp_sumlen < xcrb->reply_data_length :
resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) {
return -EINVAL;
}
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID));
msg->hdr.tocardlen1 = xcrb->request_control_blk_length;
if (xcrb->request_data_length) {
msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
msg->hdr.tocardlen2 = xcrb->request_data_length;
}
msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length;
msg->hdr.fromcardlen2 = xcrb->reply_data_length;
/* prepare CPRB */
if (z_copy_from_user(userspace, msg->userdata,
xcrb->request_control_blk_addr,
xcrb->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcrb->request_control_blk_length)
return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code,
sizeof(msg->hdr.function_code));
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
/* check subfunction, US and AU need special flag with NQAP */
if (memcmp(function_code, "US", 2) == 0 ||
memcmp(function_code, "AU", 2) == 0)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
/* check CPRB minor version, set info bits in ap_message flag field */
switch (*(unsigned short *)(&msg->cprbx.func_id[0])) {
case 0x5432: /* "T2" */
ap_msg->flags |= AP_MSG_FLAG_USAGE;
break;
case 0x5433: /* "T3" */
case 0x5435: /* "T5" */
case 0x5436: /* "T6" */
case 0x5437: /* "T7" */
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n",
__func__, msg->cprbx.func_id[0],
msg->cprbx.func_id[1]);
}
/* copy data block */
if (xcrb->request_data_length &&
z_copy_from_user(userspace, req_data, xcrb->request_data_address,
xcrb->request_data_length))
return -EFAULT;
return 0;
}
static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
struct ep11_urb *xcrb,
unsigned int *fcode,
unsigned int *domain)
{
unsigned int lfmt;
static struct type6_hdr static_type6_ep11_hdr = {
.type = 0x06,
.rqid = {0x00, 0x01},
.function_code = {0x00, 0x00},
.agent_id[0] = 0x58, /* {'X'} */
.agent_id[1] = 0x43, /* {'C'} */
.offset1 = 0x00000058,
};
struct {
struct type6_hdr hdr;
union {
struct {
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* length format */
} __packed;
DECLARE_FLEX_ARRAY(u8, userdata);
};
} __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
unsigned char func_len; /* fixed value 0x4 */
unsigned int func_val; /* function ID */
unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
if (CEIL4(xcrb->req_len) < xcrb->req_len)
return -EINVAL; /* overflow after alignment*/
/* length checks */
ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len);
if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
if (CEIL4(xcrb->resp_len) < xcrb->resp_len)
return -EINVAL; /* overflow after alignment*/
/* prepare type6 header */
msg->hdr = static_type6_ep11_hdr;
msg->hdr.tocardlen1 = xcrb->req_len;
msg->hdr.fromcardlen1 = xcrb->resp_len;
/* Import CPRB data from the ioctl input parameter */
if (z_copy_from_user(userspace, msg->userdata,
(char __force __user *)xcrb->req, xcrb->req_len)) {
return -EFAULT;
}
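	/*
	 * Locate the payload header. The payload block is ASN.1/BER style:
	 * pld_tag 0x30 (SEQUENCE) is followed by the length octets starting
	 * at pld_lenfmt. Short form (high bit clear) gives lfmt = 1; long
	 * form 0x81 (one additional length octet) gives lfmt = 2, 0x82 (two
	 * additional octets) gives lfmt = 3, other long forms are rejected.
	 * lfmt is thus the offset from pld_lenfmt to the payload header.
	 */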
if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
switch (msg->pld_lenfmt & 0x03) {
case 1:
lfmt = 2;
break;
case 2:
lfmt = 3;
break;
default:
return -EINVAL;
}
} else {
lfmt = 1; /* length format #1 */
}
payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
/* enable special processing based on the cprbs flags special bit */
if (msg->cprbx.flags & 0x20)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
/* set info bits in ap_message flag field */
if (msg->cprbx.flags & 0x80)
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
else
ap_msg->flags |= AP_MSG_FLAG_USAGE;
*domain = msg->cprbx.target_id;
return 0;
}
/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
struct type86x_reply {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
unsigned short length; /* length of data including length field size */
char data[];
} __packed;
struct type86_ep11_reply {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct ep11_cprb cprbx;
} __packed;
static int convert_type86_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->msg;
unsigned short service_rc, service_rs;
unsigned int data_len;
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode;
if ((service_rc == 8 && service_rs == 66) ||
(service_rc == 8 && service_rs == 65) ||
(service_rc == 8 && service_rs == 72) ||
(service_rc == 8 && service_rs == 770) ||
(service_rc == 12 && service_rs == 769)) {
ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)service_rc, (int)service_rs);
return -EINVAL;
}
zq->online = 0;
pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)service_rc, (int)service_rs);
ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)service_rc, (int)service_rs);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
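	/* msg->length includes the 2-byte length field itself (see struct type86x_reply) */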
data_len = msg->length - sizeof(msg->length);
if (data_len > outputdatalength)
return -EMSGSIZE;
/* Copy the crypto response to user space. */
if (copy_to_user(outputdata, msg->data, data_len))
return -EFAULT;
return 0;
}
/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
* @xcrb: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcrb)
{
struct type86_fmt2_msg *msg = reply->msg;
char *data = reply->msg;
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
__func__, xcrb->reply_control_blk_length,
msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcrb->reply_control_blk_length = msg->fmt2.count1;
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n",
__func__, xcrb->reply_data_length,
msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
data + msg->fmt2.offset2, msg->fmt2.count2))
return -EFAULT;
}
xcrb->reply_data_length = msg->fmt2.count2;
return 0;
}
/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
* @xcrb: pointer to EP11 user request block
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
struct ep11_urb *xcrb)
{
struct type86_fmt2_msg *msg = reply->msg;
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n",
__func__, (unsigned int)xcrb->resp_len,
msg->fmt2.count1);
return -EMSGSIZE;
}
/* Copy response CPRB to user */
if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcrb->resp_len = msg->fmt2.count1;
return 0;
}
static int convert_type86_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *buffer)
{
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __packed * msg = reply->msg;
char *data = reply->msg;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
return -EINVAL;
memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
return msg->fmt2.count2;
}
static int convert_response_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
msg->cprbx.ccp_rscode == 0x14f &&
outputdatalength > 256) {
if (zq->zcard->max_exp_bit_length <= 17) {
zq->zcard->max_exp_bit_length = 17;
return -EAGAIN;
} else {
return -EINVAL;
}
}
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
fallthrough; /* wrong cprb version is an unknown response */
default:
/* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcrb)
{
struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32));
return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(userspace, zq, reply, xcrb);
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply, struct ep11_urb *xcrb)
{
struct type86_ep11_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE87_RSP_CODE:
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb);
fallthrough; /* wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
static int convert_response_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *data)
{
struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return -EINVAL;
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
* @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
static void zcrypt_msgtype6_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type = msg->private;
struct type86x_reply *t86r;
int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
memcpy(msg->msg, reply->msg, len);
msg->len = len;
break;
case CEXXC_RESPONSE_TYPE_XCRB:
if (t86r->fmt2.count2)
len = t86r->fmt2.offset2 + t86r->fmt2.count2;
else
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
memcpy(msg->msg, reply->msg, len);
msg->len = len;
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
msg->len = sizeof(error_reply);
}
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
msg->len = sizeof(error_reply);
}
out:
complete(&resp_type->work);
}
/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
* @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type = msg->private;
struct type86_ep11_reply *t86r;
int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x04) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_EP11:
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
memcpy(msg->msg, reply->msg, len);
msg->len = len;
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
msg->len = sizeof(error_reply);
}
} else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
msg->len = sizeof(error_reply);
}
out:
complete(&resp_type->work);
}
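/*
 * Monotonically increasing counter, combined with the caller's pid to
 * form the program supplied message id (psmid) of each request.
 */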
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex,
struct ap_message *ap_msg)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_ica(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
out_free:
free_page((unsigned long)ap_msg->msg);
ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
}
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer
*/
static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt,
struct ap_message *ap_msg)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_ica(zq, ap_msg,
crt->outputdata,
crt->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
out_free:
free_page((unsigned long)ap_msg->msg);
ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
}
/*
* Prepare a CCA AP msg request.
* Prepare a CCA AP msg: fetch the required data from userspace,
* prepare the AP msg, fill some info into the ap_message struct,
* extract some data from the CPRB and give back to the caller.
* This function allocates memory and needs an ap_msg prepared
* by the caller with ap_init_message(). Also the caller has to
* make sure ap_release_message() is always called even on failure.
*/
int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
ap_msg->bufsize = atomic_read(&ap_max_msg_size);
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom);
}
/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
* @xcrb: pointer to the send_cprb request buffer
*/
static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
struct ica_xcRB *xcrb,
struct ap_message *ap_msg)
{
struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
/* ... more data blocks ... */
} __packed * msg = ap_msg->msg;
unsigned int max_payload_size;
int rc, delta;
/* calculate maximum payload for this card and msg type */
max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg);
/* limit each of the two from fields to the maximum payload size */
msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size);
msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size);
/* calculate delta if the sum of both exceeds max payload size */
delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2
- max_payload_size;
if (delta > 0) {
/*
* Sum exceeds maximum payload size, prune fromcardlen1
* (always trust fromcardlen2)
*/
if (delta > msg->hdr.fromcardlen1) {
rc = -EINVAL;
goto out;
}
msg->hdr.fromcardlen1 -= delta;
}
init_completion(&rtype->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
rc = -EIO; /* do not retry administrative requests */
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
/*
* Prepare an EP11 AP msg request.
* Prepare an EP11 AP msg: fetch the required data from userspace,
* prepare the AP msg, fill some info into the ap_message struct,
* extract some data from the CPRB and give back to the caller.
* This function allocates memory and needs an ap_msg prepared
* by the caller with ap_init_message(). Also the caller has to
* make sure ap_release_message() is always called even on failure.
*/
int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned int *domain)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_EP11,
};
ap_msg->bufsize = atomic_read(&ap_max_msg_size);
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb,
func_code, domain);
}
/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor
* @xcrb: pointer to the ep11 user request block
*/
static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
struct ep11_urb *xcrb,
struct ap_message *ap_msg)
{
int rc;
unsigned int lfmt;
struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* payload length format */
} __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
unsigned char func_len; /* fixed value 0x4 */
unsigned int func_val; /* function ID */
unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
/*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
* management command indication.
* 0 - non management command
* 1 - management command
*/
if (!(msg->cprbx.flags & 0x80)) {
msg->cprbx.target_id = (unsigned int)
AP_QID_QUEUE(zq->queue->qid);
if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
switch (msg->pld_lenfmt & 0x03) {
case 1:
lfmt = 2;
break;
case 2:
lfmt = 3;
break;
default:
return -EINVAL;
}
} else {
lfmt = 1; /* length format #1 */
}
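/*
 * lfmt counts the pld_lenfmt byte itself plus any extended length
 * bytes, so the payload header starts lfmt bytes behind &pld_lenfmt.
 */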
payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
payload_hdr->dom_val = (unsigned int)
AP_QID_QUEUE(zq->queue->qid);
}
/*
* Set the queue's reply buffer length minus the two prepend headers
* as reply limit for the card firmware.
*/
msg->hdr.fromcardlen1 = zq->reply.bufsize -
sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext);
init_completion(&rtype->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
rc = -EIO; /* do not retry administrative requests */
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
unsigned int *domain)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
*func_code = HWRNG;
return 0;
}
/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
char *buffer, struct ap_message *ap_msg)
{
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->msg;
struct response_type *rtype = ap_msg->private;
int rc;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
init_completion(&rtype->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_rng(zq, ap_msg, buffer);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
out:
return rc;
}
/*
* The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_ops = {
.owner = THIS_MODULE,
.name = MSGTYPE06_NAME,
.variant = MSGTYPE06_VARIANT_DEFAULT,
.rsa_modexpo = zcrypt_msgtype6_modexpo,
.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
.send_cprb = zcrypt_msgtype6_send_cprb,
.rng = zcrypt_msgtype6_rng,
};
static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
.owner = THIS_MODULE,
.name = MSGTYPE06_NAME,
.variant = MSGTYPE06_VARIANT_EP11,
.rsa_modexpo = NULL,
.rsa_modexpo_crt = NULL,
.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
};
void __init zcrypt_msgtype6_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
}
void __exit zcrypt_msgtype6_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
| linux-master | drivers/s390/crypto/zcrypt_msgtype6.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
* Cornelia Huck <[email protected]>
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* MSGTYPE restruct: Holger Dengler <[email protected]>
* Multiple device nodes: Harald Freudenberger <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>
#include "zcrypt_api.h"
#include "zcrypt_debug.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/*
* zcrypt tracepoint functions
*/
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
/*
* Process a rescan of the transport layer.
*
 * Returns 1 if the rescan has been processed, otherwise 0.
*/
static inline int zcrypt_process_rescan(void)
{
if (atomic_read(&zcrypt_rescan_req)) {
atomic_set(&zcrypt_rescan_req, 0);
atomic_inc(&zcrypt_rescan_count);
ap_bus_force_rescan();
ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
atomic_inc_return(&zcrypt_rescan_count));
return 1;
}
return 0;
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
list_add_tail(&zops->list, &zcrypt_ops_list);
}
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
list_del_init(&zops->list);
}
struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
struct zcrypt_ops *zops;
list_for_each_entry(zops, &zcrypt_ops_list, list)
if (zops->variant == variant &&
(!strncmp(zops->name, name, sizeof(zops->name))))
return zops;
return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
/*
* Multi device nodes extension functions.
*/
struct zcdn_device;
static struct class *zcrypt_class;
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;
struct zcdn_device {
struct device device;
struct ap_perms perms;
};
#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
#define ZCDN_MAX_NAME 32
static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
/*
* Find zcdn device by name.
* Returns reference to the zcdn device which needs to be released
* with put_device() after use.
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
struct device *dev = class_find_device_by_name(zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
/*
* Find zcdn device by devt value.
* Returns reference to the zcdn device which needs to be released
* with put_device() after use.
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
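/*
 * sysfs attributes of a zcdn device: each mask below is shown as one hex
 * string ("0x" followed by 16 hex digits per 64 bit word) and is updated
 * via ap_parse_mask_str() on write.
 */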
static ssize_t ioctlmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
return n;
}
static ssize_t ioctlmask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
AP_IOCTLS, &ap_perms_mutex);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR_RW(ioctlmask);
static ssize_t apmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
return n;
}
static ssize_t apmask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
AP_DEVICES, &ap_perms_mutex);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR_RW(apmask);
static ssize_t aqmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
return n;
}
static ssize_t aqmask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
AP_DOMAINS, &ap_perms_mutex);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR_RW(aqmask);
static ssize_t admask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
n += sysfs_emit_at(buf, n, "\n");
mutex_unlock(&ap_perms_mutex);
return n;
}
static ssize_t admask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
AP_DOMAINS, &ap_perms_mutex);
if (rc)
return rc;
return count;
}
static DEVICE_ATTR_RW(admask);
static struct attribute *zcdn_dev_attrs[] = {
&dev_attr_ioctlmask.attr,
&dev_attr_apmask.attr,
&dev_attr_aqmask.attr,
&dev_attr_admask.attr,
NULL
};
static struct attribute_group zcdn_dev_attr_group = {
.attrs = zcdn_dev_attrs
};
static const struct attribute_group *zcdn_dev_attr_groups[] = {
&zcdn_dev_attr_group,
NULL
};
static ssize_t zcdn_create_store(const struct class *class,
const struct class_attribute *attr,
const char *buf, size_t count)
{
int rc;
char name[ZCDN_MAX_NAME];
strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_create(strim(name));
return rc ? rc : count;
}
static const struct class_attribute class_attr_zcdn_create =
__ATTR(create, 0600, NULL, zcdn_create_store);
static ssize_t zcdn_destroy_store(const struct class *class,
const struct class_attribute *attr,
const char *buf, size_t count)
{
int rc;
char name[ZCDN_MAX_NAME];
strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_destroy(strim(name));
return rc ? rc : count;
}
static const struct class_attribute class_attr_zcdn_destroy =
__ATTR(destroy, 0600, NULL, zcdn_destroy_store);
static void zcdn_device_release(struct device *dev)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
__func__, MAJOR(dev->devt), MINOR(dev->devt));
kfree(zcdndev);
}
static int zcdn_create(const char *name)
{
dev_t devt;
int i, rc = 0;
struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
/* check if device node with this name already exists */
if (name[0]) {
zcdndev = find_zcdndev_by_name(name);
if (zcdndev) {
put_device(&zcdndev->device);
rc = -EEXIST;
goto unlockout;
}
}
/* find an unused minor number */
for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
zcdndev = find_zcdndev_by_devt(devt);
if (zcdndev)
put_device(&zcdndev->device);
else
break;
}
if (i == ZCRYPT_MAX_MINOR_NODES) {
rc = -ENOSPC;
goto unlockout;
}
/* alloc and prepare a new zcdn device */
zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
if (!zcdndev) {
rc = -ENOMEM;
goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
zcdndev->device.class = zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
rc = dev_set_name(&zcdndev->device, "%s", name);
else
rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
if (rc) {
kfree(zcdndev);
goto unlockout;
}
rc = device_register(&zcdndev->device);
if (rc) {
put_device(&zcdndev->device);
goto unlockout;
}
ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
__func__, MAJOR(devt), MINOR(devt));
unlockout:
mutex_unlock(&ap_perms_mutex);
return rc;
}
static int zcdn_destroy(const char *name)
{
int rc = 0;
struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
/* try to find this zcdn device */
zcdndev = find_zcdndev_by_name(name);
if (!zcdndev) {
rc = -ENOENT;
goto unlockout;
}
/*
* The zcdn device is not hard destroyed. It is subject to
* reference counting and thus just needs to be unregistered.
*/
put_device(&zcdndev->device);
device_unregister(&zcdndev->device);
unlockout:
mutex_unlock(&ap_perms_mutex);
return rc;
}
static void zcdn_destroy_all(void)
{
int i;
dev_t devt;
struct zcdn_device *zcdndev;
mutex_lock(&ap_perms_mutex);
for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
zcdndev = find_zcdndev_by_devt(devt);
if (zcdndev) {
put_device(&zcdndev->device);
device_unregister(&zcdndev->device);
}
}
mutex_unlock(&ap_perms_mutex);
}
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
*/
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
return -EPERM;
}
/*
 * zcrypt_write(): Not allowed.
*/
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
return -EPERM;
}
/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
*/
static int zcrypt_open(struct inode *inode, struct file *filp)
{
struct ap_perms *perms = &ap_perms;
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
/* find returns a reference, no get_device() needed */
mutex_unlock(&ap_perms_mutex);
if (zcdndev)
perms = &zcdndev->perms;
}
filp->private_data = (void *)perms;
atomic_inc(&zcrypt_open_count);
return stream_open(inode, filp);
}
/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
mutex_lock(&ap_perms_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
mutex_unlock(&ap_perms_mutex);
if (zcdndev) {
/* 2 puts here: one for find, one for open */
put_device(&zcdndev->device);
put_device(&zcdndev->device);
}
}
atomic_dec(&zcrypt_open_count);
return 0;
}
static inline int zcrypt_check_ioctl(struct ap_perms *perms,
unsigned int cmd)
{
int rc = -EPERM;
int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
if (test_bit_inv(ioctlnr, perms->ioctlm))
rc = 0;
}
if (rc)
ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
__func__, ioctlnr, rc);
return rc;
}
static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
return test_bit_inv(card, perms->apm) ? true : false;
}
static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
return test_bit_inv(queue, perms->aqm) ? true : false;
}
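/*
 * zcrypt_pick_queue() and zcrypt_drop_queue() bracket each request:
 * pick takes a module reference, a queue reference and a device
 * reference and adds the weight to the card and queue load counters;
 * drop reverses this accounting once the request has been processed.
 */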
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
struct module **pmod,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
*pmod = zq->queue->ap_dev.device.driver->owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
struct module *mod,
unsigned int weight)
{
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
module_put(mod);
}
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
struct zcrypt_card *pref_zc,
unsigned int weight,
unsigned int pref_weight)
{
if (!pref_zc)
return true;
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
return atomic64_read(&zc->card->total_request_count) <
atomic64_read(&pref_zc->card->total_request_count);
return weight < pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
struct zcrypt_queue *pref_zq,
unsigned int weight,
unsigned int pref_weight)
{
if (!pref_zq)
return true;
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
return zq->queue->total_request_count <
pref_zq->queue->total_request_count;
return weight < pref_weight;
}
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
struct zcrypt_track *tr,
struct ica_rsa_modexpo *mex)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
ap_init_message(&ap_msg);
if (mex->outputdatalength < mex->inputdatalength) {
func_code = 0;
rc = -EINVAL;
goto out;
}
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
* number of bytes we will copy in any case
*/
mex->outputdatalength = mex->inputdatalength;
rc = get_rsa_modex_fc(mex, &func_code);
if (rc)
goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > mex->inputdatalength ||
zc->max_mod_size < mex->inputdatalength)
continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
wgt = zc->speed_rating[func_code];
/* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo ||
!zq->queue->config || zq->queue->chkstop)
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
/* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
__func__);
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
}
trace_s390_zcrypt_rep(mex, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static long zcrypt_rsa_crt(struct ap_perms *perms,
struct zcrypt_track *tr,
struct ica_rsa_modexpo_crt *crt)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
ap_init_message(&ap_msg);
if (crt->outputdatalength < crt->inputdatalength) {
func_code = 0;
rc = -EINVAL;
goto out;
}
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
* number of bytes we will copy in any case
*/
crt->outputdatalength = crt->inputdatalength;
rc = get_rsa_crt_fc(crt, &func_code);
if (rc)
goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > crt->inputdatalength ||
zc->max_mod_size < crt->inputdatalength)
continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
wgt = zc->speed_rating[func_code];
/* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo_crt ||
!zq->queue->config || zq->queue->chkstop)
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
/* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
__func__);
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
}
trace_s390_zcrypt_rep(crt, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
struct zcrypt_track *tr,
struct ica_xcRB *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
unsigned short *domain, tdom;
int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
xcrb->status = 0;
ap_init_message(&ap_msg);
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
tdom = *domain;
if (perms != &ap_perms && tdom < AP_DOMAINS) {
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
if (!test_bit_inv(tdom, perms->adm)) {
rc = -ENODEV;
goto out;
}
} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
rc = -EOPNOTSUPP;
goto out;
}
}
/*
* If a valid target domain is set and this domain is NOT a usage
* domain but a control only domain, autoselect target domain.
*/
if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
ap_test_config_ctrl_domain(tdom))
tdom = AUTOSEL_DOM;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x10000000))
continue;
/* Check for user selected CCA card */
if (xcrb->user_defined != AUTOSELECT &&
xcrb->user_defined != zc->card->id)
continue;
/* check if request size exceeds card max msg size */
if (ap_msg.len > zc->card->maxmsgsize)
continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
/* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check for device usable and eligible */
if (!zq->online || !zq->ops->send_cprb ||
!zq->queue->config || zq->queue->chkstop ||
(tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
/* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
__func__, xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
}
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_cprb);
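/*
 * Illustrative sketch (not part of the original source): an in-kernel
 * user fills an ica_xcRB with request and reply CPRB buffers and then
 * calls the exported helper, roughly:
 *
 *	struct ica_xcRB xcrb;
 *	fill_xcrb(&xcrb, cardnr, reqcblk, repcblk);	(hypothetical helper)
 *	rc = zcrypt_send_cprb(&xcrb);
 *
 * The CCA misc code in this directory follows this pattern.
 */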
static bool is_desired_ep11_card(unsigned int dev_id,
unsigned short target_num,
struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
return true;
targets++;
}
return false;
}
static bool is_desired_ep11_queue(unsigned int dev_qid,
unsigned short target_num,
struct ep11_target_dev *targets)
{
int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
while (target_num-- > 0) {
if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
(targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
return true;
targets++;
}
return false;
}
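/*
 * An ep11_urb may carry an explicit list of target APQNs; an empty list
 * (targets_num == 0) means autoselect among all available EP11 queues,
 * and the AUTOSEL_AP / AUTOSEL_DOM wildcards match any card or domain.
 */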
static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
struct zcrypt_track *tr,
struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ep11_target_dev *targets;
unsigned short target_num;
unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code, domain;
struct ap_message ap_msg;
int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
ap_init_message(&ap_msg);
target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
targets = NULL;
if (target_num != 0) {
struct ep11_target_dev __user *uptr;
targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
if (!targets) {
func_code = 0;
rc = -ENOMEM;
goto out;
}
uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
if (z_copy_from_user(userspace, targets, uptr,
target_num * sizeof(*targets))) {
func_code = 0;
rc = -EFAULT;
goto out_free;
}
}
rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out_free;
if (perms != &ap_perms && domain < AUTOSEL_DOM) {
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
if (!test_bit_inv(domain, perms->adm)) {
rc = -ENODEV;
goto out_free;
}
} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
rc = -EOPNOTSUPP;
goto out_free;
}
}
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable EP11 card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x04000000))
continue;
/* Check for user selected EP11 card */
if (targets &&
!is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
/* check if request size exceeds card max msg size */
if (ap_msg.len > zc->card->maxmsgsize)
continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
/* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->send_ep11_cprb ||
!zq->queue->config || zq->queue->chkstop ||
(targets &&
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
/* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
if (targets && target_num == 1) {
ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
__func__, (int)targets->ap_id,
(int)targets->dom_id);
} else if (targets) {
ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
__func__, (int)target_num);
} else {
ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
__func__);
}
rc = -ENODEV;
goto out_free;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out_free:
kfree(targets);
out:
ap_release_message(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
}
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
ap_init_message(&ap_msg);
rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
if (rc)
goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x10000000))
continue;
/* get weight index of the card device */
wgt = zc->speed_rating[func_code];
if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check if device is usable and eligible */
if (!zq->online || !zq->ops->rng ||
!zq->queue->config || zq->queue->chkstop)
continue;
if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
__func__);
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
trace_s390_zcrypt_rep(buffer, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status *stat;
int card, queue;
memset(devstatus, 0, MAX_ZDEV_ENTRIES
* sizeof(struct zcrypt_device_status));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
if (card >= MAX_ZDEV_CARDIDS)
continue;
queue = AP_QID_QUEUE(zq->queue->qid);
stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
}
spin_unlock(&zcrypt_list_lock);
}
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status_ext *stat;
int card, queue;
memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
* sizeof(struct zcrypt_device_status_ext));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
queue = AP_QID_QUEUE(zq->queue->qid);
stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
}
spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
int zcrypt_device_status_ext(int card, int queue,
struct zcrypt_device_status_ext *devstat)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
memset(devstat, 0, sizeof(*devstat));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (card == AP_QID_CARD(zq->queue->qid) &&
queue == AP_QID_QUEUE(zq->queue->qid)) {
devstat->hwtype = zc->card->ap_dev.device_type;
devstat->functions = zc->card->functions >> 26;
devstat->qid = zq->queue->qid;
devstat->online = zq->online ? 0x01 : 0x00;
spin_unlock(&zcrypt_list_lock);
return 0;
}
}
}
spin_unlock(&zcrypt_list_lock);
return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
memset(status, 0, max_adapters);
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
card >= max_adapters)
continue;
status[card] = zc->online ? zc->user_space_type : 0x0d;
}
}
spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
memset(qdepth, 0, max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
qdepth[card] =
zq->queue->pendingq_count +
zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
}
}
local_bh_enable();
spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
u64 cnt;
memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
}
}
local_bh_enable();
spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int pendingq_count;
pendingq_count = 0;
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
continue;
spin_lock(&zq->queue->lock);
pendingq_count += zq->queue->pendingq_count;
spin_unlock(&zq->queue->lock);
}
}
local_bh_enable();
spin_unlock(&zcrypt_list_lock);
return pendingq_count;
}
static int zcrypt_requestq_count(void)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int requestq_count;
requestq_count = 0;
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
continue;
spin_lock(&zq->queue->lock);
requestq_count += zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
}
}
local_bh_enable();
spin_unlock(&zcrypt_list_lock);
return requestq_count;
}
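/*
 * Common retry pattern used by the ioctl helpers below: a request is
 * retried on -EAGAIN up to TRACK_AGAIN_MAX times, and on -ENODEV one
 * more retry round is attempted after a forced AP bus rescan. If the
 * retry budget is exhausted with -EAGAIN, -EIO is returned instead.
 */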
static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct zcrypt_track tr;
struct ica_rsa_modexpo mex;
struct ica_rsa_modexpo __user *umex = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
}
static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct zcrypt_track tr;
struct ica_rsa_modexpo_crt crt;
struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ica_xcRB xcrb;
struct zcrypt_track tr;
struct ica_xcRB __user *uxcrb = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
rc, xcrb.status);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
}
static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ep11_urb xcrb;
struct zcrypt_track tr;
struct ep11_urb __user *uxcrb = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
struct ap_perms *perms =
(struct ap_perms *)filp->private_data;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
return rc;
switch (cmd) {
case ICARSAMODEXPO:
return icarsamodexpo_ioctl(perms, arg);
case ICARSACRT:
return icarsacrt_ioctl(perms, arg);
case ZSECSENDCPRB:
return zsecsendcprb_ioctl(perms, arg);
case ZSENDEP11CPRB:
return zsendep11cprb_ioctl(perms, arg);
case ZCRYPT_DEVICE_STATUS: {
struct zcrypt_device_status_ext *device_status;
size_t total_size = MAX_ZDEV_ENTRIES_EXT
* sizeof(struct zcrypt_device_status_ext);
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kvfree(device_status);
return rc;
}
case ZCRYPT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status, AP_DEVICES);
if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
case ZCRYPT_QDEPTH_MASK: {
char qdepth[AP_DEVICES];
zcrypt_qdepth_mask(qdepth, AP_DEVICES);
if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
case ZCRYPT_PERDEV_REQCNT: {
u32 *reqcnt;
reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
if (copy_to_user((int __user *)arg, reqcnt,
sizeof(u32) * AP_DEVICES))
rc = -EFAULT;
kfree(reqcnt);
return rc;
}
case Z90STAT_REQUESTQ_COUNT:
return put_user(zcrypt_requestq_count(), (int __user *)arg);
case Z90STAT_PENDINGQ_COUNT:
return put_user(zcrypt_pendingq_count(), (int __user *)arg);
case Z90STAT_TOTALOPEN_COUNT:
return put_user(atomic_read(&zcrypt_open_count),
(int __user *)arg);
case Z90STAT_DOMAIN_INDEX:
return put_user(ap_domain_index, (int __user *)arg);
/*
* Deprecated ioctls
*/
case ZDEVICESTATUS: {
/* the old ioctl supports only 64 adapters */
struct zcrypt_device_status *device_status;
size_t total_size = MAX_ZDEV_ENTRIES
* sizeof(struct zcrypt_device_status);
device_status = kzalloc(total_size, GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask(device_status);
if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kfree(device_status);
return rc;
}
case Z90STAT_STATUS_MASK: {
/* the old ioctl supports only 64 adapters */
char status[MAX_ZDEV_CARDIDS];
zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
case Z90STAT_QDEPTH_MASK: {
/* the old ioctl supports only 64 adapters */
char qdepth[MAX_ZDEV_CARDIDS];
zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
case Z90STAT_PERDEV_REQCNT: {
/* the old ioctl supports only 64 adapters */
u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
return -EFAULT;
return 0;
}
/* unknown ioctl number */
default:
ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
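/*
 * Illustrative user space usage (not part of the original source): the
 * ioctls handled above are issued on the z90crypt misc device or on one
 * of the additional zcdn nodes, e.g.:
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = { ... };
 *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
 */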
#ifdef CONFIG_COMPAT
/*
* ioctl32 conversion routines
*/
struct compat_ica_rsa_modexpo {
compat_uptr_t inputdata;
unsigned int inputdatalength;
compat_uptr_t outputdata;
unsigned int outputdatalength;
compat_uptr_t b_key;
compat_uptr_t n_modulus;
};
static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo mex32;
struct ica_rsa_modexpo mex64;
struct zcrypt_track tr;
long rc;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&mex32, umex32, sizeof(mex32)))
return -EFAULT;
mex64.inputdata = compat_ptr(mex32.inputdata);
mex64.inputdatalength = mex32.inputdatalength;
mex64.outputdata = compat_ptr(mex32.outputdata);
mex64.outputdatalength = mex32.outputdatalength;
mex64.b_key = compat_ptr(mex32.b_key);
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
return rc;
return put_user(mex64.outputdatalength,
&umex32->outputdatalength);
}
struct compat_ica_rsa_modexpo_crt {
compat_uptr_t inputdata;
unsigned int inputdatalength;
compat_uptr_t outputdata;
unsigned int outputdatalength;
compat_uptr_t bp_key;
compat_uptr_t bq_key;
compat_uptr_t np_prime;
compat_uptr_t nq_prime;
compat_uptr_t u_mult_inv;
};
static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo_crt crt32;
struct ica_rsa_modexpo_crt crt64;
struct zcrypt_track tr;
long rc;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
return -EFAULT;
crt64.inputdata = compat_ptr(crt32.inputdata);
crt64.inputdatalength = crt32.inputdatalength;
crt64.outputdata = compat_ptr(crt32.outputdata);
crt64.outputdatalength = crt32.outputdatalength;
crt64.bp_key = compat_ptr(crt32.bp_key);
crt64.bq_key = compat_ptr(crt32.bq_key);
crt64.np_prime = compat_ptr(crt32.np_prime);
crt64.nq_prime = compat_ptr(crt32.nq_prime);
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
return rc;
return put_user(crt64.outputdatalength,
&ucrt32->outputdatalength);
}
struct compat_ica_xcrb {
unsigned short agent_ID;
unsigned int user_defined;
unsigned short request_ID;
unsigned int request_control_blk_length;
unsigned char padding1[16 - sizeof(compat_uptr_t)];
compat_uptr_t request_control_blk_addr;
unsigned int request_data_length;
char padding2[16 - sizeof(compat_uptr_t)];
compat_uptr_t request_data_address;
unsigned int reply_control_blk_length;
char padding3[16 - sizeof(compat_uptr_t)];
compat_uptr_t reply_control_blk_addr;
unsigned int reply_data_length;
char padding4[16 - sizeof(compat_uptr_t)];
compat_uptr_t reply_data_addr;
unsigned short priority_window;
unsigned int status;
} __packed;
static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
struct compat_ica_xcrb xcrb32;
struct zcrypt_track tr;
struct ica_xcRB xcrb64;
long rc;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
return -EFAULT;
xcrb64.agent_ID = xcrb32.agent_ID;
xcrb64.user_defined = xcrb32.user_defined;
xcrb64.request_ID = xcrb32.request_ID;
xcrb64.request_control_blk_length =
xcrb32.request_control_blk_length;
xcrb64.request_control_blk_addr =
compat_ptr(xcrb32.request_control_blk_addr);
xcrb64.request_data_length =
xcrb32.request_data_length;
xcrb64.request_data_address =
compat_ptr(xcrb32.request_data_address);
xcrb64.reply_control_blk_length =
xcrb32.reply_control_blk_length;
xcrb64.reply_control_blk_addr =
compat_ptr(xcrb32.reply_control_blk_addr);
xcrb64.reply_data_length = xcrb32.reply_data_length;
xcrb64.reply_data_addr =
compat_ptr(xcrb32.reply_data_addr);
xcrb64.priority_window = xcrb32.priority_window;
xcrb64.status = xcrb32.status;
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
xcrb32.reply_data_length = xcrb64.reply_data_length;
xcrb32.status = xcrb64.status;
if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
return -EFAULT;
return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
struct ap_perms *perms =
(struct ap_perms *)filp->private_data;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
return rc;
if (cmd == ICARSAMODEXPO)
return trans_modexpo32(perms, filp, cmd, arg);
if (cmd == ICARSACRT)
return trans_modexpo_crt32(perms, filp, cmd, arg);
if (cmd == ZSECSENDCPRB)
return trans_xcrb32(perms, filp, cmd, arg);
return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
/*
* Misc device file operations.
*/
static const struct file_operations zcrypt_fops = {
.owner = THIS_MODULE,
.read = zcrypt_read,
.write = zcrypt_write,
.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zcrypt_compat_ioctl,
#endif
.open = zcrypt_open,
.release = zcrypt_release,
.llseek = no_llseek,
};
/*
* Misc device.
*/
static struct miscdevice zcrypt_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "z90crypt",
.fops = &zcrypt_fops,
};
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
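/*
 * hwrng callback: random data is fetched in chunks from a crypto card
 * via zcrypt_rng() into a page sized buffer and then handed out to the
 * hwrng core one 32 bit word at a time from that buffer.
 */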
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
int rc;
/*
* We don't need locking here because the RNG API guarantees serialized
* read method calls.
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof(*data);
}
*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
return sizeof(*data);
}
static struct hwrng zcrypt_rng_dev = {
.name = "zcrypt",
.data_read = zcrypt_rng_data_read,
.quality = 990,
};
int zcrypt_rng_device_add(void)
{
int rc = 0;
mutex_lock(&zcrypt_rng_mutex);
if (zcrypt_rng_device_count == 0) {
zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
if (!zcrypt_rng_buffer) {
rc = -ENOMEM;
goto out;
}
zcrypt_rng_buffer_index = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
zcrypt_rng_device_count = 1;
} else {
zcrypt_rng_device_count++;
}
mutex_unlock(&zcrypt_rng_mutex);
return 0;
out_free:
free_page((unsigned long)zcrypt_rng_buffer);
out:
mutex_unlock(&zcrypt_rng_mutex);
return rc;
}
void zcrypt_rng_device_remove(void)
{
mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--;
if (zcrypt_rng_device_count == 0) {
hwrng_unregister(&zcrypt_rng_dev);
free_page((unsigned long)zcrypt_rng_buffer);
}
mutex_unlock(&zcrypt_rng_mutex);
}
/*
* Wait until the zcrypt api is operational.
* The AP bus scan and the binding of ap devices to device drivers is
* an asynchronous job. This function waits until these initial jobs
* are done and so the zcrypt api should be ready to serve crypto
* requests - if there are resources available. The function uses an
 * internal timeout of 60s. The very first caller either waits until the
 * ap bus bindings are complete or until the timeout expires. The outcome is
 * remembered for further callers, which are only blocked until a
* decision is made (timeout or bindings complete).
* On timeout -ETIME is returned, on success the return value is 0.
*/
int zcrypt_wait_api_operational(void)
{
static DEFINE_MUTEX(zcrypt_wait_api_lock);
static int zcrypt_wait_api_state;
int rc;
rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
if (rc)
return rc;
switch (zcrypt_wait_api_state) {
case 0:
/* initial state, invoke wait for the ap bus complete */
rc = ap_wait_init_apqn_bindings_complete(
msecs_to_jiffies(60 * 1000));
switch (rc) {
case 0:
/* ap bus bindings are complete */
zcrypt_wait_api_state = 1;
break;
case -EINTR:
/* interrupted, go back to caller */
break;
case -ETIME:
/* timeout */
ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
__func__);
zcrypt_wait_api_state = -ETIME;
break;
default:
/* other failure */
ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
__func__, rc);
break;
}
break;
case 1:
/* a previous caller already found ap bus bindings complete */
rc = 0;
break;
default:
/* a previous caller had timeout or other failure */
rc = zcrypt_wait_api_state;
break;
}
mutex_unlock(&zcrypt_wait_api_lock);
return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);
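/*
 * Illustrative sketch (not part of the original source): a consumer that
 * needs the zcrypt api ready before issuing requests may call
 *
 *	rc = zcrypt_wait_api_operational();
 *	if (rc)
 *		return rc;
 *
 * during its own initialization; a nonzero rc means the api did not
 * become operational (e.g. -ETIME on timeout).
 */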
int __init zcrypt_debug_init(void)
{
zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_info, DBF_ERR);
return 0;
}
void zcrypt_debug_exit(void)
{
debug_unregister(zcrypt_dbf_info);
}
static int __init zcdn_init(void)
{
int rc;
/* create a new class 'zcrypt' */
zcrypt_class = class_create(ZCRYPT_NAME);
if (IS_ERR(zcrypt_class)) {
rc = PTR_ERR(zcrypt_class);
goto out_class_create_failed;
}
zcrypt_class->dev_release = zcdn_device_release;
/* alloc device minor range */
rc = alloc_chrdev_region(&zcrypt_devt,
0, ZCRYPT_MAX_MINOR_NODES,
ZCRYPT_NAME);
if (rc)
goto out_alloc_chrdev_failed;
cdev_init(&zcrypt_cdev, &zcrypt_fops);
zcrypt_cdev.owner = THIS_MODULE;
rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
if (rc)
goto out_cdev_add_failed;
/* need some class specific sysfs attributes */
rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
if (rc)
goto out_class_create_file_1_failed;
rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
if (rc)
goto out_class_create_file_2_failed;
return 0;
out_class_create_file_2_failed:
class_remove_file(zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
class_destroy(zcrypt_class);
out_class_create_failed:
return rc;
}
static void zcdn_exit(void)
{
class_remove_file(zcrypt_class, &class_attr_zcdn_create);
class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
zcdn_destroy_all();
cdev_del(&zcrypt_cdev);
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
class_destroy(zcrypt_class);
}
/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
*/
int __init zcrypt_api_init(void)
{
int rc;
rc = zcrypt_debug_init();
if (rc)
goto out;
rc = zcdn_init();
if (rc)
goto out;
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
goto out_misc_register_failed;
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
return 0;
out_misc_register_failed:
zcdn_exit();
zcrypt_debug_exit();
out:
return rc;
}
/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
*/
void __exit zcrypt_api_exit(void)
{
zcdn_exit();
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
zcrypt_ccamisc_exit();
zcrypt_ep11misc_exit();
zcrypt_debug_exit();
}
module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);
| linux-master | drivers/s390/crypto/zcrypt_api.c |
// SPDX-License-Identifier: GPL-2.0
/*
* pkey device driver
*
* Copyright IBM Corp. 2017, 2023
*
* Author(s): Harald Freudenberger
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/cpufeature.h>
#include <asm/zcrypt.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
#include <crypto/aes.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key interface");
#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
/*
* debug feature data and functions
*/
static debug_info_t *debug_info;
#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
static void __init pkey_debug_init(void)
{
/* 5 arguments per dbf entry (including the format string ptr) */
debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
debug_register_view(debug_info, &debug_sprintf_view);
debug_set_level(debug_info, 3);
}
static void __exit pkey_debug_exit(void)
{
debug_unregister(debug_info);
}
/* inside view of a protected key token (only type 0x00 version 0x01) */
struct protaeskeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3];
u8 version; /* should be 0x01 for protected AES key token */
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
u32 len; /* bytes actually stored in protkey[] */
u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
} __packed;
/* inside view of a clear key token (type 0x00 version 0x02) */
struct clearkeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3];
u8 version; /* 0x02 for clear key token */
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
u32 len; /* bytes actually stored in clearkey[] */
u8 clearkey[]; /* clear key value */
} __packed;
/* helper function which translates PKEY_KEYTYPE_AES_* key types to their key size in bytes */
static inline u32 pkey_keytype_aes_to_size(u32 keytype)
{
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
return 16;
case PKEY_KEYTYPE_AES_192:
return 24;
case PKEY_KEYTYPE_AES_256:
return 32;
default:
return 0;
}
}
/*
* Create a protected key from a clear key value via PCKMO instruction.
*/
static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
/* mask of available pckmo subfunctions */
static cpacf_mask_t pckmo_functions;
u8 paramblock[112];
u32 pkeytype;
int keysize;
long fc;
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
/* 16 byte key, 32 byte aes wkvp, total 48 bytes */
keysize = 16;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_128_KEY;
break;
case PKEY_KEYTYPE_AES_192:
/* 24 byte key, 32 byte aes wkvp, total 56 bytes */
keysize = 24;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_192_KEY;
break;
case PKEY_KEYTYPE_AES_256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_256_KEY;
break;
case PKEY_KEYTYPE_ECC_P256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
break;
case PKEY_KEYTYPE_ECC_P384:
/* 48 byte key, 32 byte aes wkvp, total 80 bytes */
keysize = 48;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
break;
case PKEY_KEYTYPE_ECC_P521:
/* 80 byte key, 32 byte aes wkvp, total 112 bytes */
keysize = 80;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
break;
case PKEY_KEYTYPE_ECC_ED25519:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
break;
case PKEY_KEYTYPE_ECC_ED448:
/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
keysize = 64;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
default:
DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, keytype);
return -EINVAL;
}
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
DEBUG_ERR("%s prot key buffer size too small: %u < %d\n",
__func__, *protkeylen, keysize + AES_WK_VP_SIZE);
return -EINVAL;
}
/* Did we already check for PCKMO? */
if (!pckmo_functions.bytes[0]) {
/* no, so check now */
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
return -ENODEV;
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
}
/* prepare param block */
memset(paramblock, 0, sizeof(paramblock));
memcpy(paramblock, clrkey, keysize);
/* call the pckmo instruction */
cpacf_pckmo(fc, paramblock);
/* copy created protected key to key buffer including the wkvp block */
*protkeylen = keysize + AES_WK_VP_SIZE;
memcpy(protkey, paramblock, *protkeylen);
*protkeytype = pkeytype;
return 0;
}
/*
* Find card and transform secure key into protected key.
*/
static int pkey_skey2pkey(const u8 *key, u8 *protkey,
u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u16 cardnr, domain;
int rc, verify;
zcrypt_wait_api_operational();
/*
* The cca_xxx2protkey call may fail when a card has been
* addressed where the master key was changed after last fetch
* of the mkvp into the cache. Try 3 times: first without verify,
* then with verify, and in the last round also accept cards where
* only the old master key verification pattern matches.
*/
for (verify = 0; verify < 3; verify++) {
rc = cca_findcard(key, &cardnr, &domain, verify);
if (rc < 0)
continue;
if (rc > 0 && verify < 2)
continue;
switch (hdr->version) {
case TOKVER_CCA_AES:
rc = cca_sec2protkey(cardnr, domain, key,
protkey, protkeylen, protkeytype);
break;
case TOKVER_CCA_VLSC:
rc = cca_cipher2protkey(cardnr, domain, key,
protkey, protkeylen,
protkeytype);
break;
default:
return -EINVAL;
}
if (rc == 0)
break;
}
if (rc)
DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
return rc;
}
/*
* Construct EP11 key with given clear key value.
*/
static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
u8 *keybuf, size_t *keybuflen)
{
u32 nr_apqns, *apqns = NULL;
u16 card, dom;
int i, rc;
zcrypt_wait_api_operational();
/* build a list of apqns suitable for ep11 keys with cpacf support */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7,
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
NULL);
if (rc)
goto out;
/* go through the list of apqns and try to build an ep11 key */
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
0, clrkey, keybuf, keybuflen,
PKEY_TYPE_EP11);
if (rc == 0)
break;
}
out:
kfree(apqns);
if (rc)
DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
return rc;
}
/*
* Find card and transform EP11 secure key into protected key.
*/
static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 nr_apqns, *apqns = NULL;
u16 card, dom;
int i, rc;
zcrypt_wait_api_operational();
/* build a list of apqns suitable for this key */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7,
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
/* go through the list of apqns and try to derive a pkey */
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
rc = ep11_kblob2protkey(card, dom, key, keylen,
protkey, protkeylen, protkeytype);
if (rc == 0)
break;
}
out:
kfree(apqns);
if (rc)
DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
return rc;
}
/*
* Verify key and give back some info about the key.
*/
static int pkey_verifykey(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain,
u16 *pkeysize, u32 *pattributes)
{
struct secaeskeytoken *t = (struct secaeskeytoken *)seckey;
u16 cardnr, domain;
int rc;
/* check the secure key for valid AES secure key */
rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0);
if (rc)
goto out;
if (pattributes)
*pattributes = PKEY_VERIFY_ATTR_AES;
if (pkeysize)
*pkeysize = t->bitsize;
/* try to find a card which can handle this key */
rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
if (rc < 0)
goto out;
if (rc > 0) {
/* key mkvp matches to old master key mkvp */
DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
rc = 0;
}
if (pcardnr)
*pcardnr = cardnr;
if (pdomain)
*pdomain = domain;
out:
DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
/*
* Generate a random protected key
*/
static int pkey_genprotkey(u32 keytype, u8 *protkey,
u32 *protkeylen, u32 *protkeytype)
{
u8 clrkey[32];
int keysize;
int rc;
keysize = pkey_keytype_aes_to_size(keytype);
if (!keysize) {
DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
keytype);
return -EINVAL;
}
/* generate a dummy random clear key */
get_random_bytes(clrkey, keysize);
/* convert it to a dummy protected key */
rc = pkey_clr2protkey(keytype, clrkey,
protkey, protkeylen, protkeytype);
if (rc)
return rc;
/* replace the key part of the protected key with random bytes */
get_random_bytes(protkey, keysize);
return 0;
}
/*
* Verify if a protected key is still valid
*/
static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
u32 protkeytype)
{
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE];
} param;
u8 null_msg[AES_BLOCK_SIZE];
u8 dest_buf[AES_BLOCK_SIZE];
unsigned int k, pkeylen;
unsigned long fc;
switch (protkeytype) {
case PKEY_KEYTYPE_AES_128:
pkeylen = 16 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_128;
break;
case PKEY_KEYTYPE_AES_192:
pkeylen = 24 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_192;
break;
case PKEY_KEYTYPE_AES_256:
pkeylen = 32 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_256;
break;
default:
DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__,
protkeytype);
return -EINVAL;
}
if (protkeylen != pkeylen) {
DEBUG_ERR("%s invalid protected key size %u for keytype %u\n",
__func__, protkeylen, protkeytype);
return -EINVAL;
}
memset(null_msg, 0, sizeof(null_msg));
memset(param.iv, 0, sizeof(param.iv));
memcpy(param.key, protkey, protkeylen);
k = cpacf_kmc(fc | CPACF_ENCRYPT, ¶m, null_msg, dest_buf,
sizeof(null_msg));
if (k != sizeof(null_msg)) {
DEBUG_ERR("%s protected key is not valid\n", __func__);
return -EKEYREJECTED;
}
return 0;
}
/* Helper for pkey_nonccatok2pkey, handles aes clear key token */
static int nonccatokaes2pkey(const struct clearkeytoken *t,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE);
u8 *tmpbuf = NULL;
u32 keysize;
int rc;
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize) {
DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, t->keytype);
return -EINVAL;
}
if (t->len != keysize) {
DEBUG_ERR("%s non clear key aes token: invalid key len %u\n",
__func__, t->len);
return -EINVAL;
}
/* try direct way with the PCKMO instruction */
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
/* PCKMO failed, so try the CCA secure key way */
tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
if (!tmpbuf)
return -ENOMEM;
zcrypt_wait_api_operational();
rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf);
if (rc)
goto try_via_ep11;
rc = pkey_skey2pkey(tmpbuf,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
try_via_ep11:
/* if the CCA way also failed, let's try via EP11 */
rc = pkey_clr2ep11key(t->clearkey, t->len,
tmpbuf, &tmpbuflen);
if (rc)
goto failure;
rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
failure:
DEBUG_ERR("%s unable to build protected key from clear", __func__);
out:
kfree(tmpbuf);
return rc;
}
/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */
static int nonccatokecc2pkey(const struct clearkeytoken *t,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 keylen;
int rc;
switch (t->keytype) {
case PKEY_KEYTYPE_ECC_P256:
keylen = 32;
break;
case PKEY_KEYTYPE_ECC_P384:
keylen = 48;
break;
case PKEY_KEYTYPE_ECC_P521:
keylen = 80;
break;
case PKEY_KEYTYPE_ECC_ED25519:
keylen = 32;
break;
case PKEY_KEYTYPE_ECC_ED448:
keylen = 64;
break;
default:
DEBUG_ERR("%s unknown/unsupported keytype %u\n",
__func__, t->keytype);
return -EINVAL;
}
if (t->len != keylen) {
DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n",
__func__, t->len);
return -EINVAL;
}
/* only one path possible: via PCKMO instruction */
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (rc) {
DEBUG_ERR("%s unable to build protected key from clear",
__func__);
}
return rc;
}
/*
* Transform a non-CCA key token into a protected key
*/
static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc = -EINVAL;
switch (hdr->version) {
case TOKVER_PROTECTED_KEY: {
struct protaeskeytoken *t;
if (keylen != sizeof(struct protaeskeytoken))
goto out;
t = (struct protaeskeytoken *)key;
rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype);
if (rc)
goto out;
memcpy(protkey, t->protkey, t->len);
*protkeylen = t->len;
*protkeytype = t->keytype;
break;
}
case TOKVER_CLEAR_KEY: {
struct clearkeytoken *t = (struct clearkeytoken *)key;
if (keylen < sizeof(struct clearkeytoken) ||
keylen != sizeof(*t) + t->len)
goto out;
switch (t->keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
rc = nonccatokaes2pkey(t, protkey,
protkeylen, protkeytype);
break;
case PKEY_KEYTYPE_ECC_P256:
case PKEY_KEYTYPE_ECC_P384:
case PKEY_KEYTYPE_ECC_P521:
case PKEY_KEYTYPE_ECC_ED25519:
case PKEY_KEYTYPE_ECC_ED448:
rc = nonccatokecc2pkey(t, protkey,
protkeylen, protkeytype);
break;
default:
DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n",
__func__, t->keytype);
return -EINVAL;
}
break;
}
case TOKVER_EP11_AES: {
/* check that the ep11 key is exportable as a protected key */
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
}
case TOKVER_EP11_AES_WITH_HEADER:
/* check that the ep11 key with header is exportable as a protected key */
rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version);
}
out:
return rc;
}
/*
* Transform a CCA internal key token into a protected key
*/
static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
switch (hdr->version) {
case TOKVER_CCA_AES:
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
break;
case TOKVER_CCA_VLSC:
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
break;
default:
DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
return pkey_skey2pkey(key, protkey, protkeylen, protkeytype);
}
/*
* Transform a key blob (of any type) into a protected key
*/
int pkey_keyblob2pkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc;
if (keylen < sizeof(struct keytoken_header)) {
DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
}
switch (hdr->type) {
case TOKTYPE_NON_CCA:
rc = pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
case TOKTYPE_CCA_INTERNAL:
rc = pkey_ccainttok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type);
return -EINVAL;
}
DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
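/*
 * Generate a CCA or EP11 secure key of the given type and size on one
 * of the given APQNs. The APQNs are tried in the given order until one
 * of them succeeds.
 */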
static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
enum pkey_key_type ktype, enum pkey_key_size ksize,
u32 kflags, u8 *keybuf, size_t *keybufsize)
{
int i, card, dom, rc;
/* check for at least one apqn given */
if (!apqns || !nr_apqns)
return -EINVAL;
/* check key type and size */
switch (ktype) {
case PKEY_TYPE_CCA_DATA:
case PKEY_TYPE_CCA_CIPHER:
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
case PKEY_TYPE_EP11:
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
return -EINVAL;
break;
case PKEY_TYPE_EP11_AES:
if (*keybufsize < (sizeof(struct ep11kblob_header) +
MINEP11AESKEYBLOBSIZE))
return -EINVAL;
break;
default:
return -EINVAL;
}
switch (ksize) {
case PKEY_SIZE_AES_128:
case PKEY_SIZE_AES_192:
case PKEY_SIZE_AES_256:
break;
default:
return -EINVAL;
}
/* simply try all apqns from the list */
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
if (ktype == PKEY_TYPE_EP11 ||
ktype == PKEY_TYPE_EP11_AES) {
rc = ep11_genaeskey(card, dom, ksize, kflags,
keybuf, keybufsize, ktype);
} else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_genseckey(card, dom, ksize, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
} else {
/* TOKVER_CCA_VLSC */
rc = cca_gencipherkey(card, dom, ksize, kflags,
keybuf, keybufsize);
}
if (rc == 0)
break;
}
return rc;
}
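/*
 * Build a CCA or EP11 secure key of the given type and size from the
 * given clear key value. The APQNs are tried in the given order until
 * one of them succeeds.
 */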
static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
enum pkey_key_type ktype, enum pkey_key_size ksize,
u32 kflags, const u8 *clrkey,
u8 *keybuf, size_t *keybufsize)
{
int i, card, dom, rc;
/* check for at least one apqn given */
if (!apqns || !nr_apqns)
return -EINVAL;
/* check key type and size */
switch (ktype) {
case PKEY_TYPE_CCA_DATA:
case PKEY_TYPE_CCA_CIPHER:
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
case PKEY_TYPE_EP11:
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
return -EINVAL;
break;
case PKEY_TYPE_EP11_AES:
if (*keybufsize < (sizeof(struct ep11kblob_header) +
MINEP11AESKEYBLOBSIZE))
return -EINVAL;
break;
default:
return -EINVAL;
}
switch (ksize) {
case PKEY_SIZE_AES_128:
case PKEY_SIZE_AES_192:
case PKEY_SIZE_AES_256:
break;
default:
return -EINVAL;
}
zcrypt_wait_api_operational();
/* simply try all apqns from the list */
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
if (ktype == PKEY_TYPE_EP11 ||
ktype == PKEY_TYPE_EP11_AES) {
rc = ep11_clr2keyblob(card, dom, ksize, kflags,
clrkey, keybuf, keybufsize,
ktype);
} else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_clr2seckey(card, dom, ksize,
clrkey, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
} else {
/* TOKVER_CCA_VLSC */
rc = cca_clr2cipherkey(card, dom, ksize, kflags,
clrkey, keybuf, keybufsize);
}
if (rc == 0)
break;
}
return rc;
}
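/*
 * Verify the given secure key blob (CCA AES data, CCA AES cipher or
 * EP11 AES key, with or without header) and report key type, key size,
 * master key matching flags and a card/domain able to handle this key.
 */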
static int pkey_verifykey2(const u8 *key, size_t keylen,
u16 *cardnr, u16 *domain,
enum pkey_key_type *ktype,
enum pkey_key_size *ksize, u32 *flags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
if (rc)
goto out;
if (ktype)
*ktype = PKEY_TYPE_CCA_DATA;
if (ksize)
*ksize = (enum pkey_key_size)t->bitsize;
rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&_apqns, &_nr_apqns,
*cardnr, *domain,
ZCRYPT_CEX3C, AES_MK_SET,
0, t->mkvp, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
if (rc)
goto out;
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
if (rc)
goto out;
if (ktype)
*ktype = PKEY_TYPE_CCA_CIPHER;
if (ksize) {
*ksize = PKEY_SIZE_UNKNOWN;
if (!t->plfver && t->wpllen == 512)
*ksize = PKEY_SIZE_AES_128;
else if (!t->plfver && t->wpllen == 576)
*ksize = PKEY_SIZE_AES_192;
else if (!t->plfver && t->wpllen == 640)
*ksize = PKEY_SIZE_AES_256;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&_apqns, &_nr_apqns,
*cardnr, *domain,
ZCRYPT_CEX6, AES_MK_SET,
0, t->mkvp0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
if (rc)
goto out;
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int api;
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
*ktype = PKEY_TYPE_EP11;
if (ksize)
*ksize = kb->head.bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
ZCRYPT_CEX7, api,
ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
if (flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
int api;
rc = ep11_check_aes_key_with_hdr(debug_info, 3,
key, keylen, 1);
if (rc)
goto out;
if (ktype)
*ktype = PKEY_TYPE_EP11_AES;
if (ksize)
*ksize = kh->bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
ZCRYPT_CEX7, api,
ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
if (flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
} else {
rc = -EINVAL;
}
out:
kfree(_apqns);
return rc;
}
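/*
 * Transform a CCA or EP11 AES secure key blob into a protected key,
 * using only the given list of APQNs. Other non-CCA tokens are handed
 * over to pkey_nonccatok2pkey().
 */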
static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int i, card, dom, rc;
/* check for at least one apqn given */
if (!apqns || !nr_apqns)
return -EINVAL;
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
if (hdr->type == TOKTYPE_CCA_INTERNAL) {
if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
return -EINVAL;
} else {
DEBUG_ERR("%s unknown CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_NON_CCA) {
if (hdr->version == TOKVER_EP11_AES) {
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
if (ep11_check_aes_key_with_hdr(debug_info, 3,
key, keylen, 1))
return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen,
protkeytype);
}
} else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type);
return -EINVAL;
}
zcrypt_wait_api_operational();
/* simply try all apqns from the list */
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
rc = cca_sec2protkey(card, dom, key,
protkey, protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
rc = cca_cipher2protkey(card, dom, key,
protkey, protkeylen,
protkeytype);
} else {
rc = ep11_kblob2protkey(card, dom, key, keylen,
protkey, protkeylen,
protkeytype);
}
if (rc == 0)
break;
}
return rc;
}
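/*
 * Build a list of APQNs which are able to handle the given key blob,
 * filtered by the given PKEY_FLAGS_MATCH_* flags.
 */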
static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL;
zcrypt_wait_api_operational();
if (hdr->type == TOKTYPE_NON_CCA &&
(hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
struct ep11keyblob *kb = (struct ep11keyblob *)
(key + sizeof(struct ep11kblob_header));
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
if (rc)
goto out;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
if (rc)
goto out;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
u64 cur_mkvp = 0, old_mkvp = 0;
int minhwtype = ZCRYPT_CEX3C;
if (hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
} else if (hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
minhwtype = ZCRYPT_CEX6;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp0;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp0;
} else {
/* unknown cca internal token type */
return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
u64 cur_mkvp = 0, old_mkvp = 0;
if (t->secid == 0x20) {
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
} else {
/* unknown cca internal pka token type */
return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else {
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
return rc;
}
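/*
 * Build a list of APQNs which are able to handle keys of the given
 * key type, optionally restricted by the given current/alternate
 * master key verification patterns.
 */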
static int pkey_apqns4keytype(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
u32 _nr_apqns, *_apqns = NULL;
int rc;
zcrypt_wait_api_operational();
if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
u64 cur_mkvp = 0, old_mkvp = 0;
int minhwtype = ZCRYPT_CEX3C;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = *((u64 *)alt_mkvp);
if (ktype == PKEY_TYPE_CCA_CIPHER)
minhwtype = ZCRYPT_CEX6;
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else if (ktype == PKEY_TYPE_CCA_ECC) {
u64 cur_mkvp = 0, old_mkvp = 0;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = *((u64 *)alt_mkvp);
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else if (ktype == PKEY_TYPE_EP11 ||
ktype == PKEY_TYPE_EP11_AES ||
ktype == PKEY_TYPE_EP11_ECC) {
u8 *wkvp = NULL;
int api;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
wkvp = cur_mkvp;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, api, wkvp);
if (rc)
goto out;
} else {
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
return rc;
}
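/*
 * Transform a key blob (CCA AES data/cipher key, CCA ECC private key
 * or EP11 AES/ECC key, with or without header) into a protected key,
 * using only the given list of APQNs. Other non-CCA tokens are handed
 * over to pkey_nonccatok2pkey().
 */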
static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int i, card, dom, rc;
/* check for at least one apqn given */
if (!apqns || !nr_apqns)
return -EINVAL;
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
if (hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
return -EINVAL;
} else {
DEBUG_ERR("%s unknown CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
/* CCA ECC (private) key */
if (keylen < sizeof(struct eccprivkeytoken))
return -EINVAL;
if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA) {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
} else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type);
return -EINVAL;
}
/* simply try all apqns from the list */
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
if (hdr->type == TOKTYPE_NON_CCA &&
(hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
protkey, protkeylen,
protkeytype);
else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
protkey, protkeylen,
protkeytype);
else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES)
rc = cca_sec2protkey(card, dom, key, protkey,
protkeylen, protkeytype);
else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC)
rc = cca_cipher2protkey(card, dom, key, protkey,
protkeylen, protkeytype);
else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
rc = cca_ecc2protkey(card, dom, key, protkey,
protkeylen, protkeytype);
else
return -EINVAL;
}
return rc;
}
/*
* File io functions
*/
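/* Copy a key blob from user space, rejecting blobs outside the supported size range. */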
static void *_copy_key_from_user(void __user *ukey, size_t keylen)
{
if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
return ERR_PTR(-EINVAL);
return memdup_user(ukey, keylen);
}
static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
{
if (!uapqns || nr_apqns == 0)
return NULL;
return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
}
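/*
 * pkey_unlocked_ioctl(): Handle the ioctl interface of the "pkey" misc device.
 *
 * Illustrative sketch only (not part of the driver): a user space
 * caller would typically open the misc device (usually /dev/pkey) and
 * issue one of the PKEY_* ioctls handled below, roughly like this:
 *
 *	struct pkey_kblob2pkey ktp = { .key = blob, .keylen = bloblen };
 *	fd = open("/dev/pkey", O_RDWR);
 *	rc = ioctl(fd, PKEY_KBLOB2PROTK, &ktp);
 *
 * On success the protected key is returned in ktp.protkey. The struct
 * and ioctl names follow their use in the handler below; error
 * handling is omitted for brevity.
 */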
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
switch (cmd) {
case PKEY_GENSECK: {
struct pkey_genseck __user *ugs = (void __user *)arg;
struct pkey_genseck kgs;
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
return -EFAULT;
rc = cca_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, kgs.seckey.seckey);
DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
return -EFAULT;
break;
}
case PKEY_CLR2SECK: {
struct pkey_clr2seck __user *ucs = (void __user *)arg;
struct pkey_clr2seck kcs;
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
kcs.clrkey.clrkey, kcs.seckey.seckey);
DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
return -EFAULT;
memzero_explicit(&kcs, sizeof(kcs));
break;
}
case PKEY_SEC2PROTK: {
struct pkey_sec2protk __user *usp = (void __user *)arg;
struct pkey_sec2protk ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
return -EFAULT;
break;
}
case PKEY_CLR2PROTK: {
struct pkey_clr2protk __user *ucp = (void __user *)arg;
struct pkey_clr2protk kcp;
if (copy_from_user(&kcp, ucp, sizeof(kcp)))
return -EFAULT;
kcp.protkey.len = sizeof(kcp.protkey.protkey);
rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
kcp.protkey.protkey,
&kcp.protkey.len, &kcp.protkey.type);
DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
return -EFAULT;
memzero_explicit(&kcp, sizeof(kcp));
break;
}
case PKEY_FINDCARD: {
struct pkey_findcard __user *ufc = (void __user *)arg;
struct pkey_findcard kfc;
if (copy_from_user(&kfc, ufc, sizeof(kfc)))
return -EFAULT;
rc = cca_findcard(kfc.seckey.seckey,
&kfc.cardnr, &kfc.domain, 1);
DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
return -EFAULT;
break;
}
case PKEY_SKEY2PKEY: {
struct pkey_skey2pkey __user *usp = (void __user *)arg;
struct pkey_skey2pkey ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
return -EFAULT;
break;
}
case PKEY_VERIFYKEY: {
struct pkey_verifykey __user *uvk = (void __user *)arg;
struct pkey_verifykey kvk;
if (copy_from_user(&kvk, uvk, sizeof(kvk)))
return -EFAULT;
rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
&kvk.keysize, &kvk.attributes);
DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
return -EFAULT;
break;
}
case PKEY_GENPROTK: {
struct pkey_genprotk __user *ugp = (void __user *)arg;
struct pkey_genprotk kgp;
if (copy_from_user(&kgp, ugp, sizeof(kgp)))
return -EFAULT;
kgp.protkey.len = sizeof(kgp.protkey.protkey);
rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
&kgp.protkey.len, &kgp.protkey.type);
DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugp, &kgp, sizeof(kgp)))
return -EFAULT;
break;
}
case PKEY_VERIFYPROTK: {
struct pkey_verifyprotk __user *uvp = (void __user *)arg;
struct pkey_verifyprotk kvp;
if (copy_from_user(&kvp, uvp, sizeof(kvp)))
return -EFAULT;
rc = pkey_verifyprotkey(kvp.protkey.protkey,
kvp.protkey.len, kvp.protkey.type);
DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
break;
}
case PKEY_KBLOB2PROTK: {
struct pkey_kblob2pkey __user *utp = (void __user *)arg;
struct pkey_kblob2pkey ktp;
u8 *kkey;
if (copy_from_user(&ktp, utp, sizeof(ktp)))
return -EFAULT;
kkey = _copy_key_from_user(ktp.key, ktp.keylen);
if (IS_ERR(kkey))
return PTR_ERR(kkey);
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
&ktp.protkey.len, &ktp.protkey.type);
DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
break;
if (copy_to_user(utp, &ktp, sizeof(ktp)))
return -EFAULT;
break;
}
case PKEY_GENSECK2: {
struct pkey_genseck2 __user *ugs = (void __user *)arg;
size_t klen = KEYBLOBBUFSIZE;
struct pkey_genseck2 kgs;
struct pkey_apqn *apqns;
u8 *kkey;
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
return -EFAULT;
apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
kkey = kzalloc(klen, GFP_KERNEL);
if (!kkey) {
kfree(apqns);
return -ENOMEM;
}
rc = pkey_genseckey2(apqns, kgs.apqn_entries,
kgs.type, kgs.size, kgs.keygenflags,
kkey, &klen);
DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
break;
}
if (kgs.key) {
if (kgs.keylen < klen) {
kfree(kkey);
return -EINVAL;
}
if (copy_to_user(kgs.key, kkey, klen)) {
kfree(kkey);
return -EFAULT;
}
}
kgs.keylen = klen;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
rc = -EFAULT;
kfree(kkey);
break;
}
case PKEY_CLR2SECK2: {
struct pkey_clr2seck2 __user *ucs = (void __user *)arg;
size_t klen = KEYBLOBBUFSIZE;
struct pkey_clr2seck2 kcs;
struct pkey_apqn *apqns;
u8 *kkey;
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
kkey = kzalloc(klen, GFP_KERNEL);
if (!kkey) {
kfree(apqns);
return -ENOMEM;
}
rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
kcs.type, kcs.size, kcs.keygenflags,
kcs.clrkey.clrkey, kkey, &klen);
DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
break;
}
if (kcs.key) {
if (kcs.keylen < klen) {
kfree(kkey);
return -EINVAL;
}
if (copy_to_user(kcs.key, kkey, klen)) {
kfree(kkey);
return -EFAULT;
}
}
kcs.keylen = klen;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
rc = -EFAULT;
memzero_explicit(&kcs, sizeof(kcs));
kfree(kkey);
break;
}
case PKEY_VERIFYKEY2: {
struct pkey_verifykey2 __user *uvk = (void __user *)arg;
struct pkey_verifykey2 kvk;
u8 *kkey;
if (copy_from_user(&kvk, uvk, sizeof(kvk)))
return -EFAULT;
kkey = _copy_key_from_user(kvk.key, kvk.keylen);
if (IS_ERR(kkey))
return PTR_ERR(kkey);
rc = pkey_verifykey2(kkey, kvk.keylen,
&kvk.cardnr, &kvk.domain,
&kvk.type, &kvk.size, &kvk.flags);
DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
kfree(kkey);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
return -EFAULT;
break;
}
case PKEY_KBLOB2PROTK2: {
struct pkey_kblob2pkey2 __user *utp = (void __user *)arg;
struct pkey_apqn *apqns = NULL;
struct pkey_kblob2pkey2 ktp;
u8 *kkey;
if (copy_from_user(&ktp, utp, sizeof(ktp)))
return -EFAULT;
apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
kkey = _copy_key_from_user(ktp.key, ktp.keylen);
if (IS_ERR(kkey)) {
kfree(apqns);
return PTR_ERR(kkey);
}
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
&ktp.protkey.type);
DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
break;
if (copy_to_user(utp, &ktp, sizeof(ktp)))
return -EFAULT;
break;
}
case PKEY_APQNS4K: {
struct pkey_apqns4key __user *uak = (void __user *)arg;
struct pkey_apqn *apqns = NULL;
struct pkey_apqns4key kak;
size_t nr_apqns, len;
u8 *kkey;
if (copy_from_user(&kak, uak, sizeof(kak)))
return -EFAULT;
nr_apqns = kak.apqn_entries;
if (nr_apqns) {
apqns = kmalloc_array(nr_apqns,
sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!apqns)
return -ENOMEM;
}
kkey = _copy_key_from_user(kak.key, kak.keylen);
if (IS_ERR(kkey)) {
kfree(apqns);
return PTR_ERR(kkey);
}
rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
apqns, &nr_apqns);
DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
kfree(kkey);
if (rc && rc != -ENOSPC) {
kfree(apqns);
break;
}
if (!rc && kak.apqns) {
if (nr_apqns > kak.apqn_entries) {
kfree(apqns);
return -EINVAL;
}
len = nr_apqns * sizeof(struct pkey_apqn);
if (len) {
if (copy_to_user(kak.apqns, apqns, len)) {
kfree(apqns);
return -EFAULT;
}
}
}
kak.apqn_entries = nr_apqns;
if (copy_to_user(uak, &kak, sizeof(kak)))
rc = -EFAULT;
kfree(apqns);
break;
}
case PKEY_APQNS4KT: {
struct pkey_apqns4keytype __user *uat = (void __user *)arg;
struct pkey_apqn *apqns = NULL;
struct pkey_apqns4keytype kat;
size_t nr_apqns, len;
if (copy_from_user(&kat, uat, sizeof(kat)))
return -EFAULT;
nr_apqns = kat.apqn_entries;
if (nr_apqns) {
apqns = kmalloc_array(nr_apqns,
sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!apqns)
return -ENOMEM;
}
rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
kat.flags, apqns, &nr_apqns);
DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
if (rc && rc != -ENOSPC) {
kfree(apqns);
break;
}
if (!rc && kat.apqns) {
if (nr_apqns > kat.apqn_entries) {
kfree(apqns);
return -EINVAL;
}
len = nr_apqns * sizeof(struct pkey_apqn);
if (len) {
if (copy_to_user(kat.apqns, apqns, len)) {
kfree(apqns);
return -EFAULT;
}
}
}
kat.apqn_entries = nr_apqns;
if (copy_to_user(uat, &kat, sizeof(kat)))
rc = -EFAULT;
kfree(apqns);
break;
}
case PKEY_KBLOB2PROTK3: {
struct pkey_kblob2pkey3 __user *utp = (void __user *)arg;
u32 protkeylen = PROTKEYBLOBBUFSIZE;
struct pkey_apqn *apqns = NULL;
struct pkey_kblob2pkey3 ktp;
u8 *kkey, *protkey;
if (copy_from_user(&ktp, utp, sizeof(ktp)))
return -EFAULT;
apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
kkey = _copy_key_from_user(ktp.key, ktp.keylen);
if (IS_ERR(kkey)) {
kfree(apqns);
return PTR_ERR(kkey);
}
protkey = kmalloc(protkeylen, GFP_KERNEL);
if (!protkey) {
kfree(apqns);
kfree(kkey);
return -ENOMEM;
}
rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
kkey, ktp.keylen,
protkey, &protkeylen, &ktp.pkeytype);
DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc) {
kfree(protkey);
break;
}
if (ktp.pkey && ktp.pkeylen) {
if (protkeylen > ktp.pkeylen) {
kfree(protkey);
return -EINVAL;
}
if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
kfree(protkey);
return -EFAULT;
}
}
kfree(protkey);
ktp.pkeylen = protkeylen;
if (copy_to_user(utp, &ktp, sizeof(ktp)))
return -EFAULT;
break;
}
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
}
return rc;
}
/*
* Sysfs and file io operations
*/
/*
* Sysfs attribute read function for all protected key binary attributes.
* The implementation can not deal with partial reads, because a new random
* protected key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
struct protaeskeytoken protkeytoken;
struct pkey_protkey protkey;
int rc;
if (off != 0 || count < sizeof(protkeytoken))
return -EINVAL;
if (is_xts)
if (count < 2 * sizeof(protkeytoken))
return -EINVAL;
memset(&protkeytoken, 0, sizeof(protkeytoken));
protkeytoken.type = TOKTYPE_NON_CCA;
protkeytoken.version = TOKVER_PROTECTED_KEY;
protkeytoken.keytype = keytype;
protkey.len = sizeof(protkey.protkey);
rc = pkey_genprotkey(protkeytoken.keytype,
protkey.protkey, &protkey.len, &protkey.type);
if (rc)
return rc;
protkeytoken.len = protkey.len;
memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
memcpy(buf, &protkeytoken, sizeof(protkeytoken));
if (is_xts) {
/* xts needs a second protected key, reuse protkey struct */
protkey.len = sizeof(protkey.protkey);
rc = pkey_genprotkey(protkeytoken.keytype,
protkey.protkey, &protkey.len, &protkey.type);
if (rc)
return rc;
protkeytoken.len = protkey.len;
memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
memcpy(buf + sizeof(protkeytoken), &protkeytoken,
sizeof(protkeytoken));
return 2 * sizeof(protkeytoken);
}
return sizeof(protkeytoken);
}
static ssize_t protkey_aes_128_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
off, count);
}
static ssize_t protkey_aes_192_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
off, count);
}
static ssize_t protkey_aes_256_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
off, count);
}
static ssize_t protkey_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
off, count);
}
static ssize_t protkey_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
off, count);
}
static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
static struct bin_attribute *protkey_attrs[] = {
&bin_attr_protkey_aes_128,
&bin_attr_protkey_aes_192,
&bin_attr_protkey_aes_256,
&bin_attr_protkey_aes_128_xts,
&bin_attr_protkey_aes_256_xts,
NULL
};
static struct attribute_group protkey_attr_group = {
.name = "protkey",
.bin_attrs = protkey_attrs,
};
/*
* Sysfs attribute read function for all secure key ccadata binary attributes.
* The implementation can not deal with partial reads, because a new random
* secure key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
int rc;
if (off != 0 || count < sizeof(struct secaeskeytoken))
return -EINVAL;
if (is_xts)
if (count < 2 * sizeof(struct secaeskeytoken))
return -EINVAL;
rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
if (is_xts) {
seckey++;
rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
return 2 * sizeof(struct secaeskeytoken);
}
return sizeof(struct secaeskeytoken);
}
static ssize_t ccadata_aes_128_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
off, count);
}
static ssize_t ccadata_aes_192_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
off, count);
}
static ssize_t ccadata_aes_256_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
off, count);
}
static ssize_t ccadata_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
off, count);
}
static ssize_t ccadata_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
off, count);
}
static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
static struct bin_attribute *ccadata_attrs[] = {
&bin_attr_ccadata_aes_128,
&bin_attr_ccadata_aes_192,
&bin_attr_ccadata_aes_256,
&bin_attr_ccadata_aes_128_xts,
&bin_attr_ccadata_aes_256_xts,
NULL
};
static struct attribute_group ccadata_attr_group = {
.name = "ccadata",
.bin_attrs = ccadata_attrs,
};
#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
/*
* Sysfs attribute read function for all secure key ccacipher binary attributes.
* The implementation can not deal with partial reads, because a new random
* secure key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off,
size_t count)
{
size_t keysize = CCACIPHERTOKENSIZE;
u32 nr_apqns, *apqns = NULL;
int i, rc, card, dom;
if (off != 0 || count < CCACIPHERTOKENSIZE)
return -EINVAL;
if (is_xts)
if (count < 2 * CCACIPHERTOKENSIZE)
return -EINVAL;
/* build a list of apqns able to generate a cipher key */
rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX6, 0, 0, 0, 0);
if (rc)
return rc;
memset(buf, 0, is_xts ? 2 * keysize : keysize);
/* simply try all apqns from the list */
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
if (rc == 0)
break;
}
if (rc)
return rc;
if (is_xts) {
keysize = CCACIPHERTOKENSIZE;
buf += CCACIPHERTOKENSIZE;
rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
if (rc == 0)
return 2 * CCACIPHERTOKENSIZE;
}
return CCACIPHERTOKENSIZE;
}
static ssize_t ccacipher_aes_128_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
off, count);
}
static ssize_t ccacipher_aes_192_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
off, count);
}
static ssize_t ccacipher_aes_256_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
off, count);
}
static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
off, count);
}
static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
off, count);
}
static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
static struct bin_attribute *ccacipher_attrs[] = {
&bin_attr_ccacipher_aes_128,
&bin_attr_ccacipher_aes_192,
&bin_attr_ccacipher_aes_256,
&bin_attr_ccacipher_aes_128_xts,
&bin_attr_ccacipher_aes_256_xts,
NULL
};
static struct attribute_group ccacipher_attr_group = {
.name = "ccacipher",
.bin_attrs = ccacipher_attrs,
};
/*
* Sysfs attribute read function for all ep11 aes key binary attributes.
* The implementation can not deal with partial reads, because a new random
* secure key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
* This function and the sysfs attributes using it provide EP11 key blobs
* padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
* 336 bytes.
*/
static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off,
size_t count)
{
size_t keysize = MAXEP11AESKEYBLOBSIZE;
u32 nr_apqns, *apqns = NULL;
int i, rc, card, dom;
if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
return -EINVAL;
if (is_xts)
if (count < 2 * MAXEP11AESKEYBLOBSIZE)
return -EINVAL;
/* build a list of apqns able to generate an ep11 aes key */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7,
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
NULL);
if (rc)
return rc;
memset(buf, 0, is_xts ? 2 * keysize : keysize);
/* simply try all apqns from the list */
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
PKEY_TYPE_EP11_AES);
if (rc == 0)
break;
}
if (rc)
return rc;
if (is_xts) {
keysize = MAXEP11AESKEYBLOBSIZE;
buf += MAXEP11AESKEYBLOBSIZE;
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
PKEY_TYPE_EP11_AES);
if (rc == 0)
return 2 * MAXEP11AESKEYBLOBSIZE;
}
return MAXEP11AESKEYBLOBSIZE;
}
static ssize_t ep11_aes_128_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
off, count);
}
static ssize_t ep11_aes_192_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
off, count);
}
static ssize_t ep11_aes_256_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
off, count);
}
static ssize_t ep11_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
off, count);
}
static ssize_t ep11_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
off, count);
}
static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
static struct bin_attribute *ep11_attrs[] = {
&bin_attr_ep11_aes_128,
&bin_attr_ep11_aes_192,
&bin_attr_ep11_aes_256,
&bin_attr_ep11_aes_128_xts,
&bin_attr_ep11_aes_256_xts,
NULL
};
static struct attribute_group ep11_attr_group = {
.name = "ep11",
.bin_attrs = ep11_attrs,
};
static const struct attribute_group *pkey_attr_groups[] = {
&protkey_attr_group,
&ccadata_attr_group,
&ccacipher_attr_group,
&ep11_attr_group,
NULL,
};
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.llseek = no_llseek,
.unlocked_ioctl = pkey_unlocked_ioctl,
};
static struct miscdevice pkey_dev = {
.name = "pkey",
.minor = MISC_DYNAMIC_MINOR,
.mode = 0666,
.fops = &pkey_fops,
.groups = pkey_attr_groups,
};
/*
* Module init
*/
static int __init pkey_init(void)
{
cpacf_mask_t func_mask;
/*
* The pckmo instruction should be available - even if we don't
* actually invoke it. This instruction comes with MSA 3 which
* is also the minimum level for the kmc instructions which
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &func_mask))
return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &func_mask))
return -ENODEV;
if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
return -ENODEV;
pkey_debug_init();
return misc_register(&pkey_dev);
}
/*
* Module exit
*/
static void __exit pkey_exit(void)
{
misc_deregister(&pkey_dev);
pkey_debug_exit();
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
| linux-master | drivers/s390/crypto/pkey_api.c |
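Illustrative aside (not part of pkey_api.c): a minimal user-space sketch of how the binary EP11 attributes registered above could be consumed. The sysfs path is an assumption based on where misc devices usually appear, and the buffer size only has to be at least MAXEP11AESKEYBLOBSIZE.

/* Hedged sketch, not part of the kernel sources: read a fresh EP11
 * AES-256 secure key blob via the binary sysfs attribute shown above.
 * The path below is an assumption (typical misc-device location). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_BLOB 8192	/* must be >= MAXEP11AESKEYBLOBSIZE */

int main(void)
{
	const char *path =
		"/sys/devices/virtual/misc/pkey/ep11/ep11_aes_256";
	unsigned char blob[MAX_BLOB];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* the attribute's read handler fills the buffer with a key blob */
	n = read(fd, blob, sizeof(blob));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes of EP11 key blob\n", n);
	close(fd);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}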
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
* Cornelia Huck <[email protected]>
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
* MSGTYPE restruct: Holger Dengler <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
* Device attributes common for all crypto card devices.
*/
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", zc->type_string);
}
static DEVICE_ATTR_RO(type);
static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ap_card *ac = to_ap_card(dev);
int online = ac->config && zc->online ? 1 : 0;
return sysfs_emit(buf, "%d\n", online);
}
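/*
 * Writing 0 or 1 toggles the soft-online state of the card and forces
 * the same state onto all of its queues; going online requires the card
 * to be configured. Uevents for the affected queues are emitted only
 * after zcrypt_list_lock has been dropped.
 */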
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ap_card *ac = to_ap_card(dev);
struct zcrypt_queue *zq;
int online, id, i = 0, maxzqs = 0;
struct zcrypt_queue **zq_uelist = NULL;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
if (online && !ac->config)
return -ENODEV;
zc->online = online;
id = zc->card->id;
ZCRYPT_DBF_INFO("%s card=%02x online=%d\n", __func__, id, online);
ap_send_online_uevent(&ac->ap_dev, online);
spin_lock(&zcrypt_list_lock);
	/*
	 * As we are in atomic context here, sending uevents directly does
	 * not work. So collect the zqueues in a dynamic array and process
	 * them after zcrypt_list_lock has been released. Taking a get/put
	 * reference on each zqueue guarantees that the objects still exist
	 * once the lock is dropped.
	 */
list_for_each_entry(zq, &zc->zqueues, list)
maxzqs++;
if (maxzqs > 0)
zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
list_for_each_entry(zq, &zc->zqueues, list)
if (zcrypt_queue_force_online(zq, online))
if (zq_uelist) {
zcrypt_queue_get(zq);
zq_uelist[i++] = zq;
}
spin_unlock(&zcrypt_list_lock);
if (zq_uelist) {
for (i = 0; zq_uelist[i]; i++) {
zq = zq_uelist[i];
ap_send_online_uevent(&zq->queue->ap_dev, online);
zcrypt_queue_put(zq);
}
kfree(zq_uelist);
}
return count;
}
static DEVICE_ATTR_RW(online);
static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", atomic_read(&zc->load));
}
static DEVICE_ATTR_RO(load);
static struct attribute *zcrypt_card_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
&dev_attr_load.attr,
NULL,
};
static const struct attribute_group zcrypt_card_attr_group = {
.attrs = zcrypt_card_attrs,
};
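/*
 * Allocate a zeroed zcrypt card structure; the embedded kref starts at
 * 1, so the caller holds the initial reference.
 */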
struct zcrypt_card *zcrypt_card_alloc(void)
{
struct zcrypt_card *zc;
zc = kzalloc(sizeof(*zc), GFP_KERNEL);
if (!zc)
return NULL;
INIT_LIST_HEAD(&zc->list);
INIT_LIST_HEAD(&zc->zqueues);
kref_init(&zc->refcount);
return zc;
}
EXPORT_SYMBOL(zcrypt_card_alloc);
void zcrypt_card_free(struct zcrypt_card *zc)
{
kfree(zc);
}
EXPORT_SYMBOL(zcrypt_card_free);
static void zcrypt_card_release(struct kref *kref)
{
struct zcrypt_card *zdev =
container_of(kref, struct zcrypt_card, refcount);
zcrypt_card_free(zdev);
}
void zcrypt_card_get(struct zcrypt_card *zc)
{
kref_get(&zc->refcount);
}
EXPORT_SYMBOL(zcrypt_card_get);
int zcrypt_card_put(struct zcrypt_card *zc)
{
return kref_put(&zc->refcount, zcrypt_card_release);
}
EXPORT_SYMBOL(zcrypt_card_put);
/**
* zcrypt_card_register() - Register a crypto card device.
* @zc: Pointer to a crypto card device
*
* Register a crypto card device. Returns 0 if successful.
*/
int zcrypt_card_register(struct zcrypt_card *zc)
{
int rc;
spin_lock(&zcrypt_list_lock);
list_add_tail(&zc->list, &zcrypt_card_list);
spin_unlock(&zcrypt_list_lock);
zc->online = 1;
ZCRYPT_DBF_INFO("%s card=%02x register online=1\n",
__func__, zc->card->id);
rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
&zcrypt_card_attr_group);
if (rc) {
spin_lock(&zcrypt_list_lock);
list_del_init(&zc->list);
spin_unlock(&zcrypt_list_lock);
}
return rc;
}
EXPORT_SYMBOL(zcrypt_card_register);
/**
* zcrypt_card_unregister(): Unregister a crypto card device.
* @zc: Pointer to crypto card device
*
* Unregister a crypto card device.
*/
void zcrypt_card_unregister(struct zcrypt_card *zc)
{
ZCRYPT_DBF_INFO("%s card=%02x unregister\n",
__func__, zc->card->id);
spin_lock(&zcrypt_list_lock);
list_del_init(&zc->list);
spin_unlock(&zcrypt_list_lock);
sysfs_remove_group(&zc->card->ap_dev.device.kobj,
&zcrypt_card_attr_group);
zcrypt_card_put(zc);
}
EXPORT_SYMBOL(zcrypt_card_unregister);
| linux-master | drivers/s390/crypto/zcrypt_card.c |
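Illustrative aside (not part of zcrypt_card.c): a small user-space sketch that flips the soft-online state through the online attribute defined above. The /sys/bus/ap/devices/card<xx>/ path and the card id are assumptions; use a card that actually exists on the system.

/* Hedged sketch, not part of the kernel sources: set a zcrypt card
 * soft-online or soft-offline via its sysfs "online" attribute.
 * The sysfs path and card id are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_card_online(const char *card, int online)
{
	char path[128];
	char val = online ? '1' : '0';
	int fd;

	snprintf(path, sizeof(path), "/sys/bus/ap/devices/%s/online", card);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return -1;
	}
	/* online_store() parses the value with sscanf("%d") */
	if (write(fd, &val, 1) != 1) {
		perror("write");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	return set_card_online("card04", 1) ? 1 : 0;
}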
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2013
* Author(s): Eugene Crosser <[email protected]>
*/
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
#include "qeth_l2.h"
static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
struct device_attribute *attr, char *buf,
int show_state)
{
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE;
int rc = 0;
char *word;
if (!qeth_bridgeport_allowed(card))
return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
&card->options.sbp.role, &state);
if (!rc) {
if (show_state)
switch (state) {
case QETH_SBP_STATE_INACTIVE:
word = "inactive"; break;
case QETH_SBP_STATE_STANDBY:
word = "standby"; break;
case QETH_SBP_STATE_ACTIVE:
word = "active"; break;
default:
rc = -EIO;
}
else
switch (card->options.sbp.role) {
case QETH_SBP_ROLE_NONE:
word = "none"; break;
case QETH_SBP_ROLE_PRIMARY:
word = "primary"; break;
case QETH_SBP_ROLE_SECONDARY:
word = "secondary"; break;
default:
rc = -EIO;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
card->options.sbp.role, state);
else
rc = sysfs_emit(buf, "%s\n", word);
}
mutex_unlock(&card->sbp_lock);
return rc;
}
static ssize_t qeth_bridge_port_role_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
}
static ssize_t qeth_bridge_port_role_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
enum qeth_sbp_roles role;
if (sysfs_streq(buf, "primary"))
role = QETH_SBP_ROLE_PRIMARY;
else if (sysfs_streq(buf, "secondary"))
role = QETH_SBP_ROLE_SECONDARY;
else if (sysfs_streq(buf, "none"))
role = QETH_SBP_ROLE_NONE;
else
return -EINVAL;
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.reflect_promisc)
/* Forbid direct manipulation */
rc = -EPERM;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_setrole(card, role);
if (!rc)
card->options.sbp.role = role;
} else
card->options.sbp.role = role;
mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
qeth_bridge_port_role_store);
static ssize_t qeth_bridge_port_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
}
static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show,
NULL);
static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
if (!qeth_bridgeport_allowed(card))
return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
return sysfs_emit(buf, "%d\n", enabled);
}
static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool enable;
int rc;
rc = kstrtobool(buf, &enable);
if (rc)
return rc;
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
/* sbp_lock ensures ordering vs notifications-stopped events */
if (!rc)
card->options.sbp.hostnotification = enable;
} else
card->options.sbp.hostnotification = enable;
mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(bridge_hostnotify, 0644,
qeth_bridgeport_hostnotification_show,
qeth_bridgeport_hostnotification_store);
static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
if (!qeth_bridgeport_allowed(card))
return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
if (card->options.sbp.reflect_promisc_primary)
state = "primary";
else
state = "secondary";
} else
state = "none";
return sysfs_emit(buf, "%s\n", state);
}
static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int enable, primary;
int rc = 0;
if (sysfs_streq(buf, "none")) {
enable = 0;
primary = 0;
} else if (sysfs_streq(buf, "primary")) {
enable = 1;
primary = 1;
} else if (sysfs_streq(buf, "secondary")) {
enable = 1;
primary = 0;
} else
return -EINVAL;
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
rc = -EPERM;
else {
card->options.sbp.reflect_promisc = enable;
card->options.sbp.reflect_promisc_primary = primary;
rc = 0;
}
mutex_unlock(&card->sbp_lock);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(bridge_reflect_promisc, 0644,
qeth_bridgeport_reflect_show,
qeth_bridgeport_reflect_store);
static struct attribute *qeth_l2_bridgeport_attrs[] = {
&dev_attr_bridge_role.attr,
&dev_attr_bridge_state.attr,
&dev_attr_bridge_hostnotify.attr,
&dev_attr_bridge_reflect_promisc.attr,
NULL,
};
static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
/* VNIC CHARS support */
/* convert sysfs attr name to VNIC characteristic */
static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name)
{
if (sysfs_streq(attr_name, "flooding"))
return QETH_VNICC_FLOODING;
else if (sysfs_streq(attr_name, "mcast_flooding"))
return QETH_VNICC_MCAST_FLOODING;
else if (sysfs_streq(attr_name, "learning"))
return QETH_VNICC_LEARNING;
else if (sysfs_streq(attr_name, "takeover_setvmac"))
return QETH_VNICC_TAKEOVER_SETVMAC;
else if (sysfs_streq(attr_name, "takeover_learning"))
return QETH_VNICC_TAKEOVER_LEARNING;
else if (sysfs_streq(attr_name, "bridge_invisible"))
return QETH_VNICC_BRIDGE_INVISIBLE;
else if (sysfs_streq(attr_name, "rx_bcast"))
return QETH_VNICC_RX_BCAST;
return 0;
}
/* get current timeout setting */
static ssize_t qeth_vnicc_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
u32 timeout;
int rc;
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
return sysfs_emit(buf, "n/a\n");
return rc ? rc : sysfs_emit(buf, "%d\n", timeout);
}
/* change timeout setting */
static ssize_t qeth_vnicc_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
u32 timeout;
int rc;
rc = kstrtou32(buf, 10, &timeout);
if (rc)
return rc;
mutex_lock(&card->conf_mutex);
rc = qeth_l2_vnicc_set_timeout(card, timeout);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
/* get current setting of characteristic */
static ssize_t qeth_vnicc_char_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool state;
u32 vnicc;
int rc;
vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
if (rc == -EBUSY)
return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
return sysfs_emit(buf, "n/a\n");
return rc ? rc : sysfs_emit(buf, "%d\n", state);
}
/* change setting of characteristic */
static ssize_t qeth_vnicc_char_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool state;
u32 vnicc;
int rc;
if (kstrtobool(buf, &state))
return -EINVAL;
vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
mutex_lock(&card->conf_mutex);
rc = qeth_l2_vnicc_set_state(card, vnicc, state);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show,
qeth_vnicc_char_store);
static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show,
qeth_vnicc_timeout_store);
static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show,
qeth_vnicc_char_store);
static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show,
qeth_vnicc_char_store);
static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show,
qeth_vnicc_char_store);
static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
static struct attribute *qeth_l2_vnicc_attrs[] = {
&dev_attr_flooding.attr,
&dev_attr_mcast_flooding.attr,
&dev_attr_learning.attr,
&dev_attr_learning_timeout.attr,
&dev_attr_takeover_setvmac.attr,
&dev_attr_takeover_learning.attr,
&dev_attr_bridge_invisible.attr,
&dev_attr_rx_bcast.attr,
NULL,
};
static struct attribute_group qeth_l2_vnicc_attr_group = {
.attrs = qeth_l2_vnicc_attrs,
.name = "vnicc",
};
const struct attribute_group *qeth_l2_attr_groups[] = {
&qeth_l2_bridgeport_attr_group,
&qeth_l2_vnicc_attr_group,
NULL,
};
| linux-master | drivers/s390/net/qeth_l2_sys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>
#include "qeth_l3.h"
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
char *buf)
{
if (proto == QETH_PROT_IPV4)
return scnprintf(buf, INET_ADDRSTRLEN, "%pI4", addr);
else
return scnprintf(buf, INET6_ADDRSTRLEN, "%pI6", addr);
}
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
u32 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;
if (query->is_multicast) {
hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
} else {
hash_for_each_possible(card->ip_htable, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
}
return NULL;
}
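/*
 * Expand an address into one array element per bit (most significant
 * bit of each byte first), so that a prefix of mask_bits bits can be
 * compared with a plain memcmp() in the IPATO check below.
 */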
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
u8 octet;
for (i = 0; i < len; ++i) {
octet = addr[i];
for (j = 7; j >= 0; --j) {
bits[i*8 + j] = octet & 1;
octet >>= 1;
}
}
}
static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
u8 addr_bits[128] = {0, };
u8 ipatoe_bits[128] = {0, };
int rc = 0;
if (!card->ipato.enabled)
return false;
if (addr->type != QETH_IP_TYPE_NORMAL)
return false;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4) ? 4 : 16);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (addr->proto != ipatoe->proto)
continue;
qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
	/* invert the match if takeover inversion is configured for the protocol */
if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
rc = !rc;
else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
rc = !rc;
return rc;
}
static int qeth_l3_delete_ip(struct qeth_card *card,
struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
if (tmp_addr->type == QETH_IP_TYPE_RXIP)
QETH_CARD_TEXT(card, 2, "delrxip");
else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
QETH_CARD_TEXT(card, 2, "delvipa");
else
QETH_CARD_TEXT(card, 2, "delip");
if (tmp_addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
return -ENOENT;
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
if (qeth_card_hw_is_reachable(card))
rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
return rc;
}
static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
char buf[INET6_ADDRSTRLEN];
if (tmp_addr->type == QETH_IP_TYPE_RXIP)
QETH_CARD_TEXT(card, 2, "addrxip");
else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
QETH_CARD_TEXT(card, 2, "addvipa");
else
QETH_CARD_TEXT(card, 2, "addip");
if (tmp_addr->proto == QETH_PROT_IPV4)
QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}
addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
if (addr) {
if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
return -EADDRINUSE;
if (qeth_l3_addr_match_all(addr, tmp_addr)) {
addr->ref_counter++;
return 0;
}
qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
return -EADDRINUSE;
} else {
addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->ipato = 1;
}
hash_add(card->ip_htable, &addr->hnode,
qeth_l3_ipaddr_hash(addr));
if (!qeth_card_hw_is_reachable(card)) {
addr->disp_flag = QETH_DISP_ADDR_ADD;
return 0;
}
rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
hash_del(&addr->hnode);
kfree(addr);
}
}
return rc;
}
static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
bool add)
{
int rc;
mutex_lock(&card->ip_lock);
rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
mutex_unlock(&card->ip_lock);
return rc;
}
static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
hash_del(&addr->hnode);
kfree(addr);
}
}
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
QETH_CARD_TEXT(card, 4, "clearip");
mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
continue;
}
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
}
static void qeth_l3_recover_ip(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i;
int rc;
QETH_CARD_TEXT(card, 4, "recovrip");
mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
hash_del(&addr->hnode);
kfree(addr);
}
}
}
mutex_unlock(&card->ip_lock);
}
static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
case IPA_RC_SUCCESS:
return 0;
case IPA_RC_DUPLICATE_IP_ADDRESS:
return -EADDRINUSE;
case IPA_RC_MC_ADDR_NOT_FOUND:
return -ENOENT;
case IPA_RC_LAN_OFFLINE:
return -ENETDOWN;
default:
return -EIO;
}
}
static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_ipaddr *addr,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
IPA_DATA_SIZEOF(setdelipm));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
if (addr->proto == QETH_PROT_IPV6) {
cmd->data.setdelipm.ip = addr->u.a6.addr;
ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
} else {
cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
}
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
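/* Build an IPv6 network mask from a prefix length, 32 bits at a time. */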
static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
{
unsigned int i = 0;
while (len && i < 4) {
int mask_len = min_t(int, len, 32);
prefix->s6_addr32[i] = inet_make_mask(mask_len);
len -= mask_len;
i++;
}
}
static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
{
switch (addr->type) {
case QETH_IP_TYPE_RXIP:
return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
case QETH_IP_TYPE_VIPA:
return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
QETH_IPA_DELIP_VIPA_FLAG;
default:
return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
}
}
static int qeth_l3_send_setdelip(struct qeth_card *card,
struct qeth_ipaddr *addr,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
u32 flags;
QETH_CARD_TEXT(card, 4, "setdelip");
iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
IPA_DATA_SIZEOF(setdelip6));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
if (addr->proto == QETH_PROT_IPV6) {
cmd->data.setdelip6.addr = addr->u.a6.addr;
qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
addr->u.a6.pfxlen);
cmd->data.setdelip6.flags = flags;
} else {
cmd->data.setdelip4.addr = addr->u.a4.addr;
cmd->data.setdelip4.mask = addr->u.a4.mask;
cmd->data.setdelip4.flags = flags;
}
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
static int qeth_l3_send_setrouting(struct qeth_card *card,
enum qeth_routing_types type, enum qeth_prot_versions prot)
{
int rc;
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot,
IPA_DATA_SIZEOF(setrtg));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
static int qeth_l3_correct_routing_type(struct qeth_card *card,
enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
if (IS_IQD(card)) {
switch (*type) {
case NO_ROUTER:
case PRIMARY_CONNECTOR:
case SECONDARY_CONNECTOR:
case MULTICAST_ROUTER:
return 0;
default:
goto out_inval;
}
} else {
switch (*type) {
case NO_ROUTER:
case PRIMARY_ROUTER:
case SECONDARY_ROUTER:
return 0;
case MULTICAST_ROUTER:
if (qeth_is_ipafunc_supported(card, prot,
IPA_OSA_MC_ROUTER))
return 0;
goto out_inval;
default:
goto out_inval;
}
}
out_inval:
*type = NO_ROUTER;
return -EINVAL;
}
int qeth_l3_setrouting_v4(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 3, "setrtg4");
rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
QETH_PROT_IPV4);
if (rc)
return rc;
rc = qeth_l3_send_setrouting(card, card->options.route4.type,
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
rc, CARD_DEVID(card));
}
return rc;
}
int qeth_l3_setrouting_v6(struct qeth_card *card)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "setrtg6");
if (!qeth_is_supported(card, IPA_IPV6))
return 0;
rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
QETH_PROT_IPV6);
if (rc)
return rc;
rc = qeth_l3_send_setrouting(card, card->options.route6.type,
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
rc, CARD_DEVID(card));
}
return rc;
}
/*
* IP address takeover related functions
*/
/*
* qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
*
* Caller must hold ip_lock.
*/
void qeth_l3_update_ipato(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
unsigned int i;
hash_for_each(card->ip_htable, i, addr, hnode) {
if (addr->type != QETH_IP_TYPE_NORMAL)
continue;
addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
}
}
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
kfree(ipatoe);
}
qeth_l3_update_ipato(card);
mutex_unlock(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *new)
{
struct qeth_ipato_entry *ipatoe;
int rc = 0;
QETH_CARD_TEXT(card, 2, "addipato");
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
continue;
if (!memcmp(ipatoe->addr, new->addr,
(ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == new->mask_bits)) {
rc = -EEXIST;
break;
}
}
if (!rc) {
list_add_tail(&new->entry, &card->ipato.entries);
qeth_l3_update_ipato(card);
}
mutex_unlock(&card->ip_lock);
return rc;
}
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
unsigned int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
if (!memcmp(ipatoe->addr, addr,
(proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
qeth_l3_update_ipato(card);
kfree(ipatoe);
rc = 0;
}
}
mutex_unlock(&card->ip_lock);
return rc;
}
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_ip_types type,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
memcpy(&addr.u.a4.addr, ip, 4);
else
memcpy(&addr.u.a6.addr, ip, 16);
return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
{
struct qeth_ipaddr addr;
unsigned int i;
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
addr.u.a6.addr.s6_addr[0] = 0xfe;
addr.u.a6.addr.s6_addr[1] = 0x80;
for (i = 0; i < 8; i++)
addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
return qeth_l3_modify_ip(card, &addr, add);
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
struct qeth_ipaddr *addr)
{
char buf[50];
int rc = 0;
int cnt = 3;
if (card->options.sniffer)
return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
QETH_CARD_TEXT(card, 2, "setaddr6");
QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
QETH_CARD_TEXT(card, 2, "setaddr?");
QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
do {
if (addr->is_multicast)
rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
else
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
if (rc)
QETH_CARD_TEXT(card, 2, "failed");
} while ((--cnt > 0) && rc);
if (rc) {
QETH_CARD_TEXT(card, 2, "FAILED");
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
}
return rc;
}
static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
struct qeth_ipaddr *addr)
{
int rc = 0;
if (card->options.sniffer)
return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "deladdr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
QETH_CARD_TEXT(card, 2, "deladdr6");
QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
QETH_CARD_TEXT(card, 2, "deladdr?");
QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
if (addr->is_multicast)
rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
else
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
if (rc)
QETH_CARD_TEXT(card, 2, "failed");
return rc;
}
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc = 0;
QETH_CARD_TEXT(card, 2, "setadprm");
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc)
dev_warn(&card->gdev->dev, "Reading the adapter MAC"
" address failed\n");
}
return rc;
}
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 3, "ipaarp");
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
"ARP processing not supported on %s!\n",
netdev_name(card->dev));
return 0;
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting ARP processing support for %s failed\n",
netdev_name(card->dev));
}
return rc;
}
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 3, "stsrcmac");
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
"Inbound source MAC-address not supported on %s\n",
netdev_name(card->dev));
return -EOPNOTSUPP;
}
rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
IPA_CMD_ASS_START, NULL);
if (rc)
dev_warn(&card->gdev->dev,
"Starting source MAC-address support for %s failed\n",
netdev_name(card->dev));
return rc;
}
static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "strtvlan");
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
"VLAN not supported on %s\n", netdev_name(card->dev));
return -EOPNOTSUPP;
}
rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting VLAN support for %s failed\n",
netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "VLAN enabled\n");
}
return rc;
}
static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 3, "stmcast");
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
"Multicast not supported on %s\n",
netdev_name(card->dev));
return -EOPNOTSUPP;
}
rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting multicast support for %s failed\n",
netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "Multicast enabled\n");
card->dev->flags |= IFF_MULTICAST;
}
return rc;
}
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
u32 ipv6_data = 3;
int rc;
QETH_CARD_TEXT(card, 3, "softipv6");
if (IS_IQD(card))
goto out;
rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START,
&ipv6_data);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
NULL);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling the passthrough mode for %s failed\n",
netdev_name(card->dev));
return rc;
}
out:
dev_info(&card->gdev->dev, "IPV6 enabled\n");
return 0;
}
static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
"IPv6 not supported on %s\n", netdev_name(card->dev));
return 0;
}
return qeth_l3_softsetup_ipv6(card);
}
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
u32 filter_data = 1;
int rc;
QETH_CARD_TEXT(card, 3, "stbrdcst");
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
"Broadcast not supported on %s\n",
netdev_name(card->dev));
rc = -EOPNOTSUPP;
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling broadcast filtering for %s failed\n",
netdev_name(card->dev));
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_CONFIGURE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
"Setting up broadcast filtering for %s failed\n",
netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
dev_info(&card->gdev->dev, "Broadcast enabled\n");
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_ENABLE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
"Setting up broadcast echo filtering for %s failed\n",
netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
if (card->info.broadcast_capable)
card->dev->flags |= IFF_BROADCAST;
else
card->dev->flags &= ~IFF_BROADCAST;
return rc;
}
static void qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
qeth_l3_start_ipa_arp_processing(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
qeth_l3_start_ipa_vlan(card); /* go on*/
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
}
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code)
return -EIO;
if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
return -EADDRNOTAVAIL;
eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr);
return 0;
}
static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
{
int rc = 0;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "hsrmac");
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
NULL);
return rc;
}
static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
u16 *uid = reply->param;
if (cmd->hdr.return_code == 0) {
*uid = cmd->data.create_destroy_addr.uid;
return 0;
}
dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
return -EIO;
}
static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "guniqeid");
if (!qeth_is_supported(card, IPA_IPV6))
goto out;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
goto out;
__ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
out:
return uid;
}
static int
qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd;
__u16 rc;
QETH_CARD_TEXT(card, 2, "diastrcb");
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
if (rc)
QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
switch (cmd->data.diagass.action) {
case QETH_DIAGS_CMD_TRACE_QUERY:
break;
case QETH_DIAGS_CMD_TRACE_DISABLE:
switch (rc) {
case 0:
case IPA_RC_INVALID_SUBCMD:
card->info.promisc_mode = SET_PROMISC_MODE_OFF;
dev_info(&card->gdev->dev, "The HiperSockets network "
"traffic analyzer is deactivated\n");
break;
default:
break;
}
break;
case QETH_DIAGS_CMD_TRACE_ENABLE:
switch (rc) {
case 0:
card->info.promisc_mode = SET_PROMISC_MODE_ON;
dev_info(&card->gdev->dev, "The HiperSockets network "
"traffic analyzer is activated\n");
break;
case IPA_RC_HARDWARE_AUTH_ERROR:
dev_warn(&card->gdev->dev, "The device is not "
"authorized to run as a HiperSockets network "
"traffic analyzer\n");
break;
case IPA_RC_TRACE_ALREADY_ACTIVE:
dev_warn(&card->gdev->dev, "A HiperSockets "
"network traffic analyzer is already "
"active in the HiperSockets LAN\n");
break;
default:
break;
}
break;
default:
QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
cmd->data.diagass.action, CARD_DEVID(card));
}
return rc ? -EIO : 0;
}
static int
qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "diagtrac");
iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
cmd->data.diagass.action = diags_cmd;
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
struct qeth_card *card = arg;
struct inet6_dev *in6_dev;
struct in_device *in4_dev;
struct qeth_ipaddr *ipm;
struct qeth_ipaddr tmp;
struct ip_mc_list *im4;
struct ifmcaddr6 *im6;
QETH_CARD_TEXT(card, 4, "addmc");
if (!dev || !(dev->flags & IFF_UP))
goto out;
in4_dev = __in_dev_get_rtnl(dev);
if (!in4_dev)
goto walk_ipv6;
qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
tmp.disp_flag = QETH_DISP_ADDR_ADD;
tmp.is_multicast = 1;
for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rtnl_dereference(im4->next_rcu)) {
tmp.u.a4.addr = im4->multiaddr;
ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
if (!ipm)
continue;
hash_add(card->rx_mode_addrs, &ipm->hnode,
qeth_l3_ipaddr_hash(ipm));
}
walk_ipv6:
if (!qeth_is_supported(card, IPA_IPV6))
goto out;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
goto out;
qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
tmp.disp_flag = QETH_DISP_ADDR_ADD;
tmp.is_multicast = 1;
for (im6 = rtnl_dereference(in6_dev->mc_list);
im6;
im6 = rtnl_dereference(im6->next)) {
tmp.u.a6.addr = im6->mca_addr;
ipm = qeth_l3_find_addr_by_ip(card, &tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
if (!ipm)
continue;
hash_add(card->rx_mode_addrs, &ipm->hnode,
qeth_l3_ipaddr_hash(ipm));
}
out:
return 0;
}
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
if (card->info.promisc_mode == enable)
return;
if (IS_VM_NIC(card)) { /* Guestlan trace */
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card, enable);
} else if (card->options.sniffer && /* HiperSockets trace */
qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
if (enable) {
QETH_CARD_TEXT(card, 3, "+promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
} else {
QETH_CARD_TEXT(card, 3, "-promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
}
}
}
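/*
 * Worker behind ndo_set_rx_mode(): collect the currently subscribed
 * multicast addresses under the RTNL lock, register new entries with
 * the hardware, deregister stale ones and finally update the
 * promiscuous/sniffer mode.
 */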
static void qeth_l3_rx_mode_work(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
rx_mode_work);
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
rtnl_lock();
qeth_l3_add_mcast_rtnl(card->dev, 0, card);
if (qeth_is_supported(card, IPA_FULL_VLAN))
vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
rtnl_unlock();
hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
switch (addr->disp_flag) {
case QETH_DISP_ADDR_DELETE:
rc = qeth_l3_deregister_addr_entry(card, addr);
if (!rc || rc == -ENOENT) {
hash_del(&addr->hnode);
kfree(addr);
}
break;
case QETH_DISP_ADDR_ADD:
rc = qeth_l3_register_addr_entry(card, addr);
if (rc && rc != -ENETDOWN) {
hash_del(&addr->hnode);
kfree(addr);
break;
}
fallthrough;
default:
/* for next call to set_rx_mode(): */
addr->disp_flag = QETH_DISP_ADDR_DELETE;
}
}
}
qeth_l3_set_promisc_mode(card);
}
static int qeth_l3_arp_makerc(u16 rc)
{
switch (rc) {
case IPA_RC_SUCCESS:
return 0;
case QETH_IPA_ARP_RC_NOTSUPP:
case QETH_IPA_ARP_RC_Q_NOTSUPP:
return -EOPNOTSUPP;
case QETH_IPA_ARP_RC_OUT_OF_RANGE:
return -EINVAL;
case QETH_IPA_ARP_RC_Q_NO_DATA:
return -ENOENT;
default:
return -EIO;
}
}
static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
qeth_setassparms_cb(card, reply, data);
return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
/*
* currently GuestLAN only supports the ARP assist function
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
* thus we say EOPNOTSUPP for this ARP function
*/
if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
SETASS_DATA_SIZEOF(flags_32bit),
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
__ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
CARD_DEVID(card), rc);
return rc;
}
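/*
 * Size of one ARP query entry; this depends on the reply format
 * (reply_bits == 5 means the HiperSockets entry layout), on IPv4 vs.
 * IPv6 and on whether the media-specific part of the entry is stripped.
 */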
static __u32 get_arp_entry_size(struct qeth_card *card,
struct qeth_arp_query_data *qdata,
struct qeth_arp_entrytype *type, __u8 strip_entries)
{
__u32 rc;
__u8 is_hsi;
is_hsi = qdata->reply_bits == 5;
if (type->ip == QETHARP_IP_ADDR_V4) {
QETH_CARD_TEXT(card, 4, "arpev4");
if (strip_entries) {
rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
sizeof(struct qeth_arp_qi_entry7_short);
} else {
rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
sizeof(struct qeth_arp_qi_entry7);
}
} else if (type->ip == QETHARP_IP_ADDR_V6) {
QETH_CARD_TEXT(card, 4, "arpev6");
if (strip_entries) {
rc = is_hsi ?
sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
sizeof(struct qeth_arp_qi_entry7_short_ipv6);
} else {
rc = is_hsi ?
sizeof(struct qeth_arp_qi_entry5_ipv6) :
sizeof(struct qeth_arp_qi_entry7_ipv6);
}
} else {
QETH_CARD_TEXT(card, 4, "arpinv");
rc = 0;
}
return rc;
}
static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
{
return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
(type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
}
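/*
 * Callback for ARP cache queries: copy the returned entries into the
 * caller's user buffer (optionally stripping the media-specific bytes
 * of each entry) and return 1 as long as further reply parts are
 * still expected.
 */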
static int qeth_l3_arp_query_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
struct qeth_arp_query_data *qdata;
struct qeth_arp_query_info *qinfo;
int e;
int entrybytes_done;
int stripped_bytes;
__u8 do_strip_entries;
QETH_CARD_TEXT(card, 3, "arpquecb");
qinfo = (struct qeth_arp_query_info *) reply->param;
cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
if (cmd->hdr.return_code) {
QETH_CARD_TEXT(card, 4, "arpcberr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
QETH_CARD_TEXT(card, 4, "setaperr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
qdata = &cmd->data.setassparms.data.query_arp;
QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
entrybytes_done = 0;
for (e = 0; e < qdata->no_entries; ++e) {
char *cur_entry;
__u32 esize;
struct qeth_arp_entrytype *etype;
cur_entry = &qdata->data + entrybytes_done;
etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
QETH_CARD_TEXT(card, 4, "pmis");
QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
break;
}
esize = get_arp_entry_size(card, qdata, etype,
do_strip_entries);
QETH_CARD_TEXT_(card, 5, "esz%i", esize);
if (!esize)
break;
if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC);
memset(qinfo->udata, 0, 4);
return -ENOSPC;
}
memcpy(qinfo->udata + qinfo->udata_offset,
&qdata->data + entrybytes_done + stripped_bytes,
esize);
entrybytes_done += esize + stripped_bytes;
qinfo->udata_offset += esize;
++qinfo->no_entries;
}
/* check if all replies received ... */
if (cmd->data.setassparms.hdr.seq_no <
cmd->data.setassparms.hdr.number_of_replies)
return 1;
QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
memcpy(qinfo->udata, &qinfo->no_entries, 4);
/* keep STRIP_ENTRIES flag so the user program can distinguish
* stripped entries from normal ones */
if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
QETH_CARD_TEXT_(card, 4, "rc%i", 0);
return 0;
}
static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
enum qeth_prot_versions prot,
struct qeth_arp_query_info *qinfo)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
int rc;
QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_QUERY_INFO,
SETASS_DATA_SIZEOF(query_arp), prot);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
return rc;
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
{
struct qeth_arp_query_info qinfo = {0, };
int rc;
QETH_CARD_TEXT(card, 3, "arpquery");
if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
QETH_CARD_TEXT(card, 3, "arpqnsup");
rc = -EOPNOTSUPP;
goto out;
}
/* get size of userspace buffer and mask_bits -> 6 bytes */
if (copy_from_user(&qinfo, udata, 6)) {
rc = -EFAULT;
goto out;
}
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
rc = -ENOMEM;
goto out;
}
qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
if (rc) {
if (copy_to_user(udata, qinfo.udata, 4))
rc = -EFAULT;
goto free_and_out;
}
if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
/* fails in case of GuestLAN QDIO mode */
qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
}
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
QETH_CARD_TEXT(card, 4, "qactf");
rc = -EFAULT;
goto free_and_out;
}
QETH_CARD_TEXT(card, 4, "qacts");
free_and_out:
kfree(qinfo.udata);
out:
return rc;
}
static int qeth_l3_arp_modify_entry(struct qeth_card *card,
struct qeth_arp_cache_entry *entry,
enum qeth_arp_process_subcmds arp_cmd)
{
struct qeth_arp_cache_entry *cmd_entry;
struct qeth_cmd_buffer *iob;
int rc;
if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
QETH_CARD_TEXT(card, 3, "arpadd");
else
QETH_CARD_TEXT(card, 3, "arpdel");
/*
* currently GuestLAN only supports the ARP assist function
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
* thus we say EOPNOTSUPP for this ARP function
*/
if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
SETASS_DATA_SIZEOF(arp_entry),
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
arp_cmd, CARD_DEVID(card), rc);
return rc;
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpflush");
/*
* currently GuestLAN only supports the ARP assist function
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
* thus we say EOPNOTSUPP for this ARP function
*/
if (IS_VM_NIC(card) || IS_IQD(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0,
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
return rc;
}
static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
enum qeth_arp_process_subcmds arp_cmd;
int rc = 0;
switch (cmd) {
case SIOC_QETH_ARP_SET_NO_ENTRIES:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
break;
case SIOC_QETH_ARP_QUERY_INFO:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
rc = qeth_l3_arp_query(card, data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
case SIOC_QETH_ARP_REMOVE_ENTRY:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
return -EFAULT;
arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
IPA_CMD_ASS_ARP_ADD_ENTRY :
IPA_CMD_ASS_ARP_REMOVE_ENTRY;
return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
case SIOC_QETH_ARP_FLUSH_CACHE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
rc = qeth_l3_arp_flush_cache(card);
break;
default:
rc = qeth_siocdevprivate(dev, rq, data, cmd);
}
return rc;
}
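/*
 * Classify an outgoing skb as unicast, multicast, broadcast or anycast:
 * use the neighbour entry's type when one is available, otherwise fall
 * back to the destination IP address (and, on OSA, the destination MAC).
 */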
static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
__be16 proto)
{
struct neighbour *n = NULL;
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
if (n) {
int cast_type = n->type;
neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
return cast_type;
return RTN_UNICAST;
}
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
switch (proto) {
case htons(ETH_P_IP):
if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case htons(ETH_P_IPV6):
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case htons(ETH_P_AF_IUCV):
return RTN_UNICAST;
default:
/* OSA only: ... and MAC address */
return qeth_get_ether_cast_type(skb);
}
}
static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
{
struct dst_entry *dst;
int cast_type;
rcu_read_lock();
dst = qeth_dst_check_rcu(skb, proto);
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
rcu_read_unlock();
return cast_type;
}
static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
if (cast_type == RTN_MULTICAST)
return QETH_CAST_MULTICAST;
if (cast_type == RTN_ANYCAST)
return QETH_CAST_ANYCAST;
if (cast_type == RTN_BROADCAST)
return QETH_CAST_BROADCAST;
return QETH_CAST_UNICAST;
}
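/*
 * Build the layer 3 HW header for an outgoing skb: length and header
 * type (TSO or plain layer 3), checksum offload flags, VLAN info, cast
 * type flags and the next-hop address.
 */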
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
struct dst_entry *dst;
int cast_type;
hdr->hdr.l3.length = data_len;
if (skb_is_gso(skb)) {
hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
/* some HW requires combined L3+L4 csum offload: */
if (proto == htons(ETH_P_IP))
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
}
}
if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
/* NETIF_F_HW_VLAN_CTAG_TX */
if (skb_vlan_tag_present(skb)) {
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
} else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
rcu_read_lock();
dst = qeth_dst_check_rcu(skb, proto);
if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
cast_type = RTN_UNICAST;
else
cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
switch (proto) {
case htons(ETH_P_IP):
l3_hdr->next_hop.addr.s6_addr32[3] =
qeth_next_hop_v4_rcu(skb, dst);
break;
case htons(ETH_P_IPV6):
l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
break;
case htons(ETH_P_AF_IUCV):
l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
l3_hdr->flags |= QETH_HDR_IPV6;
break;
default:
/* OSA only: */
l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
}
static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
/* this is safe, IPv6 traffic takes a different path */
if (skb->ip_summed == CHECKSUM_PARTIAL)
iph->check = 0;
if (skb_is_gso(skb)) {
iph->tot_len = 0;
tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
iph->daddr, 0);
}
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, __be16 proto)
{
unsigned int hw_hdr_len;
int rc;
/* re-use the L2 header area for the HW header: */
hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
sizeof(struct qeth_hdr);
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
__be16 proto = vlan_get_protocol(skb);
u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
int rc;
if (!skb_is_gso(skb))
qdisc_skb_cb(skb)->pkt_len = skb->len;
if (IS_IQD(card)) {
queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
if (card->options.sniffer)
goto tx_drop;
switch (proto) {
case htons(ETH_P_AF_IUCV):
if (card->options.cq != QETH_CQ_ENABLED)
goto tx_drop;
break;
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
if (card->options.cq == QETH_CQ_ENABLED)
goto tx_drop;
break;
default:
goto tx_drop;
}
} else {
queue = card->qdio.out_qs[txq];
}
if (!(dev->flags & IFF_BROADCAST) &&
qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
goto tx_drop;
if (proto == htons(ETH_P_IP) || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, proto);
else
rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
if (!rc)
return NETDEV_TX_OK;
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
static void qeth_l3_set_rx_mode(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
schedule_work(&card->rx_mode_work);
}
/*
 * We need NOARP for IPv4, but we still want neighbor solicitation for IPv6.
 * Setting NOARP on the netdevice is no option because it also turns off
 * neighbor solicitation. Instead we install a neighbor_setup function for
 * IPv4: we don't want ARP resolution, but we do want the hard header so that
 * packet sockets (e.g. tcpdump) keep working.
 */
static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
{
n->nud_state = NUD_NOARP;
memcpy(n->ha, "FAKELL", 6);
n->output = n->ops->connected_output;
return 0;
}
static int
qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
if (np->tbl->family == AF_INET)
np->neigh_setup = qeth_l3_neigh_setup_noarp;
return 0;
}
static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
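/*
 * IQD devices reserve a separate TX queue for multicast/broadcast traffic,
 * so queue selection needs the cast type of the packet.
 */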
static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
__be16 proto = vlan_get_protocol(skb);
return qeth_iqd_select_queue(dev, skb,
qeth_l3_get_cast_type(skb, proto), sb_dev);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
.ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
.ndo_select_queue = qeth_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
.ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
struct net_device *dev = card->dev;
unsigned int headroom;
int rc;
if (IS_OSD(card) || IS_OSX(card)) {
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
		/* dev_id is used for IPv6 address autoconfiguration */
dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);
if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
card->dev->vlan_features |= NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
}
if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
card->dev->hw_features |= NETIF_F_IPV6_CSUM;
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO6;
card->dev->vlan_features |= NETIF_F_TSO6;
}
/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
if (card->dev->hw_features & NETIF_F_TSO6)
headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
else if (card->dev->hw_features & NETIF_F_TSO)
headroom = sizeof(struct qeth_hdr_tso);
else
headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
} else if (IS_IQD(card)) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
return rc;
} else
return -ENODEV;
card->dev->needed_headroom = headroom;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
netif_keep_dst(card->dev);
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
netif_set_tso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
static const struct device_type qeth_l3_devtype = {
.name = "qeth_layer3",
.groups = qeth_l3_attr_groups,
};
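/*
 * The per-card cmd_wq created below is an ordered workqueue: deferred IP
 * address updates queued from the address notifiers are processed in the
 * order in which the events arrived.
 */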
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
hash_init(card->ip_htable);
mutex_init(&card->ip_lock);
card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
dev_name(&gdev->dev));
if (!card->cmd_wq)
return -ENOMEM;
if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
if (rc) {
destroy_workqueue(card->cmd_wq);
return rc;
}
} else {
gdev->dev.type = &qeth_l3_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
return 0;
}
static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
if (cgdev->dev.type != &qeth_l3_devtype)
device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
destroy_workqueue(card->cmd_wq);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
struct net_device *dev = card->dev;
int rc = 0;
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
qeth_l3_start_ipassists(card);
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
if (dev->reg_state != NETREG_REGISTERED) {
rc = qeth_l3_setup_netdev(card);
if (rc)
goto err_setup;
if (carrier_ok)
netif_carrier_on(dev);
} else {
rtnl_lock();
rc = qeth_set_real_num_tx_queues(card,
qeth_tx_actual_queues(card));
if (rc) {
rtnl_unlock();
goto err_set_queues;
}
if (carrier_ok)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
netif_device_attach(dev);
qeth_enable_hw_features(dev);
if (netif_running(dev)) {
local_bh_disable();
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
}
rtnl_unlock();
}
return 0;
err_set_queues:
err_setup:
qeth_set_allowed_threads(card, 0, 1);
card->state = CARD_STATE_DOWN;
qeth_l3_clear_ip_htable(card, 1);
return rc;
}
static void qeth_l3_set_offline(struct qeth_card *card)
{
qeth_set_allowed_threads(card, 0, 1);
qeth_l3_drain_rx_mode_cache(card);
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
if (card->state == CARD_STATE_SOFTSETUP) {
card->state = CARD_STATE_DOWN;
qeth_l3_clear_ip_htable(card, 1);
}
}
/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
return 1;
}
const struct qeth_discipline qeth_l3_discipline = {
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
.set_offline = qeth_l3_set_offline,
.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
static int qeth_l3_handle_ip_event(struct qeth_card *card,
struct qeth_ipaddr *addr,
unsigned long event)
{
switch (event) {
case NETDEV_UP:
qeth_l3_modify_ip(card, addr, true);
return NOTIFY_OK;
case NETDEV_DOWN:
qeth_l3_modify_ip(card, addr, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
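/*
 * IPv6 address notifications can arrive in atomic context, so each change is
 * packaged into a work item (allocated with GFP_ATOMIC) and applied later on
 * the card's ordered cmd_wq.
 */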
struct qeth_l3_ip_event_work {
struct work_struct work;
struct qeth_card *card;
struct qeth_ipaddr addr;
};
#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
static void qeth_l3_add_ip_worker(struct work_struct *work)
{
struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
kfree(work);
}
static void qeth_l3_delete_ip_worker(struct work_struct *work)
{
struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
kfree(work);
}
static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
dev->netdev_ops == &qeth_l3_netdev_ops)
return (struct qeth_card *) dev->ml_priv;
return NULL;
}
static int qeth_l3_ip_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct qeth_ipaddr addr;
struct qeth_card *card;
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
QETH_CARD_TEXT(card, 3, "ipevent");
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
addr.u.a4.addr = ifa->ifa_address;
addr.u.a4.mask = ifa->ifa_mask;
return qeth_l3_handle_ip_event(card, &addr, event);
}
static struct notifier_block qeth_l3_ip_notifier = {
	.notifier_call = qeth_l3_ip_event,
};
static int qeth_l3_ip6_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct net_device *dev = ifa->idev->dev;
struct qeth_l3_ip_event_work *ip_work;
struct qeth_card *card;
if (event != NETDEV_UP && event != NETDEV_DOWN)
return NOTIFY_DONE;
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
QETH_CARD_TEXT(card, 3, "ip6event");
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
if (!ip_work)
return NOTIFY_DONE;
if (event == NETDEV_UP)
INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
else
INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
ip_work->card = card;
qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
QETH_PROT_IPV6);
ip_work->addr.u.a6.addr = ifa->addr;
ip_work->addr.u.a6.pfxlen = ifa->prefix_len;
queue_work(card->cmd_wq, &ip_work->work);
return NOTIFY_OK;
}
static struct notifier_block qeth_l3_ip6_notifier = {
	.notifier_call = qeth_l3_ip6_event,
};
static int qeth_l3_register_notifiers(void)
{
int rc;
QETH_DBF_TEXT(SETUP, 5, "regnotif");
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
if (rc) {
unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
return rc;
}
return 0;
}
static void qeth_l3_unregister_notifiers(void)
{
QETH_DBF_TEXT(SETUP, 5, "unregnot");
WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
}
static int __init qeth_l3_init(void)
{
pr_info("register layer 3 discipline\n");
return qeth_l3_register_notifiers();
}
static void __exit qeth_l3_exit(void)
{
qeth_l3_unregister_notifiers();
pr_info("unregister layer 3 discipline\n");
}
module_init(qeth_l3_init);
module_exit(qeth_l3_exit);
MODULE_AUTHOR("Frank Blaschka <[email protected]>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/qeth_l3_main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Deliver z/VM CP special messages (SMSG) as uevents.
*
* The driver registers for z/VM CP special messages with the
* "APP" prefix. Incoming messages are delivered to user space
* as uevents.
*
* Copyright IBM Corp. 2010
* Author(s): Hendrik Brueckner <[email protected]>
*
*/
#define KMSG_COMPONENT "smsgiucv_app"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <net/iucv/iucv.h>
#include "smsgiucv.h"
/* prefix used for SMSG registration */
#define SMSG_PREFIX "APP"
/* SMSG related uevent environment variables */
#define ENV_SENDER_STR "SMSG_SENDER="
#define ENV_SENDER_LEN (strlen(ENV_SENDER_STR) + 8 + 1)
#define ENV_PREFIX_STR "SMSG_ID="
#define ENV_PREFIX_LEN (strlen(ENV_PREFIX_STR) + \
strlen(SMSG_PREFIX) + 1)
#define ENV_TEXT_STR "SMSG_TEXT="
#define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
/* z/VM user ID which is permitted to send SMSGs
* If the value is undefined or empty (""), special messages are
* accepted from any z/VM user ID. */
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted");
/* SMSG device representation */
static struct device *smsg_app_dev;
/* list element for queuing received messages for delivery */
struct smsg_app_event {
struct list_head list;
char *buf;
char *envp[4];
};
/* queue for outgoing uevents */
static LIST_HEAD(smsg_event_queue);
static DEFINE_SPINLOCK(smsg_event_queue_lock);
static void smsg_app_event_free(struct smsg_app_event *ev)
{
kfree(ev->buf);
kfree(ev);
}
static struct smsg_app_event *smsg_app_event_alloc(const char *from,
const char *msg)
{
struct smsg_app_event *ev;
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev)
return NULL;
ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN +
ENV_TEXT_LEN(msg), GFP_ATOMIC);
if (!ev->buf) {
kfree(ev);
return NULL;
}
/* setting up environment pointers into buf */
ev->envp[0] = ev->buf;
ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN;
ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN;
ev->envp[3] = NULL;
/* setting up environment: sender, prefix name, and message text */
snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from);
snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX);
snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg);
return ev;
}
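/*
 * Deliver all queued messages as uevents. The queue is spliced onto a local
 * list under the lock so that kobject_uevent_env() runs without holding it.
 */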
static void smsg_event_work_fn(struct work_struct *work)
{
LIST_HEAD(event_queue);
struct smsg_app_event *p, *n;
struct device *dev;
dev = get_device(smsg_app_dev);
if (!dev)
return;
spin_lock_bh(&smsg_event_queue_lock);
list_splice_init(&smsg_event_queue, &event_queue);
spin_unlock_bh(&smsg_event_queue_lock);
list_for_each_entry_safe(p, n, &event_queue, list) {
list_del(&p->list);
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
smsg_app_event_free(p);
}
put_device(dev);
}
static DECLARE_WORK(smsg_event_work, smsg_event_work_fn);
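/*
 * Invoked by the smsgiucv layer for each incoming message with the "APP"
 * prefix. The receive path may run in atomic context (hence the GFP_ATOMIC
 * allocation), so uevent delivery is deferred to the work function above.
 */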
static void smsg_app_callback(const char *from, char *msg)
{
struct smsg_app_event *se;
/* check if the originating z/VM user ID matches
* the configured sender. */
if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0)
return;
/* get start of message text (skip prefix and leading blanks) */
msg += strlen(SMSG_PREFIX);
while (*msg && isspace(*msg))
msg++;
if (*msg == '\0')
return;
/* allocate event list element and its environment */
se = smsg_app_event_alloc(from, msg);
if (!se)
return;
/* queue event and schedule work function */
spin_lock(&smsg_event_queue_lock);
list_add_tail(&se->list, &smsg_event_queue);
spin_unlock(&smsg_event_queue_lock);
schedule_work(&smsg_event_work);
return;
}
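/*
 * Module init: create a pseudo device on the IUCV bus to act as the source
 * of the uevents, then register the callback for the "APP" SMSG prefix.
 */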
static int __init smsgiucv_app_init(void)
{
struct device_driver *smsgiucv_drv;
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL);
if (!smsg_app_dev)
return -ENOMEM;
smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
if (!smsgiucv_drv) {
kfree(smsg_app_dev);
return -ENODEV;
}
rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
if (rc) {
kfree(smsg_app_dev);
goto fail;
}
smsg_app_dev->bus = &iucv_bus;
smsg_app_dev->parent = iucv_root;
smsg_app_dev->release = (void (*)(struct device *)) kfree;
smsg_app_dev->driver = smsgiucv_drv;
rc = device_register(smsg_app_dev);
if (rc) {
put_device(smsg_app_dev);
goto fail;
}
/* convert sender to uppercase characters */
if (sender) {
int len = strlen(sender);
while (len--)
sender[len] = toupper(sender[len]);
}
/* register with the smsgiucv device driver */
rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
if (rc) {
device_unregister(smsg_app_dev);
goto fail;
}
rc = 0;
fail:
return rc;
}
module_init(smsgiucv_app_init);
static void __exit smsgiucv_app_exit(void)
{
/* unregister callback */
smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback);
/* cancel pending work and flush any queued event work */
cancel_work_sync(&smsg_event_work);
smsg_event_work_fn(&smsg_event_work);
device_unregister(smsg_app_dev);
}
module_exit(smsgiucv_app_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents");
MODULE_AUTHOR("Hendrik Brueckner <[email protected]>");
| linux-master | drivers/s390/net/smsgiucv_app.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2007
* Authors: Peter Tiedemann ([email protected])
*
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include "ctcm_main.h"
/*
* sysfs attributes
*/
static ssize_t ctcm_buffer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sysfs_emit(buf, "%d\n", priv->buffer_size);
}
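/*
 * The "buffer" attribute sets the channel buffer size. The value must be at
 * least 576 + LL_HEADER_LENGTH + 2 bytes, must not exceed CTCM_BUFSIZE_LIMIT,
 * and - while the interface is running - must still cover the current MTU.
 */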
static ssize_t ctcm_buffer_write(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct net_device *ndev;
unsigned int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
int rc;
if (!(priv && priv->channel[CTCM_READ] &&
priv->channel[CTCM_READ]->netdev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
ndev = priv->channel[CTCM_READ]->netdev;
rc = kstrtouint(buf, 0, &bs1);
if (rc)
goto einval;
if (bs1 > CTCM_BUFSIZE_LIMIT)
goto einval;
if (bs1 < (576 + LL_HEADER_LENGTH + 2))
goto einval;
priv->buffer_size = bs1; /* just to overwrite the default */
if ((ndev->flags & IFF_RUNNING) &&
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval;
priv->channel[CTCM_READ]->max_bufsize = bs1;
priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf);
return count;
einval:
CTCM_DBF_DEV(SETUP, ndev, "buff_err");
return -EINVAL;
}
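/*
 * Dump the FSM states and TX counters of a device to the kernel log; the
 * sysfs "stats" read only triggers this dump and then returns "0\n".
 */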
static void ctcm_print_statistics(struct ctcm_priv *priv)
{
char *sbuf;
char *p;
if (!priv)
return;
sbuf = kmalloc(2048, GFP_KERNEL);
if (sbuf == NULL)
return;
p = sbuf;
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm));
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " RX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " TX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti);
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " Max. chained SKBs: %ld\n",
priv->channel[WRITE]->prof.maxcqueue);
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " TX single write ops: %ld\n",
priv->channel[WRITE]->prof.doios_single);
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " TX multi write ops: %ld\n",
priv->channel[WRITE]->prof.doios_multi);
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " Netto bytes written: %ld\n",
priv->channel[WRITE]->prof.txlen);
p += scnprintf(p, CTCM_STATSIZE_LIMIT, " Max. TX IO-time: %u\n",
jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));
printk(KERN_INFO "Statistics for %s:\n%s",
priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf);
return;
}
static ssize_t stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv || gdev->state != CCWGROUP_ONLINE)
return -ENODEV;
ctcm_print_statistics(priv);
return sysfs_emit(buf, "0\n");
}
static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
/* Reset statistics */
	memset(&priv->channel[CTCM_WRITE]->prof, 0,
sizeof(priv->channel[CTCM_WRITE]->prof));
return count;
}
static ssize_t ctcm_proto_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sysfs_emit(buf, "%d\n", priv->protocol);
}
static ssize_t ctcm_proto_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int value, rc;
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
rc = kstrtoint(buf, 0, &value);
if (rc ||
!((value == CTCM_PROTO_S390) ||
(value == CTCM_PROTO_LINUX) ||
(value == CTCM_PROTO_MPC) ||
(value == CTCM_PROTO_OS390)))
return -EINVAL;
priv->protocol = value;
CTCM_DBF_DEV(SETUP, dev, buf);
return count;
}
static const char *ctcm_type[] = {
"not a channel",
"CTC/A",
"FICON channel",
"ESCON channel",
"unknown channel type",
"unsupported channel type",
};
static ssize_t ctcm_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ccwgroup_device *cgdev;
cgdev = to_ccwgroupdev(dev);
if (!cgdev)
return -ENODEV;
return sysfs_emit(buf, "%s\n",
ctcm_type[cgdev->cdev[0]->id.driver_info]);
}
static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
static struct attribute *ctcm_attr[] = {
&dev_attr_protocol.attr,
&dev_attr_type.attr,
&dev_attr_buffer.attr,
&dev_attr_stats.attr,
NULL,
};
static struct attribute_group ctcm_attr_group = {
.attrs = ctcm_attr,
};
const struct attribute_group *ctcm_attr_groups[] = {
&ctcm_attr_group,
NULL,
};
| linux-master | drivers/s390/net/ctcm_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#include <linux/module.h>
#include <asm/cio.h>
#include "qeth_core_mpc.h"
const unsigned char IDX_ACTIVATE_READ[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
const unsigned char IDX_ACTIVATE_WRITE[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
const unsigned char CM_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
0x00,
0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff
};
const unsigned char CM_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x11,
0x00, 0x09, 0x04,
0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0xc8, 0x00
};
const unsigned char ULP_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
0x00,
0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
0xf1, 0x00, 0x00
};
const unsigned char ULP_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x14,
0x00, 0x09, 0x04,
0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0x40, 0x00,
0x00, 0x08, 0x04, 0x0b,
0x00, 0x00, 0x00, 0x00
};
const unsigned char DM_ACT[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x40, 0x01, 0x01, 0x00
};
const unsigned char IPA_PDU_HEADER[] = {
0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00,
0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
};
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
const char *msg;
};
static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
{IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"},
{IPA_RC_INVALID_FORMAT, "invalid format or length"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
{IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
{IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
{IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"},
{IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
{IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
{IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
{IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"},
{IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"},
{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
{IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"},
{IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
{IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
{IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"},
{IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
{IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
{IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
{IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
{IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
{IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
{IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
{IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"},
{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
{IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
/* default for qeth_get_ipa_msg(): */
{IPA_RC_FFFF, "Unknown Error"}
};
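/*
 * Linear lookup of the message text for an IPA return code; the final table
 * entry (IPA_RC_FFFF, "Unknown Error") doubles as the fallback when no entry
 * matches.
 */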
const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x;
for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
if (qeth_ipa_rc_msg[x].rc == rc)
return qeth_ipa_rc_msg[x].msg;
return qeth_ipa_rc_msg[x].msg;
}
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
const char *name;
};
static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
{IPA_CMD_DELVMAC, "delvmac"},
{IPA_CMD_SETGMAC, "setgmac"},
{IPA_CMD_DELGMAC, "delgmac"},
{IPA_CMD_SETVLAN, "setvlan"},
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_VNICC, "vnic_characteristics"},
{IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
{IPA_CMD_SETIPM, "setipm"},
{IPA_CMD_DELIPM, "delipm"},
{IPA_CMD_SETRTG, "setrtg"},
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
{IPA_CMD_SETBRIDGEPORT_IQD, "set_bridge_port(hs)"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
{IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"},
{IPA_CMD_UNKNOWN, "unknown"},
};
const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
int x;
for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
if (qeth_ipa_cmd_names[x].cmd == cmd)
return qeth_ipa_cmd_names[x].name;
return qeth_ipa_cmd_names[x].name;
}
| linux-master | drivers/s390/net/qeth_core_mpc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Linux for S/390 Lan Channel Station Network Driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Original Code written by
* DJ Barrow <[email protected],[email protected]>
* Rewritten by
* Frank Pavlic <[email protected]> and
* Martin Schwidefsky <[email protected]>
*/
#define KMSG_COMPONENT "lcs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ip.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/timex.h>
#include <linux/device.h>
#include <asm/ccwgroup.h>
#include "lcs.h"
/*
* initialization string for output
*/
static char version[] __initdata = "LCS driver";
/*
* the root device for lcs group devices
*/
static struct device *lcs_root_dev;
/*
* Some prototypes.
*/
static void lcs_tasklet(unsigned long);
static void lcs_start_kernel_thread(struct work_struct *);
static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
#ifdef CONFIG_IP_MULTICAST
static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
#endif /* CONFIG_IP_MULTICAST */
static int lcs_recovery(void *ptr);
/*
* Debug Facility Stuff
*/
static char debug_buffer[255];
static debug_info_t *lcs_dbf_setup;
static debug_info_t *lcs_dbf_trace;
/*
* LCS Debug Facility functions
*/
static void
lcs_unregister_debug_facility(void)
{
debug_unregister(lcs_dbf_setup);
debug_unregister(lcs_dbf_trace);
}
static int
lcs_register_debug_facility(void)
{
lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
pr_err("Not enough memory for debug facility.\n");
lcs_unregister_debug_facility();
return -ENOMEM;
}
debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
debug_set_level(lcs_dbf_setup, 2);
debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
debug_set_level(lcs_dbf_trace, 2);
return 0;
}
/*
* Allocate io buffers.
*/
static int
lcs_alloc_channel(struct lcs_channel *channel)
{
int cnt;
LCS_DBF_TEXT(2, setup, "ichalloc");
for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		/* allocate memory for the io buffer */
channel->iob[cnt].data =
kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
}
if (cnt < LCS_NUM_BUFFS) {
/* Not all io buffers could be allocated. */
LCS_DBF_TEXT(2, setup, "echalloc");
while (cnt-- > 0)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
return 0;
}
/*
* Free io buffers.
*/
static void
lcs_free_channel(struct lcs_channel *channel)
{
int cnt;
LCS_DBF_TEXT(2, setup, "ichfree");
for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
kfree(channel->iob[cnt].data);
channel->iob[cnt].data = NULL;
}
}
/*
* Cleanup channel.
*/
static void
lcs_cleanup_channel(struct lcs_channel *channel)
{
LCS_DBF_TEXT(3, setup, "cleanch");
/* Kill write channel tasklets. */
tasklet_kill(&channel->irq_tasklet);
/* Free channel buffers. */
lcs_free_channel(channel);
}
/*
* LCS free memory for card and channels.
*/
static void
lcs_free_card(struct lcs_card *card)
{
LCS_DBF_TEXT(2, setup, "remcard");
LCS_DBF_HEX(2, setup, &card, sizeof(void*));
kfree(card);
}
/*
* LCS alloc memory for card and channels
*/
static struct lcs_card *
lcs_alloc_card(void)
{
struct lcs_card *card;
int rc;
LCS_DBF_TEXT(2, setup, "alloclcs");
card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
if (card == NULL)
return NULL;
card->lan_type = LCS_FRAME_TYPE_AUTO;
card->pkt_seq = 0;
card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
/* Allocate io buffers for the read channel. */
rc = lcs_alloc_channel(&card->read);
if (rc){
LCS_DBF_TEXT(2, setup, "iccwerr");
lcs_free_card(card);
return NULL;
}
/* Allocate io buffers for the write channel. */
rc = lcs_alloc_channel(&card->write);
if (rc) {
LCS_DBF_TEXT(2, setup, "iccwerr");
lcs_cleanup_channel(&card->read);
lcs_free_card(card);
return NULL;
}
#ifdef CONFIG_IP_MULTICAST
INIT_LIST_HEAD(&card->ipm_list);
#endif
LCS_DBF_HEX(2, setup, &card, sizeof(void*));
return card;
}
/*
* Setup read channel.
*/
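/*
 * The read buffers form a circular channel program: the data CCWs are
 * command-chained and most carry the PCI flag so the host is interrupted
 * while data streams in, the last data CCW is flagged SUSPEND so the channel
 * pauses once all buffers are used, and a trailing TIC CCW branches back to
 * the start of the ring.
 */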
static void
lcs_setup_read_ccws(struct lcs_card *card)
{
int cnt;
LCS_DBF_TEXT(2, setup, "ireadccw");
/* Setup read ccws. */
memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
card->read.ccws[cnt].flags =
CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
/*
* Note: we have allocated the buffer with GFP_DMA, so
* we do not need to do set_normalized_cda.
*/
card->read.ccws[cnt].cda =
(__u32)virt_to_phys(card->read.iob[cnt].data);
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
card->read.iob[cnt].state = LCS_BUF_STATE_READY;
card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
}
card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
/* Last ccw is a tic (transfer in channel). */
card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
card->read.ccws[LCS_NUM_BUFFS].cda =
(__u32)virt_to_phys(card->read.ccws);
	/* Set initial state of the read channel. */
card->read.state = LCS_CH_STATE_INIT;
card->read.io_idx = 0;
card->read.buf_idx = 0;
}
static void
lcs_setup_read(struct lcs_card *card)
{
LCS_DBF_TEXT(3, setup, "initread");
lcs_setup_read_ccws(card);
/* Initialize read channel tasklet. */
card->read.irq_tasklet.data = (unsigned long) &card->read;
card->read.irq_tasklet.func = lcs_tasklet;
/* Initialize waitqueue. */
init_waitqueue_head(&card->read.wait_q);
}
/*
* Setup write channel.
*/
static void
lcs_setup_write_ccws(struct lcs_card *card)
{
int cnt;
LCS_DBF_TEXT(3, setup, "iwritccw");
/* Setup write ccws. */
memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
card->write.ccws[cnt].count = 0;
card->write.ccws[cnt].flags =
CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
/*
* Note: we have allocated the buffer with GFP_DMA, so
* we do not need to do set_normalized_cda.
*/
card->write.ccws[cnt].cda =
(__u32)virt_to_phys(card->write.iob[cnt].data);
}
/* Last ccw is a tic (transfer in channel). */
card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
card->write.ccws[LCS_NUM_BUFFS].cda =
(__u32)virt_to_phys(card->write.ccws);
/* Set initial state of the write channel. */
	card->write.state = LCS_CH_STATE_INIT;
card->write.io_idx = 0;
card->write.buf_idx = 0;
}
static void
lcs_setup_write(struct lcs_card *card)
{
LCS_DBF_TEXT(3, setup, "initwrit");
lcs_setup_write_ccws(card);
/* Initialize write channel tasklet. */
card->write.irq_tasklet.data = (unsigned long) &card->write;
card->write.irq_tasklet.func = lcs_tasklet;
/* Initialize waitqueue. */
init_waitqueue_head(&card->write.wait_q);
}
static void
lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
{
unsigned long flags;
spin_lock_irqsave(&card->mask_lock, flags);
card->thread_allowed_mask = threads;
spin_unlock_irqrestore(&card->mask_lock, flags);
wake_up(&card->wait_q);
}
static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->mask_lock, flags);
rc = (card->thread_running_mask & threads);
spin_unlock_irqrestore(&card->mask_lock, flags);
return rc;
}
static int
lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
{
return wait_event_interruptible(card->wait_q,
lcs_threads_running(card, threads) == 0);
}
static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
spin_lock_irqsave(&card->mask_lock, flags);
if ( !(card->thread_allowed_mask & thread) ||
(card->thread_start_mask & thread) ) {
spin_unlock_irqrestore(&card->mask_lock, flags);
return -EPERM;
}
card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->mask_lock, flags);
return 0;
}
static void
lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
spin_lock_irqsave(&card->mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->mask_lock, flags);
wake_up(&card->wait_q);
}
static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->mask_lock, flags);
if (card->thread_start_mask & thread){
if ((card->thread_allowed_mask & thread) &&
!(card->thread_running_mask & thread)){
rc = 1;
card->thread_start_mask &= ~thread;
card->thread_running_mask |= thread;
} else
rc = -EPERM;
}
spin_unlock_irqrestore(&card->mask_lock, flags);
return rc;
}
static int
lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
int rc = 0;
wait_event(card->wait_q,
(rc = __lcs_do_run_thread(card, thread)) >= 0);
return rc;
}
static int
lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->mask_lock, flags);
LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
rc = (card->thread_start_mask & thread);
spin_unlock_irqrestore(&card->mask_lock, flags);
return rc;
}
/*
* Initialize channels,card and state machines.
*/
static void
lcs_setup_card(struct lcs_card *card)
{
LCS_DBF_TEXT(2, setup, "initcard");
LCS_DBF_HEX(2, setup, &card, sizeof(void*));
lcs_setup_read(card);
lcs_setup_write(card);
	/* Set the card's initial state. */
card->state = DEV_STATE_DOWN;
card->tx_buffer = NULL;
card->tx_emitted = 0;
init_waitqueue_head(&card->wait_q);
spin_lock_init(&card->lock);
spin_lock_init(&card->ipm_lock);
spin_lock_init(&card->mask_lock);
#ifdef CONFIG_IP_MULTICAST
INIT_LIST_HEAD(&card->ipm_list);
#endif
INIT_LIST_HEAD(&card->lancmd_waiters);
}
static void lcs_clear_multicast_list(struct lcs_card *card)
{
#ifdef CONFIG_IP_MULTICAST
struct lcs_ipm_list *ipm;
unsigned long flags;
/* Free multicast list. */
LCS_DBF_TEXT(3, setup, "clmclist");
spin_lock_irqsave(&card->ipm_lock, flags);
while (!list_empty(&card->ipm_list)){
ipm = list_entry(card->ipm_list.next,
struct lcs_ipm_list, list);
list_del(&ipm->list);
if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
spin_unlock_irqrestore(&card->ipm_lock, flags);
lcs_send_delipm(card, ipm);
spin_lock_irqsave(&card->ipm_lock, flags);
}
kfree(ipm);
}
spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}
/*
* Cleanup channels,card and state machines.
*/
static void
lcs_cleanup_card(struct lcs_card *card)
{
LCS_DBF_TEXT(3, setup, "cleancrd");
LCS_DBF_HEX(2,setup,&card,sizeof(void*));
if (card->dev != NULL)
free_netdev(card->dev);
/* Cleanup channels. */
lcs_cleanup_channel(&card->write);
lcs_cleanup_channel(&card->read);
}
/*
* Start channel.
*/
static int
lcs_start_channel(struct lcs_channel *channel)
{
unsigned long flags;
int rc;
LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
channel->ccws + channel->io_idx, 0, 0,
DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
if (rc == 0)
channel->state = LCS_CH_STATE_RUNNING;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4,trace,"essh%s",
dev_name(&channel->ccwdev->dev));
dev_err(&channel->ccwdev->dev,
"Starting an LCS device resulted in an error,"
" rc=%d!\n", rc);
}
return rc;
}
static int
lcs_clear_channel(struct lcs_channel *channel)
{
unsigned long flags;
int rc;
LCS_DBF_TEXT(4,trace,"clearch");
LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_clear(channel->ccwdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4, trace, "ecsc%s",
dev_name(&channel->ccwdev->dev));
return rc;
}
wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
channel->state = LCS_CH_STATE_STOPPED;
return rc;
}
/*
* Stop channel.
*/
static int
lcs_stop_channel(struct lcs_channel *channel)
{
unsigned long flags;
int rc;
if (channel->state == LCS_CH_STATE_STOPPED)
return 0;
LCS_DBF_TEXT(4,trace,"haltsch");
LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
channel->state = LCS_CH_STATE_INIT;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4, trace, "ehsc%s",
dev_name(&channel->ccwdev->dev));
return rc;
}
	/* Asynchronous halt initiated. Wait for its completion. */
wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
lcs_clear_channel(channel);
return 0;
}
/*
* start read and write channel
*/
static int
lcs_start_channels(struct lcs_card *card)
{
int rc;
LCS_DBF_TEXT(2, trace, "chstart");
/* start read channel */
rc = lcs_start_channel(&card->read);
if (rc)
return rc;
/* start write channel */
rc = lcs_start_channel(&card->write);
if (rc)
lcs_stop_channel(&card->read);
return rc;
}
/*
* stop read and write channel
*/
static int
lcs_stop_channels(struct lcs_card *card)
{
LCS_DBF_TEXT(2, trace, "chhalt");
lcs_stop_channel(&card->read);
lcs_stop_channel(&card->write);
return 0;
}
/*
* Get empty buffer.
*/
static struct lcs_buffer *
__lcs_get_buffer(struct lcs_channel *channel)
{
int index;
LCS_DBF_TEXT(5, trace, "_getbuff");
index = channel->io_idx;
do {
if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
channel->iob[index].state = LCS_BUF_STATE_LOCKED;
return channel->iob + index;
}
index = (index + 1) & (LCS_NUM_BUFFS - 1);
} while (index != channel->io_idx);
return NULL;
}
static struct lcs_buffer *
lcs_get_buffer(struct lcs_channel *channel)
{
struct lcs_buffer *buffer;
unsigned long flags;
LCS_DBF_TEXT(5, trace, "getbuff");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer = __lcs_get_buffer(channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
return buffer;
}
/*
* Resume channel program if the channel is suspended.
*/
static int
__lcs_resume_channel(struct lcs_channel *channel)
{
int rc;
if (channel->state != LCS_CH_STATE_SUSPENDED)
return 0;
if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
return 0;
LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
rc = ccw_device_resume(channel->ccwdev);
if (rc) {
LCS_DBF_TEXT_(4, trace, "ersc%s",
dev_name(&channel->ccwdev->dev));
dev_err(&channel->ccwdev->dev,
"Sending data from the LCS device to the LAN failed"
" with rc=%d\n",rc);
} else
channel->state = LCS_CH_STATE_RUNNING;
return rc;
}
/*
* Make a buffer ready for processing.
*/
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
int prev, next;
LCS_DBF_TEXT(5, trace, "rdybits");
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
next = (index + 1) & (LCS_NUM_BUFFS - 1);
/* Check if we may clear the suspend bit of this buffer. */
if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
/* Check if we have to set the PCI bit. */
if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
/* Suspend bit of the previous buffer is not set. */
channel->ccws[index].flags |= CCW_FLAG_PCI;
/* Suspend bit of the next buffer is set. */
channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
}
}
static int
lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
unsigned long flags;
int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff");
BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = LCS_BUF_STATE_READY;
index = buffer - channel->iob;
/* Set length. */
channel->ccws[index].count = buffer->count;
/* Check relevant PCI/suspend bits. */
__lcs_ready_buffer_bits(channel, index);
rc = __lcs_resume_channel(channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
return rc;
}
/*
* Mark the buffer as processed. Take care of the suspend bit
* of the previous buffer. This function is called from
* interrupt context, so the lock must not be taken.
*/
static int
__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff");
BUG_ON(buffer->state != LCS_BUF_STATE_READY);
buffer->state = LCS_BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
next = (index + 1) & (LCS_NUM_BUFFS - 1);
/* Set the suspend bit and clear the PCI bit of this buffer. */
channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
channel->ccws[index].flags &= ~CCW_FLAG_PCI;
/* Check the suspend bit of the previous buffer. */
if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
/*
* Previous buffer is in state ready. It might have
* happened in lcs_ready_buffer that the suspend bit
* has not been cleared to avoid an endless loop.
* Do it now.
*/
__lcs_ready_buffer_bits(channel, prev);
}
/* Clear PCI bit of next buffer. */
channel->ccws[next].flags &= ~CCW_FLAG_PCI;
return __lcs_resume_channel(channel);
}
/*
* Put a processed buffer back to state empty.
*/
static void
lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff");
BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
buffer->state != LCS_BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = LCS_BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}
/*
* Get buffer for a lan command.
*/
static struct lcs_buffer *
lcs_get_lancmd(struct lcs_card *card, int count)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(4, trace, "getlncmd");
/* Get buffer and wait if none is available. */
wait_event(card->write.wait_q,
((buffer = lcs_get_buffer(&card->write)) != NULL));
count += sizeof(struct lcs_header);
*(__u16 *)(buffer->data + count) = 0;
buffer->count = count + sizeof(__u16);
buffer->callback = lcs_release_buffer;
cmd = (struct lcs_cmd *) buffer->data;
cmd->offset = count;
cmd->type = LCS_FRAME_TYPE_CONTROL;
cmd->slot = 0;
return buffer;
}
static void
lcs_get_reply(struct lcs_reply *reply)
{
refcount_inc(&reply->refcnt);
}
static void
lcs_put_reply(struct lcs_reply *reply)
{
if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
}
static struct lcs_reply *
lcs_alloc_reply(struct lcs_cmd *cmd)
{
struct lcs_reply *reply;
LCS_DBF_TEXT(4, trace, "getreply");
reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
if (!reply)
return NULL;
refcount_set(&reply->refcnt, 1);
reply->sequence_no = cmd->sequence_no;
reply->received = 0;
reply->rc = 0;
init_waitqueue_head(&reply->wait_q);
return reply;
}
/*
* Notifier function for lancmd replies. Called from read irq.
*/
static void
lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
{
struct list_head *l, *n;
struct lcs_reply *reply;
LCS_DBF_TEXT(4, trace, "notiwait");
spin_lock(&card->lock);
list_for_each_safe(l, n, &card->lancmd_waiters) {
reply = list_entry(l, struct lcs_reply, list);
if (reply->sequence_no == cmd->sequence_no) {
lcs_get_reply(reply);
list_del_init(&reply->list);
if (reply->callback != NULL)
reply->callback(card, cmd);
reply->received = 1;
reply->rc = cmd->return_code;
wake_up(&reply->wait_q);
lcs_put_reply(reply);
break;
}
}
spin_unlock(&card->lock);
}
/*
* Emit buffer of a lan command.
*/
static void
lcs_lancmd_timeout(struct timer_list *t)
{
struct lcs_reply *reply = from_timer(reply, t, timer);
struct lcs_reply *list_reply, *r;
unsigned long flags;
LCS_DBF_TEXT(4, trace, "timeout");
spin_lock_irqsave(&reply->card->lock, flags);
list_for_each_entry_safe(list_reply, r,
&reply->card->lancmd_waiters,list) {
if (reply == list_reply) {
lcs_get_reply(reply);
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
reply->received = 1;
reply->rc = -ETIME;
wake_up(&reply->wait_q);
lcs_put_reply(reply);
return;
}
}
spin_unlock_irqrestore(&reply->card->lock, flags);
}
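/*
 * Send a LAN command and wait for the reply: allocate a reply tracker keyed
 * by the sequence number, queue the buffer on the write channel, arm the
 * timeout timer, and sleep until the read IRQ path (or the timer) marks the
 * reply as received.
 */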
static int
lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
{
struct lcs_reply *reply;
struct lcs_cmd *cmd;
unsigned long flags;
int rc;
LCS_DBF_TEXT(4, trace, "sendcmd");
cmd = (struct lcs_cmd *) buffer->data;
cmd->return_code = 0;
cmd->sequence_no = card->sequence_no++;
reply = lcs_alloc_reply(cmd);
if (!reply)
return -ENOMEM;
reply->callback = reply_callback;
reply->card = card;
spin_lock_irqsave(&card->lock, flags);
list_add_tail(&reply->list, &card->lancmd_waiters);
spin_unlock_irqrestore(&card->lock, flags);
buffer->callback = lcs_release_buffer;
rc = lcs_ready_buffer(&card->write, buffer);
if (rc)
return rc;
timer_setup(&reply->timer, lcs_lancmd_timeout, 0);
mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout);
wait_event(reply->wait_q, reply->received);
del_timer_sync(&reply->timer);
LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
rc = reply->rc;
lcs_put_reply(reply);
return rc ? -EIO : 0;
}
/*
* LCS startup command
*/
static int
lcs_send_startup(struct lcs_card *card, __u8 initiator)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "startup");
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_STARTUP;
cmd->initiator = initiator;
cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
return lcs_send_lancmd(card, buffer, NULL);
}
/*
* LCS shutdown command
*/
static int
lcs_send_shutdown(struct lcs_card *card)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "shutdown");
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_SHUTDOWN;
cmd->initiator = LCS_INITIATOR_TCPIP;
return lcs_send_lancmd(card, buffer, NULL);
}
/*
* LCS lanstat command
*/
static void
__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
LCS_DBF_TEXT(2, trace, "statcb");
memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
}
static int
lcs_send_lanstat(struct lcs_card *card)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2,trace, "cmdstat");
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
/* Setup lanstat command. */
cmd->cmd_code = LCS_CMD_LANSTAT;
cmd->initiator = LCS_INITIATOR_TCPIP;
cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
cmd->cmd.lcs_std_cmd.portno = card->portno;
return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
}
/*
* send stoplan command
*/
static int
lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "cmdstpln");
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_STOPLAN;
cmd->initiator = initiator;
cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
cmd->cmd.lcs_std_cmd.portno = card->portno;
return lcs_send_lancmd(card, buffer, NULL);
}
/*
* send startlan command
*/
static void
__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
LCS_DBF_TEXT(2, trace, "srtlancb");
card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
card->portno = cmd->cmd.lcs_std_cmd.portno;
}
static int
lcs_send_startlan(struct lcs_card *card, __u8 initiator)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "cmdstaln");
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_STARTLAN;
cmd->initiator = initiator;
cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
cmd->cmd.lcs_std_cmd.portno = card->portno;
return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
}
#ifdef CONFIG_IP_MULTICAST
/*
* send setipm command (Multicast)
*/
static int
lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "cmdsetim");
buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_SETIPM;
cmd->initiator = LCS_INITIATOR_TCPIP;
cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
cmd->cmd.lcs_qipassist.portno = card->portno;
cmd->cmd.lcs_qipassist.version = 4;
cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
&ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
return lcs_send_lancmd(card, buffer, NULL);
}
/*
* send delipm command (Multicast)
*/
static int
lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
LCS_DBF_TEXT(2, trace, "cmddelim");
buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_DELIPM;
cmd->initiator = LCS_INITIATOR_TCPIP;
cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
cmd->cmd.lcs_qipassist.portno = card->portno;
cmd->cmd.lcs_qipassist.version = 4;
cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
&ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
return lcs_send_lancmd(card, buffer, NULL);
}
/*
* check if multicast is supported by LCS
*/
static void
__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
LCS_DBF_TEXT(2, trace, "chkmccb");
card->ip_assists_supported =
cmd->cmd.lcs_qipassist.ip_assists_supported;
card->ip_assists_enabled =
cmd->cmd.lcs_qipassist.ip_assists_enabled;
}
static int
lcs_check_multicast_support(struct lcs_card *card)
{
struct lcs_buffer *buffer;
struct lcs_cmd *cmd;
int rc;
LCS_DBF_TEXT(2, trace, "cmdqipa");
/* Send query ipassist. */
buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
cmd = (struct lcs_cmd *) buffer->data;
cmd->cmd_code = LCS_CMD_QIPASSIST;
cmd->initiator = LCS_INITIATOR_TCPIP;
cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
cmd->cmd.lcs_qipassist.portno = card->portno;
cmd->cmd.lcs_qipassist.version = 4;
cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
if (rc != 0) {
pr_err("Query IPAssist failed. Assuming unsupported!\n");
return -EOPNOTSUPP;
}
if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
return 0;
return -EOPNOTSUPP;
}
/*
* set or del multicast address on LCS card
*/
static void
lcs_fix_multicast_list(struct lcs_card *card)
{
struct list_head failed_list;
struct lcs_ipm_list *ipm, *tmp;
unsigned long flags;
int rc;
LCS_DBF_TEXT(4, trace, "fixipm");
INIT_LIST_HEAD(&failed_list);
spin_lock_irqsave(&card->ipm_lock, flags);
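/* The ipm_lock is dropped while a command is sent to the card, so
* the list scan restarts from the top after every modification. */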
list_modified:
list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list) {
switch (ipm->ipm_state) {
case LCS_IPM_STATE_SET_REQUIRED:
/* del from ipm_list so no one else can tamper with
* this entry */
list_del_init(&ipm->list);
spin_unlock_irqrestore(&card->ipm_lock, flags);
rc = lcs_send_setipm(card, ipm);
spin_lock_irqsave(&card->ipm_lock, flags);
if (rc) {
pr_info("Adding multicast address failed."
" Table possibly full!\n");
/* store ipm in failed list -> will be added
* to ipm_list again, so a retry will be done
* during the next call of this function */
list_add_tail(&ipm->list, &failed_list);
} else {
ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
/* re-insert into ipm_list */
list_add_tail(&ipm->list, &card->ipm_list);
}
goto list_modified;
case LCS_IPM_STATE_DEL_REQUIRED:
list_del(&ipm->list);
spin_unlock_irqrestore(&card->ipm_lock, flags);
lcs_send_delipm(card, ipm);
spin_lock_irqsave(&card->ipm_lock, flags);
kfree(ipm);
goto list_modified;
case LCS_IPM_STATE_ON_CARD:
break;
}
}
/* re-insert all entries from the failed_list into ipm_list */
list_for_each_entry_safe(ipm, tmp, &failed_list, list)
list_move_tail(&ipm->list, &card->ipm_list);
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
/*
* get mac address for the relevant Multicast address
*/
static void
lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
{
LCS_DBF_TEXT(4, trace, "getmac");
ip_eth_mc_map(ipm, mac);
}
/*
* function called by net device to handle multicast address relevant things
*/
static void lcs_remove_mc_addresses(struct lcs_card *card,
struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct list_head *l;
struct lcs_ipm_list *ipm;
unsigned long flags;
char buf[MAX_ADDR_LEN];
LCS_DBF_TEXT(4, trace, "remmclst");
spin_lock_irqsave(&card->ipm_lock, flags);
list_for_each(l, &card->ipm_list) {
ipm = list_entry(l, struct lcs_ipm_list, list);
for (im4 = rcu_dereference(in4_dev->mc_list);
im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
if ((ipm->ipm.ip_addr == im4->multiaddr) &&
(memcmp(buf, &ipm->ipm.mac_addr,
LCS_MAC_LENGTH) == 0))
break;
}
if (im4 == NULL)
ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
}
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
struct ip_mc_list *im4,
char *buf)
{
struct lcs_ipm_list *tmp, *ipm = NULL;
struct list_head *l;
unsigned long flags;
LCS_DBF_TEXT(4, trace, "chkmcent");
spin_lock_irqsave(&card->ipm_lock, flags);
list_for_each(l, &card->ipm_list) {
tmp = list_entry(l, struct lcs_ipm_list, list);
if ((tmp->ipm.ip_addr == im4->multiaddr) &&
(memcmp(buf, &tmp->ipm.mac_addr,
LCS_MAC_LENGTH) == 0)) {
ipm = tmp;
break;
}
}
spin_unlock_irqrestore(&card->ipm_lock, flags);
return ipm;
}
static void lcs_set_mc_addresses(struct lcs_card *card,
struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct lcs_ipm_list *ipm;
char buf[MAX_ADDR_LEN];
unsigned long flags;
LCS_DBF_TEXT(4, trace, "setmclst");
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
ipm = lcs_check_addr_entry(card, im4, buf);
if (ipm != NULL)
continue; /* Address already in list. */
ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
if (ipm == NULL) {
pr_info("Not enough memory to add"
" new multicast entry!\n");
break;
}
memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
ipm->ipm.ip_addr = im4->multiaddr;
ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
spin_lock_irqsave(&card->ipm_lock, flags);
LCS_DBF_HEX(2, trace, &ipm->ipm.ip_addr, 4);
list_add(&ipm->list, &card->ipm_list);
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
}
static int
lcs_register_mc_addresses(void *data)
{
struct lcs_card *card;
struct in_device *in4_dev;
card = (struct lcs_card *) data;
if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
return 0;
LCS_DBF_TEXT(4, trace, "regmulti");
in4_dev = in_dev_get(card->dev);
if (in4_dev == NULL)
goto out;
rcu_read_lock();
lcs_remove_mc_addresses(card, in4_dev);
lcs_set_mc_addresses(card, in4_dev);
rcu_read_unlock();
in_dev_put(in4_dev);
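/* Quiesce transmission while the multicast list on the card is updated. */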
netif_carrier_off(card->dev);
netif_tx_disable(card->dev);
wait_event(card->write.wait_q,
(card->write.state != LCS_CH_STATE_RUNNING));
lcs_fix_multicast_list(card);
if (card->state == DEV_STATE_UP) {
netif_carrier_on(card->dev);
netif_wake_queue(card->dev);
}
out:
lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
return 0;
}
#endif /* CONFIG_IP_MULTICAST */
/*
* function called by net device to
* handle multicast address relevant things
*/
static void
lcs_set_multicast_list(struct net_device *dev)
{
#ifdef CONFIG_IP_MULTICAST
struct lcs_card *card;
LCS_DBF_TEXT(4, trace, "setmulti");
card = (struct lcs_card *) dev->ml_priv;
if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
schedule_work(&card->kernel_thread_starter);
#endif /* CONFIG_IP_MULTICAST */
}
static long
lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
switch (PTR_ERR(irb)) {
case -EIO:
dev_warn(&cdev->dev,
"An I/O-error occurred on the LCS device\n");
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
break;
case -ETIMEDOUT:
dev_warn(&cdev->dev,
"A command timed out on the LCS device\n");
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
break;
default:
dev_warn(&cdev->dev,
"An error occurred on the LCS device, rc=%ld\n",
PTR_ERR(irb));
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT(2, trace, " rc???");
}
return PTR_ERR(irb);
}
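/*
* Evaluate channel status and sense data.
* Returns 1 if the status indicates a serious problem that requires
* channel recovery, 0 otherwise.
*/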
static int
lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
{
int dstat, cstat;
char *sense;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
LCS_DBF_TEXT(2, trace, "CGENCHK");
return 1;
}
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[LCS_SENSE_BYTE_1] &
LCS_SENSE_RESETTING_EVENT) {
LCS_DBF_TEXT(2, trace, "REVIND");
return 1;
}
if (sense[LCS_SENSE_BYTE_0] &
LCS_SENSE_CMD_REJECT) {
LCS_DBF_TEXT(2, trace, "CMDREJ");
return 0;
}
if ((!sense[LCS_SENSE_BYTE_0]) &&
(!sense[LCS_SENSE_BYTE_1]) &&
(!sense[LCS_SENSE_BYTE_2]) &&
(!sense[LCS_SENSE_BYTE_3])) {
LCS_DBF_TEXT(2, trace, "ZEROSEN");
return 0;
}
LCS_DBF_TEXT(2, trace, "DGENCHK");
return 1;
}
return 0;
}
static void
lcs_schedule_recovery(struct lcs_card *card)
{
LCS_DBF_TEXT(2, trace, "startrec");
if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
schedule_work(&card->kernel_thread_starter);
}
/*
* IRQ Handler for LCS channels
*/
static void
lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct lcs_card *card;
struct lcs_channel *channel;
int rc, index;
int cstat, dstat;
if (lcs_check_irb_error(cdev, irb))
return;
card = CARD_FROM_DEV(cdev);
if (card->read.ccwdev == cdev)
channel = &card->read;
else
channel = &card->write;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
irb->scsw.cmd.actl);
/* Check for channel and device errors presented */
rc = lcs_get_problem(cdev, irb);
if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
dev_warn(&cdev->dev,
"The LCS device stopped because of an error,"
" dstat=0x%X, cstat=0x%X \n",
dstat, cstat);
if (rc) {
channel->state = LCS_CH_STATE_ERROR;
}
}
if (channel->state == LCS_CH_STATE_ERROR) {
lcs_schedule_recovery(card);
wake_up(&card->wait_q);
return;
}
/* How far in the ccw chain have we processed? */
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.cpa != 0)) {
index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
- channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
(irb->scsw.cmd.cstat & SCHN_STAT_PCI))
/* Bloody io subsystem tells us lies about cpa... */
index = (index - 1) & (LCS_NUM_BUFFS - 1);
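/* Mark every buffer up to the reported position as processed;
* their callbacks run later in the tasklet. */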
while (channel->io_idx != index) {
__lcs_processed_buffer(channel,
channel->iob + channel->io_idx);
channel->io_idx =
(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
}
}
if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
/* Mark channel as stopped. */
channel->state = LCS_CH_STATE_STOPPED;
else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
/* CCW execution stopped on a suspend bit. */
channel->state = LCS_CH_STATE_SUSPENDED;
if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
if (irb->scsw.cmd.cc != 0) {
ccw_device_halt(channel->ccwdev, 0);
return;
}
/* The channel has been stopped by halt_IO. */
channel->state = LCS_CH_STATE_HALTED;
}
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
channel->state = LCS_CH_STATE_CLEARED;
/* Do the rest in the tasklet. */
tasklet_schedule(&channel->irq_tasklet);
}
/*
* Tasklet for IRQ handler
*/
static void
lcs_tasklet(unsigned long data)
{
unsigned long flags;
struct lcs_channel *channel;
struct lcs_buffer *iob;
int buf_idx;
channel = (struct lcs_channel *) data;
LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
/* Check for processed buffers. */
iob = channel->iob;
buf_idx = channel->buf_idx;
while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
/* Do the callback thing. */
if (iob[buf_idx].callback != NULL)
iob[buf_idx].callback(channel, iob + buf_idx);
buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
}
channel->buf_idx = buf_idx;
if (channel->state == LCS_CH_STATE_STOPPED)
lcs_start_channel(channel);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
if (channel->state == LCS_CH_STATE_SUSPENDED &&
channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
__lcs_resume_channel(channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
/* Something happened on the channel. Wake up waiters. */
wake_up(&channel->wait_q);
}
/*
* Finish current tx buffer and make it ready for transmit.
*/
static void
__lcs_emit_txbuffer(struct lcs_card *card)
{
LCS_DBF_TEXT(5, trace, "emittx");
*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
card->tx_buffer->count += 2;
lcs_ready_buffer(&card->write, card->tx_buffer);
card->tx_buffer = NULL;
card->tx_emitted++;
}
/*
* Callback for finished tx buffers.
*/
static void
lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
struct lcs_card *card;
LCS_DBF_TEXT(5, trace, "txbuffcb");
/* Put buffer back to pool. */
lcs_release_buffer(channel, buffer);
card = container_of(channel, struct lcs_card, write);
if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
netif_wake_queue(card->dev);
spin_lock(&card->lock);
card->tx_emitted--;
if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
/*
* Last running tx buffer has finished. Submit partially
* filled current buffer.
*/
__lcs_emit_txbuffer(card);
spin_unlock(&card->lock);
}
/*
* Packet transmit function called by network stack
*/
static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
struct net_device *dev)
{
struct lcs_header *header;
int rc = NETDEV_TX_OK;
LCS_DBF_TEXT(5, trace, "hardxmit");
if (skb == NULL) {
card->stats.tx_dropped++;
card->stats.tx_errors++;
return NETDEV_TX_OK;
}
if (card->state != DEV_STATE_UP) {
dev_kfree_skb(skb);
card->stats.tx_dropped++;
card->stats.tx_errors++;
card->stats.tx_carrier_errors++;
return NETDEV_TX_OK;
}
if (skb->protocol == htons(ETH_P_IPV6)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
netif_stop_queue(card->dev);
spin_lock(&card->lock);
if (card->tx_buffer != NULL &&
card->tx_buffer->count + sizeof(struct lcs_header) +
skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
/* skb too big for current tx buffer. */
__lcs_emit_txbuffer(card);
if (card->tx_buffer == NULL) {
/* Get new tx buffer */
card->tx_buffer = lcs_get_buffer(&card->write);
if (card->tx_buffer == NULL) {
card->stats.tx_dropped++;
rc = NETDEV_TX_BUSY;
goto out;
}
card->tx_buffer->callback = lcs_txbuffer_cb;
card->tx_buffer->count = 0;
}
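/* Prepend an lcs_header to the packet data; its offset field points to
* the end of this frame, i.e. the start of the next one in the buffer. */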
header = (struct lcs_header *)
(card->tx_buffer->data + card->tx_buffer->count);
card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
header->offset = card->tx_buffer->count;
header->type = card->lan_type;
header->slot = card->portno;
skb_copy_from_linear_data(skb, header + 1, skb->len);
spin_unlock(&card->lock);
card->stats.tx_bytes += skb->len;
card->stats.tx_packets++;
dev_kfree_skb(skb);
netif_wake_queue(card->dev);
spin_lock(&card->lock);
if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
/* If this is the first tx buffer emit it immediately. */
__lcs_emit_txbuffer(card);
out:
spin_unlock(&card->lock);
return rc;
}
static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lcs_card *card;
int rc;
LCS_DBF_TEXT(5, trace, "pktxmit");
card = (struct lcs_card *) dev->ml_priv;
rc = __lcs_start_xmit(card, skb, dev);
return rc;
}
/*
* send startlan and lanstat command to make LCS device ready
*/
static int
lcs_startlan_auto(struct lcs_card *card)
{
int rc;
LCS_DBF_TEXT(2, trace, "strtauto");
card->lan_type = LCS_FRAME_TYPE_ENET;
rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
if (rc == 0)
return 0;
return -EIO;
}
static int
lcs_startlan(struct lcs_card *card)
{
int rc, i;
LCS_DBF_TEXT(2, trace, "startlan");
rc = 0;
if (card->portno != LCS_INVALID_PORT_NO) {
if (card->lan_type == LCS_FRAME_TYPE_AUTO)
rc = lcs_startlan_auto(card);
else
rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
} else {
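/* No port number configured: probe ports 0..16 until startlan succeeds. */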
for (i = 0; i <= 16; i++) {
card->portno = i;
if (card->lan_type != LCS_FRAME_TYPE_AUTO)
rc = lcs_send_startlan(card,
LCS_INITIATOR_TCPIP);
else
/* autodetecting lan type */
rc = lcs_startlan_auto(card);
if (rc == 0)
break;
}
}
if (rc == 0)
return lcs_send_lanstat(card);
return rc;
}
/*
* LCS detect function
* setup channels and make them I/O ready
*/
static int
lcs_detect(struct lcs_card *card)
{
int rc = 0;
LCS_DBF_TEXT(2, setup, "lcsdetct");
/* start/reset card */
if (card->dev)
netif_stop_queue(card->dev);
rc = lcs_stop_channels(card);
if (rc == 0) {
rc = lcs_start_channels(card);
if (rc == 0) {
rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
if (rc == 0)
rc = lcs_startlan(card);
}
}
if (rc == 0) {
card->state = DEV_STATE_UP;
} else {
card->state = DEV_STATE_DOWN;
card->write.state = LCS_CH_STATE_INIT;
card->read.state = LCS_CH_STATE_INIT;
}
return rc;
}
/*
* LCS Stop card
*/
static int
lcs_stopcard(struct lcs_card *card)
{
int rc;
LCS_DBF_TEXT(3, setup, "stopcard");
if (card->read.state != LCS_CH_STATE_STOPPED &&
card->write.state != LCS_CH_STATE_STOPPED &&
card->read.state != LCS_CH_STATE_ERROR &&
card->write.state != LCS_CH_STATE_ERROR &&
card->state == DEV_STATE_UP) {
lcs_clear_multicast_list(card);
rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
rc = lcs_send_shutdown(card);
}
rc = lcs_stop_channels(card);
card->state = DEV_STATE_DOWN;
return rc;
}
/*
* Kernel Thread helper functions for LGW initiated commands
*/
static void
lcs_start_kernel_thread(struct work_struct *work)
{
struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
LCS_DBF_TEXT(5, trace, "krnthrd");
if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
kthread_run(lcs_recovery, card, "lcs_recover");
#ifdef CONFIG_IP_MULTICAST
if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
kthread_run(lcs_register_mc_addresses, card, "regipm");
#endif
}
/*
* Process control frames.
*/
static void
lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
{
LCS_DBF_TEXT(5, trace, "getctrl");
if (cmd->initiator == LCS_INITIATOR_LGW) {
switch(cmd->cmd_code) {
case LCS_CMD_STARTUP:
case LCS_CMD_STARTLAN:
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
if (card->dev) {
pr_warn("Stoplan for %s initiated by LGW\n",
card->dev->name);
netif_carrier_off(card->dev);
}
break;
default:
LCS_DBF_TEXT(5, trace, "noLGWcmd");
break;
}
} else
lcs_notify_lancmd_waiters(card, cmd);
}
/*
* Unpack network packet.
*/
static void
lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
{
struct sk_buff *skb;
LCS_DBF_TEXT(5, trace, "getskb");
if (card->dev == NULL ||
card->state != DEV_STATE_UP)
/* The card isn't up. Ignore the packet. */
return;
skb = dev_alloc_skb(skb_len);
if (skb == NULL) {
dev_err(&card->dev->dev,
" Allocating a socket buffer to interface %s failed\n",
card->dev->name);
card->stats.rx_dropped++;
return;
}
skb_put_data(skb, skb_data, skb_len);
skb->protocol = card->lan_type_trans(skb, card->dev);
card->stats.rx_bytes += skb_len;
card->stats.rx_packets++;
if (skb->protocol == htons(ETH_P_802_2))
*((__u32 *)skb->cb) = ++card->pkt_seq;
netif_rx(skb);
}
/*
* LCS main routine to get packets and lancmd replies from the buffers
*/
static void
lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
struct lcs_card *card;
struct lcs_header *lcs_hdr;
__u16 offset;
LCS_DBF_TEXT(5, trace, "lcsgtpkt");
lcs_hdr = (struct lcs_header *) buffer->data;
if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
LCS_DBF_TEXT(4, trace, "-eiogpkt");
return;
}
card = container_of(channel, struct lcs_card, read);
offset = 0;
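/* Walk the frame chain; each header's offset field gives the start of
* the next frame, and a zero offset terminates the chain. */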
while (lcs_hdr->offset != 0) {
if (lcs_hdr->offset <= 0 ||
lcs_hdr->offset > LCS_IOBUFFERSIZE ||
lcs_hdr->offset < offset) {
/* Offset invalid. */
card->stats.rx_length_errors++;
card->stats.rx_errors++;
return;
}
if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET)
lcs_get_skb(card, (char *)(lcs_hdr + 1),
lcs_hdr->offset - offset -
sizeof(struct lcs_header));
else
dev_info_once(&card->dev->dev,
"Unknown frame type %d\n",
lcs_hdr->type);
offset = lcs_hdr->offset;
lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
lcs_hdr = (struct lcs_header *) (buffer->data + offset);
}
/* The buffer is now empty. Make it ready again. */
lcs_ready_buffer(&card->read, buffer);
}
/*
* get network statistics for ifconfig and other user programs
*/
static struct net_device_stats *
lcs_getstats(struct net_device *dev)
{
struct lcs_card *card;
LCS_DBF_TEXT(4, trace, "netstats");
card = (struct lcs_card *) dev->ml_priv;
return &card->stats;
}
/*
* Stop the LCS device.
* Called when the interface is brought down (e.g. "ifconfig xxx down").
*/
static int
lcs_stop_device(struct net_device *dev)
{
struct lcs_card *card;
int rc;
LCS_DBF_TEXT(2, trace, "stopdev");
card = (struct lcs_card *) dev->ml_priv;
netif_carrier_off(dev);
netif_tx_disable(dev);
dev->flags &= ~IFF_UP;
wait_event(card->write.wait_q,
(card->write.state != LCS_CH_STATE_RUNNING));
rc = lcs_stopcard(card);
if (rc)
dev_err(&card->dev->dev,
" Shutting down the LCS device failed\n");
return rc;
}
/*
* Start the LCS device and make it runnable.
* Called when the interface is brought up (e.g. "ifconfig xxx up").
*/
static int
lcs_open_device(struct net_device *dev)
{
struct lcs_card *card;
int rc;
LCS_DBF_TEXT(2, trace, "opendev");
card = (struct lcs_card *) dev->ml_priv;
/* initialize statistics */
rc = lcs_detect(card);
if (rc) {
pr_err("Error in opening device!\n");
} else {
dev->flags |= IFF_UP;
netif_carrier_on(dev);
netif_wake_queue(dev);
card->state = DEV_STATE_UP;
}
return rc;
}
/*
* Show function for the portno attribute, called on sysfs reads.
*/
static ssize_t
lcs_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lcs_card *card;
card = dev_get_drvdata(dev);
if (!card)
return 0;
return sysfs_emit(buf, "%d\n", card->portno);
}
/*
* Store function for the portno attribute; parses the value written to it.
*/
static ssize_t
lcs_portno_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct lcs_card *card;
int rc;
s16 value;
card = dev_get_drvdata(dev);
if (!card)
return 0;
rc = kstrtos16(buf, 0, &value);
if (rc)
return -EINVAL;
/* TODO: sanity checks */
card->portno = value;
if (card->dev)
card->dev->dev_port = card->portno;
return count;
}
static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
static const char *lcs_type[] = {
"not a channel",
"2216 parallel",
"2216 channel",
"OSA LCS card",
"unknown channel type",
"unsupported channel type",
};
static ssize_t
lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccwgroup_device *cgdev;
cgdev = to_ccwgroupdev(dev);
if (!cgdev)
return -ENODEV;
return sysfs_emit(buf, "%s\n",
lcs_type[cgdev->cdev[0]->id.driver_info]);
}
static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
static ssize_t
lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lcs_card *card;
card = dev_get_drvdata(dev);
return card ? sysfs_emit(buf, "%u\n", card->lancmd_timeout) : 0;
}
static ssize_t
lcs_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct lcs_card *card;
unsigned int value;
int rc;
card = dev_get_drvdata(dev);
if (!card)
return 0;
rc = kstrtouint(buf, 0, &value);
if (rc)
return -EINVAL;
/* TODO: sanity checks */
card->lancmd_timeout = value;
return count;
}
static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
static ssize_t
lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct lcs_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
if (!card)
return -EINVAL;
if (card->state != DEV_STATE_UP)
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
if (i == 1)
lcs_schedule_recovery(card);
return count;
}
static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
static struct attribute *lcs_attrs[] = {
&dev_attr_portno.attr,
&dev_attr_type.attr,
&dev_attr_lancmd_timeout.attr,
&dev_attr_recover.attr,
NULL,
};
static struct attribute_group lcs_attr_group = {
.attrs = lcs_attrs,
};
static const struct attribute_group *lcs_attr_groups[] = {
&lcs_attr_group,
NULL,
};
static const struct device_type lcs_devtype = {
.name = "lcs",
.groups = lcs_attr_groups,
};
/*
* lcs_probe_device is called on establishing a new ccwgroup_device.
*/
static int
lcs_probe_device(struct ccwgroup_device *ccwgdev)
{
struct lcs_card *card;
if (!get_device(&ccwgdev->dev))
return -ENODEV;
LCS_DBF_TEXT(2, setup, "add_dev");
card = lcs_alloc_card();
if (!card) {
LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
put_device(&ccwgdev->dev);
return -ENOMEM;
}
dev_set_drvdata(&ccwgdev->dev, card);
ccwgdev->cdev[0]->handler = lcs_irq;
ccwgdev->cdev[1]->handler = lcs_irq;
card->gdev = ccwgdev;
INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
ccwgdev->dev.type = &lcs_devtype;
return 0;
}
static int
lcs_register_netdev(struct ccwgroup_device *ccwgdev)
{
struct lcs_card *card;
LCS_DBF_TEXT(2, setup, "regnetdv");
card = dev_get_drvdata(&ccwgdev->dev);
if (card->dev->reg_state != NETREG_UNINITIALIZED)
return 0;
SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
return register_netdev(card->dev);
}
/*
* lcs_new_device will be called by setting the group device online.
*/
static const struct net_device_ops lcs_netdev_ops = {
.ndo_open = lcs_open_device,
.ndo_stop = lcs_stop_device,
.ndo_get_stats = lcs_getstats,
.ndo_start_xmit = lcs_start_xmit,
};
static const struct net_device_ops lcs_mc_netdev_ops = {
.ndo_open = lcs_open_device,
.ndo_stop = lcs_stop_device,
.ndo_get_stats = lcs_getstats,
.ndo_start_xmit = lcs_start_xmit,
.ndo_set_rx_mode = lcs_set_multicast_list,
};
static int
lcs_new_device(struct ccwgroup_device *ccwgdev)
{
struct lcs_card *card;
struct net_device *dev = NULL;
enum lcs_dev_states recover_state;
int rc;
card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return -ENODEV;
LCS_DBF_TEXT(2, setup, "newdev");
LCS_DBF_HEX(3, setup, &card, sizeof(void*));
card->read.ccwdev = ccwgdev->cdev[0];
card->write.ccwdev = ccwgdev->cdev[1];
recover_state = card->state;
rc = ccw_device_set_online(card->read.ccwdev);
if (rc)
goto out_err;
rc = ccw_device_set_online(card->write.ccwdev);
if (rc)
goto out_werr;
LCS_DBF_TEXT(3, setup, "lcsnewdv");
lcs_setup_card(card);
rc = lcs_detect(card);
if (rc) {
LCS_DBF_TEXT(2, setup, "dtctfail");
dev_err(&ccwgdev->dev,
"Detecting a network adapter for LCS devices"
" failed with rc=%d (0x%x)\n", rc, rc);
lcs_stopcard(card);
goto out;
}
if (card->dev) {
LCS_DBF_TEXT(2, setup, "samedev");
LCS_DBF_HEX(3, setup, &card, sizeof(void*));
goto netdev_out;
}
switch (card->lan_type) {
case LCS_FRAME_TYPE_ENET:
card->lan_type_trans = eth_type_trans;
dev = alloc_etherdev(0);
break;
default:
LCS_DBF_TEXT(3, setup, "errinit");
pr_err(" Initialization failed\n");
goto out;
}
if (!dev)
goto out;
card->dev = dev;
card->dev->ml_priv = card;
card->dev->netdev_ops = &lcs_netdev_ops;
card->dev->dev_port = card->portno;
eth_hw_addr_set(card->dev, card->mac);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
card->dev->netdev_ops = &lcs_mc_netdev_ops;
#endif
netdev_out:
lcs_set_allowed_threads(card, 0xffffffff);
if (recover_state == DEV_STATE_RECOVER) {
lcs_set_multicast_list(card->dev);
card->dev->flags |= IFF_UP;
netif_carrier_on(card->dev);
netif_wake_queue(card->dev);
card->state = DEV_STATE_UP;
} else {
lcs_stopcard(card);
}
if (lcs_register_netdev(ccwgdev) != 0)
goto out;
/* Print out supported assists: IPv6 */
pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
"with" : "without");
/* Print out supported assists: Multicast */
pr_info("LCS device %s %s Multicast support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
"with" : "without");
return 0;
out:
ccw_device_set_offline(card->write.ccwdev);
out_werr:
ccw_device_set_offline(card->read.ccwdev);
out_err:
return -ENODEV;
}
/*
* lcs_shutdown_device, called when setting the group device offline.
*/
static int
__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
{
struct lcs_card *card;
enum lcs_dev_states recover_state;
int ret = 0, ret2 = 0, ret3 = 0;
LCS_DBF_TEXT(3, setup, "shtdndev");
card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return -ENODEV;
if (recovery_mode == 0) {
lcs_set_allowed_threads(card, 0);
if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
return -ERESTARTSYS;
}
LCS_DBF_HEX(3, setup, &card, sizeof(void*));
recover_state = card->state;
ret = lcs_stop_device(card->dev);
ret2 = ccw_device_set_offline(card->read.ccwdev);
ret3 = ccw_device_set_offline(card->write.ccwdev);
if (!ret)
ret = (ret2) ? ret2 : ret3;
if (ret)
LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
if (recover_state == DEV_STATE_UP) {
card->state = DEV_STATE_RECOVER;
}
return 0;
}
static int
lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
{
return __lcs_shutdown_device(ccwgdev, 0);
}
/*
* drive lcs recovery after startup and startlan initiated by Lan Gateway
*/
static int
lcs_recovery(void *ptr)
{
struct lcs_card *card;
struct ccwgroup_device *gdev;
int rc;
card = (struct lcs_card *) ptr;
LCS_DBF_TEXT(4, trace, "recover1");
if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
return 0;
LCS_DBF_TEXT(4, trace, "recover2");
gdev = card->gdev;
dev_warn(&gdev->dev,
"A recovery process has been started for the LCS device\n");
rc = __lcs_shutdown_device(gdev, 1);
rc = lcs_new_device(gdev);
if (!rc)
pr_info("Device %s successfully recovered!\n",
card->dev->name);
else
pr_info("Device %s could not be recovered!\n",
card->dev->name);
lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
return 0;
}
/*
* lcs_remove_device, free buffers and card
*/
static void
lcs_remove_device(struct ccwgroup_device *ccwgdev)
{
struct lcs_card *card;
card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return;
LCS_DBF_TEXT(3, setup, "remdev");
LCS_DBF_HEX(3, setup, &card, sizeof(void*));
if (ccwgdev->state == CCWGROUP_ONLINE) {
lcs_shutdown_device(ccwgdev);
}
if (card->dev)
unregister_netdev(card->dev);
lcs_cleanup_card(card);
lcs_free_card(card);
dev_set_drvdata(&ccwgdev->dev, NULL);
put_device(&ccwgdev->dev);
}
static struct ccw_device_id lcs_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
{},
};
MODULE_DEVICE_TABLE(ccw, lcs_ids);
static struct ccw_driver lcs_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "lcs",
},
.ids = lcs_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
.int_class = IRQIO_LCS,
};
/*
* LCS ccwgroup driver registration
*/
static struct ccwgroup_driver lcs_group_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "lcs",
},
.ccw_driver = &lcs_ccw_driver,
.setup = lcs_probe_device,
.remove = lcs_remove_device,
.set_online = lcs_new_device,
.set_offline = lcs_shutdown_device,
};
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
int err;
err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
return err ? err : count;
}
static DRIVER_ATTR_WO(group);
static struct attribute *lcs_drv_attrs[] = {
&driver_attr_group.attr,
NULL,
};
static struct attribute_group lcs_drv_attr_group = {
.attrs = lcs_drv_attrs,
};
static const struct attribute_group *lcs_drv_attr_groups[] = {
&lcs_drv_attr_group,
NULL,
};
/*
* LCS Module/Kernel initialization function
*/
static int
__init lcs_init_module(void)
{
int rc;
pr_info("Loading %s\n", version);
rc = lcs_register_debug_facility();
LCS_DBF_TEXT(0, setup, "lcsinit");
if (rc)
goto out_err;
lcs_root_dev = root_device_register("lcs");
rc = PTR_ERR_OR_ZERO(lcs_root_dev);
if (rc)
goto register_err;
rc = ccw_driver_register(&lcs_ccw_driver);
if (rc)
goto ccw_err;
lcs_group_driver.driver.groups = lcs_drv_attr_groups;
rc = ccwgroup_driver_register(&lcs_group_driver);
if (rc)
goto ccwgroup_err;
return 0;
ccwgroup_err:
ccw_driver_unregister(&lcs_ccw_driver);
ccw_err:
root_device_unregister(lcs_root_dev);
register_err:
lcs_unregister_debug_facility();
out_err:
pr_err("Initializing the lcs device driver failed\n");
return rc;
}
/*
* LCS module cleanup function
*/
static void
__exit lcs_cleanup_module(void)
{
pr_info("Terminating lcs module.\n");
LCS_DBF_TEXT(0, trace, "cleanup");
ccwgroup_driver_unregister(&lcs_group_driver);
ccw_driver_unregister(&lcs_ccw_driver);
root_device_unregister(lcs_root_dev);
lcs_unregister_debug_facility();
}
module_init(lcs_init_module);
module_exit(lcs_cleanup_module);
MODULE_AUTHOR("Frank Pavlic <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/lcs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann ([email protected])
*
*/
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include "ctcm_dbug.h"
/*
* Debug Facility Stuff
*/
struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
[CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
[CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
};
void ctcm_unregister_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
debug_unregister(ctcm_dbf[x].id);
ctcm_dbf[x].id = NULL;
}
}
int ctcm_register_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
/* register the areas */
ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
ctcm_dbf[x].pages,
ctcm_dbf[x].areas,
ctcm_dbf[x].len);
if (ctcm_dbf[x].id == NULL) {
ctcm_unregister_dbf_views();
return -ENOMEM;
}
/* register a view */
debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
/* set a passing level */
debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
}
return 0;
}
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
{
char dbf_txt_buf[64];
va_list args;
if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
return;
va_start(args, fmt);
vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
}
| linux-master | drivers/s390/net/ctcm_dbug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/hashtable.h>
#include <linux/inet.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
static int qeth_l3_string_to_ipaddr(const char *buf,
enum qeth_prot_versions proto, u8 *addr)
{
const char *end;
if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
(proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
return -EINVAL;
return 0;
}
static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
struct qeth_routing_info *route, char *buf)
{
switch (route->type) {
case PRIMARY_ROUTER:
return sysfs_emit(buf, "%s\n", "primary router");
case SECONDARY_ROUTER:
return sysfs_emit(buf, "%s\n", "secondary router");
case MULTICAST_ROUTER:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
return sysfs_emit(buf, "%s\n", "multicast router+");
else
return sysfs_emit(buf, "%s\n", "multicast router");
case PRIMARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
return sysfs_emit(buf, "%s\n", "primary connector+");
else
return sysfs_emit(buf, "%s\n", "primary connector");
case SECONDARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
return sysfs_emit(buf, "%s\n", "secondary connector+");
else
return sysfs_emit(buf, "%s\n", "secondary connector");
default:
return sysfs_emit(buf, "%s\n", "no");
}
}
static ssize_t qeth_l3_dev_route4_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
struct qeth_routing_info *route, enum qeth_prot_versions prot,
const char *buf, size_t count)
{
enum qeth_routing_types old_route_type = route->type;
int rc = 0;
mutex_lock(&card->conf_mutex);
if (sysfs_streq(buf, "no_router")) {
route->type = NO_ROUTER;
} else if (sysfs_streq(buf, "primary_connector")) {
route->type = PRIMARY_CONNECTOR;
} else if (sysfs_streq(buf, "secondary_connector")) {
route->type = SECONDARY_CONNECTOR;
} else if (sysfs_streq(buf, "primary_router")) {
route->type = PRIMARY_ROUTER;
} else if (sysfs_streq(buf, "secondary_router")) {
route->type = SECONDARY_ROUTER;
} else if (sysfs_streq(buf, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
rc = -EINVAL;
goto out;
}
if (qeth_card_hw_is_reachable(card) &&
(old_route_type != route->type)) {
if (prot == QETH_PROT_IPV4)
rc = qeth_l3_setrouting_v4(card);
else if (prot == QETH_PROT_IPV6)
rc = qeth_l3_setrouting_v6(card);
}
out:
if (rc)
route->type = old_route_type;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_route_store(card, &card->options.route4,
QETH_PROT_IPV4, buf, count);
}
static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
qeth_l3_dev_route4_store);
static ssize_t qeth_l3_dev_route6_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}
static ssize_t qeth_l3_dev_route6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_route_store(card, &card->options.route6,
QETH_PROT_IPV6, buf, count);
}
static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
qeth_l3_dev_route6_store);
static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
unsigned long i;
if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
return -EPERM;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
rc = kstrtoul(buf, 16, &i);
if (rc) {
rc = -EINVAL;
goto out;
}
switch (i) {
case 0:
card->options.sniffer = i;
break;
case 1:
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
card->options.sniffer = i;
qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
} else {
rc = -EPERM;
}
break;
default:
rc = -EINVAL;
}
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
qeth_l3_dev_sniffer_store);
static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char tmp_hsuid[9];
if (!IS_IQD(card))
return -EPERM;
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
return sysfs_emit(buf, "%s\n", tmp_hsuid);
}
static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
char *tmp;
if (!IS_IQD(card))
return -EPERM;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
if (card->options.sniffer) {
rc = -EPERM;
goto out;
}
if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
rc = -EPERM;
goto out;
}
tmp = strsep((char **)&buf, "\n");
if (strlen(tmp) > 8) {
rc = -EINVAL;
goto out;
}
if (card->options.hsuid[0])
/* delete old ip address */
qeth_l3_modify_hsuid(card, false);
if (strlen(tmp) == 0) {
/* delete ip address only */
card->options.hsuid[0] = '\0';
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
qeth_configure_cq(card, QETH_CQ_DISABLED);
goto out;
}
if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
rc = -EPERM;
goto out;
}
scnprintf(card->options.hsuid, sizeof(card->options.hsuid),
"%-8s", tmp);
ASCEBC(card->options.hsuid, 8);
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
rc = qeth_l3_modify_hsuid(card, true);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
qeth_l3_dev_hsuid_store);
static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_route4.attr,
&dev_attr_route6.attr,
&dev_attr_sniffer.attr,
&dev_attr_hsuid.attr,
NULL,
};
static const struct attribute_group qeth_l3_device_attr_group = {
.attrs = qeth_l3_device_attrs,
};
static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", card->ipato.enabled ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool enable;
int rc = 0;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
enable = !card->ipato.enabled;
} else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
goto unlock_ip;
}
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
qeth_l3_update_ipato(card);
}
unlock_ip:
mutex_unlock(&card->ip_lock);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
qeth_l3_dev_ipato_enable_show,
qeth_l3_dev_ipato_enable_store);
static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool invert;
int rc = 0;
mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
} else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
goto out;
}
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
qeth_l3_update_ipato(card);
}
out:
mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
qeth_l3_dev_ipato_invert4_show,
qeth_l3_dev_ipato_invert4_store);
static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
char addr_str[INET6_ADDRSTRLEN];
int offset = 0;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
offset += sysfs_emit_at(buf, offset, "%s/%i\n",
addr_str, ipatoe->mask_bits);
}
mutex_unlock(&card->ip_lock);
return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
u8 *addr, unsigned int *mask_bits)
{
char *sep;
int rc;
/* Expected input pattern: %addr/%mask */
sep = strnchr(buf, INET6_ADDRSTRLEN, '/');
if (!sep)
return -EINVAL;
/* Terminate the %addr sub-string, and parse it: */
*sep = '\0';
rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
if (rc)
return rc;
rc = kstrtouint(sep + 1, 10, mask_bits);
if (rc)
return rc;
if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))
return -EINVAL;
return 0;
}
static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
unsigned int mask_bits;
u8 addr[16];
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
return rc;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
if (!ipatoe)
return -ENOMEM;
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
if (rc)
kfree(ipatoe);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
qeth_l3_dev_ipato_add4_show,
qeth_l3_dev_ipato_add4_store);
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
unsigned int mask_bits;
u8 addr[16];
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
qeth_l3_dev_ipato_del4_store);
static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool invert;
int rc = 0;
mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
} else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
goto out;
}
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
qeth_l3_update_ipato(card);
}
out:
mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
qeth_l3_dev_ipato_invert6_show,
qeth_l3_dev_ipato_invert6_store);
static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
}
static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
qeth_l3_dev_ipato_add6_show,
qeth_l3_dev_ipato_add6_store);
static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
qeth_l3_dev_ipato_del6_store);
static struct attribute *qeth_ipato_device_attrs[] = {
&dev_attr_ipato_enable.attr,
&dev_attr_ipato_invert4.attr,
&dev_attr_ipato_add4.attr,
&dev_attr_ipato_del4.attr,
&dev_attr_ipato_invert6.attr,
&dev_attr_ipato_add6.attr,
&dev_attr_ipato_del6.attr,
NULL,
};
static const struct attribute_group qeth_device_ipato_group = {
.name = "ipa_takeover",
.attrs = qeth_ipato_device_attrs,
};
static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
enum qeth_prot_versions proto,
enum qeth_ip_types type)
{
struct qeth_card *card = dev_get_drvdata(dev);
char addr_str[INET6_ADDRSTRLEN];
struct qeth_ipaddr *ipaddr;
int offset = 0;
int i;
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u, addr_str);
offset += sysfs_emit_at(buf, offset, "%s\n", addr_str);
}
mutex_unlock(&card->ip_lock);
return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
QETH_IP_TYPE_VIPA);
}
static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add,
size_t count, enum qeth_prot_versions proto)
{
struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
if (!rc)
rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_VIPA, proto);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
qeth_l3_dev_vipa_add4_show,
qeth_l3_dev_vipa_add4_store);
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
qeth_l3_dev_vipa_del4_store);
static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
QETH_IP_TYPE_VIPA);
}
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
qeth_l3_dev_vipa_add6_show,
qeth_l3_dev_vipa_add6_store);
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
qeth_l3_dev_vipa_del6_store);
static struct attribute *qeth_vipa_device_attrs[] = {
&dev_attr_vipa_add4.attr,
&dev_attr_vipa_del4.attr,
&dev_attr_vipa_add6.attr,
&dev_attr_vipa_del6.attr,
NULL,
};
static const struct attribute_group qeth_device_vipa_group = {
.name = "vipa",
.attrs = qeth_vipa_device_attrs,
};
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
QETH_IP_TYPE_RXIP);
}
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
u8 *addr)
{
__be32 ipv4_addr;
struct in6_addr ipv6_addr;
if (qeth_l3_string_to_ipaddr(buf, proto, addr))
return -EINVAL;
if (proto == QETH_PROT_IPV4) {
memcpy(&ipv4_addr, addr, sizeof(ipv4_addr));
if (ipv4_is_multicast(ipv4_addr)) {
QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
return -EINVAL;
}
} else if (proto == QETH_PROT_IPV6) {
memcpy(&ipv6_addr, addr, sizeof(ipv6_addr));
if (ipv6_addr_is_multicast(&ipv6_addr)) {
QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
return -EINVAL;
}
}
return 0;
}
static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
size_t count, enum qeth_prot_versions proto)
{
struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_RXIP, proto);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
qeth_l3_dev_rxip_add4_show,
qeth_l3_dev_rxip_add4_store);
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
qeth_l3_dev_rxip_del4_store);
static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
QETH_IP_TYPE_RXIP);
}
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
qeth_l3_dev_rxip_add6_show,
qeth_l3_dev_rxip_add6_store);
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
qeth_l3_dev_rxip_del6_store);
static struct attribute *qeth_rxip_device_attrs[] = {
&dev_attr_rxip_add4.attr,
&dev_attr_rxip_del4.attr,
&dev_attr_rxip_add6.attr,
&dev_attr_rxip_del6.attr,
NULL,
};
static const struct attribute_group qeth_device_rxip_group = {
.name = "rxip",
.attrs = qeth_rxip_device_attrs,
};
const struct attribute_group *qeth_l3_attr_groups[] = {
&qeth_l3_device_attr_group,
&qeth_device_ipato_group,
&qeth_device_vipa_group,
&qeth_device_rxip_group,
NULL,
};
| linux-master | drivers/s390/net/qeth_l3_sys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert ([email protected])
* Peter Tiedemann ([email protected])
* MPC additions :
* Belinda Thompson ([email protected])
* Andy Richter ([email protected])
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>
#include <asm/idals.h>
#include "fsm.h"
#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"
const char *dev_state_names[] = {
[DEV_STATE_STOPPED] = "Stopped",
[DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
[DEV_STATE_STARTWAIT_RX] = "StartWait RX",
[DEV_STATE_STARTWAIT_TX] = "StartWait TX",
[DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
[DEV_STATE_STOPWAIT_RX] = "StopWait RX",
[DEV_STATE_STOPWAIT_TX] = "StopWait TX",
[DEV_STATE_RUNNING] = "Running",
};
const char *dev_event_names[] = {
[DEV_EVENT_START] = "Start",
[DEV_EVENT_STOP] = "Stop",
[DEV_EVENT_RXUP] = "RX up",
[DEV_EVENT_TXUP] = "TX up",
[DEV_EVENT_RXDOWN] = "RX down",
[DEV_EVENT_TXDOWN] = "TX down",
[DEV_EVENT_RESTART] = "Restart",
};
const char *ctc_ch_event_names[] = {
[CTC_EVENT_IO_SUCCESS] = "ccw_device success",
[CTC_EVENT_IO_EBUSY] = "ccw_device busy",
[CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
[CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
[CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
[CTC_EVENT_ATTN] = "Status ATTN",
[CTC_EVENT_BUSY] = "Status BUSY",
[CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
[CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
[CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
[CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
[CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
[CTC_EVENT_UC_ZERO] = "Unit check ZERO",
[CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
[CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
[CTC_EVENT_MC_FAIL] = "Machine check failure",
[CTC_EVENT_MC_GOOD] = "Machine check operational",
[CTC_EVENT_IRQ] = "IRQ normal",
[CTC_EVENT_FINSTAT] = "IRQ final",
[CTC_EVENT_TIMER] = "Timer",
[CTC_EVENT_START] = "Start",
[CTC_EVENT_STOP] = "Stop",
/*
* additional MPC events
*/
[CTC_EVENT_SEND_XID] = "XID Exchange",
[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};
const char *ctc_ch_state_names[] = {
[CTC_STATE_IDLE] = "Idle",
[CTC_STATE_STOPPED] = "Stopped",
[CTC_STATE_STARTWAIT] = "StartWait",
[CTC_STATE_STARTRETRY] = "StartRetry",
[CTC_STATE_SETUPWAIT] = "SetupWait",
[CTC_STATE_RXINIT] = "RX init",
[CTC_STATE_TXINIT] = "TX init",
[CTC_STATE_RX] = "RX",
[CTC_STATE_TX] = "TX",
[CTC_STATE_RXIDLE] = "RX idle",
[CTC_STATE_TXIDLE] = "TX idle",
[CTC_STATE_RXERR] = "RX error",
[CTC_STATE_TXERR] = "TX error",
[CTC_STATE_TERM] = "Terminating",
[CTC_STATE_DTERM] = "Restarting",
[CTC_STATE_NOTOP] = "Not operational",
/*
* additional MPC states
*/
[CH_XID0_PENDING] = "Pending XID0 Start",
[CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
[CH_XID7_PENDING] = "Pending XID7 P1 Start",
[CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
[CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
[CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
[CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
};
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
/*
* ----- static ctcm actions for channel statemachine -----
*
*/
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
/*
* ----- static ctcmpc actions for ctcmpc channel statemachine -----
*
*/
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
/*
 * Check the return code of a preceding ccw_device call (halt_IO etc.).
 *
 * ch   The channel the error belongs to.
 * rc   The (non-zero) return code to inspect.
 * msg  A short description of the failed operation, used in messages.
*/
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s: %04x\n",
CTCM_FUNTAIL, ch->id, msg, rc);
switch (rc) {
case -EBUSY:
pr_info("%s: The communication peer is busy\n",
ch->id);
fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
break;
case -ENODEV:
pr_err("%s: The specified target device is not valid\n",
ch->id);
fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
break;
default:
pr_err("An I/O operation resulted in error %04x\n",
rc);
fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
}
}
void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
struct sk_buff *skb;
CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
while ((skb = skb_dequeue(q))) {
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
/*
* NOP action for statemachines
*/
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
* Actions for channel - statemachines.
*/
/*
 * Normal data has been sent. Free the corresponding
 * skb (it is in io_queue), reset dev->tbusy and
 * revert to idle state.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct sk_buff *skb;
int first = 1;
int i;
unsigned long duration;
unsigned long done_stamp = jiffies;
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
duration = done_stamp - ch->prof.send_stamp;
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
if (ch->irb->scsw.cmd.count != 0)
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): TX not complete, remaining %d bytes",
CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
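		/* account the 2-byte block length header only once
		 * per completed transfer, not once per packet */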
if (first) {
priv->stats.tx_bytes += 2;
first = 0;
}
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
}
spin_lock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[4]);
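	/*
	 * If more packets were collected while this transfer was in
	 * flight, chain them into a single trans_skb: a 2-byte block
	 * length header (whose value includes the header itself)
	 * followed by the concatenated packets, then start the next
	 * write immediately.
	 */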
if (ch->collect_len > 0) {
int rc;
if (ctcm_checkalloc_buffer(ch)) {
spin_unlock(&ch->collect_lock);
return;
}
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
if (ch->prof.maxmulti < (ch->collect_len + 2))
ch->prof.maxmulti = ch->collect_len + 2;
if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
i = 0;
while ((skb = skb_dequeue(&ch->collect_queue))) {
skb_copy_from_linear_data(skb,
skb_put(ch->trans_skb, skb->len), skb->len);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
i++;
}
ch->collect_len = 0;
spin_unlock(&ch->collect_lock);
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
ch->prof.doios_multi++;
if (rc != 0) {
priv->stats.tx_dropped += i;
priv->stats.tx_errors += i;
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "chained TX");
}
} else {
spin_unlock(&ch->collect_lock);
fsm_newstate(fi, CTC_STATE_TXIDLE);
}
ctcm_clear_busy_do(dev);
}
/*
* Initial data is sent.
* Notify device statemachine that we are up and
* running.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CTC_STATE_TXIDLE);
fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}
/*
 * Got normal data, check it for sanity, queue it up, allocate a new
 * buffer, trigger the bottom half, and initiate the next read.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
struct sk_buff *skb = ch->trans_skb;
__u16 block_len = *((__u16 *)skb->data);
int check_len;
int rc;
fsm_deltimer(&ch->timer);
if (len < 8) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got packet with length %d < 8\n",
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto again;
}
if (len > ch->max_bufsize) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got packet with length %d > %d\n",
CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto again;
}
/*
* VM TCP seems to have a bug sending 2 trailing bytes of garbage.
*/
switch (ch->protocol) {
case CTCM_PROTO_S390:
case CTCM_PROTO_OS390:
check_len = block_len + 2;
break;
default:
check_len = block_len;
break;
}
if ((len < block_len) || (len > check_len)) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s): got block length %d != rx length %d\n",
CTCM_FUNTAIL, dev->name, block_len, len);
if (do_debug)
ctcmpc_dump_skb(skb, 0);
*((__u16 *)skb->data) = len;
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto again;
}
if (block_len > 2) {
*((__u16 *)skb->data) = block_len - 2;
ctcm_unpack_skb(ch, skb);
}
again:
skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(skb);
skb->len = 0;
if (ctcm_checkalloc_buffer(ch))
return;
ch->ccw[1].count = ch->max_bufsize;
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "normal RX");
}
/*
 * Initialize the connection by sending a __u16 block length header
 * (CTCM_INITIAL_BLOCKLEN).
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
int rc;
struct channel *ch = arg;
int fsmstate = fsm_getstate(fi);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) : %02x",
CTCM_FUNTAIL, ch->id, fsmstate);
ch->sense_rc = 0; /* reset unit check report control */
if (fsmstate == CTC_STATE_TXIDLE)
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): remote side issued READ?, init.\n",
CTCM_FUNTAIL, ch->id);
fsm_deltimer(&ch->timer);
if (ctcm_checkalloc_buffer(ch))
return;
if ((fsmstate == CTC_STATE_SETUPWAIT) &&
(ch->protocol == CTCM_PROTO_OS390)) {
/* OS/390 resp. z/OS */
if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
CTC_EVENT_TIMER, ch);
chx_rxidle(fi, event, arg);
} else {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
fsm_newstate(fi, CTC_STATE_TXIDLE);
fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
}
return;
}
/*
* Don't setup a timer for receiving the initial RX frame
* if in compatibility mode, since VM TCP delays the initial
* frame until it has some data to send.
*/
if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
(ch->protocol != CTCM_PROTO_S390))
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
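	/* handshake: send just the initial block length header */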
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
ch->ccw[1].count = 2; /* Transfer only length */
fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0) {
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CTC_STATE_SETUPWAIT);
ctcm_ccw_check_rc(ch, rc, "init IO");
}
/*
 * In compatibility mode no timer was set up above, so we also
 * signal the RX channel up immediately. This enables us to send
 * packets early, which in turn usually triggers a reply from
 * VM TCP that brings the RX channel to its final state.
*/
if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
(ch->protocol == CTCM_PROTO_S390)) {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
}
}
/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
__u16 buflen;
int rc;
fsm_deltimer(&ch->timer);
buflen = *((__u16 *)ch->trans_skb->data);
CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
__func__, dev->name, buflen);
if (buflen >= CTCM_INITIAL_BLOCKLEN) {
if (ctcm_checkalloc_buffer(ch))
return;
ch->ccw[1].count = ch->max_bufsize;
fsm_newstate(fi, CTC_STATE_RXIDLE);
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0) {
fsm_newstate(fi, CTC_STATE_RXINIT);
ctcm_ccw_check_rc(ch, rc, "initial RX");
} else
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
} else {
CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
__func__, dev->name,
buflen, CTCM_INITIAL_BLOCKLEN);
chx_firstio(fi, event, arg);
}
}
/*
* Set channel into extended mode.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
int rc;
unsigned long saveflags = 0;
int timeout = CTCM_TIME_5_SEC;
fsm_deltimer(&ch->timer);
if (IS_MPC(ch)) {
timeout = 1500;
CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
}
fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
fsm_newstate(fi, CTC_STATE_SETUPWAIT);
CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic from a static
	 * point of view => ignore the sparse warnings here. */
rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
if (event == CTC_EVENT_TIMER) /* see above comments */
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CTC_STATE_STARTWAIT);
ctcm_ccw_check_rc(ch, rc, "set Mode");
} else
ch->retry = 0;
}
/*
* Setup channel.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
unsigned long saveflags;
int rc;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
if (ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
}
if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ch->ccw[1].cmd_code = CCW_CMD_READ;
ch->ccw[1].flags = CCW_FLAG_SLI;
ch->ccw[1].count = 0;
} else {
ch->ccw[1].cmd_code = CCW_CMD_WRITE;
ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[1].count = 0;
}
if (ctcm_checkalloc_buffer(ch)) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %s trans_skb alloc delayed "
"until first transfer",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX");
}
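	/*
	 * Build the basic channel program: ccw[0] PREPARE, ccw[1]
	 * READ/WRITE (set up above), ccw[2] NOOP delivering the joint
	 * CE+DE. The triplet is copied to ccw[3..5] below, which is the
	 * program that ctcm_chx_txretry and the MPC sweep code restart
	 * from.
	 */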
ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[0].count = 0;
ch->ccw[0].cda = 0;
ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
ch->ccw[2].flags = CCW_FLAG_SLI;
ch->ccw[2].count = 0;
ch->ccw[2].cda = 0;
memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
ch->ccw[4].cda = 0;
ch->ccw[4].flags &= ~CCW_FLAG_IDA;
fsm_newstate(fi, CTC_STATE_STARTWAIT);
fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
rc = ccw_device_halt(ch->cdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
if (rc != -EBUSY)
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
}
}
/*
 * Shut down a channel.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
unsigned long saveflags = 0;
int rc;
int oldstate;
fsm_deltimer(&ch->timer);
if (IS_MPC(ch))
fsm_deltimer(&ch->sweep_timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic from a static
	 * point of view => ignore the sparse warnings here. */
oldstate = fsm_getstate(fi);
fsm_newstate(fi, CTC_STATE_TERM);
rc = ccw_device_halt(ch->cdev, 0);
if (event == CTC_EVENT_STOP)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
/* see remark above about conditional locking */
if (rc != 0 && rc != -EBUSY) {
fsm_deltimer(&ch->timer);
if (event != CTC_EVENT_STOP) {
fsm_newstate(fi, oldstate);
ctcm_ccw_check_rc(ch, rc, (char *)__func__);
}
}
}
/*
 * Cleanup helper for ctcm_chx_fail and ctcm_chx_stopped:
 * clean up the channel's queues and notify the interface statemachine.
*
* fi An instance of a channel statemachine.
* state The next state (depending on caller).
* ch The channel to operate on.
*/
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
struct channel *ch)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): %s[%d]\n",
CTCM_FUNTAIL, dev->name, ch->id, state);
fsm_deltimer(&ch->timer);
if (IS_MPC(ch))
fsm_deltimer(&ch->sweep_timer);
fsm_newstate(fi, state);
if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb_any(ch->trans_skb);
ch->trans_skb = NULL;
}
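	/* reset the transport header segment/sequence counters (used by MPC) */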
ch->th_seg = 0x00;
ch->th_seq_num = 0x00;
if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
skb_queue_purge(&ch->io_queue);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
ctcm_purge_skb_queue(&ch->io_queue);
if (IS_MPC(ch))
ctcm_purge_skb_queue(&ch->sweep_queue);
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
ch->collect_len = 0;
spin_unlock(&ch->collect_lock);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
}
}
/*
* A channel has successfully been halted.
 * Clean up its queue and notify the interface statemachine.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
/*
 * A stop command from the device statemachine arrived while we are
 * in not-operational mode. Set the state to stopped.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
fsm_newstate(fi, CTC_STATE_STOPPED);
}
/*
* A machine check for no path, not operational status or gone device has
* happened.
* Cleanup queue and notify interface statemachine.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
/*
* Handle error during setup of channel.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
/*
	 * Special case: got UC_RCRESET on setmode.
	 * This means the remote side is not set up yet. In this case
	 * simply retry after a short delay (CTCM_TIME_5_SEC timer)...
*/
if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
((event == CTC_EVENT_UC_RCRESET) ||
(event == CTC_EVENT_UC_RSRESET))) {
fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
if (!IS_MPC(ch) &&
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
int rc = ccw_device_halt(ch->cdev, 0);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc,
"HaltIO in chx_setuperr");
}
return;
}
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
fsm_getstate_str(fi));
if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
fsm_newstate(fi, CTC_STATE_TXERR);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
}
}
/*
* Restart a channel after an error.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
unsigned long saveflags = 0;
int oldstate;
int rc;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s: %s[%d] of %s\n",
CTCM_FUNTAIL, ch->id, event, dev->name);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
oldstate = fsm_getstate(fi);
fsm_newstate(fi, CTC_STATE_STARTWAIT);
if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is a known problem for sparse
	 * because it is nondeterministic from a static point of view.
	 * The warnings should be ignored here. */
rc = ccw_device_halt(ch->cdev, 0);
if (event == CTC_EVENT_TIMER)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
if (rc != -EBUSY) {
fsm_deltimer(&ch->timer);
fsm_newstate(fi, oldstate);
}
ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
}
}
/*
* Handle error during RX initial handshake (exchange of
* 0-length block header)
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
if (event == CTC_EVENT_TIMER) {
if (!IS_MPCDEV(dev))
/* TODO : check if MPC deletes timer somewhere */
fsm_deltimer(&ch->timer);
if (ch->retry++ < 3)
ctcm_chx_restart(fi, event, arg);
else {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
} else {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
ctc_ch_event_names[event], fsm_getstate_str(fi));
dev_warn(&dev->dev,
"Initialization failed with RX/TX init handshake "
"error %s\n", ctc_ch_event_names[event]);
}
}
/*
* Notify device statemachine if we gave up initialization
* of RX channel.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): RX %s busy, init. fail",
CTCM_FUNTAIL, dev->name, ch->id);
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
/*
* Handle RX Unit check remote reset (remote disconnected)
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct channel *ch2;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s: %s: remote disconnect - re-init ...",
CTCM_FUNTAIL, dev->name);
fsm_deltimer(&ch->timer);
/*
* Notify device statemachine
*/
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
fsm_newstate(fi, CTC_STATE_DTERM);
ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
ccw_device_halt(ch->cdev, 0);
ccw_device_halt(ch2->cdev, 0);
}
/*
* Handle error during TX channel initialization.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
if (event == CTC_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
if (ch->retry++ < 3)
ctcm_chx_restart(fi, event, arg);
else {
fsm_newstate(fi, CTC_STATE_TXERR);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
}
} else {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
ctc_ch_event_names[event], fsm_getstate_str(fi));
dev_warn(&dev->dev,
"Initialization failed with RX/TX init handshake "
"error %s\n", ctc_ch_event_names[event]);
}
}
/*
* Handle TX timeout by retrying operation.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct sk_buff *skb;
CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
struct mpc_group *gptr = priv->mpcg;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
"%s: %s: retries exceeded",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
/* call restart if not MPC or if MPC and mpcg fsm is ready.
use gptr as mpc indicator */
if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
ctcm_chx_restart(fi, event, arg);
goto done;
}
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s : %s: retry %d",
CTCM_FUNTAIL, ch->id, ch->retry);
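	/*
	 * Retransmit the head of io_queue using the secondary channel
	 * program at ccw[3]; ccw[4] gets a freshly normalized data
	 * address (IDAL) for the skb data.
	 */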
skb = skb_peek(&ch->io_queue);
if (skb) {
int rc = 0;
unsigned long saveflags = 0;
clear_normalized_cda(&ch->ccw[4]);
ch->ccw[4].count = skb->len;
if (set_normalized_cda(&ch->ccw[4], skb->data)) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
"%s: %s: IDAL alloc failed",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
ctcm_chx_restart(fi, event, arg);
goto done;
}
fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for sparse
		 * because it is nondeterministic from a static point of view.
		 * The warnings should be ignored here. */
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[3],
sizeof(struct ccw1) * 3);
rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
if (event == CTC_EVENT_TIMER)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
saveflags);
if (rc != 0) {
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
ctcm_purge_skb_queue(&ch->io_queue);
}
}
done:
return;
}
/*
* Handle fatal errors during an I/O command.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
int rd = CHANNEL_DIRECTION(ch->flags);
fsm_deltimer(&ch->timer);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error",
CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
if (IS_MPC(ch)) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
}
if (rd == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
fsm_newstate(fi, CTC_STATE_TXERR);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
}
}
/*
* The ctcm statemachine for a channel.
*/
const fsm_node ch_fsm[] = {
{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};
int ch_fsm_len = ARRAY_SIZE(ch_fsm);
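/*
 * ch_fsm/ch_fsm_len (and ctcmpc_ch_fsm/mpc_ch_fsm_len further below)
 * describe the channel statemachines; they are presumably handed to
 * the generic fsm helpers from fsm.h by the channel setup code.
 */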
/*
 * MPC actions for the mpc channel statemachine.
 * Handling the MPC protocol requires an extra statemachine and
 * actions, which are prefixed ctcmpc_.
 * The ctc_ch_states and ctc_ch_state_names as well as the
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 * extended by some MPC-specific elements.
*/
/*
* Actions for mpc channel statemachine.
*/
/*
 * Normal data has been sent. Free the corresponding
 * skb (it is in io_queue), reset dev->tbusy and
 * revert to idle state.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *skb;
int first = 1;
int i;
__u32 data_space;
unsigned long duration;
struct sk_buff *peekskb;
int rc;
struct th_header *header;
struct pdu *p_header;
unsigned long done_stamp = jiffies;
CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
__func__, dev->name, smp_processor_id());
duration = done_stamp - ch->prof.send_stamp;
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
if (ch->irb->scsw.cmd.count != 0)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s(%s): TX not complete, remaining %d bytes",
CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
if (first) {
priv->stats.tx_bytes += 2;
first = 0;
}
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
}
spin_lock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[4]);
if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
spin_unlock(&ch->collect_lock);
fsm_newstate(fi, CTC_STATE_TXIDLE);
goto done;
}
if (ctcm_checkalloc_buffer(ch)) {
spin_unlock(&ch->collect_lock);
goto done;
}
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
i = 0;
p_header = NULL;
data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
" data_space:%04x\n",
__func__, data_space);
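	/*
	 * Chain the collected packets MPC-style: the PDU header at the
	 * front of each copied packet gets its flag rewritten (0x60 for
	 * SNAP frames, 0x20 otherwise, PDU_LAST added to the final one),
	 * and a single TH header carrying the next th_seq_num is
	 * prepended to the whole block further down.
	 */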
while ((skb = skb_dequeue(&ch->collect_queue))) {
skb_put_data(ch->trans_skb, skb->data, skb->len);
p_header = (struct pdu *)
(skb_tail_pointer(ch->trans_skb) - skb->len);
p_header->pdu_flag = 0x00;
if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
p_header->pdu_flag |= 0x60;
else
p_header->pdu_flag |= 0x20;
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: pdu header and data for up"
" to 32 bytes sent to vtam\n", __func__);
CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
ch->collect_len -= skb->len;
data_space -= skb->len;
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		/* stop when the queue is drained or the next packet no
		 * longer fits into the remaining data space */
		if (!peekskb || peekskb->len > data_space)
			break;
i++;
}
/* p_header points to the last one we handled */
if (p_header)
p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
memset(header, 0, TH_HEADER_LENGTH);
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
__func__, ch->th_seq_num);
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
"data to vtam from collect_q\n", __func__);
CTCM_D3_DUMP((char *)ch->trans_skb->data,
min_t(int, ch->trans_skb->len, 50));
spin_unlock(&ch->collect_lock);
clear_normalized_cda(&ch->ccw[1]);
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
(void *)(unsigned long)ch->ccw[1].cda,
ch->trans_skb->data);
ch->ccw[1].count = ch->max_bufsize;
if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
dev_kfree_skb_any(ch->trans_skb);
ch->trans_skb = NULL;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s: %s: IDAL alloc failed",
CTCM_FUNTAIL, ch->id);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
return;
}
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
(void *)(unsigned long)ch->ccw[1].cda,
ch->trans_skb->data);
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
ch->prof.send_stamp = jiffies;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
ch->prof.doios_multi++;
if (rc != 0) {
priv->stats.tx_dropped += i;
priv->stats.tx_errors += i;
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "chained TX");
}
done:
ctcm_clear_busy(dev);
return;
}
/*
 * Got normal data, check it for sanity, queue it up, allocate a new
 * buffer, trigger the bottom half, and initiate the next read.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *skb = ch->trans_skb;
struct sk_buff *new_skb;
unsigned long saveflags = 0; /* avoids compiler warning */
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
CTCM_FUNTAIL, dev->name, smp_processor_id(),
ch->id, ch->max_bufsize, len);
fsm_deltimer(&ch->timer);
if (skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): TRANS_SKB = NULL",
CTCM_FUNTAIL, dev->name);
goto again;
}
if (len < TH_HEADER_LENGTH) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): packet length %d too short",
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
} else {
/* must have valid th header or game over */
__u32 block_len = len;
len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
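		/*
		 * block_len keeps the full received length, len is cut
		 * down to just the XID portion. While the group is still
		 * negotiating (default case below) only that XID portion
		 * is copied; in FLOWC/READY state the whole block is
		 * passed on.
		 */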
new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
if (new_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto again;
}
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_RESET:
case MPCG_STATE_INOP:
dev_kfree_skb_any(new_skb);
break;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
skb_put_data(new_skb, skb->data, block_len);
skb_queue_tail(&ch->io_queue, new_skb);
tasklet_schedule(&ch->ch_tasklet);
break;
default:
skb_put_data(new_skb, skb->data, len);
skb_queue_tail(&ch->io_queue, new_skb);
tasklet_hi_schedule(&ch->ch_tasklet);
break;
}
}
again:
switch (fsm_getstate(grp->fsm)) {
int rc, dolock;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
if (ctcm_checkalloc_buffer(ch))
break;
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = ch->max_bufsize;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
sizeof(struct ccw1) * 3);
dolock = !in_hardirq();
if (dolock)
spin_lock_irqsave(
get_ccwdev_lock(ch->cdev), saveflags);
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (dolock) /* see remark about conditional locking */
spin_unlock_irqrestore(
get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "normal RX");
break;
default:
break;
}
CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
__func__, dev->name, ch, ch->id);
}
/*
 * Initialize the connection by sending a __u16 block length header
 * (CTCM_INITIAL_BLOCKLEN).
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *gptr = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
__func__, ch->id, ch);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
fsm_getstate(gptr->fsm), ch->protocol);
if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
fsm_deltimer(&ch->timer);
if (ctcm_checkalloc_buffer(ch))
goto done;
switch (fsm_getstate(fi)) {
case CTC_STATE_STARTRETRY:
case CTC_STATE_SETUPWAIT:
if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ctcmpc_chx_rxidle(fi, event, arg);
} else {
fsm_newstate(fi, CTC_STATE_TXIDLE);
fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
}
goto done;
default:
break;
}
fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done:
CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
__func__, ch->id, ch);
return;
}
/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
*
* fi An instance of a channel statemachine.
* event The event, just happened.
* arg Generic pointer, casted from channel * upon call.
*/
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
int rc;
unsigned long saveflags = 0; /* avoids compiler warning */
fsm_deltimer(&ch->timer);
CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
__func__, ch->id, dev->name, smp_processor_id(),
fsm_getstate(fi), fsm_getstate(grp->fsm));
fsm_newstate(fi, CTC_STATE_RXIDLE);
/* XID processing complete */
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
if (ctcm_checkalloc_buffer(ch))
goto done;
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = ch->max_bufsize;
CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
if (event == CTC_EVENT_START)
/* see remark about conditional locking */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (event == CTC_EVENT_START)
spin_unlock_irqrestore(
get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
fsm_newstate(fi, CTC_STATE_RXINIT);
ctcm_ccw_check_rc(ch, rc, "initial RX");
goto done;
}
break;
default:
break;
}
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
return;
}
/*
* ctcmpc channel FSM action
* called from several points in ctcmpc_ch_fsm
* ctcmpc only
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
__func__, dev->name, ch->id, ch, smp_processor_id(),
fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
/* ok..start yside xid exchanges */
if (!ch->in_mpcgroup)
break;
if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
fsm_deltimer(&grp->timer);
fsm_addtimer(&grp->timer,
MPC_XID_TIMEOUT_VALUE,
MPCG_EVENT_TIMER, dev);
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
/* attn rcvd before xid0 processed via bh */
fsm_newstate(ch->fsm, CH_XID7_PENDING1);
break;
case MPCG_STATE_XID2INITX:
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID0IOWAIX:
/* attn rcvd before xid0 processed on ch
but mid-xid0 processing for group */
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
fsm_newstate(ch->fsm, CH_XID7_PENDING1);
break;
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
switch (fsm_getstate(ch->fsm)) {
case CH_XID7_PENDING:
fsm_newstate(ch->fsm, CH_XID7_PENDING1);
break;
case CH_XID7_PENDING2:
fsm_newstate(ch->fsm, CH_XID7_PENDING3);
break;
}
fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
break;
}
return;
}
/*
* ctcmpc channel FSM action
* called from one point in ctcmpc_ch_fsm
* ctcmpc only
*/
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
__func__, dev->name, ch->id,
fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
fsm_deltimer(&ch->timer);
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. Start yside xid exchanges. */
		/* Only one attn-busy is received at a time, so we must */
		/* not change state each time. */
grp->changed_side = 1;
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
break;
case MPCG_STATE_XID2INITW:
if (grp->changed_side == 1) {
grp->changed_side = 2;
break;
}
/* process began via call to establish_conn */
/* so must report failure instead of reverting */
/* back to ready-for-xid passive state */
if (grp->estconnfunc)
goto done;
/* this attnbusy is NOT the result of xside xid */
/* collisions so yside must have been triggered */
/* by an ATTN that was not intended to start XID */
/* processing. Revert back to ready-for-xid and */
/* wait for ATTN interrupt to signal xid start */
if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
fsm_deltimer(&grp->timer);
goto done;
}
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for the second
		   channel. Send the yside xid for the second channel.
*/
if (grp->changed_side == 1) {
grp->changed_side = 2;
break;
}
fallthrough;
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
default:
		/* Multiple attn-busy events indicate the sides are too far */
		/* out of sync and are certainly not being received as part */
		/* of valid mpc group negotiations. */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
if (grp->changed_side == 1) {
fsm_deltimer(&grp->timer);
fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
MPCG_EVENT_TIMER, dev);
}
if (ch->in_mpcgroup)
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
else
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): channel %s not added to group",
CTCM_FUNTAIL, dev->name, ch->id);
done:
return;
}
/*
* ctcmpc channel FSM action
* called from several points in ctcmpc_ch_fsm
* ctcmpc only
*/
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
return;
}
/*
* ctcmpc channel FSM action
* called from several points in ctcmpc_ch_fsm
* ctcmpc only
*/
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
struct channel *ach = arg;
struct net_device *dev = ach->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *wch = priv->channel[CTCM_WRITE];
struct channel *rch = priv->channel[CTCM_READ];
struct sk_buff *skb;
struct th_sweep *header;
int rc = 0;
unsigned long saveflags = 0;
CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ach, ach->id);
if (grp->in_sweep == 0)
goto done;
CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
__func__, wch->th_seq_num);
CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
__func__, rch->th_seq_num);
if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
/* give the previous IO time to complete */
fsm_addtimer(&wch->sweep_timer,
200, CTC_EVENT_RSWEEP_TIMER, wch);
goto done;
}
skb = skb_dequeue(&wch->sweep_queue);
if (!skb)
goto done;
if (set_normalized_cda(&wch->ccw[4], skb->data)) {
grp->in_sweep = 0;
ctcm_clear_busy_do(dev);
dev_kfree_skb_any(skb);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
} else {
refcount_inc(&skb->users);
skb_queue_tail(&wch->io_queue, skb);
}
/* send out the sweep */
wch->ccw[4].count = skb->len;
header = (struct th_sweep *)skb->data;
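	/*
	 * Track the outstanding sweep requests/responses; once both
	 * counters reach zero the sweep is finished and the TH sequence
	 * numbers of both channels are reset further down.
	 */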
switch (header->th.th_ch_flag) {
case TH_SWEEP_REQ:
grp->sweep_req_pend_num--;
break;
case TH_SWEEP_RESP:
grp->sweep_rsp_pend_num--;
break;
}
header->sw.th_last_seq = wch->th_seq_num;
CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
fsm_newstate(wch->fsm, CTC_STATE_TX);
spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
wch->prof.send_stamp = jiffies;
rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
if ((grp->sweep_req_pend_num == 0) &&
(grp->sweep_rsp_pend_num == 0)) {
grp->in_sweep = 0;
rch->th_seq_num = 0x00;
wch->th_seq_num = 0x00;
ctcm_clear_busy_do(dev);
}
CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
__func__, wch->th_seq_num, rch->th_seq_num);
if (rc != 0)
ctcm_ccw_check_rc(wch, rc, "send sweep");
done:
return;
}
/*
* The ctcmpc statemachine for a channel.
*/
const fsm_node ctcmpc_ch_fsm[] = {
{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
{ CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
{ CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
{ CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
{ CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
{ CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
{ CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
{ CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
{ CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
{ CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
{ CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
{ CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
{ CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
{ CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
{ CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};
int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
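/*
 * Note: each fsm_node above maps a (state, event) pair to an action
 * handler; the table is dispatched by the generic s390 net FSM helpers
 * (fsm.h).  Illustrative sketch only (not part of the driver), assuming
 * the init_fsm()/fsm_event() helpers are used as elsewhere in ctcm:
 *
 *	fi = init_fsm("chfsm", ctc_ch_state_names, ctc_ch_event_names,
 *		      CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
 *		      ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
 *	fsm_event(fi, CTC_EVENT_START, ch);
 */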
/*
* Actions for interface - statemachine.
*/
/*
* Startup channels by sending CTC_EVENT_START to each channel.
*
* fi An instance of an interface statemachine.
 * event The event that just happened.
 * arg Generic pointer, cast from struct net_device * upon call.
*/
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
int direction;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_deltimer(&priv->restart_timer);
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
if (IS_MPC(priv))
priv->mpcg->channels_terminating = 0;
for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_START, ch);
}
}
/*
* Shutdown channels by sending CTC_EVENT_STOP to each channel.
*
* fi An instance of an interface statemachine.
 * event The event that just happened.
 * arg Generic pointer, cast from struct net_device * upon call.
*/
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
int direction;
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00;
CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
__func__, ch->th_seq_num);
}
if (IS_MPC(priv))
fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}
static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
int restart_timer;
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(TRACE, dev, "");
if (IS_MPC(priv)) {
restart_timer = CTCM_TIME_1_SEC;
} else {
restart_timer = CTCM_TIME_5_SEC;
}
dev_info(&dev->dev, "Restarting device\n");
dev_action_stop(fi, event, arg);
fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
if (IS_MPC(priv))
fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
/* going back into start sequence too quickly can */
/* result in the other side becoming unreachable due */
/* to sense reported when IO is aborted */
fsm_addtimer(&priv->restart_timer, restart_timer,
DEV_EVENT_START, dev);
}
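/*
 * Restart sequence above: stop both channels via dev_action_stop(), post
 * DEV_EVENT_STOP to the interface statemachine, and only re-arm
 * DEV_EVENT_START through restart_timer (1s for MPC, 5s otherwise) so the
 * peer is not hit with a new start while it still reports sense data for
 * the aborted I/O.
 */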
/*
* Called from channel statemachine
* when a channel is up and running.
*
* fi An instance of an interface statemachine.
 * event The event that just happened.
 * arg Generic pointer, cast from struct net_device * upon call.
*/
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
int dev_stat = fsm_getstate(fi);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
dev->name, dev->ml_priv, dev_stat, event);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
if (event == DEV_EVENT_RXUP)
fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
else
fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
break;
case DEV_STATE_STARTWAIT_RX:
if (event == DEV_EVENT_RXUP) {
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(&dev->dev,
"Connected with remote side\n");
ctcm_clear_busy(dev);
}
break;
case DEV_STATE_STARTWAIT_TX:
if (event == DEV_EVENT_TXUP) {
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(&dev->dev,
"Connected with remote side\n");
ctcm_clear_busy(dev);
}
break;
case DEV_STATE_STOPWAIT_TX:
if (event == DEV_EVENT_RXUP)
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
break;
case DEV_STATE_STOPWAIT_RX:
if (event == DEV_EVENT_TXUP)
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
break;
}
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXUP)
mpc_channel_action(priv->channel[CTCM_READ],
CTCM_READ, MPC_CHANNEL_ADD);
else
mpc_channel_action(priv->channel[CTCM_WRITE],
CTCM_WRITE, MPC_CHANNEL_ADD);
}
}
/*
* Called from device statemachine
 * when a channel has been shut down.
*
* fi An instance of an interface statemachine.
 * event The event that just happened.
 * arg Generic pointer, cast from struct net_device * upon call.
*/
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
if (event == DEV_EVENT_TXDOWN)
fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
else
fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
break;
case DEV_STATE_STARTWAIT_RX:
if (event == DEV_EVENT_TXDOWN)
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
break;
case DEV_STATE_STARTWAIT_TX:
if (event == DEV_EVENT_RXDOWN)
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
break;
case DEV_STATE_STOPWAIT_RXTX:
if (event == DEV_EVENT_TXDOWN)
fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
else
fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
break;
case DEV_STATE_STOPWAIT_RX:
if (event == DEV_EVENT_RXDOWN)
fsm_newstate(fi, DEV_STATE_STOPPED);
break;
case DEV_STATE_STOPWAIT_TX:
if (event == DEV_EVENT_TXDOWN)
fsm_newstate(fi, DEV_STATE_STOPPED);
break;
}
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXDOWN)
mpc_channel_action(priv->channel[CTCM_READ],
CTCM_READ, MPC_CHANNEL_REMOVE);
else
mpc_channel_action(priv->channel[CTCM_WRITE],
CTCM_WRITE, MPC_CHANNEL_REMOVE);
}
}
const fsm_node dev_fsm[] = {
{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
{ DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
{ DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
{ DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
{ DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
};
int dev_fsm_len = ARRAY_SIZE(dev_fsm);
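/*
 * The device statemachine above interlocks with the per-channel FSMs:
 * STARTWAIT_* states collect DEV_EVENT_RXUP/TXUP until both directions
 * are up (-> DEV_STATE_RUNNING), while STOPWAIT_* states collect
 * RXDOWN/TXDOWN until both directions are down (-> DEV_STATE_STOPPED).
 */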
/* --- This is the END my friend --- */
| linux-master | drivers/s390/net/ctcm_fsms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <net/switchdev.h>
#include <asm/chsc.h>
#include <asm/css_chars.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
if (retcode)
QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
switch (retcode) {
case IPA_RC_SUCCESS:
rc = 0;
break;
case IPA_RC_L2_UNSUPPORTED_CMD:
rc = -EOPNOTSUPP;
break;
case IPA_RC_L2_ADDR_TABLE_FULL:
rc = -ENOSPC;
break;
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
rc = -EADDRINUSE;
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
rc = -EADDRNOTAVAIL;
break;
case IPA_RC_L2_MAC_NOT_FOUND:
rc = -ENOENT;
break;
default:
rc = -EIO;
break;
}
return rc;
}
static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
IPA_DATA_SIZEOF(setdelmac));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setdelmac.mac_length = ETH_ALEN;
ether_addr_copy(cmd->data.setdelmac.mac, mac);
return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
}
static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
{
int rc;
QETH_CARD_TEXT(card, 2, "L2Setmac");
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
dev_info(&card->gdev->dev,
"MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
case -EADDRINUSE:
dev_warn(&card->gdev->dev,
"MAC address %pM already exists\n", mac);
break;
case -EADDRNOTAVAIL:
dev_warn(&card->gdev->dev,
"MAC address %pM is not authorized\n", mac);
break;
}
}
return rc;
}
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
int rc;
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EADDRINUSE)
QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
ether_addr_to_u64(mac), CARD_DEVID(card));
else if (rc)
QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
ether_addr_to_u64(mac), CARD_DEVID(card), rc);
return rc;
}
static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
{
enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
int rc;
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
ether_addr_to_u64(mac), CARD_DEVID(card), rc);
return rc;
}
static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
int i;
hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
hash_del(&mac->hnode);
kfree(mac);
}
}
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len)
{
int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
hdr->hdr.l2.pkt_length = data_len;
if (skb_is_gso(skb)) {
hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL)
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
}
/* set byte 3 to casting flags */
if (cast_type == RTN_MULTICAST)
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
else if (cast_type == RTN_BROADCAST)
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
else
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
/* VSWITCH relies on the VLAN information to be present in the QDIO header */
if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
}
}
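/*
 * Summary of the L2 header built above: packet length and header id
 * (TSO vs. plain layer 2), optional TX checksum flags, a cast-type flag
 * in flags[2], and - if the skb carries an 802.1Q tag - the VLAN id, so
 * that the VSWITCH can see the VLAN without parsing the frame payload.
 */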
static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
{
if (retcode)
QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
switch (retcode) {
case IPA_RC_SUCCESS:
return 0;
case IPA_RC_L2_INVALID_VLAN_ID:
return -EINVAL;
case IPA_RC_L2_DUP_VLAN_ID:
return -EEXIST;
case IPA_RC_L2_VLAN_ID_NOT_FOUND:
return -ENOENT;
case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
return -EPERM;
default:
return -EIO;
}
}
static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 2, "L2sdvcb");
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
cmd->data.setdelvlan.vlan_id,
CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
}
return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
}
static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
IPA_DATA_SIZEOF(setdelvlan));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setdelvlan.vlan_id = i;
return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
}
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
return 0;
return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
}
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (!vid)
return 0;
return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
static void qeth_l2_set_pnso_mode(struct qeth_card *card,
enum qeth_pnso_mode mode)
{
spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
WRITE_ONCE(card->info.pnso_mode, mode);
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
if (mode == QETH_PNSO_NONE)
drain_workqueue(card->event_wq);
}
static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
{
struct switchdev_notifier_fdb_info info = {};
QETH_CARD_TEXT(card, 2, "fdbflush");
info.addr = NULL;
/* flush all VLANs: */
info.vid = 0;
info.added_by_user = false;
info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
card->dev, &info.info, NULL);
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
QETH_CARD_TEXT(card, 2, "l2reqmac");
if (MACHINE_IS_VM) {
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
/* Fall back once more, but some devices don't support a custom MAC
* address:
*/
if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
out:
QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
}
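/*
 * MAC selection order implemented above: z/VM MAC service (VM guests
 * only) -> READ_MAC adapter assist -> random locally administered MAC.
 * OSM/OSX devices must not run with a random MAC, so they return an
 * error instead of falling through to eth_hw_addr_random().
 */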
static void qeth_l2_register_dev_addr(struct qeth_card *card)
{
if (!is_valid_ether_addr(card->dev->dev_addr))
qeth_l2_request_initial_mac(card);
if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
card->info.dev_addr_is_registered = 1;
else
card->info.dev_addr_is_registered = 0;
}
static int qeth_l2_validate_addr(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
if (card->info.dev_addr_is_registered)
return eth_validate_addr(dev);
QETH_CARD_TEXT(card, 4, "nomacadr");
return -EPERM;
}
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct qeth_card *card = dev->ml_priv;
u8 old_addr[ETH_ALEN];
int rc = 0;
QETH_CARD_TEXT(card, 3, "setmac");
if (IS_OSM(card) || IS_OSX(card)) {
QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
card->info.dev_addr_is_registered)
return 0;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
return rc;
ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
if (card->info.dev_addr_is_registered)
qeth_l2_remove_mac(card, old_addr);
card->info.dev_addr_is_registered = 1;
return 0;
}
static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
{
int role;
int rc;
QETH_CARD_TEXT(card, 3, "pmisc2br");
if (enable) {
if (card->options.sbp.reflect_promisc_primary)
role = QETH_SBP_ROLE_PRIMARY;
else
role = QETH_SBP_ROLE_SECONDARY;
} else
role = QETH_SBP_ROLE_NONE;
rc = qeth_bridgeport_setrole(card, role);
QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
if (!rc) {
card->options.sbp.role = role;
card->info.promisc_mode = enable;
}
}
static void qeth_l2_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
if (card->info.promisc_mode == enable)
return;
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
qeth_setadp_promisc_mode(card, enable);
} else {
mutex_lock(&card->sbp_lock);
if (card->options.sbp.reflect_promisc)
qeth_l2_promisc_to_bridge(card, enable);
mutex_unlock(&card->sbp_lock);
}
}
/* A new MAC address is added to the hash table and marked to be written to
 * the card only if it is not already present in the hash table.
 */
static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
{
u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
struct qeth_mac *mac;
hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
return;
}
}
mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
if (!mac)
return;
ether_addr_copy(mac->mac_addr, ha->addr);
mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
}
static void qeth_l2_rx_mode_work(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
rx_mode_work);
struct net_device *dev = card->dev;
struct netdev_hw_addr *ha;
struct qeth_mac *mac;
struct hlist_node *tmp;
int i;
int rc;
QETH_CARD_TEXT(card, 3, "setmulti");
netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
netif_addr_unlock_bh(dev);
hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
switch (mac->disp_flag) {
case QETH_DISP_ADDR_DELETE:
qeth_l2_remove_mac(card, mac->mac_addr);
hash_del(&mac->hnode);
kfree(mac);
break;
case QETH_DISP_ADDR_ADD:
rc = qeth_l2_write_mac(card, mac->mac_addr);
if (rc) {
hash_del(&mac->hnode);
kfree(mac);
break;
}
fallthrough;
default:
/* for next call to set_rx_mode(): */
mac->disp_flag = QETH_DISP_ADDR_DELETE;
}
}
qeth_l2_set_promisc_mode(card);
}
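/*
 * disp_flag life cycle used above: addresses (re)found on the netdev
 * lists are marked DO_NOTHING or ADD by qeth_l2_add_mac(); after the
 * table walk every surviving entry is re-armed as DELETE, so addresses
 * that disappear from the netdev lists get removed on the next pass.
 */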
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
int rc;
if (!skb_is_gso(skb))
qdisc_skb_cb(skb)->pkt_len = skb->len;
if (IS_IQD(card))
txq = qeth_iqd_translate_txq(dev, txq);
queue = card->qdio.out_qs[txq];
rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
qeth_l2_fill_header);
if (!rc)
return NETDEV_TX_OK;
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
sb_dev);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
schedule_work(&card->rx_mode_work);
}
/**
* qeth_l2_pnso() - perform network subchannel operation
* @card: qeth_card structure pointer
* @oc: Operation Code
* @cnc: Boolean Change-Notification Control
* @cb: Callback function will be executed for each element
* of the address list
* @priv: Pointer to pass to the callback function.
*
* Collects network information in a network address list and calls the
* callback function for every entry in the list. If "change-notification-
* control" is set, further changes in the address list will be reported
* via the IPA command.
*/
static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
void *priv)
{
struct ccw_device *ddev = CARD_DDEV(card);
struct chsc_pnso_area *rr;
u32 prev_instance = 0;
int isfirstblock = 1;
int i, size, elems;
int rc;
rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
if (rr == NULL)
return -ENOMEM;
do {
QETH_CARD_TEXT(card, 2, "PNSO");
/* on the first iteration, naihdr.resume_token will be zero */
rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
cnc);
if (rc)
continue;
if (cb == NULL)
continue;
size = rr->naihdr.naids;
if (size != sizeof(struct chsc_pnso_naid_l2)) {
WARN_ON_ONCE(1);
continue;
}
elems = (rr->response.length - sizeof(struct chsc_header) -
sizeof(struct chsc_pnso_naihdr)) / size;
if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
/* Inform the caller that they need to scrap */
/* the data that was already reported via cb */
rc = -EAGAIN;
break;
}
isfirstblock = 0;
prev_instance = rr->naihdr.instance;
for (i = 0; i < elems; i++)
(*cb)(priv, &rr->entries[i]);
} while ((rc == -EBUSY) || (!rc && /* list stored */
/* resume token is non-zero => list incomplete */
(rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
if (rc)
QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
free_page((unsigned long)rr);
return rc;
}
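/*
 * Illustrative (hypothetical) per-entry callback for qeth_l2_pnso(),
 * shown only to document the contract: the callback may inspect the
 * entry, while the loop above owns the response page and the
 * resume-token handling:
 *
 *	static void example_pnso_cb(void *priv,
 *				    struct chsc_pnso_naid_l2 *entry)
 *	{
 *		struct qeth_card *card = priv;
 *
 *		QETH_CARD_TEXT_(card, 4, "lnid%04x", entry->addr_lnid.lnid);
 *	}
 */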
static bool qeth_is_my_net_if_token(struct qeth_card *card,
struct net_if_token *token)
{
return ((card->info.ddev_devno == token->devnum) &&
(card->info.cssid == token->cssid) &&
(card->info.iid == token->iid) &&
(card->info.ssid == token->ssid) &&
(card->info.chpid == token->chpid) &&
(card->info.chid == token->chid));
}
/**
* qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
* @card: qeth_card structure pointer
* @code: event bitmask: high order bit 0x80 set to
* 1 - removal of an object
* 0 - addition of an object
* Object type(s):
* 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
* @token: "network token" structure identifying 'physical' location
* of the target
* @addr_lnid: structure with MAC address and VLAN ID of the target
*/
static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
struct net_if_token *token,
struct mac_addr_lnid *addr_lnid)
{
struct switchdev_notifier_fdb_info info = {};
u8 ntfy_mac[ETH_ALEN];
ether_addr_copy(ntfy_mac, addr_lnid->mac);
/* Ignore VLAN only changes */
if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
return;
/* Ignore mcast entries */
if (is_multicast_ether_addr(ntfy_mac))
return;
/* Ignore my own addresses */
if (qeth_is_my_net_if_token(card, token))
return;
info.addr = ntfy_mac;
/* don't report VLAN IDs */
info.vid = 0;
info.added_by_user = false;
info.offloaded = true;
if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
card->dev, &info.info, NULL);
QETH_CARD_TEXT(card, 4, "andelmac");
QETH_CARD_TEXT_(card, 4,
"mc%012llx", ether_addr_to_u64(ntfy_mac));
} else {
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
card->dev, &info.info, NULL);
QETH_CARD_TEXT(card, 4, "anaddmac");
QETH_CARD_TEXT_(card, 4,
"mc%012llx", ether_addr_to_u64(ntfy_mac));
}
}
static void qeth_l2_dev2br_an_set_cb(void *priv,
struct chsc_pnso_naid_l2 *entry)
{
u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
struct qeth_card *card = priv;
if (entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_l2_dev2br_fdb_notify(card, code,
(struct net_if_token *)&entry->nit,
(struct mac_addr_lnid *)&entry->addr_lnid);
}
/**
* qeth_l2_dev2br_an_set() -
* Enable or disable 'dev to bridge network address notification'
* @card: qeth_card structure pointer
* @enable: Enable or disable 'dev to bridge network address notification'
*
* Returns negative errno-compatible error indication or 0 on success.
*
* On enable, emits a series of address notifications for all
* currently registered hosts.
*/
static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
{
int rc;
if (enable) {
QETH_CARD_TEXT(card, 2, "anseton");
rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
qeth_l2_dev2br_an_set_cb, card);
if (rc == -EAGAIN)
/* address notification enabled, but inconsistent
* addresses reported -> disable address notification
*/
qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
NULL, NULL);
} else {
QETH_CARD_TEXT(card, 2, "ansetoff");
rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
}
return rc;
}
struct qeth_l2_br2dev_event_work {
struct work_struct work;
struct net_device *br_dev;
struct net_device *lsync_dev;
struct net_device *dst_dev;
unsigned long event;
unsigned char addr[ETH_ALEN];
};
static const struct net_device_ops qeth_l2_iqd_netdev_ops;
static const struct net_device_ops qeth_l2_osa_netdev_ops;
static bool qeth_l2_must_learn(struct net_device *netdev,
struct net_device *dstdev)
{
struct qeth_priv *priv;
priv = netdev_priv(netdev);
return (netdev != dstdev &&
(priv->brport_features & BR_LEARNING_SYNC) &&
!(br_port_flag_is_set(netdev, BR_ISOLATED) &&
br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
(netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
}
/**
* qeth_l2_br2dev_worker() - update local MACs
* @work: bridge to device FDB update
*
* Update local MACs of a learning_sync bridgeport so it can receive
* messages for a destination port.
* In case of an isolated learning_sync port, also update its isolated
* siblings.
*/
static void qeth_l2_br2dev_worker(struct work_struct *work)
{
struct qeth_l2_br2dev_event_work *br2dev_event_work =
container_of(work, struct qeth_l2_br2dev_event_work, work);
struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
struct net_device *dstdev = br2dev_event_work->dst_dev;
struct net_device *brdev = br2dev_event_work->br_dev;
unsigned long event = br2dev_event_work->event;
unsigned char *addr = br2dev_event_work->addr;
struct qeth_card *card = lsyncdev->ml_priv;
struct net_device *lowerdev;
struct list_head *iter;
int err = 0;
QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
rcu_read_lock();
/* Verify preconditions are still valid: */
if (!netif_is_bridge_port(lsyncdev) ||
brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
goto unlock;
if (!qeth_l2_must_learn(lsyncdev, dstdev))
goto unlock;
if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
/* Update lsyncdev and its isolated sibling(s): */
iter = &brdev->adj_list.lower;
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
while (lowerdev) {
if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
err = dev_uc_add(lowerdev, addr);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = dev_uc_del(lowerdev, addr);
break;
default:
break;
}
if (err) {
QETH_CARD_TEXT(card, 2, "b2derris");
QETH_CARD_TEXT_(card, 2,
"err%02lx%03d", event,
lowerdev->ifindex);
}
}
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
}
} else {
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
err = dev_uc_add(lsyncdev, addr);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = dev_uc_del(lsyncdev, addr);
break;
default:
break;
}
if (err)
QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
}
unlock:
rcu_read_unlock();
dev_put(brdev);
dev_put(lsyncdev);
dev_put(dstdev);
kfree(br2dev_event_work);
}
static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
struct net_device *lsyncdev,
struct net_device *dstdev,
unsigned long event,
const unsigned char *addr)
{
struct qeth_l2_br2dev_event_work *worker_data;
struct qeth_card *card;
worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
if (!worker_data)
return -ENOMEM;
INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
worker_data->br_dev = brdev;
worker_data->lsync_dev = lsyncdev;
worker_data->dst_dev = dstdev;
worker_data->event = event;
ether_addr_copy(worker_data->addr, addr);
card = lsyncdev->ml_priv;
/* Take a reference on the sw port devices and the bridge */
dev_hold(brdev);
dev_hold(lsyncdev);
dev_hold(dstdev);
queue_work(card->event_wq, &worker_data->work);
return 0;
}
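/*
 * The references taken on brdev/lsyncdev/dstdev above are dropped by
 * qeth_l2_br2dev_worker(), which re-validates the bridge topology under
 * rcu_read_lock() before touching any unicast address lists.
 */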
/* Called under rtnl_lock */
static int qeth_l2_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dstdev, *brdev, *lowerdev;
struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
struct list_head *iter;
struct qeth_card *card;
int rc;
if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
event == SWITCHDEV_FDB_DEL_TO_DEVICE))
return NOTIFY_DONE;
dstdev = switchdev_notifier_info_to_dev(info);
brdev = netdev_master_upper_dev_get_rcu(dstdev);
if (!brdev || !netif_is_bridge_master(brdev))
return NOTIFY_DONE;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
iter = &brdev->adj_list.lower;
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
while (lowerdev) {
if (qeth_l2_must_learn(lowerdev, dstdev)) {
card = lowerdev->ml_priv;
QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
dstdev, event,
fdb_info->addr);
if (rc) {
QETH_CARD_TEXT(card, 2, "b2dqwerr");
return NOTIFY_BAD;
}
}
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
}
return NOTIFY_DONE;
}
static struct notifier_block qeth_l2_sw_notifier = {
.notifier_call = qeth_l2_switchdev_event,
};
static refcount_t qeth_l2_switchdev_notify_refcnt;
/* Called under rtnl_lock */
static void qeth_l2_br2dev_get(void)
{
int rc;
if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
if (rc) {
QETH_DBF_MESSAGE(2,
"failed to register qeth_l2_sw_notifier: %d\n",
rc);
} else {
refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
}
}
QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
qeth_l2_switchdev_notify_refcnt.refs.counter);
}
/* Called under rtnl_lock */
static void qeth_l2_br2dev_put(void)
{
int rc;
if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
if (rc) {
QETH_DBF_MESSAGE(2,
"failed to unregister qeth_l2_sw_notifier: %d\n",
rc);
} else {
QETH_DBF_MESSAGE(2,
"qeth_l2_sw_notifier unregistered\n");
}
}
QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
qeth_l2_switchdev_notify_refcnt.refs.counter);
}
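/*
 * qeth_l2_br2dev_get()/_put() refcount a single switchdev notifier that
 * is shared by all learning_sync bridgeports; both are called under
 * rtnl_lock, so registration and unregistration cannot race.
 */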
static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
int nlflags)
{
struct qeth_priv *priv = netdev_priv(dev);
struct qeth_card *card = dev->ml_priv;
u16 mode = BRIDGE_MODE_UNDEF;
/* Do not even show qeth devs that cannot do bridge_setlink */
if (!priv->brport_hw_features || !netif_device_present(dev) ||
qeth_bridgeport_is_in_use(card))
return -EOPNOTSUPP;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode, priv->brport_features,
priv->brport_hw_features,
nlflags, filter_mask, NULL);
}
static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
};
/**
* qeth_l2_bridge_setlink() - set bridgeport attributes
* @dev: netdevice
* @nlh: netlink message header
* @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
* @extack: extended ACK report struct
*
* Called under rtnl_lock
*/
static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 flags, struct netlink_ext_ack *extack)
{
struct qeth_priv *priv = netdev_priv(dev);
struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
struct qeth_card *card = dev->ml_priv;
struct nlattr *attr, *nested_attr;
bool enable, has_protinfo = false;
int rem1, rem2;
int rc;
if (!netif_device_present(dev))
return -ENODEV;
nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
if (nla_type(attr) == IFLA_PROTINFO) {
rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
qeth_brport_policy, extack);
if (rc)
return rc;
has_protinfo = true;
} else if (nla_type(attr) == IFLA_AF_SPEC) {
nla_for_each_nested(nested_attr, attr, rem2) {
if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
continue;
NL_SET_ERR_MSG_ATTR(extack, nested_attr,
"Unsupported attribute");
return -EINVAL;
}
} else {
NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
return -EINVAL;
}
}
if (!has_protinfo)
return 0;
if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
return -EINVAL;
if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
"Operation not supported by HW");
return -EOPNOTSUPP;
}
if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
"Requires NET_SWITCHDEV");
return -EOPNOTSUPP;
}
enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
return 0;
mutex_lock(&card->sbp_lock);
/* do not change anything if BridgePort is enabled */
if (qeth_bridgeport_is_in_use(card)) {
NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
rc = -EBUSY;
} else if (enable) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
rc = qeth_l2_dev2br_an_set(card, true);
if (rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
} else {
priv->brport_features |= BR_LEARNING_SYNC;
qeth_l2_br2dev_get();
}
} else {
rc = qeth_l2_dev2br_an_set(card, false);
if (!rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
priv->brport_features ^= BR_LEARNING_SYNC;
qeth_l2_dev2br_fdb_flush(card);
qeth_l2_br2dev_put();
}
}
mutex_unlock(&card->sbp_lock);
return rc;
}
static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_select_queue = qeth_l2_iqd_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
.ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_bridge_getlink = qeth_l2_bridge_getlink,
.ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
static const struct net_device_ops qeth_l2_osa_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_select_queue = qeth_osa_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
.ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
};
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
&qeth_l2_osa_netdev_ops;
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->priv_flags |= IFF_UNICAST_FLT;
if (IS_OSM(card)) {
card->dev->features |= NETIF_F_VLAN_CHALLENGED;
} else {
if (!IS_VM_NIC(card))
card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (IS_OSD(card) && !IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
card->dev->hw_features |= NETIF_F_IP_CSUM;
card->dev->vlan_features |= NETIF_F_IP_CSUM;
}
}
if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
card->dev->hw_features |= NETIF_F_IPV6_CSUM;
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO;
card->dev->vlan_features |= NETIF_F_TSO;
}
if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO6;
card->dev->vlan_features |= NETIF_F_TSO6;
}
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
netif_keep_dst(card->dev);
netif_set_tso_max_size(card->dev,
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
static void qeth_l2_trace_features(struct qeth_card *card)
{
/* Set BridgePort features */
QETH_CARD_TEXT(card, 2, "featuSBP");
QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
sizeof(card->options.sbp.supported_funcs));
/* VNIC Characteristics features */
QETH_CARD_TEXT(card, 2, "feaVNICC");
QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
sizeof(card->options.vnicc.sup_chars));
}
static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
{
if (!card->options.sbp.reflect_promisc &&
card->options.sbp.role != QETH_SBP_ROLE_NONE) {
/* Conditional to avoid spurious error messages */
qeth_bridgeport_setrole(card, card->options.sbp.role);
/* Let the callback function refresh the stored role value. */
qeth_bridgeport_query_ports(card, &card->options.sbp.role,
NULL);
}
if (card->options.sbp.hostnotification) {
if (qeth_bridgeport_an_set(card, 1))
card->options.sbp.hostnotification = 0;
}
}
/**
* qeth_l2_detect_dev2br_support() -
* Detect whether this card supports 'dev to bridge fdb network address
* change notification' and thus can support the learning_sync bridgeport
* attribute
* @card: qeth_card structure pointer
*/
static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
{
struct qeth_priv *priv = netdev_priv(card->dev);
bool dev2br_supported;
QETH_CARD_TEXT(card, 2, "d2brsup");
if (!IS_IQD(card))
return;
/* dev2br requires valid cssid,iid,chid */
dev2br_supported = card->info.ids_valid &&
css_general_characteristics.enarf;
QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
if (dev2br_supported)
priv->brport_hw_features |= BR_LEARNING_SYNC;
else
priv->brport_hw_features &= ~BR_LEARNING_SYNC;
}
static void qeth_l2_enable_brport_features(struct qeth_card *card)
{
struct qeth_priv *priv = netdev_priv(card->dev);
int rc;
if (priv->brport_features & BR_LEARNING_SYNC) {
if (priv->brport_hw_features & BR_LEARNING_SYNC) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
rc = qeth_l2_dev2br_an_set(card, true);
if (rc == -EAGAIN) {
/* Recoverable error, retry once */
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
qeth_l2_dev2br_fdb_flush(card);
qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
rc = qeth_l2_dev2br_an_set(card, true);
}
if (rc) {
netdev_err(card->dev,
"failed to enable bridge learning_sync: %d\n",
rc);
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
qeth_l2_dev2br_fdb_flush(card);
priv->brport_features ^= BR_LEARNING_SYNC;
}
} else {
dev_warn(&card->gdev->dev,
"bridge learning_sync not supported\n");
priv->brport_features ^= BR_LEARNING_SYNC;
}
}
}
/* SETBRIDGEPORT support, async notifications */
enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
/**
* qeth_bridge_emit_host_event() - bridgeport address change notification
* @card: qeth_card structure pointer, for udev events.
* @evtype: "normal" register/unregister, or abort, or reset. For abort
 * and reset, token and addr_lnid are unused and may be NULL.
* @code: event bitmask: high order bit 0x80 value 1 means removal of an
* object, 0 - addition of an object.
* 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
* @token: "network token" structure identifying physical address of the port.
* @addr_lnid: pointer to structure with MAC address and VLAN ID.
*
* This function is called when registrations and deregistrations are
* reported by the hardware, and also when notifications are enabled -
* for all currently registered addresses.
*/
static void qeth_bridge_emit_host_event(struct qeth_card *card,
enum qeth_an_event_type evtype,
u8 code,
struct net_if_token *token,
struct mac_addr_lnid *addr_lnid)
{
char str[7][32];
char *env[8];
int i = 0;
switch (evtype) {
case anev_reg_unreg:
scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
(code & IPA_ADDR_CHANGE_CODE_REMOVAL)
? "deregister" : "register");
env[i] = str[i]; i++;
if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
addr_lnid->lnid);
env[i] = str[i]; i++;
}
if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
addr_lnid->mac);
env[i] = str[i]; i++;
}
scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
token->cssid, token->ssid, token->devnum);
env[i] = str[i]; i++;
scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
env[i] = str[i]; i++;
scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
token->chpid);
env[i] = str[i]; i++;
scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
token->chid);
env[i] = str[i]; i++;
break;
case anev_abort:
scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
env[i] = str[i]; i++;
break;
case anev_reset:
scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
env[i] = str[i]; i++;
break;
}
env[i] = NULL;
kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
}
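/*
 * Illustrative uevent environment for an anev_reg_unreg event with both
 * the VLAN and MAC bits set in @code (values are made up):
 *
 *	BRIDGEDHOST=register
 *	VLAN=100
 *	MAC=02:00:00:11:22:33
 *	NTOK_BUSID=0.0.f5f0
 *	NTOK_IID=01
 *	NTOK_CHPID=50
 *	NTOK_CHID=0010
 */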
struct qeth_bridge_state_data {
struct work_struct worker;
struct qeth_card *card;
u8 role;
u8 state;
};
static void qeth_bridge_state_change_worker(struct work_struct *work)
{
struct qeth_bridge_state_data *data =
container_of(work, struct qeth_bridge_state_data, worker);
char env_locrem[32];
char env_role[32];
char env_state[32];
char *env[] = {
env_locrem,
env_role,
env_state,
NULL
};
scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
scnprintf(env_role, sizeof(env_role), "ROLE=%s",
(data->role == QETH_SBP_ROLE_NONE) ? "none" :
(data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
(data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
"<INVALID>");
scnprintf(env_state, sizeof(env_state), "STATE=%s",
(data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
(data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
(data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
"<INVALID>");
kobject_uevent_env(&data->card->gdev->dev.kobj,
KOBJ_CHANGE, env);
kfree(data);
}
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
struct qeth_bridge_state_data *data;
QETH_CARD_TEXT(card, 2, "brstchng");
if (qports->num_entries == 0) {
QETH_CARD_TEXT(card, 2, "BPempty");
return;
}
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
}
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data) {
QETH_CARD_TEXT(card, 2, "BPSalloc");
return;
}
INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
data->card = card;
/* Information for the local port: */
data->role = qports->entry[0].role;
data->state = qports->entry[0].state;
queue_work(card->event_wq, &data->worker);
}
struct qeth_addr_change_data {
struct delayed_work dwork;
struct qeth_card *card;
struct qeth_ipacmd_addr_change ac_event;
};
static void qeth_l2_dev2br_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct qeth_addr_change_data *data;
struct qeth_card *card;
struct qeth_priv *priv;
unsigned int i;
int rc;
data = container_of(dwork, struct qeth_addr_change_data, dwork);
card = data->card;
priv = netdev_priv(card->dev);
QETH_CARD_TEXT(card, 4, "dev2brew");
if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
goto free;
if (data->ac_event.lost_event_mask) {
/* Potential re-config in progress, try again later: */
if (!rtnl_trylock()) {
queue_delayed_work(card->event_wq, dwork,
msecs_to_jiffies(100));
return;
}
if (!netif_device_present(card->dev)) {
rtnl_unlock();
goto free;
}
QETH_DBF_MESSAGE(3,
"Address change notification overflow on device %x\n",
CARD_DEVID(card));
/* Card fdb and bridge fdb are out of sync, card has stopped
* notifications (no need to drain_workqueue). Purge all
* 'extern_learn' entries from the parent bridge and restart
* the notifications.
*/
qeth_l2_dev2br_fdb_flush(card);
rc = qeth_l2_dev2br_an_set(card, true);
if (rc) {
/* TODO: if we want to retry after -EAGAIN, be
* aware there could be stale entries in the
* workqueue now, that need to be drained.
* For now we give up:
*/
netdev_err(card->dev,
"bridge learning_sync failed to recover: %d\n",
rc);
WRITE_ONCE(card->info.pnso_mode,
QETH_PNSO_NONE);
/* To remove fdb entries reported by an_set: */
qeth_l2_dev2br_fdb_flush(card);
priv->brport_features ^= BR_LEARNING_SYNC;
} else {
QETH_DBF_MESSAGE(3,
"Address Notification resynced on device %x\n",
CARD_DEVID(card));
}
rtnl_unlock();
} else {
for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
&data->ac_event.entry[i];
qeth_l2_dev2br_fdb_notify(card,
entry->change_code,
&entry->token,
&entry->addr_lnid);
}
}
free:
kfree(data);
}
static void qeth_addr_change_event_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct qeth_addr_change_data *data;
struct qeth_card *card;
int i;
data = container_of(dwork, struct qeth_addr_change_data, dwork);
card = data->card;
QETH_CARD_TEXT(data->card, 4, "adrchgew");
if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
goto free;
if (data->ac_event.lost_event_mask) {
/* Potential re-config in progress, try again later: */
if (!mutex_trylock(&card->sbp_lock)) {
queue_delayed_work(card->event_wq, dwork,
msecs_to_jiffies(100));
return;
}
dev_info(&data->card->gdev->dev,
"Address change notification stopped on %s (%s)\n",
netdev_name(card->dev),
(data->ac_event.lost_event_mask == 0x01)
? "Overflow"
: (data->ac_event.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
data->card->options.sbp.hostnotification = 0;
card->info.pnso_mode = QETH_PNSO_NONE;
mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
} else
for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
&data->ac_event.entry[i];
qeth_bridge_emit_host_event(data->card,
anev_reg_unreg,
entry->change_code,
&entry->token,
&entry->addr_lnid);
}
free:
kfree(data);
}
static void qeth_addr_change_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
struct qeth_ipacmd_addr_change *hostevs =
&cmd->data.addrchange;
struct qeth_addr_change_data *data;
int extrasize;
if (card->info.pnso_mode == QETH_PNSO_NONE)
return;
QETH_CARD_TEXT(card, 4, "adrchgev");
if (cmd->hdr.return_code != 0x0000) {
if (cmd->hdr.return_code == 0x0010) {
if (hostevs->lost_event_mask == 0x00)
hostevs->lost_event_mask = 0xff;
} else {
QETH_CARD_TEXT_(card, 2, "ACHN%04x",
cmd->hdr.return_code);
return;
}
}
extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
hostevs->num_entries;
data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
GFP_ATOMIC);
if (!data) {
QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
else
INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
data->ac_event = *hostevs;
memcpy(data->ac_event.entry, hostevs->entry, extrasize);
queue_delayed_work(card->event_wq, &data->dwork, 0);
}
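/*
 * Dispatch summary: in QETH_PNSO_BRIDGEPORT mode the copied event is
 * handled by qeth_addr_change_event_worker() (udev notifications), in
 * address-info mode by qeth_l2_dev2br_worker() (switchdev fdb updates).
 * A return code of 0x0010 is mapped to a non-zero lost_event_mask so the
 * workers treat it as an overflow and resynchronize.
 */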
/* SETBRIDGEPORT support; sending commands */
struct _qeth_sbp_cbctl {
union {
u32 supported;
struct {
enum qeth_sbp_roles *role;
enum qeth_sbp_states *state;
} qports;
} data;
};
static int qeth_bridgeport_makerc(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
u16 ipa_rc = cmd->hdr.return_code;
u16 sbp_rc = sbp->hdr.return_code;
int rc;
if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
return 0;
if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
(!IS_IQD(card) && ipa_rc == sbp_rc)) {
switch (sbp_rc) {
case IPA_RC_SUCCESS:
rc = 0;
break;
case IPA_RC_L2_UNSUPPORTED_CMD:
case IPA_RC_UNSUPPORTED_COMMAND:
rc = -EOPNOTSUPP;
break;
case IPA_RC_SBP_OSA_NOT_CONFIGURED:
case IPA_RC_SBP_IQD_NOT_CONFIGURED:
rc = -ENODEV; /* maybe not the best code here? */
dev_err(&card->gdev->dev,
"The device is not configured as a Bridge Port\n");
break;
case IPA_RC_SBP_OSA_OS_MISMATCH:
case IPA_RC_SBP_IQD_OS_MISMATCH:
rc = -EPERM;
dev_err(&card->gdev->dev,
"A Bridge Port is already configured by a different operating system\n");
break;
case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
switch (setcmd) {
case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
rc = -EEXIST;
dev_err(&card->gdev->dev,
"The LAN already has a primary Bridge Port\n");
break;
case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a primary Bridge Port\n");
break;
default:
rc = -EIO;
}
break;
case IPA_RC_SBP_OSA_CURRENT_SECOND:
case IPA_RC_SBP_IQD_CURRENT_SECOND:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a secondary Bridge Port\n");
break;
case IPA_RC_SBP_OSA_LIMIT_SECOND:
case IPA_RC_SBP_IQD_LIMIT_SECOND:
rc = -EEXIST;
dev_err(&card->gdev->dev,
"The LAN cannot have more secondary Bridge Ports\n");
break;
case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a primary Bridge Port\n");
break;
case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
rc = -EACCES;
dev_err(&card->gdev->dev,
"The device is not authorized to be a Bridge Port\n");
break;
default:
rc = -EIO;
}
} else {
switch (ipa_rc) {
case IPA_RC_NOTSUPP:
rc = -EOPNOTSUPP;
break;
case IPA_RC_UNSUPPORTED_COMMAND:
rc = -EOPNOTSUPP;
break;
default:
rc = -EIO;
}
}
if (rc) {
QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
}
return rc;
}
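/*
 * Return-code convention handled above: IQD devices report overall
 * success in the IPA header and the real result in the SBP sub-header,
 * while OSA devices mirror the SBP result into the IPA return code; any
 * other combination is mapped in the final else branch.
 */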
static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
unsigned int data_length)
{
enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
IPA_CMD_SETBRIDGEPORT_OSA;
struct qeth_ipacmd_sbp_hdr *hdr;
struct qeth_cmd_buffer *iob;
iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
data_length +
offsetof(struct qeth_ipacmd_setbridgeport,
data));
if (!iob)
return iob;
hdr = &__ipa_cmd(iob)->data.sbp.hdr;
hdr->cmdlength = sizeof(*hdr) + data_length;
hdr->command_code = sbp_cmd;
hdr->used_total = 1;
hdr->seq_no = 1;
return iob;
}
static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
int rc;
QETH_CARD_TEXT(card, 2, "brqsupcb");
rc = qeth_bridgeport_makerc(card, cmd);
if (rc)
return rc;
cbctl->data.supported =
cmd->data.sbp.data.query_cmds_supp.supported_cmds;
return 0;
}
/**
* qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
* @card: qeth_card structure pointer.
*
* Sets bitmask of supported setbridgeport subfunctions in the qeth_card
 * structure: card->options.sbp.supported_funcs.
*/
static void qeth_bridgeport_query_support(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
struct _qeth_sbp_cbctl cbctl;
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
SBP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return;
if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
&cbctl)) {
card->options.sbp.role = QETH_SBP_ROLE_NONE;
card->options.sbp.supported_funcs = 0;
return;
}
card->options.sbp.supported_funcs = cbctl.data.supported;
}
static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
struct qeth_sbp_port_data *qports;
int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb");
rc = qeth_bridgeport_makerc(card, cmd);
if (rc)
return rc;
qports = &cmd->data.sbp.data.port_data;
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
return -EINVAL;
}
/* first entry contains the state of the local port */
if (qports->num_entries > 0) {
if (cbctl->data.qports.role)
*cbctl->data.qports.role = qports->entry[0].role;
if (cbctl->data.qports.state)
*cbctl->data.qports.state = qports->entry[0].state;
}
return 0;
}
/**
* qeth_bridgeport_query_ports() - query local bridgeport status.
* @card: qeth_card structure pointer.
* @role: Role of the port: 0-none, 1-primary, 2-secondary.
* @state: State of the port: 0-inactive, 1-standby, 2-active.
*
* Returns negative errno-compatible error indication or 0 on success.
*
* 'role' and 'state' are not updated in case of hardware operation failure.
*/
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
{
struct qeth_cmd_buffer *iob;
struct _qeth_sbp_cbctl cbctl = {
.data = {
.qports = {
.role = role,
.state = state,
},
},
};
QETH_CARD_TEXT(card, 2, "brqports");
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
&cbctl);
}
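/*
 * Illustrative call site (hypothetical, error handling elided):
 *
 *	enum qeth_sbp_roles role;
 *	enum qeth_sbp_states state;
 *
 *	if (!qeth_bridgeport_query_ports(card, &role, &state))
 *		QETH_CARD_TEXT_(card, 2, "role%d", role);
 */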
static int qeth_bridgeport_set_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
QETH_CARD_TEXT(card, 2, "brsetrcb");
return qeth_bridgeport_makerc(card, cmd);
}
/**
* qeth_bridgeport_setrole() - Assign primary role to the port.
* @card: qeth_card structure pointer.
* @role: Role to assign.
*
* Returns negative errno-compatible error indication or 0 on success.
*/
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
struct qeth_cmd_buffer *iob;
enum qeth_ipa_sbp_cmd setcmd;
unsigned int cmdlength = 0;
QETH_CARD_TEXT(card, 2, "brsetrol");
switch (role) {
case QETH_SBP_ROLE_NONE:
setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
break;
case QETH_SBP_ROLE_PRIMARY:
setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
cmdlength = SBP_DATA_SIZEOF(set_primary);
break;
case QETH_SBP_ROLE_SECONDARY:
setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
break;
default:
return -EINVAL;
}
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
static void qeth_bridgeport_an_set_cb(void *priv,
struct chsc_pnso_naid_l2 *entry)
{
struct qeth_card *card = (struct qeth_card *)priv;
u8 code;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
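	/* lnid values below VLAN_N_VID carry a valid VLAN ID for this entry */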
if (entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&entry->nit,
(struct mac_addr_lnid *)&entry->addr_lnid);
}
/**
* qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
* @card: qeth_card structure pointer.
* @enable: 0 - disable, non-zero - enable notifications
*
* Returns negative errno-compatible error indication or 0 on success.
*
 * On enable, emits a series of address notification udev events for all
* currently registered hosts.
*/
int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
{
int rc;
if (!card->options.sbp.supported_funcs)
return -EOPNOTSUPP;
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
qeth_bridgeport_an_set_cb, card);
if (rc)
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
} else {
rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
}
return rc;
}
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
{
int rc;
switch (ipa_rc) {
case IPA_RC_SUCCESS:
return ipa_rc;
case IPA_RC_L2_UNSUPPORTED_CMD:
case IPA_RC_NOTSUPP:
rc = -EOPNOTSUPP;
break;
case IPA_RC_VNICC_OOSEQ:
rc = -EALREADY;
break;
case IPA_RC_VNICC_VNICBP:
rc = -EBUSY;
break;
case IPA_RC_L2_ADDR_TABLE_FULL:
rc = -ENOSPC;
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
rc = -EACCES;
break;
default:
rc = -EIO;
}
QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
return rc;
}
/* generic VNICC request call back */
static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
QETH_CARD_TEXT(card, 2, "vniccrcb");
if (cmd->hdr.return_code)
return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
/* return results to caller */
card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS)
*(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
*(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0;
}
static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
u32 vnicc_cmd,
unsigned int data_length)
{
struct qeth_ipacmd_vnicc_hdr *hdr;
struct qeth_cmd_buffer *iob;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
data_length +
offsetof(struct qeth_ipacmd_vnicc, data));
if (!iob)
return NULL;
hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
hdr->data_length = sizeof(*hdr) + data_length;
hdr->sub_command = vnicc_cmd;
return iob;
}
/* VNICC query VNIC characteristics request */
static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqch");
iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}
/* VNICC query sub commands request */
static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm");
iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
VNICC_DATA_SIZEOF(query_cmds));
if (!iob)
return -ENOMEM;
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}
/* VNICC enable/disable characteristic request */
static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
u32 cmd)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccedc");
iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
if (!iob)
return -ENOMEM;
__ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}
/* VNICC get/set timeout for characteristic request */
static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
struct qeth_vnicc_getset_timeout *getset_timeout;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst");
iob = qeth_l2_vnicc_build_cmd(card, cmd,
VNICC_DATA_SIZEOF(getset_timeout));
if (!iob)
return -ENOMEM;
getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
getset_timeout->vnic_char = vnicc;
if (cmd == IPA_VNICC_SET_TIMEOUT)
getset_timeout->timeout = *timeout;
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
/* recover user timeout setting */
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
u32 *timeout)
{
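	/* returns true if the user value could not be restored and the
	 * default timeout was applied instead
	 */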
if (card->options.vnicc.sup_chars & vnicc &&
card->options.vnicc.getset_timeout_sup & vnicc &&
!qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
timeout))
return false;
*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
return true;
}
/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
int rc = 0;
u32 cmd;
QETH_CARD_TEXT(card, 2, "vniccsch");
/* check if characteristic and enable/disable are supported */
if (!(card->options.vnicc.sup_chars & vnicc) ||
!(card->options.vnicc.set_char_sup & vnicc))
return -EOPNOTSUPP;
if (qeth_bridgeport_is_in_use(card))
return -EBUSY;
/* set enable/disable command and store wanted characteristic */
if (state) {
cmd = IPA_VNICC_ENABLE;
card->options.vnicc.wanted_chars |= vnicc;
} else {
cmd = IPA_VNICC_DISABLE;
card->options.vnicc.wanted_chars &= ~vnicc;
}
/* do we need to do anything? */
if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
return rc;
/* if card is not ready, simply stop here */
if (!qeth_card_hw_is_reachable(card)) {
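		/* mirror the requested state so reads reflect it while offline;
		 * qeth_l2_vnicc_init() applies the wanted state once the card
		 * goes online
		 */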
if (state)
card->options.vnicc.cur_chars |= vnicc;
else
card->options.vnicc.cur_chars &= ~vnicc;
return rc;
}
rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
if (rc)
card->options.vnicc.wanted_chars =
card->options.vnicc.cur_chars;
else {
/* successful online VNICC change; handle special cases */
if (state && vnicc == QETH_VNICC_RX_BCAST)
card->options.vnicc.rx_bcast_enabled = true;
if (!state && vnicc == QETH_VNICC_LEARNING)
qeth_l2_vnicc_recover_timeout(card, vnicc,
&card->options.vnicc.learning_timeout);
}
return rc;
}
/* get current VNICC flag state; called from sysfs show function */
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
{
int rc = 0;
QETH_CARD_TEXT(card, 2, "vniccgch");
/* check if characteristic is supported */
if (!(card->options.vnicc.sup_chars & vnicc))
return -EOPNOTSUPP;
if (qeth_bridgeport_is_in_use(card))
return -EBUSY;
/* if card is ready, query current VNICC state */
if (qeth_card_hw_is_reachable(card))
rc = qeth_l2_vnicc_query_chars(card);
*state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
return rc;
}
/* set VNICC timeout; called from sysfs store function. Currently, only learning
* supports timeout
*/
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
{
int rc = 0;
QETH_CARD_TEXT(card, 2, "vniccsto");
/* check if characteristic and set_timeout are supported */
if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
!(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
return -EOPNOTSUPP;
if (qeth_bridgeport_is_in_use(card))
return -EBUSY;
/* do we need to do anything? */
if (card->options.vnicc.learning_timeout == timeout)
return rc;
/* if card is not ready, simply store the value internally and return */
if (!qeth_card_hw_is_reachable(card)) {
card->options.vnicc.learning_timeout = timeout;
return rc;
}
/* send timeout value to card; if successful, store value internally */
rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
IPA_VNICC_SET_TIMEOUT, &timeout);
if (!rc)
card->options.vnicc.learning_timeout = timeout;
return rc;
}
/* get current VNICC timeout; called from sysfs show function. Currently, only
* learning supports timeout
*/
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
{
int rc = 0;
QETH_CARD_TEXT(card, 2, "vniccgto");
/* check if characteristic and get_timeout are supported */
if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
!(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
return -EOPNOTSUPP;
if (qeth_bridgeport_is_in_use(card))
return -EBUSY;
/* if card is ready, get timeout. Otherwise, just return stored value */
*timeout = card->options.vnicc.learning_timeout;
if (qeth_card_hw_is_reachable(card))
rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
IPA_VNICC_GET_TIMEOUT,
timeout);
return rc;
}
/* check if VNICC is currently enabled */
static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
if (!card->options.vnicc.sup_chars)
return false;
/* default values are only OK if rx_bcast was not enabled by user
* or the card is offline.
*/
if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
if (!card->options.vnicc.rx_bcast_enabled ||
!qeth_card_hw_is_reachable(card))
return false;
}
return true;
}
/**
 * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
* @card: qeth_card structure pointer
*
* qeth_bridgeport functionality is mutually exclusive with usage of the
* VNIC Characteristics and dev2br address notifications
*/
bool qeth_bridgeport_allowed(struct qeth_card *card)
{
struct qeth_priv *priv = netdev_priv(card->dev);
return (!_qeth_l2_vnicc_is_in_use(card) &&
!(priv->brport_features & BR_LEARNING_SYNC));
}
/* recover user characteristic setting */
static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
bool enable)
{
u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
if (card->options.vnicc.sup_chars & vnicc &&
card->options.vnicc.set_char_sup & vnicc &&
!qeth_l2_vnicc_set_char(card, vnicc, cmd))
return false;
card->options.vnicc.wanted_chars &= ~vnicc;
card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
return true;
}
/* (re-)initialize VNICC */
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
u32 *timeout = &card->options.vnicc.learning_timeout;
bool enable, error = false;
unsigned int chars_len, i;
unsigned long chars_tmp;
u32 sup_cmds, vnicc;
QETH_CARD_TEXT(card, 2, "vniccini");
/* reset rx_bcast */
card->options.vnicc.rx_bcast_enabled = 0;
/* initial query and storage of VNIC characteristics */
if (qeth_l2_vnicc_query_chars(card)) {
if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
*timeout != QETH_VNICC_DEFAULT_TIMEOUT)
dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
/* fail quietly if user didn't change the default config */
card->options.vnicc.sup_chars = 0;
card->options.vnicc.cur_chars = 0;
card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
return;
}
/* get supported commands for each supported characteristic */
chars_tmp = card->options.vnicc.sup_chars;
chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
sup_cmds = 0;
error = true;
}
if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
(sup_cmds & IPA_VNICC_GET_TIMEOUT))
card->options.vnicc.getset_timeout_sup |= vnicc;
else
card->options.vnicc.getset_timeout_sup &= ~vnicc;
if ((sup_cmds & IPA_VNICC_ENABLE) &&
(sup_cmds & IPA_VNICC_DISABLE))
card->options.vnicc.set_char_sup |= vnicc;
else
card->options.vnicc.set_char_sup &= ~vnicc;
}
/* enforce assumed default values and recover settings, if changed */
error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
timeout);
/* Change chars, if necessary */
chars_tmp = card->options.vnicc.wanted_chars ^
card->options.vnicc.cur_chars;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
enable = card->options.vnicc.wanted_chars & vnicc;
error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
}
if (error)
dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
}
/* configure default values of VNIC characteristics */
static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
{
/* characteristics values */
card->options.vnicc.sup_chars = QETH_VNICC_ALL;
card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
/* supported commands */
card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
/* settings wanted by users */
card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
};
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
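	/* the core may have assigned a device type already; in that case only
	 * add the layer-2 attribute groups, otherwise install the l2 devtype
	 * which carries them implicitly
	 */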
if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
if (rc)
return rc;
} else {
gdev->dev.type = &qeth_l2_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
return 0;
}
static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
struct qeth_priv *priv;
if (gdev->dev.type != &qeth_l2_devtype)
device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
if (card->dev->reg_state == NETREG_REGISTERED) {
priv = netdev_priv(card->dev);
if (priv->brport_features & BR_LEARNING_SYNC) {
rtnl_lock();
qeth_l2_br2dev_put();
rtnl_unlock();
}
unregister_netdev(card->dev);
}
}
static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
{
struct net_device *dev = card->dev;
int rc = 0;
qeth_l2_detect_dev2br_support(card);
mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs) {
qeth_l2_setup_bridgeport_attrs(card);
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
}
mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
/* for the rx_bcast characteristic, init VNICC after setmac */
qeth_l2_vnicc_init(card);
qeth_l2_trace_features(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
if (dev->reg_state != NETREG_REGISTERED) {
rc = qeth_l2_setup_netdev(card);
if (rc)
goto err_setup;
if (carrier_ok)
netif_carrier_on(dev);
} else {
rtnl_lock();
rc = qeth_set_real_num_tx_queues(card,
qeth_tx_actual_queues(card));
if (rc) {
rtnl_unlock();
goto err_set_queues;
}
if (carrier_ok)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
netif_device_attach(dev);
qeth_enable_hw_features(dev);
qeth_l2_enable_brport_features(card);
if (netif_running(dev)) {
local_bh_disable();
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
qeth_l2_set_rx_mode(dev);
}
rtnl_unlock();
}
return 0;
err_set_queues:
err_setup:
qeth_set_allowed_threads(card, 0, 1);
card->state = CARD_STATE_DOWN;
return rc;
}
static void qeth_l2_set_offline(struct qeth_card *card)
{
struct qeth_priv *priv = netdev_priv(card->dev);
qeth_set_allowed_threads(card, 0, 1);
qeth_l2_drain_rx_mode_cache(card);
if (card->state == CARD_STATE_SOFTSETUP)
card->state = CARD_STATE_DOWN;
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
if (priv->brport_features & BR_LEARNING_SYNC)
qeth_l2_dev2br_fdb_flush(card);
}
/* Returns zero if the command is successfully "consumed" */
static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
switch (cmd->hdr.command) {
case IPA_CMD_SETBRIDGEPORT_OSA:
case IPA_CMD_SETBRIDGEPORT_IQD:
if (cmd->data.sbp.hdr.command_code ==
IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
qeth_bridge_state_change(card, cmd);
return 0;
}
return 1;
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
qeth_addr_change_event(card, cmd);
return 0;
default:
return 1;
}
}
const struct qeth_discipline qeth_l2_discipline = {
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
.set_offline = qeth_l2_set_offline,
.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static int __init qeth_l2_init(void)
{
pr_info("register layer 2 discipline\n");
refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
return 0;
}
static void __exit qeth_l2_exit(void)
{
pr_info("unregister layer 2 discipline\n");
}
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <[email protected]>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/qeth_l2_main.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV special message driver
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Martin Schwidefsky ([email protected])
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "smsgiucv.h"
struct smsg_callback {
struct list_head list;
const char *prefix;
int len;
void (*callback)(const char *from, char *str);
};
MODULE_AUTHOR("(C) 2003 IBM Corporation by Martin Schwidefsky ([email protected])");
MODULE_DESCRIPTION("Linux for S/390 IUCV special message driver");
static struct iucv_path *smsg_path;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
static struct iucv_handler smsg_handler = {
.path_pending = smsg_path_pending,
.message_pending = smsg_message_pending,
};
static int smsg_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
{
	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
return -EINVAL;
/* Path pending from *MSG. */
return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
}
static void smsg_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct smsg_callback *cb;
unsigned char *buffer;
unsigned char sender[9];
int rc, i;
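	/* +1 for the terminating zero appended after the EBCDIC->ASCII conversion */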
buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
if (!buffer) {
iucv_message_reject(path, msg);
return;
}
rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
if (rc == 0) {
buffer[msg->length] = 0;
EBCASC(buffer, msg->length);
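		/* the first 8 bytes hold the blank-padded z/VM user ID of the sender */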
memcpy(sender, buffer, 8);
sender[8] = 0;
/* Remove trailing whitespace from the sender name. */
for (i = 7; i >= 0; i--) {
if (sender[i] != ' ' && sender[i] != '\t')
break;
sender[i] = 0;
}
spin_lock(&smsg_list_lock);
list_for_each_entry(cb, &smsg_list, list)
if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
cb->callback(sender, buffer + 8);
break;
}
spin_unlock(&smsg_list_lock);
}
kfree(buffer);
}
int smsg_register_callback(const char *prefix,
void (*callback)(const char *from, char *str))
{
struct smsg_callback *cb;
cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
if (!cb)
return -ENOMEM;
cb->prefix = prefix;
cb->len = strlen(prefix);
cb->callback = callback;
spin_lock_bh(&smsg_list_lock);
list_add_tail(&cb->list, &smsg_list);
spin_unlock_bh(&smsg_list_lock);
return 0;
}
void smsg_unregister_callback(const char *prefix,
void (*callback)(const char *from,
char *str))
{
struct smsg_callback *cb, *tmp;
spin_lock_bh(&smsg_list_lock);
cb = NULL;
list_for_each_entry(tmp, &smsg_list, list)
if (tmp->callback == callback &&
strcmp(tmp->prefix, prefix) == 0) {
cb = tmp;
list_del(&cb->list);
break;
}
spin_unlock_bh(&smsg_list_lock);
kfree(cb);
}
static struct device_driver smsg_driver = {
.owner = THIS_MODULE,
.name = SMSGIUCV_DRV_NAME,
.bus = &iucv_bus,
};
static void __exit smsg_exit(void)
{
cpcmd("SET SMSG OFF", NULL, 0, NULL);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
}
static int __init smsg_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
rc = -EPROTONOSUPPORT;
goto out;
}
rc = driver_register(&smsg_driver);
if (rc != 0)
goto out;
rc = iucv_register(&smsg_handler, 1);
if (rc)
goto out_driver;
smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
if (!smsg_path) {
rc = -ENOMEM;
goto out_register;
}
	rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
NULL, NULL, NULL);
if (rc)
goto out_free_path;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
out_register:
iucv_unregister(&smsg_handler, 1);
out_driver:
driver_unregister(&smsg_driver);
out:
return rc;
}
module_init(smsg_init);
module_exit(smsg_exit);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(smsg_register_callback);
EXPORT_SYMBOL(smsg_unregister_callback);
| linux-master | drivers/s390/net/smsgiucv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2009
* Author(s):
* Original CTC driver(s):
* Fritz Elfert ([email protected])
* Dieter Wellerdiek ([email protected])
* Martin Schwidefsky ([email protected])
* Denis Joseph Barrow ([email protected])
* Jochen Roehrig ([email protected])
* Cornelia Huck <[email protected]>
* MPC additions:
* Belinda Thompson ([email protected])
* Andy Richter ([email protected])
* Revived by:
* Peter Tiedemann ([email protected])
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>
#include <asm/idals.h>
#include "ctcm_fsms.h"
#include "ctcm_main.h"
/* Some common global variables */
/*
* The root device for ctcm group devices
*/
static struct device *ctcm_root_dev;
/*
* Linked list of all detected channels.
*/
struct channel *channels;
/*
* Unpack a just received skb and hand it over to
* upper layers.
*
* ch The channel where this skb has been received.
* pskb The received skb.
*/
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
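	/* the first two bytes of the block carry its total length; skip them
	 * and walk the LL headers that follow
	 */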
__u16 len = *((__u16 *) pskb->data);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
skb_pull(pskb, 2);
pskb->dev = dev;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
while (len > 0) {
struct sk_buff *skb;
int skblen;
struct ll_header *header = (struct ll_header *)pskb->data;
skb_pull(pskb, LL_HEADER_LENGTH);
if ((ch->protocol == CTCM_PROTO_S390) &&
(header->type != ETH_P_IP)) {
if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
ch->logflags |= LOG_FLAG_ILLEGALPKT;
/*
 * Check the packet type only if we stick strictly
 * to the S/390 (OS/390) protocol, which supports
 * IP only. Otherwise allow any packet type.
*/
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Illegal packet type 0x%04x"
" - dropping",
CTCM_FUNTAIL, dev->name, header->type);
}
priv->stats.rx_dropped++;
priv->stats.rx_frame_errors++;
return;
}
pskb->protocol = cpu_to_be16(header->type);
if ((header->length <= LL_HEADER_LENGTH) ||
(len <= LL_HEADER_LENGTH)) {
if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Illegal packet size %d(%d,%d)"
"- dropping",
CTCM_FUNTAIL, dev->name,
header->length, dev->mtu, len);
ch->logflags |= LOG_FLAG_ILLEGALSIZE;
}
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
return;
}
header->length -= LL_HEADER_LENGTH;
len -= LL_HEADER_LENGTH;
if ((header->length > skb_tailroom(pskb)) ||
(header->length > len)) {
if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Packet size %d (overrun)"
" - dropping", CTCM_FUNTAIL,
dev->name, header->length);
ch->logflags |= LOG_FLAG_OVERRUN;
}
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
return;
}
skb_put(pskb, header->length);
skb_reset_mac_header(pskb);
len -= header->length;
skb = dev_alloc_skb(pskb->len);
if (!skb) {
if (!(ch->logflags & LOG_FLAG_NOMEM)) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): MEMORY allocation error",
CTCM_FUNTAIL, dev->name);
ch->logflags |= LOG_FLAG_NOMEM;
}
priv->stats.rx_dropped++;
return;
}
skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
pskb->len);
skb_reset_mac_header(skb);
skb->dev = pskb->dev;
skb->protocol = pskb->protocol;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
skblen = skb->len;
/*
* reset logflags
*/
ch->logflags = 0;
priv->stats.rx_packets++;
priv->stats.rx_bytes += skblen;
netif_rx(skb);
if (len > 0) {
skb_pull(pskb, header->length);
if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
CTCM_DBF_DEV_NAME(TRACE, dev,
"Overrun in ctcm_unpack_skb");
ch->logflags |= LOG_FLAG_OVERRUN;
return;
}
skb_put(pskb, LL_HEADER_LENGTH);
}
}
}
/*
* Release a specific channel in the channel list.
*
* ch Pointer to channel struct to be released.
*/
static void channel_free(struct channel *ch)
{
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
ch->flags &= ~CHANNEL_FLAGS_INUSE;
fsm_newstate(ch->fsm, CTC_STATE_IDLE);
}
/*
* Remove a specific channel in the channel list.
*
* ch Pointer to channel struct to be released.
*/
static void channel_remove(struct channel *ch)
{
struct channel **c = &channels;
char chid[CTCM_ID_SIZE+1];
int ok = 0;
if (ch == NULL)
return;
else
strncpy(chid, ch->id, CTCM_ID_SIZE);
channel_free(ch);
while (*c) {
if (*c == ch) {
*c = ch->next;
fsm_deltimer(&ch->timer);
if (IS_MPC(ch))
fsm_deltimer(&ch->sweep_timer);
kfree_fsm(ch->fsm);
clear_normalized_cda(&ch->ccw[4]);
if (ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb_any(ch->trans_skb);
}
if (IS_MPC(ch)) {
tasklet_kill(&ch->ch_tasklet);
tasklet_kill(&ch->ch_disc_tasklet);
kfree(ch->discontact_th);
}
kfree(ch->ccw);
kfree(ch->irb);
kfree(ch);
ok = 1;
break;
}
c = &((*c)->next);
}
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
chid, ok ? "OK" : "failed");
}
/*
* Get a specific channel from the channel list.
*
* type Type of channel we are interested in.
* id Id of channel we are interested in.
* direction Direction we want to use this channel for.
*
* returns Pointer to a channel or NULL if no matching channel available.
*/
static struct channel *channel_get(enum ctcm_channel_types type,
char *id, int direction)
{
struct channel *ch = channels;
while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
ch = ch->next;
if (!ch) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%d, %s, %d) not found in channel list\n",
CTCM_FUNTAIL, type, id, direction);
} else {
if (ch->flags & CHANNEL_FLAGS_INUSE)
ch = NULL;
else {
ch->flags |= CHANNEL_FLAGS_INUSE;
ch->flags &= ~CHANNEL_FLAGS_RWMASK;
ch->flags |= (direction == CTCM_WRITE)
? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
}
}
return ch;
}
static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
"irb error %ld on device %s\n",
PTR_ERR(irb), dev_name(&cdev->dev));
switch (PTR_ERR(irb)) {
case -EIO:
dev_err(&cdev->dev,
"An I/O-error occurred on the CTCM device\n");
break;
case -ETIMEDOUT:
dev_err(&cdev->dev,
"An adapter hardware operation timed out\n");
break;
default:
dev_err(&cdev->dev,
"An error occurred on the adapter hardware\n");
}
return PTR_ERR(irb);
}
/*
* Check sense of a unit check.
*
* ch The channel, the sense code belongs to.
* sense The sense code to inspect.
*/
static void ccw_unit_check(struct channel *ch, __u8 sense)
{
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %02x",
CTCM_FUNTAIL, ch->id, sense);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->sense_rc != 0x01) {
pr_notice(
"%s: The communication peer has "
"disconnected\n", ch->id);
ch->sense_rc = 0x01;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
} else {
if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
pr_notice(
"%s: The remote operating system is "
"not available\n", ch->id);
ch->sense_rc = SNS0_INTERVENTION_REQ;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
}
} else if (sense & SNS0_EQUIPMENT_CHECK) {
if (sense & SNS0_BUS_OUT_CHECK) {
if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): remote HW error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_BUS_OUT_CHECK;
}
fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
} else {
if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): remote read parity error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_EQUIPMENT_CHECK;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
}
} else if (sense & SNS0_BUS_OUT_CHECK) {
if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): BUS OUT error %02x",
CTCM_FUNTAIL, ch->id, sense);
ch->sense_rc = SNS0_BUS_OUT_CHECK;
}
if (sense & 0x04) /* data-streaming timeout */
fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
else /* Data-transfer parity error */
fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
} else if (sense & SNS0_CMD_REJECT) {
if (ch->sense_rc != SNS0_CMD_REJECT) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Command rejected",
CTCM_FUNTAIL, ch->id);
ch->sense_rc = SNS0_CMD_REJECT;
}
} else if (sense == 0) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Unit check ZERO",
CTCM_FUNTAIL, ch->id);
fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
} else {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): Unit check code %02x unknown",
CTCM_FUNTAIL, ch->id, sense);
fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
}
}
int ctcm_ch_alloc_buffer(struct channel *ch)
{
clear_normalized_cda(&ch->ccw[1]);
ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
if (ch->trans_skb == NULL) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX");
return -ENOMEM;
}
ch->ccw[1].count = ch->max_bufsize;
if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX");
return -ENOMEM;
}
ch->ccw[1].count = 0;
ch->trans_skb_data = ch->trans_skb->data;
ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
return 0;
}
/*
* Interface API for upper network layers
*/
/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
* dev Pointer to interface struct.
*
 * returns 0 (this function never fails).
*/
int ctcm_open(struct net_device *dev)
{
struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
if (!IS_MPC(priv))
fsm_event(priv->fsm, DEV_EVENT_START, dev);
return 0;
}
/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
* dev Pointer to interface struct.
*
 * returns 0 (this function never fails).
*/
int ctcm_close(struct net_device *dev)
{
struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
if (!IS_MPC(priv))
fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
return 0;
}
/*
* Transmit a packet.
* This is a helper function for ctcm_tx().
*
* ch Channel to be used for sending.
* skb Pointer to struct sk_buff of packet to send.
* The linklevel header has already been set up
* by ctcm_tx().
*
 * returns 0 on success, a negative errno (e.g. -EBUSY, -ENOMEM) on failure.
*/
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
unsigned long saveflags;
struct ll_header header;
int rc = 0;
__u16 block_len;
int ccw_idx;
struct sk_buff *nskb;
unsigned long hi;
/* we need to acquire the lock for testing the state
* otherwise we can have an IRQ changing the state to
* TXIDLE after the test but before acquiring the lock.
*/
spin_lock_irqsave(&ch->collect_lock, saveflags);
if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
if (ch->collect_len + l > ch->max_bufsize - 2) {
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
return -EBUSY;
} else {
refcount_inc(&skb->users);
header.length = l;
header.type = be16_to_cpu(skb->protocol);
header.unused = 0;
memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
LL_HEADER_LENGTH);
skb_queue_tail(&ch->collect_queue, skb);
ch->collect_len += l;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
goto done;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/*
 * Protect the skb against being freed by upper
 * layers.
*/
refcount_inc(&skb->users);
ch->prof.txlen += skb->len;
header.length = skb->len + LL_HEADER_LENGTH;
header.type = be16_to_cpu(skb->protocol);
header.unused = 0;
memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
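	/* prepend the 2-byte block length; it counts the length field
	 * itself plus the LL header and payload
	 */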
block_len = skb->len + 2;
*((__u16 *)skb_push(skb, 2)) = block_len;
/*
* IDAL support in CTCM is broken, so we have to
* care about skb's above 2G ourselves.
*/
hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
if (hi) {
nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
} else {
skb_put_data(nskb, skb->data, skb->len);
refcount_inc(&nskb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
}
ch->ccw[4].count = block_len;
if (set_normalized_cda(&ch->ccw[4], skb->data)) {
/*
* idal allocation failed, try via copying to
* trans_skb. trans_skb usually has a pre-allocated
* idal.
*/
if (ctcm_checkalloc_buffer(ch)) {
/*
* Remove our header. It gets added
* again on retransmit.
*/
refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
}
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
skb_copy_from_linear_data(skb,
skb_put(ch->trans_skb, skb->len), skb->len);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
} else {
skb_queue_tail(&ch->io_queue, skb);
ccw_idx = 3;
}
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
sizeof(struct ccw1) * 3);
ch->retry = 0;
fsm_newstate(ch->fsm, CTC_STATE_TX);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (ccw_idx == 3)
ch->prof.doios_single++;
if (rc != 0) {
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "single skb TX");
if (ccw_idx == 3)
skb_dequeue_tail(&ch->io_queue);
/*
* Remove our header. It gets added
* again on retransmit.
*/
skb_pull(skb, LL_HEADER_LENGTH + 2);
} else if (ccw_idx == 0) {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
}
done:
ctcm_clear_busy(ch->netdev);
return rc;
}
static void ctcmpc_send_sweep_req(struct channel *rch)
{
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv;
struct mpc_group *grp;
struct th_sweep *header;
struct sk_buff *sweep_skb;
struct channel *ch;
/* int rc = 0; */
priv = dev->ml_priv;
grp = priv->mpcg;
ch = priv->channel[CTCM_WRITE];
/* sweep processing is not complete until response and request */
/* has completed for all read channels in group */
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
}
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
if (sweep_skb == NULL) {
/* rc = -ENOMEM; */
goto nomem;
}
header = skb_put_zero(sweep_skb, TH_SWEEP_LENGTH);
header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
header->sw.th_last_seq = ch->th_seq_num;
netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
return;
nomem:
grp->in_sweep = 0;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
/*
* MPC mode version of transmit_skb
*/
static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
struct pdu *p_header;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct th_header *header;
struct sk_buff *nskb;
int rc = 0;
int ccw_idx;
unsigned long hi;
unsigned long saveflags = 0; /* avoids compiler warning */
CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
__func__, dev->name, smp_processor_id(), ch,
ch->id, fsm_getstate_str(ch->fsm));
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
refcount_inc(&skb->users);
p_header = skb_push(skb, PDU_HEADER_LENGTH);
p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
p_header->pdu_proto = 0x01;
if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
} else {
p_header->pdu_flag = PDU_FIRST;
}
p_header->pdu_seq = 0;
CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
"pdu header and data for up to 32 bytes:\n",
__func__, dev->name, skb->len);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
skb_queue_tail(&ch->collect_queue, skb);
ch->collect_len += skb->len;
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
goto done;
}
/*
 * Protect the skb against being freed by upper
 * layers.
*/
refcount_inc(&skb->users);
/*
* IDAL support in CTCM is broken, so we have to
* care about skb's above 2G ourselves.
*/
hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
if (hi) {
nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
goto nomem_exit;
} else {
skb_put_data(nskb, skb->data, skb->len);
refcount_inc(&nskb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
}
p_header = skb_push(skb, PDU_HEADER_LENGTH);
p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
p_header->pdu_proto = 0x01;
p_header->pdu_seq = 0;
if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
} else {
p_header->pdu_flag = PDU_FIRST;
}
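	/* if older packets are waiting on the collect queue, append this one
	 * and transmit the oldest entry first to preserve ordering
	 */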
if (ch->collect_len > 0) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
skb_queue_tail(&ch->collect_queue, skb);
ch->collect_len += skb->len;
skb = skb_dequeue(&ch->collect_queue);
ch->collect_len -= skb->len;
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
}
p_header = (struct pdu *)skb->data;
p_header->pdu_flag |= PDU_LAST;
ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
/* put the TH on the packet */
header = skb_push(skb, TH_HEADER_LENGTH);
memset(header, 0, TH_HEADER_LENGTH);
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
__func__, dev->name, ch->th_seq_num);
CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
"up to 32 bytes sent to vtam:\n",
__func__, dev->name, skb->len);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
ch->ccw[4].count = skb->len;
if (set_normalized_cda(&ch->ccw[4], skb->data)) {
/*
* idal allocation failed, try via copying to trans_skb.
* trans_skb usually has a pre-allocated idal.
*/
if (ctcm_checkalloc_buffer(ch)) {
/*
* Remove our header.
* It gets added again on retransmit.
*/
goto nomem_exit;
}
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
skb_put_data(ch->trans_skb, skb->data, skb->len);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
"up to 32 bytes sent to vtam:\n",
__func__, dev->name, ch->trans_skb->len);
CTCM_D3_DUMP((char *)ch->trans_skb->data,
min_t(int, 32, ch->trans_skb->len));
} else {
skb_queue_tail(&ch->io_queue, skb);
ccw_idx = 3;
}
ch->retry = 0;
fsm_newstate(ch->fsm, CTC_STATE_TX);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
sizeof(struct ccw1) * 3);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
ch->prof.send_stamp = jiffies;
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (ccw_idx == 3)
ch->prof.doios_single++;
if (rc != 0) {
fsm_deltimer(&ch->timer);
ctcm_ccw_check_rc(ch, rc, "single skb TX");
if (ccw_idx == 3)
skb_dequeue_tail(&ch->io_queue);
} else if (ccw_idx == 0) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
}
	if (ch->th_seq_num > 0xf0000000)	/* Chosen at random. */
ctcmpc_send_sweep_req(ch);
goto done;
nomem_exit:
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
"%s(%s): MEMORY allocation ERROR\n",
CTCM_FUNTAIL, ch->id);
rc = -ENOMEM;
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
return rc;
}
/*
* Start transmission of a packet.
* Called from generic network device layer.
*/
/* first merge version - leaving both functions separated */
static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
struct ctcm_priv *priv = dev->ml_priv;
if (skb == NULL) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): Got sk_buff with head room < %ld bytes",
CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
dev_kfree_skb(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
/*
* If channels are not running, try to restart them
* and throw away packet.
*/
if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
fsm_event(priv->fsm, DEV_EVENT_START, dev);
dev_kfree_skb(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
return NETDEV_TX_OK;
}
if (ctcm_test_and_set_busy(dev))
return NETDEV_TX_BUSY;
netif_trans_update(dev);
if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
/* unmerged MPC variant of ctcm_tx */
static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *newskb = NULL;
/*
* Some sanity checks ...
*/
if (skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
goto done;
}
if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s(%s): Got sk_buff with head room < %ld bytes",
CTCM_FUNTAIL, dev->name,
TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
newskb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!newskb) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
"%s: %s: __dev_alloc_skb failed",
__func__, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
newskb->protocol = skb->protocol;
skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
skb_put_data(newskb, skb->data, skb->len);
dev_kfree_skb_any(skb);
skb = newskb;
}
/*
* If channels are not running,
* notify anybody about a link failure and throw
* away packet.
*/
if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
dev_kfree_skb_any(skb);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): inactive MPCGROUP - dropped",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
goto done;
}
if (ctcm_test_and_set_busy(dev)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device busy - dropped",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
netif_trans_update(dev);
if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
ctcm_clear_busy(dev);
done:
if (do_debug)
MPC_DBF_DEV_NAME(TRACE, dev, "exit");
return NETDEV_TX_OK; /* handle freeing of skb here */
}
/*
* Sets MTU of an interface.
*
* dev Pointer to interface struct.
* new_mtu The new MTU to use for this interface.
*
* returns 0 on success, -EINVAL if MTU is out of valid range.
* (valid range is 576 .. 65527). If VM is on the
* remote side, maximum MTU is 32760, however this is
* not checked here.
*/
static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
{
struct ctcm_priv *priv;
int max_bufsize;
priv = dev->ml_priv;
max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
if (IS_MPC(priv)) {
if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
return -EINVAL;
dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
} else {
if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
return -EINVAL;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
}
dev->mtu = new_mtu;
return 0;
}
/*
* Returns interface statistics of a device.
*
* dev Pointer to interface struct.
*
* returns Pointer to stats struct of this interface.
*/
static struct net_device_stats *ctcm_stats(struct net_device *dev)
{
return &((struct ctcm_priv *)dev->ml_priv)->stats;
}
static void ctcm_free_netdevice(struct net_device *dev)
{
struct ctcm_priv *priv;
struct mpc_group *grp;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s)", CTCM_FUNTAIL, dev->name);
priv = dev->ml_priv;
if (priv) {
grp = priv->mpcg;
if (grp) {
if (grp->fsm)
kfree_fsm(grp->fsm);
dev_kfree_skb(grp->xid_skb);
dev_kfree_skb(grp->rcvd_xid_skb);
tasklet_kill(&grp->mpc_tasklet2);
kfree(grp);
priv->mpcg = NULL;
}
if (priv->fsm) {
kfree_fsm(priv->fsm);
priv->fsm = NULL;
}
kfree(priv->xid);
priv->xid = NULL;
/*
 * Note: kfree(priv) is done in remove_device, the
 * counterpart of the allocating function probe_device.
*/
}
#ifdef MODULE
free_netdev(dev);
#endif
}
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
static const struct net_device_ops ctcm_netdev_ops = {
.ndo_open = ctcm_open,
.ndo_stop = ctcm_close,
.ndo_get_stats = ctcm_stats,
.ndo_change_mtu = ctcm_change_mtu,
.ndo_start_xmit = ctcm_tx,
};
static const struct net_device_ops ctcm_mpc_netdev_ops = {
.ndo_open = ctcm_open,
.ndo_stop = ctcm_close,
.ndo_get_stats = ctcm_stats,
.ndo_change_mtu = ctcm_change_mtu,
.ndo_start_xmit = ctcmpc_tx,
};
static void ctcm_dev_setup(struct net_device *dev)
{
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->min_mtu = 576;
dev->max_mtu = 65527;
}
/*
* Initialize everything of the net device except the name and the
* channel structs.
*/
static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
{
struct net_device *dev;
struct mpc_group *grp;
if (!priv)
return NULL;
if (IS_MPC(priv))
dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN,
ctcm_dev_setup);
else
dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN,
ctcm_dev_setup);
if (!dev) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s: MEMORY allocation ERROR",
CTCM_FUNTAIL);
return NULL;
}
dev->ml_priv = priv;
priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
dev_fsm, dev_fsm_len, GFP_KERNEL);
if (priv->fsm == NULL) {
CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
free_netdev(dev);
return NULL;
}
fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
fsm_settimer(priv->fsm, &priv->restart_timer);
if (IS_MPC(priv)) {
/* MPC Group Initializations */
grp = ctcmpc_init_mpc_group(priv);
if (grp == NULL) {
MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
free_netdev(dev);
return NULL;
}
tasklet_init(&grp->mpc_tasklet2,
mpc_group_ready, (unsigned long)dev);
dev->mtu = MPC_BUFSIZE_DEFAULT -
TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
dev->netdev_ops = &ctcm_mpc_netdev_ops;
dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
priv->buffer_size = MPC_BUFSIZE_DEFAULT;
} else {
dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
dev->netdev_ops = &ctcm_netdev_ops;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
}
CTCMY_DBF_DEV(SETUP, dev, "finished");
return dev;
}
/*
* Main IRQ handler.
*
* cdev The ccw_device the interrupt is for.
* intparm interruption parameter.
* irb interruption response block.
*/
static void ctcm_irq_handler(struct ccw_device *cdev,
unsigned long intparm, struct irb *irb)
{
struct channel *ch;
struct net_device *dev;
struct ctcm_priv *priv;
struct ccwgroup_device *cgdev;
int cstat;
int dstat;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
if (ctcm_check_irb_error(cdev, irb))
return;
cgdev = dev_get_drvdata(&cdev->dev);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
/* Check for unsolicited interrupts. */
if (cgdev == NULL) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
"%s(%s) unsolicited irq: c-%02x d-%02x\n",
CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
dev_warn(&cdev->dev,
"The adapter received a non-specific IRQ\n");
return;
}
priv = dev_get_drvdata(&cgdev->dev);
/* Try to extract channel from driver data. */
if (priv->channel[CTCM_READ]->cdev == cdev)
ch = priv->channel[CTCM_READ];
else if (priv->channel[CTCM_WRITE]->cdev == cdev)
ch = priv->channel[CTCM_WRITE];
else {
dev_err(&cdev->dev,
"%s: Internal error: Can't determine channel for "
"interrupt device %s\n",
__func__, dev_name(&cdev->dev));
/* Explain: inconsistent internal structures */
return;
}
dev = ch->netdev;
if (dev == NULL) {
dev_err(&cdev->dev,
"%s Internal error: net_device is NULL, ch = 0x%p\n",
__func__, ch);
/* Explain: inconsistent internal structures */
return;
}
/* Copy interruption response block. */
memcpy(ch->irb, irb, sizeof(struct irb));
/* Issue error message and return on subchannel error code */
if (irb->scsw.cmd.cstat) {
fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): sub-ch check %s: cs=%02x ds=%02x",
CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
dev_warn(&cdev->dev,
"A check occurred on the subchannel\n");
return;
}
/* Check the reason-code of a unit check */
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if ((irb->ecw[0] & ch->sense_rc) == 0)
/* print it only once */
CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): sense=%02x, ds=%02x",
CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
ccw_unit_check(ch, irb->ecw[0]);
return;
}
if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
else
fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
return;
}
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
return;
}
if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
(irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
(irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
else
fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
}
static const struct device_type ctcm_devtype = {
.name = "ctcm",
.groups = ctcm_attr_groups,
};
/*
* Add ctcm specific attributes.
* Add ctcm private data.
*
* cgdev pointer to ccwgroup_device just added
*
* returns 0 on success, !0 on failure.
*/
static int ctcm_probe_device(struct ccwgroup_device *cgdev)
{
struct ctcm_priv *priv;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s %p",
__func__, cgdev);
if (!get_device(&cgdev->dev))
return -ENODEV;
priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
if (!priv) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: memory allocation failure",
CTCM_FUNTAIL);
put_device(&cgdev->dev);
return -ENOMEM;
}
priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
cgdev->cdev[0]->handler = ctcm_irq_handler;
cgdev->cdev[1]->handler = ctcm_irq_handler;
dev_set_drvdata(&cgdev->dev, priv);
cgdev->dev.type = &ctcm_devtype;
return 0;
}
/*
* Add a new channel to the list of channels.
* Keeps the channel list sorted.
*
* cdev The ccw_device to be added.
* type The type class of the new channel.
* priv Points to the private data of the ccwgroup_device.
*
* returns 0 on success, !0 on error.
*/
static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
struct ctcm_priv *priv)
{
struct channel **c = &channels;
struct channel *ch;
int ccw_num;
int rc = 0;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s), type %d, proto %d",
__func__, dev_name(&cdev->dev), type, priv->protocol);
ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (ch == NULL)
return -ENOMEM;
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
ch->discontact_th = kzalloc(TH_HEADER_LENGTH, GFP_KERNEL);
if (ch->discontact_th == NULL)
goto nomem_return;
ch->discontact_th->th_blk_flag = TH_DISCONTACT;
tasklet_init(&ch->ch_disc_tasklet,
mpc_action_send_discontact, (unsigned long)ch);
tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
ccw_num = 17;
} else
ccw_num = 8;
ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (ch->ccw == NULL)
goto nomem_return;
ch->cdev = cdev;
scnprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
ch->type = type;
/*
* "static" ccws are used in the following way:
*
* ccw[0..2] (Channel program for generic I/O):
* 0: prepare
* 1: read or write (depending on direction) with fixed
* buffer (idal allocated once when buffer is allocated)
* 2: nop
* ccw[3..5] (Channel program for direct write of packets)
* 3: prepare
* 4: write (idal allocated on every write).
* 5: nop
* ccw[6..7] (Channel program for initial channel setup):
* 6: set extended mode
* 7: nop
*
* ch->ccw[0..5] are initialized in ch_action_start because
* the channel's direction is yet unknown here.
*
* ccws used for xid2 negotiations
* ch-ccw[8-14] need to be used for the XID exchange either
* X side XID2 Processing
* 8: write control
* 9: write th
* 10: write XID
* 11: read th from secondary
* 12: read XID from secondary
* 13: read 4 byte ID
* 14: nop
* Y side XID Processing
* 8: sense
* 9: read th
* 10: read XID
* 11: write th
* 12: write XID
* 13: write 4 byte ID
* 14: nop
*
* ccws used for double noop due to VM timing issues
* which result in unrecoverable Busy on channel
* 15: nop
* 16: nop
*/
ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
ch->ccw[6].flags = CCW_FLAG_SLI;
ch->ccw[7].cmd_code = CCW_CMD_NOOP;
ch->ccw[7].flags = CCW_FLAG_SLI;
if (IS_MPC(priv)) {
ch->ccw[15].cmd_code = CCW_CMD_WRITE;
ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[15].count = TH_HEADER_LENGTH;
ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
ch->ccw[16].cmd_code = CCW_CMD_NOOP;
ch->ccw[16].flags = CCW_FLAG_SLI;
ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
ctc_ch_event_names, CTC_MPC_NR_STATES,
CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
mpc_ch_fsm_len, GFP_KERNEL);
} else {
ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
ctc_ch_event_names, CTC_NR_STATES,
CTC_NR_EVENTS, ch_fsm,
ch_fsm_len, GFP_KERNEL);
}
if (ch->fsm == NULL)
goto nomem_return;
fsm_newstate(ch->fsm, CTC_STATE_IDLE);
ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
if (ch->irb == NULL)
goto nomem_return;
while (*c && ctcm_less_than((*c)->id, ch->id))
c = &(*c)->next;
if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s (%s) already in list, using old entry",
__func__, (*c)->id);
goto free_return;
}
spin_lock_init(&ch->collect_lock);
fsm_settimer(ch->fsm, &ch->timer);
skb_queue_head_init(&ch->io_queue);
skb_queue_head_init(&ch->collect_queue);
if (IS_MPC(priv)) {
fsm_settimer(ch->fsm, &ch->sweep_timer);
skb_queue_head_init(&ch->sweep_queue);
}
ch->next = *c;
*c = ch;
return 0;
nomem_return:
rc = -ENOMEM;
free_return: /* note that all channel pointers are 0 or valid */
kfree(ch->ccw);
kfree(ch->discontact_th);
kfree_fsm(ch->fsm);
kfree(ch->irb);
kfree(ch);
return rc;
}
/*
* Return type of a detected device.
*/
static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
{
enum ctcm_channel_types type;
type = (enum ctcm_channel_types)id->driver_info;
if (type == ctcm_channel_type_ficon)
type = ctcm_channel_type_escon;
return type;
}
/*
* Setup an interface.
*
* cgdev Device to be setup.
*
* returns 0 on success, !0 on failure.
*/
static int ctcm_new_device(struct ccwgroup_device *cgdev)
{
char read_id[CTCM_ID_SIZE];
char write_id[CTCM_ID_SIZE];
int direction;
enum ctcm_channel_types type;
struct ctcm_priv *priv;
struct net_device *dev;
struct ccw_device *cdev0;
struct ccw_device *cdev1;
struct channel *readc;
struct channel *writec;
int ret;
int result;
priv = dev_get_drvdata(&cgdev->dev);
if (!priv) {
result = -ENODEV;
goto out_err_result;
}
cdev0 = cgdev->cdev[0];
cdev1 = cgdev->cdev[1];
type = get_channel_type(&cdev0->id);
scnprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
scnprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
ret = add_channel(cdev0, type, priv);
if (ret) {
result = ret;
goto out_err_result;
}
ret = add_channel(cdev1, type, priv);
if (ret) {
result = ret;
goto out_remove_channel1;
}
ret = ccw_device_set_online(cdev0);
if (ret != 0) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) set_online rc=%d",
CTCM_FUNTAIL, read_id, ret);
result = -EIO;
goto out_remove_channel2;
}
ret = ccw_device_set_online(cdev1);
if (ret != 0) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s(%s) set_online rc=%d",
CTCM_FUNTAIL, write_id, ret);
result = -EIO;
goto out_ccw1;
}
dev = ctcm_init_netdevice(priv);
if (dev == NULL) {
result = -ENODEV;
goto out_ccw2;
}
for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
priv->channel[direction] =
channel_get(type, direction == CTCM_READ ?
read_id : write_id, direction);
if (priv->channel[direction] == NULL) {
if (direction == CTCM_WRITE)
channel_free(priv->channel[CTCM_READ]);
result = -ENODEV;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
priv->channel[direction]->protocol = priv->protocol;
priv->channel[direction]->max_bufsize = priv->buffer_size;
}
/* sysfs: make the net_device appear as a child of the ccwgroup device */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (register_netdev(dev)) {
result = -ENODEV;
goto out_dev;
}
strscpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
priv->channel[CTCM_READ]->id,
priv->channel[CTCM_WRITE]->id, priv->protocol);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
priv->channel[CTCM_READ]->id,
priv->channel[CTCM_WRITE]->id, priv->protocol);
return 0;
out_dev:
ctcm_free_netdevice(dev);
out_ccw2:
ccw_device_set_offline(cgdev->cdev[1]);
out_ccw1:
ccw_device_set_offline(cgdev->cdev[0]);
out_remove_channel2:
readc = channel_get(type, read_id, CTCM_READ);
channel_remove(readc);
out_remove_channel1:
writec = channel_get(type, write_id, CTCM_WRITE);
channel_remove(writec);
out_err_result:
return result;
}
/*
* Shutdown an interface.
*
* cgdev Device to be shut down.
*
* returns 0 on success, !0 on failure.
*/
static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
{
struct ctcm_priv *priv;
struct net_device *dev;
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
if (priv->channel[CTCM_READ]) {
dev = priv->channel[CTCM_READ]->netdev;
CTCM_DBF_DEV(SETUP, dev, "");
/* Close the device */
ctcm_close(dev);
dev->flags &= ~IFF_RUNNING;
channel_free(priv->channel[CTCM_READ]);
} else
dev = NULL;
if (priv->channel[CTCM_WRITE])
channel_free(priv->channel[CTCM_WRITE]);
if (dev) {
unregister_netdev(dev);
ctcm_free_netdevice(dev);
}
if (priv->fsm)
kfree_fsm(priv->fsm);
ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]);
channel_remove(priv->channel[CTCM_READ]);
channel_remove(priv->channel[CTCM_WRITE]);
priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
return 0;
}
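/*
* Called on removal of the ccwgroup device: shut the interface down
* if it is still online, then free the private data.
*/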
static void ctcm_remove_device(struct ccwgroup_device *cgdev)
{
struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"removing device %p, proto : %d",
cgdev, priv->protocol);
if (cgdev->state == CCWGROUP_ONLINE)
ctcm_shutdown_device(cgdev);
dev_set_drvdata(&cgdev->dev, NULL);
kfree(priv);
put_device(&cgdev->dev);
}
static struct ccw_device_id ctcm_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
{},
};
MODULE_DEVICE_TABLE(ccw, ctcm_ids);
static struct ccw_driver ctcm_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ctcm",
},
.ids = ctcm_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
.int_class = IRQIO_CTC,
};
static struct ccwgroup_driver ctcm_group_driver = {
.driver = {
.owner = THIS_MODULE,
.name = CTC_DRIVER_NAME,
},
.ccw_driver = &ctcm_ccw_driver,
.setup = ctcm_probe_device,
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
.set_offline = ctcm_shutdown_device,
};
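/*
* sysfs 'group' attribute: create a new CTCM ccwgroup device from the
* two ccw bus IDs written by the user.
*/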
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
int err;
err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
return err ? err : count;
}
static DRIVER_ATTR_WO(group);
static struct attribute *ctcm_drv_attrs[] = {
&driver_attr_group.attr,
NULL,
};
static struct attribute_group ctcm_drv_attr_group = {
.attrs = ctcm_drv_attrs,
};
static const struct attribute_group *ctcm_drv_attr_groups[] = {
&ctcm_drv_attr_group,
NULL,
};
/*
* Module related routines
*/
/*
* Prepare to be unloaded. Free IRQs and release all resources.
* This is called just before this module is unloaded. It is
* not called if the usage count is !0, so we don't need to check
* for that.
*/
static void __exit ctcm_exit(void)
{
ccwgroup_driver_unregister(&ctcm_group_driver);
ccw_driver_unregister(&ctcm_ccw_driver);
root_device_unregister(ctcm_root_dev);
ctcm_unregister_dbf_views();
pr_info("CTCM driver unloaded\n");
}
/*
* Print Banner.
*/
static void print_banner(void)
{
pr_info("CTCM driver initialized\n");
}
/*
* Initialize module.
* This is called just after the module is loaded.
*
* returns 0 on success, !0 on error.
*/
static int __init ctcm_init(void)
{
int ret;
channels = NULL;
ret = ctcm_register_dbf_views();
if (ret)
goto out_err;
ctcm_root_dev = root_device_register("ctcm");
ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
if (ret)
goto register_err;
ret = ccw_driver_register(&ctcm_ccw_driver);
if (ret)
goto ccw_err;
ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
ret = ccwgroup_driver_register(&ctcm_group_driver);
if (ret)
goto ccwgroup_err;
print_banner();
return 0;
ccwgroup_err:
ccw_driver_unregister(&ctcm_ccw_driver);
ccw_err:
root_device_unregister(ctcm_root_dev);
register_err:
ctcm_unregister_dbf_views();
out_err:
pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
__func__, ret);
return ret;
}
module_init(ctcm_init);
module_exit(ctcm_exit);
MODULE_AUTHOR("Peter Tiedemann <[email protected]>");
MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/ctcm_main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A generic FSM based on the fsm used in isdn4linux
*/
#include "fsm.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert ([email protected])");
MODULE_DESCRIPTION("Finite state machine helper functions");
MODULE_LICENSE("GPL");
fsm_instance *
init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
{
int i;
fsm_instance *this;
fsm_function_t *m;
fsm *f;
this = kzalloc(sizeof(fsm_instance), order);
if (this == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
return NULL;
}
strscpy(this->name, name, sizeof(this->name));
init_waitqueue_head(&this->wait_q);
f = kzalloc(sizeof(fsm), order);
if (f == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
kfree_fsm(this);
return NULL;
}
f->nr_events = nr_events;
f->nr_states = nr_states;
f->event_names = event_names;
f->state_names = state_names;
this->f = f;
m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
if (m == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
kfree_fsm(this);
return NULL;
}
f->jumpmatrix = m;
for (i = 0; i < tmpl_len; i++) {
if ((tmpl[i].cond_state >= nr_states) ||
(tmpl[i].cond_event >= nr_events)) {
printk(KERN_ERR
"fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
(long)tmpl[i].cond_event, (long)f->nr_events);
kfree_fsm(this);
return NULL;
} else
m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
tmpl[i].function;
}
return this;
}
void
kfree_fsm(fsm_instance *this)
{
if (this) {
if (this->f) {
kfree(this->f->jumpmatrix);
kfree(this->f);
}
kfree(this);
} else
printk(KERN_WARNING
"fsm: kfree_fsm called with NULL argument\n");
}
#if FSM_DEBUG_HISTORY
void
fsm_print_history(fsm_instance *fi)
{
int idx = 0;
int i;
if (fi->history_size >= FSM_HISTORY_SIZE)
idx = fi->history_index;
printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
for (i = 0; i < fi->history_size; i++) {
int e = fi->history[idx].event;
int s = fi->history[idx++].state;
idx %= FSM_HISTORY_SIZE;
if (e == -1)
printk(KERN_DEBUG " S=%s\n",
fi->f->state_names[s]);
else
printk(KERN_DEBUG " S=%s E=%s\n",
fi->f->state_names[s],
fi->f->event_names[e]);
}
fi->history_size = fi->history_index = 0;
}
void
fsm_record_history(fsm_instance *fi, int state, int event)
{
fi->history[fi->history_index].state = state;
fi->history[fi->history_index++].event = event;
fi->history_index %= FSM_HISTORY_SIZE;
if (fi->history_size < FSM_HISTORY_SIZE)
fi->history_size++;
}
#endif
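/*
* Return the symbolic name of the current state, or "Invalid" if the
* state index is out of range.
*/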
const char *
fsm_getstate_str(fsm_instance *fi)
{
int st = atomic_read(&fi->state);
if (st >= fi->f->nr_states)
return "Invalid";
return fi->f->state_names[st];
}
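/*
* Timer callback: deliver the stored expire event and argument to the
* owning FSM instance.
*/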
static void
fsm_expire_timer(struct timer_list *t)
{
fsm_timer *this = from_timer(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
#endif
fsm_event(this->fi, this->expire_event, this->event_arg);
}
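/*
* Initialize a timer and bind it to an FSM instance. The timer is not
* armed here; use fsm_addtimer() for that.
*/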
void
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
timer_setup(&this->tl, fsm_expire_timer, 0);
}
void
fsm_deltimer(fsm_timer *this)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
this);
#endif
del_timer(&this->tl);
}
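/*
* Arm a timer to deliver 'event' with 'arg' to the FSM after 'millisec'
* milliseconds.
*/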
int
fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
this->fi->name, this, millisec);
#endif
timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&this->tl);
return 0;
}
/* FIXME: this function is never used, why? */
void
fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
this->fi->name, this, millisec);
#endif
del_timer(&this->tl);
timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&this->tl);
}
EXPORT_SYMBOL(init_fsm);
EXPORT_SYMBOL(kfree_fsm);
EXPORT_SYMBOL(fsm_settimer);
EXPORT_SYMBOL(fsm_deltimer);
EXPORT_SYMBOL(fsm_addtimer);
EXPORT_SYMBOL(fsm_modtimer);
EXPORT_SYMBOL(fsm_getstate_str);
#if FSM_DEBUG_HISTORY
EXPORT_SYMBOL(fsm_print_history);
EXPORT_SYMBOL(fsm_record_history);
#endif
| linux-master | drivers/s390/net/fsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>
#include "qeth_core.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
&debug_sprintf_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;
static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
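/*
* Return a descriptive card name (long form), depending on whether the
* card is a z/VM virtual NIC or a real adapter.
*/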
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
case QETH_CARD_TYPE_IQD:
return " Virtual NIC Hiper";
case QETH_CARD_TYPE_OSM:
return " Virtual NIC QDIO - OSM";
case QETH_CARD_TYPE_OSX:
return " Virtual NIC QDIO - OSX";
default:
return " unknown";
}
} else {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSM:
return " OSM QDIO";
case QETH_CARD_TYPE_OSX:
return " OSX QDIO";
default:
return " unknown";
}
}
return " n/a";
}
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
case QETH_CARD_TYPE_IQD:
return "Virt.NIC Hiper";
case QETH_CARD_TYPE_OSM:
return "Virt.NIC OSM";
case QETH_CARD_TYPE_OSX:
return "Virt.NIC OSX";
default:
return "unknown";
}
} else {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
return "OSD_100";
case QETH_LINK_TYPE_HSTR:
return "HSTR";
case QETH_LINK_TYPE_GBIT_ETH:
return "OSD_1000";
case QETH_LINK_TYPE_10GBIT_ETH:
return "OSD_10GIG";
case QETH_LINK_TYPE_25GBIT_ETH:
return "OSD_25GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
return "OSD_TR_LANE";
case QETH_LINK_TYPE_LANE_ETH1000:
return "OSD_GbE_LANE";
case QETH_LINK_TYPE_LANE:
return "OSD_ATM_LANE";
default:
return "OSD_Express";
}
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
case QETH_CARD_TYPE_OSM:
return "OSM_1000";
case QETH_CARD_TYPE_OSX:
return "OSX_10GIG";
default:
return "unknown";
}
}
return "n/a";
}
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_allowed_mask = threads;
if (clear_start_mask)
card->thread_start_mask &= threads;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
rc = (card->thread_running_mask & threads);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
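/*
* Detach all entries from the working buffer pool list and clear the
* pool_entry references of the inbound queue buffers.
*/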
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
struct qeth_qdio_q *queue = card->qdio.in_q;
unsigned int i;
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
if (entry->elements[i])
__free_page(entry->elements[i]);
}
kfree(entry);
}
static void qeth_free_buffer_pool(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
init_list) {
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
}
}
static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
struct qeth_buffer_pool_entry *entry;
unsigned int i;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return NULL;
for (i = 0; i < pages; i++) {
entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
if (!entry->elements[i]) {
qeth_free_pool_entry(entry);
return NULL;
}
}
return entry;
}
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
unsigned int i;
QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
struct qeth_buffer_pool_entry *entry;
entry = qeth_alloc_pool_entry(buf_elements);
if (!entry) {
qeth_free_buffer_pool(card);
return -ENOMEM;
}
list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
}
return 0;
}
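/*
* Grow or shrink the inbound buffer pool to 'count' entries. While the
* pool is not allocated yet, only the new count is recorded.
*/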
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
struct qeth_buffer_pool_entry *entry, *tmp;
int delta = count - pool->buf_count;
LIST_HEAD(entries);
QETH_CARD_TEXT(card, 2, "realcbp");
/* Defer until pool is allocated: */
if (list_empty(&pool->entry_list))
goto out;
/* Remove entries from the pool: */
while (delta < 0) {
entry = list_first_entry(&pool->entry_list,
struct qeth_buffer_pool_entry,
init_list);
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
delta++;
}
/* Allocate additional entries: */
while (delta > 0) {
entry = qeth_alloc_pool_entry(buf_elements);
if (!entry) {
list_for_each_entry_safe(entry, tmp, &entries,
init_list) {
list_del(&entry->init_list);
qeth_free_pool_entry(entry);
}
return -ENOMEM;
}
list_add(&entry->init_list, &entries);
delta--;
}
list_splice(&entries, &pool->entry_list);
out:
card->qdio.in_buf_pool.buf_count = count;
pool->buf_count = count;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
if (!q)
return;
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
int i;
if (!q)
return NULL;
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
kfree(q);
return NULL;
}
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
q->bufs[i].buffer = q->qdio_bufs[i];
QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
return q;
}
static int qeth_cq_init(struct qeth_card *card)
{
int rc;
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqinit");
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
goto out;
}
}
rc = 0;
out:
return rc;
}
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
dev_err(&card->gdev->dev, "Failed to create completion queue\n");
return -ENOMEM;
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
}
return 0;
}
static void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
}
}
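/*
* Translate the SBAL flags 15 error code of a completed buffer into the
* corresponding AF_IUCV TX notification.
*/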
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
enum iucv_tx_notify n;
switch (sbalf15) {
case 0:
n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
break;
case 4:
case 16:
case 17:
case 18:
n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
TX_NOTIFY_UNREACHABLE;
break;
default:
n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
TX_NOTIFY_GENERALERROR;
break;
}
return n;
}
static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
if (refcount_dec_and_test(&iob->ref_count)) {
kfree(iob->data);
kfree(iob);
}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
ccw->cda = (__u32)virt_to_phys(data);
}
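/*
* (Re-)start the READ channel program that receives control data.
* Called with the ccw device lock held; on failure the card is
* scheduled for recovery.
*/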
static int __qeth_issue_next_read(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob = card->read_cmd;
struct qeth_channel *channel = iob->channel;
struct ccw1 *ccw = __ccw_from_cmd(iob);
int rc;
QETH_CARD_TEXT(card, 5, "issnxrd");
if (channel->state != CH_STATE_UP)
return -EIO;
memset(iob->data, 0, iob->length);
qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
iob->callback = qeth_issue_next_read_cb;
/* keep the cmd alive after completion: */
qeth_get_cmd(iob);
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
if (!rc) {
channel->active_cmd = iob;
} else {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
qeth_unlock_channel(card, channel);
qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
}
return rc;
}
static int qeth_issue_next_read(struct qeth_card *card)
{
int ret;
spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
ret = __qeth_issue_next_read(card);
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
return ret;
}
static void qeth_enqueue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
spin_lock_irq(&card->lock);
list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
spin_unlock_irq(&card->lock);
}
static void qeth_dequeue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
spin_lock_irq(&card->lock);
list_del(&iob->list_entry);
spin_unlock_irq(&card->lock);
}
static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
iob->rc = reason;
complete(&iob->done);
}
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
struct qeth_local_addr *addr;
struct hlist_node *tmp;
unsigned int i;
spin_lock_irq(&card->local_addrs4_lock);
hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
hash_del_rcu(&addr->hnode);
kfree_rcu(addr, rcu);
}
spin_unlock_irq(&card->local_addrs4_lock);
}
static void qeth_flush_local_addrs6(struct qeth_card *card)
{
struct qeth_local_addr *addr;
struct hlist_node *tmp;
unsigned int i;
spin_lock_irq(&card->local_addrs6_lock);
hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
hash_del_rcu(&addr->hnode);
kfree_rcu(addr, rcu);
}
spin_unlock_irq(&card->local_addrs6_lock);
}
static void qeth_flush_local_addrs(struct qeth_card *card)
{
qeth_flush_local_addrs4(card);
qeth_flush_local_addrs6(card);
}
static void qeth_add_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
{
unsigned int i;
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
cmd->addr_length);
return;
}
spin_lock(&card->local_addrs4_lock);
for (i = 0; i < cmd->count; i++) {
unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
struct qeth_local_addr *addr;
bool duplicate = false;
hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
duplicate = true;
break;
}
}
if (duplicate)
continue;
addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
if (!addr) {
dev_err(&card->gdev->dev,
"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
&cmd->addrs[i].addr);
continue;
}
ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
hash_add_rcu(card->local_addrs4, &addr->hnode, key);
}
spin_unlock(&card->local_addrs4_lock);
}
static void qeth_add_local_addrs6(struct qeth_card *card,
struct qeth_ipacmd_local_addrs6 *cmd)
{
unsigned int i;
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
cmd->addr_length);
return;
}
spin_lock(&card->local_addrs6_lock);
for (i = 0; i < cmd->count; i++) {
u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
struct qeth_local_addr *addr;
bool duplicate = false;
hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
duplicate = true;
break;
}
}
if (duplicate)
continue;
addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
if (!addr) {
dev_err(&card->gdev->dev,
"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
&cmd->addrs[i].addr);
continue;
}
addr->addr = cmd->addrs[i].addr;
hash_add_rcu(card->local_addrs6, &addr->hnode, key);
}
spin_unlock(&card->local_addrs6_lock);
}
static void qeth_del_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
{
unsigned int i;
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
cmd->addr_length);
return;
}
spin_lock(&card->local_addrs4_lock);
for (i = 0; i < cmd->count; i++) {
struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
unsigned int key = ipv4_addr_hash(addr->addr);
struct qeth_local_addr *tmp;
hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
if (tmp->addr.s6_addr32[3] == addr->addr) {
hash_del_rcu(&tmp->hnode);
kfree_rcu(tmp, rcu);
break;
}
}
}
spin_unlock(&card->local_addrs4_lock);
}
static void qeth_del_local_addrs6(struct qeth_card *card,
struct qeth_ipacmd_local_addrs6 *cmd)
{
unsigned int i;
if (cmd->addr_length !=
sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
dev_err_ratelimited(&card->gdev->dev,
"Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
cmd->addr_length);
return;
}
spin_lock(&card->local_addrs6_lock);
for (i = 0; i < cmd->count; i++) {
struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
u32 key = ipv6_addr_hash(&addr->addr);
struct qeth_local_addr *tmp;
hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
hash_del_rcu(&tmp->hnode);
kfree_rcu(tmp, rcu);
break;
}
}
}
spin_unlock(&card->local_addrs6_lock);
}
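/*
* Check whether the IPv4 next hop of an outgoing skb is one of the
* card's registered local addresses.
*/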
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
struct sk_buff *skb)
{
struct qeth_local_addr *tmp;
bool is_local = false;
unsigned int key;
__be32 next_hop;
if (hash_empty(card->local_addrs4))
return false;
rcu_read_lock();
next_hop = qeth_next_hop_v4_rcu(skb,
qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
key = ipv4_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
if (tmp->addr.s6_addr32[3] == next_hop) {
is_local = true;
break;
}
}
rcu_read_unlock();
return is_local;
}
static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
struct sk_buff *skb)
{
struct qeth_local_addr *tmp;
struct in6_addr *next_hop;
bool is_local = false;
u32 key;
if (hash_empty(card->local_addrs6))
return false;
rcu_read_lock();
next_hop = qeth_next_hop_v6_rcu(skb,
qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
key = ipv6_addr_hash(next_hop);
hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
if (ipv6_addr_equal(&tmp->addr, next_hop)) {
is_local = true;
break;
}
}
rcu_read_unlock();
return is_local;
}
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
struct qeth_card *card = m->private;
struct qeth_local_addr *tmp;
unsigned int i;
rcu_read_lock();
hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
seq_printf(m, "%pI6c\n", &tmp->addr);
rcu_read_unlock();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
ipa_name, com, CARD_DEVID(card), rc,
qeth_get_ipa_msg(rc));
else
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
ipa_name, com, CARD_DEVID(card));
}
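/*
* Fill in conservative default link information (speed, duplex, port
* type) when no precise data is available from the adapter.
*/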
static void qeth_default_link_info(struct qeth_card *card)
{
struct qeth_link_info *link_info = &card->info.link_info;
QETH_CARD_TEXT(card, 2, "dftlinfo");
link_info->duplex = DUPLEX_FULL;
if (IS_IQD(card) || IS_VM_NIC(card)) {
link_info->speed = SPEED_10000;
link_info->port = PORT_FIBRE;
link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
} else {
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
link_info->speed = SPEED_100;
link_info->port = PORT_TP;
break;
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH1000:
link_info->speed = SPEED_1000;
link_info->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_10GBIT_ETH:
link_info->speed = SPEED_10000;
link_info->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_25GBIT_ETH:
link_info->speed = SPEED_25000;
link_info->port = PORT_FIBRE;
break;
default:
dev_info(&card->gdev->dev,
"Unknown link type %x\n",
card->info.link_type);
link_info->speed = SPEED_UNKNOWN;
link_info->port = PORT_OTHER;
}
link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
}
}
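/*
* Distinguish IPA replies from unsolicited events: replies are returned
* to the caller, while most events (STOPLAN, STARTLAN, local address
* notifications) are consumed here and NULL is returned.
*/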
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
return cmd;
}
/* handle unsolicited event: */
switch (cmd->hdr.command) {
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
netdev_name(card->dev));
/* Set offline, then probably fail to set online: */
qeth_schedule_recovery(card);
} else {
/* stay online for subsequent STARTLAN */
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
qeth_default_link_info(card);
}
return NULL;
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
"The link for %s on CHPID 0x%X has been restored\n",
netdev_name(card->dev), card->info.chpid);
if (card->info.hwtrap)
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
return NULL;
case IPA_CMD_SETBRIDGEPORT_IQD:
case IPA_CMD_SETBRIDGEPORT_OSA:
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
if (card->discipline->control_event_handler(card, cmd))
return cmd;
return NULL;
case IPA_CMD_REGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
QETH_CARD_TEXT(card, 3, "irla");
return NULL;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
QETH_CARD_TEXT(card, 3, "urla");
return NULL;
default:
QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
return cmd;
}
}
static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
unsigned long flags;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
qeth_notify_cmd(iob, -ECANCELED);
spin_unlock_irqrestore(&card->lock, flags);
}
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
{
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
dev_err(&card->gdev->dev,
"The device does not support the configured transport mode\n");
return -EPROTONOSUPPORT;
}
return -EIO;
}
return 0;
}
static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
{
qeth_put_cmd(iob);
}
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
unsigned int length,
unsigned int ccws, long timeout)
{
struct qeth_cmd_buffer *iob;
if (length > QETH_BUFSIZE)
return NULL;
iob = kzalloc(sizeof(*iob), GFP_KERNEL);
if (!iob)
return NULL;
iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
if (!iob->data) {
kfree(iob);
return NULL;
}
init_completion(&iob->done);
spin_lock_init(&iob->lock);
refcount_set(&iob->ref_count, 1);
iob->channel = channel;
iob->timeout = timeout;
iob->length = length;
return iob;
}
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
{
struct qeth_cmd_buffer *request = NULL;
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
struct qeth_cmd_buffer *tmp;
unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
switch (rc) {
case 0:
break;
case -EIO:
qeth_schedule_recovery(card);
fallthrough;
default:
qeth_clear_ipacmd_list(card);
goto err_idx;
}
cmd = __ipa_reply(iob);
if (cmd) {
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
}
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
if (tmp->match && tmp->match(tmp, iob)) {
request = tmp;
/* take the object outside the lock */
qeth_get_cmd(request);
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (!request)
goto out;
reply = &request->reply;
if (!reply->callback) {
rc = 0;
goto no_callback;
}
spin_lock_irqsave(&request->lock, flags);
if (request->rc)
/* Bail out when the requestor has already left: */
rc = request->rc;
else
rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
(unsigned long)iob);
spin_unlock_irqrestore(&request->lock, flags);
no_callback:
if (rc <= 0)
qeth_notify_cmd(request, rc);
qeth_put_cmd(request);
out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
__qeth_issue_next_read(card);
err_idx:
qeth_put_cmd(iob);
}
static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (!(card->thread_allowed_mask & thread))
rc = -EPERM;
else if (card->thread_start_mask & thread)
rc = -EBUSY;
else
card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
return rc;
}
static void qeth_clear_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_start_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
}
static void qeth_clear_thread_running_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);
}
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (card->thread_start_mask & thread) {
if ((card->thread_allowed_mask & thread) &&
!(card->thread_running_mask & thread)) {
rc = 1;
card->thread_start_mask &= ~thread;
card->thread_running_mask |= thread;
} else
rc = -EPERM;
}
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
return rc;
}
static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
int rc = 0;
wait_event(card->wait_q,
(rc = __qeth_do_run_thread(card, thread)) >= 0);
return rc;
}
int qeth_schedule_recovery(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 2, "startrec");
rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
if (!rc)
schedule_work(&card->kernel_thread_starter);
return rc;
}
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
struct irb *irb)
{
int dstat, cstat;
char *sense;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return -EIO;
}
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
return -EIO;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
return -EIO;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
return -EIO;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
return -EIO;
}
return 0;
}
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
switch (PTR_ERR(irb)) {
case -EIO:
QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
return -EIO;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
return -ETIMEDOUT;
default:
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
return PTR_ERR(irb);
}
}
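/* Interrupt handler shared by the read, write and data channels of a card. */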
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
int rc;
int cstat, dstat;
struct qeth_cmd_buffer *iob = NULL;
struct ccwgroup_device *gdev;
struct qeth_channel *channel;
struct qeth_card *card;
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
QETH_CARD_TEXT(card, 5, "irq");
if (card->read.ccwdev == cdev) {
channel = &card->read;
QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
QETH_CARD_TEXT(card, 5, "write");
} else {
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
}
if (intparm == 0) {
QETH_CARD_TEXT(card, 5, "irqunsol");
} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
QETH_CARD_TEXT(card, 5, "irqunexp");
dev_err(&cdev->dev,
"Received IRQ with intparm %lx, expected %px\n",
intparm, channel->active_cmd);
if (channel->active_cmd)
qeth_cancel_cmd(channel->active_cmd, -EIO);
} else {
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
qeth_cancel_cmd(iob, rc);
return;
}
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
wake_up(&card->wait_q);
}
if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
wake_up(&card->wait_q);
}
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
qeth_cancel_cmd(iob, -ECANCELED);
iob = NULL;
}
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {
if (irb->esw.esw0.erw.cons) {
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
CCW_DEVID(channel->ccwdev), cstat,
dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
}
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
if (iob)
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
return;
}
}
if (iob) {
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
return;
}
if (iob->callback)
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
}
}
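/*
* Pass a TX notification to the AF_IUCV sockets that own the skbs
* queued on this output buffer.
*/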
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification)
{
struct sk_buff *skb;
skb_queue_walk(&buf->skb_list, skb) {
struct sock *sk = skb->sk;
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
if (sk && sk->sk_family == PF_IUCV)
iucv_sk(sk)->sk_txnotify(sk, notification);
}
}
static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf, bool error,
int budget)
{
struct sk_buff *skb;
/* Empty buffer? */
if (buf->next_element_to_fill == 0)
return;
QETH_TXQ_STAT_INC(queue, bufs);
QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
if (error) {
QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
} else {
QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
}
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
unsigned int bytes = qdisc_pkt_len(skb);
bool is_tso = skb_is_gso(skb);
unsigned int packets;
packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
if (!error) {
if (skb->ip_summed == CHECKSUM_PARTIAL)
QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
if (skb_is_nonlinear(skb))
QETH_TXQ_STAT_INC(queue, skbs_sg);
if (is_tso) {
QETH_TXQ_STAT_INC(queue, skbs_tso);
QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
}
}
napi_consume_skb(skb, budget);
}
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
bool error, int budget)
{
int i;
/* is PCI flag set on buffer? */
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
atomic_dec(&queue->set_pci_flags_count);
QETH_TXQ_STAT_INC(queue, completion_irq);
}
qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
void *data = phys_to_virt(buf->buffer->element[i].addr);
if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
}
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
buf->frames = 0;
buf->bytes = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
if (buf->aob)
kmem_cache_free(qeth_qaob_cache, buf->aob);
kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}
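/*
* Complete TX buffers that wait for their asynchronous completion
* (QAOB). With 'drain' set, all pending buffers are completed as
* failed.
*/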
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
struct qeth_qdio_out_q *queue,
bool drain, int budget)
{
struct qeth_qdio_out_buffer *buf, *tmp;
list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
struct qeth_qaob_priv1 *priv;
struct qaob *aob = buf->aob;
enum iucv_tx_notify notify;
unsigned int i;
priv = (struct qeth_qaob_priv1 *)&aob->user1;
if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
QETH_CARD_TEXT(card, 5, "fp");
QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
notify = drain ? TX_NOTIFY_GENERALERROR :
qeth_compute_cq_notification(aob->aorc, 1);
qeth_notify_skbs(queue, buf, notify);
qeth_tx_complete_buf(queue, buf, drain, budget);
for (i = 0;
i < aob->sb_count && i < queue->max_elements;
i++) {
void *data = phys_to_virt(aob->sba[i]);
if (test_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache,
data);
}
list_del(&buf->list_entry);
qeth_free_out_buf(buf);
}
}
}
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
qeth_tx_complete_pending_bufs(q->card, q, true, 0);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
if (free) {
qeth_free_out_buf(q->bufs[j]);
q->bufs[j] = NULL;
}
}
}
static void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
if (card->qdio.out_qs[i])
qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
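/*
* Switch an OSA device between a single TX queue and the full set of TX
* queues; existing QDIO queues are freed when the count changes.
*/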
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int max = single ? 1 : card->dev->num_tx_queues;
if (card->qdio.no_out_queues == max)
return;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
card->qdio.no_out_queues = max;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
QETH_CARD_TEXT(card, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
if (!chp_dsc)
return -ENOMEM;
card->info.func_level = 0x4100 + chp_dsc->desc;
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
/* inbound */
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
if (IS_IQD(card))
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
else
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
static void qeth_set_initial_options(struct qeth_card *card)
{
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
rc = (card->thread_start_mask & thread);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
return rc;
}
static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card, 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
return;
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
ts = kthread_run(qeth_do_reset, card, "qeth_recover");
if (IS_ERR(ts)) {
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
QETH_RECOVER_THREAD);
}
}
}
static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "setupcrd");
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
spin_lock_init(&card->lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
qeth_set_initial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
hash_init(card->rx_mode_addrs);
hash_init(card->local_addrs4);
hash_init(card->local_addrs6);
spin_lock_init(&card->local_addrs4_lock);
spin_lock_init(&card->local_addrs6_lock);
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
struct qeth_card *card = container_of(slr, struct qeth_card,
qeth_service_level);
if (card->info.mcl_level[0])
seq_printf(m, "qeth: %s firmware level %s\n",
CARD_BUS_ID(card), card->info.mcl_level);
}
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
card->gdev = gdev;
dev_set_drvdata(&gdev->dev, card);
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
card->event_wq = alloc_ordered_workqueue("%s_event", 0,
dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
if (!card->read_cmd)
goto out_read_cmd;
card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
qeth_debugfs_root);
debugfs_create_file("local_addrs", 0400, card->debugfs, card,
&qeth_debugfs_local_addr_fops);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
out_read_cmd:
destroy_workqueue(card->event_wq);
out_wq:
dev_set_drvdata(&gdev->dev, NULL);
kfree(card);
out:
return NULL;
}
static int qeth_clear_channel(struct qeth_card *card,
struct qeth_channel *channel)
{
int rc;
QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
return rc;
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
return rc;
if (channel->state != CH_STATE_STOPPED)
return -ETIME;
channel->state = CH_STATE_DOWN;
return 0;
}
static int qeth_halt_channel(struct qeth_card *card,
struct qeth_channel *channel)
{
int rc;
QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
return rc;
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
return rc;
if (channel->state != CH_STATE_HALTED)
return -ETIME;
return 0;
}
static int qeth_stop_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
if (channel->active_cmd)
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
static int qeth_start_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
channel->state = CH_STATE_DOWN;
xchg(&channel->active_cmd, NULL);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
spin_unlock_irq(get_ccwdev_lock(cdev));
rc = ccw_device_set_online(cdev);
if (rc)
goto err;
return 0;
err:
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(card, &card->read);
rc2 = qeth_halt_channel(card, &card->write);
rc3 = qeth_halt_channel(card, &card->data);
if (rc1)
return rc1;
if (rc2)
return rc2;
return rc3;
}
static int qeth_clear_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(card, &card->read);
rc2 = qeth_clear_channel(card, &card->write);
rc3 = qeth_clear_channel(card, &card->data);
if (rc1)
return rc1;
if (rc2)
return rc2;
return rc3;
}
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "clhacrd");
if (halt)
rc = qeth_halt_channels(card);
if (rc)
return rc;
return qeth_clear_channels(card);
}
static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (IS_IQD(card))
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
return rc;
default:
break;
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
return rc;
}
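/*
* Query z/VM via DIAG 0x26c for the layer mode (L2/L3) configured for
* this VNIC.
*/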
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
struct diag26c_vnic_resp *response = NULL;
struct diag26c_vnic_req *request = NULL;
struct ccw_dev_id id;
char userid[80];
int rc = 0;
QETH_CARD_TEXT(card, 2, "vmlayer");
cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
if (rc)
goto out;
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
rc = -ENOMEM;
goto out;
}
ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION6_VM65918;
request->req_format = DIAG26C_VNIC_INFO;
ASCEBC(userid, 8);
memcpy(&request->sys_name, userid, 8);
request->devno = id.devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_PORT_VNIC);
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
if (rc)
goto out;
QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
rc = -EIO;
goto out;
}
if (response->protocol == VNIC_INFO_PROT_L2)
disc = QETH_DISCIPLINE_LAYER2;
else if (response->protocol == VNIC_INFO_PROT_L3)
disc = QETH_DISCIPLINE_LAYER3;
out:
kfree(response);
kfree(request);
if (rc)
QETH_CARD_TEXT_(card, 2, "err%x", rc);
return disc;
}
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
if (IS_OSM(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
qeth_vm_detect_layer(card);
switch (disc) {
case QETH_DISCIPLINE_LAYER2:
QETH_CARD_TEXT(card, 3, "force l2");
break;
case QETH_DISCIPLINE_LAYER3:
QETH_CARD_TEXT(card, 3, "force l3");
break;
default:
QETH_CARD_TEXT(card, 3, "force no");
}
return disc;
}
static void qeth_set_blkt_defaults(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "cfgblkt");
if (card->info.use_v1_blkt) {
card->info.blkt.time_total = 0;
card->info.blkt.inter_packet = 0;
card->info.blkt.inter_packet_jumbo = 0;
} else {
card->info.blkt.time_total = 250;
card->info.blkt.inter_packet = 5;
card->info.blkt.inter_packet_jumbo = 15;
}
}
static void qeth_idx_init(struct qeth_card *card)
{
memset(&card->seqno, 0, sizeof(card->seqno));
card->token.issuer_rm_w = 0x00010103UL;
card->token.cm_filter_w = 0x00010108UL;
card->token.cm_connection_w = 0x0001010aUL;
card->token.ulp_filter_w = 0x0001010bUL;
card->token.ulp_connection_w = 0x0001010dUL;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
break;
case QETH_CARD_TYPE_OSD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
break;
}
}
static void qeth_idx_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
QETH_SEQ_NO_LENGTH);
if (iob->channel == &card->write)
card->seqno.trans_hdr++;
}
static int qeth_peer_func_level(int level)
{
if ((level & 0xff) == 8)
return (level & 0xff) + 0x400;
if (((level >> 8) & 3) == 1)
return (level & 0xff) + 0x200;
return level;
}
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
qeth_idx_finalize_cmd(card, iob);
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
iob->callback = qeth_release_buffer_cb;
}
static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
{
/* MPC cmds are issued strictly in sequence. */
return !IS_IPA(reply->data);
}
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
const void *data,
unsigned int data_length)
{
struct qeth_cmd_buffer *iob;
iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
if (!iob)
return NULL;
memcpy(iob->data, data, data_length);
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->data);
iob->finalize = qeth_mpc_finalize_cmd;
iob->match = qeth_mpc_match_reply;
return iob;
}
/**
* qeth_send_control_data() - send control command to the card
* @card: qeth_card structure pointer
* @iob: qeth_cmd_buffer pointer
* @reply_cb: callback function pointer
 *   @cb_card: pointer to the qeth_card structure
 *   @cb_reply: pointer to the qeth_reply structure
 *   @cb_cmd: pointer to the original iob for non-IPA
 *            commands, or to the qeth_ipa_cmd structure
 *            for the IPA commands.
* @reply_param: private pointer passed to the callback
*
* Callback function gets called one or more times, with cb_cmd
* pointing to the response returned by the hardware. Callback
* function must return
* > 0 if more reply blocks are expected,
* 0 if the last or only reply block is received, and
* < 0 on error.
* Callback function can get the value of the reply_param pointer from the
* field 'param' of the structure qeth_reply.
*/
static int qeth_send_control_data(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *cb_card,
struct qeth_reply *cb_reply,
unsigned long cb_cmd),
void *reply_param)
{
struct qeth_channel *channel = iob->channel;
struct qeth_reply *reply = &iob->reply;
long timeout = iob->timeout;
int rc;
QETH_CARD_TEXT(card, 2, "sendctl");
reply->callback = reply_cb;
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
qeth_trylock_channel(channel, iob),
timeout);
if (timeout <= 0) {
qeth_put_cmd(iob);
return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
if (iob->finalize)
iob->finalize(card, iob);
QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
qeth_enqueue_cmd(card, iob);
/* This pairs with iob->callback, and keeps the iob alive after IO: */
qeth_get_cmd(iob);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_put_cmd(iob);
qeth_unlock_channel(card, channel);
goto out;
}
timeout = wait_for_completion_interruptible_timeout(&iob->done,
timeout);
if (timeout <= 0)
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_cmd(card, iob);
if (reply_cb) {
/* Wait until the callback for a late reply has completed: */
spin_lock_irq(&iob->lock);
if (rc)
/* Zap any callback that's still pending: */
iob->rc = rc;
spin_unlock_irq(&iob->lock);
}
if (!rc)
rc = iob->rc;
out:
qeth_put_cmd(iob);
return rc;
}
struct qeth_node_desc {
struct node_descriptor nd1;
struct node_descriptor nd2;
struct node_descriptor nd3;
};
static void qeth_read_conf_data_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
{
struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
int rc = 0;
u8 *tag;
QETH_CARD_TEXT(card, 2, "cfgunit");
if (data_length < sizeof(*nd)) {
rc = -EINVAL;
goto out;
}
card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
nd->nd1.plant[1] == _ascebc['M'];
tag = (u8 *)&nd->nd1.tag;
card->info.chpid = tag[0];
card->info.unit_addr2 = tag[1];
tag = (u8 *)&nd->nd2.tag;
card->info.cula = tag[1];
card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
nd->nd3.model[1] == 0xF0 &&
nd->nd3.model[2] >= 0xF1 &&
nd->nd3.model[2] <= 0xF4;
out:
qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
static int qeth_read_conf_data(struct qeth_card *card)
{
struct qeth_channel *channel = &card->data;
struct qeth_cmd_buffer *iob;
struct ciw *ciw;
/* scan for RCD command in extended SenseID data */
ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd == 0)
return -EOPNOTSUPP;
if (ciw->count < sizeof(struct qeth_node_desc))
return -EINVAL;
iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
if (!iob)
return -ENOMEM;
iob->callback = qeth_read_conf_data_cb;
qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
iob->data);
return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_idx_check_activate_response(struct qeth_card *card,
struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
{
int rc;
rc = qeth_check_idx_response(card, iob->data);
if (rc)
return rc;
if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
return 0;
/* negative reply: */
QETH_CARD_TEXT_(card, 2, "idxneg%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another host\n");
return -EBUSY;
case QETH_IDX_ACT_ERR_AUTH:
case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&channel->ccwdev->dev,
"Setting the device online failed because of insufficient authorization\n");
return -EPERM;
default:
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
CCW_DEVID(channel->ccwdev));
return -EIO;
}
}
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
{
struct qeth_channel *channel = iob->channel;
u16 peer_level;
int rc;
QETH_CARD_TEXT(card, 2, "idxrdcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
if (rc)
goto out;
memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (peer_level != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, peer_level);
rc = -EINVAL;
goto out;
}
memcpy(&card->token.issuer_rm_r,
QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
memcpy(&card->info.mcl_level[0],
QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
out:
qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
{
struct qeth_channel *channel = iob->channel;
u16 peer_level;
int rc;
QETH_CARD_TEXT(card, 2, "idxwrcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
if (rc)
goto out;
memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((peer_level & ~0x0100) !=
qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, peer_level);
rc = -EINVAL;
}
out:
qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
struct ccw1 *ccw = __ccw_from_cmd(iob);
qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
iob->data);
qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
iob->finalize = qeth_idx_finalize_cmd;
port |= QETH_IDX_ACT_INVAL_FRAME;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
&card->info.func_level, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
struct qeth_channel *channel = &card->read;
struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 2, "idxread");
iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
if (!iob)
return -ENOMEM;
memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
iob->callback = qeth_idx_activate_read_channel_cb;
rc = qeth_send_control_data(card, iob, NULL, NULL);
if (rc)
return rc;
channel->state = CH_STATE_UP;
return 0;
}
static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
struct qeth_channel *channel = &card->write;
struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 2, "idxwrite");
iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
if (!iob)
return -ENOMEM;
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
iob->callback = qeth_idx_activate_write_channel_cb;
rc = qeth_send_control_data(card, iob, NULL, NULL);
if (rc)
return rc;
channel->state = CH_STATE_UP;
return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmenblcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_filter_r,
QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
return 0;
}
static int qeth_cm_enable(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmenable");
iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
if (!iob)
return -ENOMEM;
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmsetpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_connection_r,
QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
QETH_MPC_TOKEN_LENGTH);
return 0;
}
static int qeth_cm_setup(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmsetup");
iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
if (!iob)
return -ENOMEM;
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
if (link_type == QETH_LINK_TYPE_LANE_TR ||
link_type == QETH_LINK_TYPE_HSTR) {
dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
return false;
}
return true;
}
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
struct net_device *dev = card->dev;
unsigned int new_mtu;
if (!max_mtu) {
/* IQD needs accurate max MTU to set up its RX buffers: */
if (IS_IQD(card))
return -EINVAL;
/* tolerate quirky HW: */
max_mtu = ETH_MAX_MTU;
}
rtnl_lock();
if (IS_IQD(card)) {
/* move any device with default MTU to new max MTU: */
new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
/* adjust RX buffer size to new max MTU: */
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
if (dev->max_mtu && dev->max_mtu != max_mtu)
qeth_free_qdio_queues(card);
} else {
if (dev->mtu)
new_mtu = dev->mtu;
/* default MTUs for first setup: */
else if (IS_LAYER2(card))
new_mtu = ETH_DATA_LEN;
else
new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
}
dev->max_mtu = max_mtu;
dev->mtu = min(new_mtu, max_mtu);
rtnl_unlock();
return 0;
}
static int qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
case 0x4000:
return 8192;
case 0x6000:
return 16384;
case 0xa000:
return 32768;
case 0xffff:
return 57344;
default:
return 0;
}
}
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
__u16 mtu, framesize;
__u16 len;
struct qeth_cmd_buffer *iob;
u8 link_type = 0;
QETH_CARD_TEXT(card, 2, "ulpenacb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
if (IS_IQD(card)) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
} else {
mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
}
*(u16 *)reply->param = mtu;
memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
memcpy(&link_type,
QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
if (!qeth_is_supported_link_type(card, link_type))
return -EPROTONOSUPPORT;
}
card->info.link_type = link_type;
QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
return 0;
}
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}
static int qeth_ulp_enable(struct qeth_card *card)
{
u8 prot_type = qeth_mpc_select_prot_type(card);
struct qeth_cmd_buffer *iob;
u16 max_mtu;
int rc;
QETH_CARD_TEXT(card, 2, "ulpenabl");
iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
if (!iob)
return -ENOMEM;
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
if (rc)
return rc;
return qeth_update_max_mtu(card, max_mtu);
}
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "ulpstpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_connection_r,
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
3)) {
QETH_CARD_TEXT(card, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
return -EMLINK;
}
return 0;
}
static int qeth_ulp_setup(struct qeth_card *card)
{
__u16 temp;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "ulpsetup");
iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
if (!iob)
return -ENOMEM;
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
&card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
gfp_t gfp)
{
struct qeth_qdio_out_buffer *newbuf;
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
if (!newbuf)
return -ENOMEM;
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
return 0;
}
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
qeth_drain_output_queue(q, true);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
unsigned int i;
if (!q)
return NULL;
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
goto err_qdio_bufs;
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
goto err_out_bufs;
}
return q;
err_out_bufs:
while (i > 0)
qeth_free_out_buf(q->bufs[--i]);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
kfree(q);
return NULL;
}
static void qeth_tx_completion_timer(struct timer_list *timer)
{
struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
napi_schedule(&queue->napi);
QETH_TXQ_STAT_INC(queue, completion_timer);
}
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
unsigned int i;
QETH_CARD_TEXT(card, 2, "allcqdbf");
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card))
goto out_buffer_pool;
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
struct qeth_qdio_out_q *queue;
queue = qeth_alloc_output_queue();
if (!queue)
goto out_freeoutq;
QETH_CARD_TEXT_(card, 2, "outq %i", i);
QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
card->qdio.out_qs[i] = queue;
queue->card = card;
queue->queue_no = i;
INIT_LIST_HEAD(&queue->pending_bufs);
spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
if (IS_IQD(card)) {
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
queue->rescan_usecs = QETH_TX_TIMER_USECS;
} else {
queue->coalesce_usecs = USEC_PER_SEC;
queue->max_coalesced_frames = 0;
queue->rescan_usecs = 10 * USEC_PER_SEC;
}
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
/* completion */
if (qeth_alloc_cq(card))
goto out_freeoutq;
return 0;
out_freeoutq:
while (i > 0) {
qeth_free_output_queue(card->qdio.out_qs[--i]);
card->qdio.out_qs[i] = NULL;
}
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
}
static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
card->qdio.in_q->bufs[j].rx_skb = NULL;
}
}
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
for (i = 0; i < card->qdio.no_out_queues; i++) {
qeth_free_output_queue(card->qdio.out_qs[i]);
card->qdio.out_qs[i] = NULL;
}
}
static void qeth_fill_qib_parms(struct qeth_card *card,
struct qeth_qib_parms *parms)
{
struct qeth_qdio_out_q *queue;
unsigned int i;
parms->pcit_magic[0] = 'P';
parms->pcit_magic[1] = 'C';
parms->pcit_magic[2] = 'I';
parms->pcit_magic[3] = 'T';
ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
parms->blkt_magic[0] = 'B';
parms->blkt_magic[1] = 'L';
parms->blkt_magic[2] = 'K';
parms->blkt_magic[3] = 'T';
ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
parms->blkt_total = card->info.blkt.time_total;
parms->blkt_inter_packet = card->info.blkt.inter_packet;
parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
/* Prio-queueing implicitly uses the default priorities: */
if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
return;
parms->pque_magic[0] = 'P';
parms->pque_magic[1] = 'Q';
parms->pque_magic[2] = 'U';
parms->pque_magic[3] = 'E';
ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
qeth_for_each_output_queue(card, queue, i)
parms->pque_priority[i] = queue->priority;
}
static int qeth_qdio_activate(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "qdioact");
return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "dmact");
iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
if (!iob)
return -ENOMEM;
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
int rc;
QETH_CARD_TEXT(card, 2, "mpcinit");
rc = qeth_issue_next_read(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
rc = qeth_cm_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%d", rc);
return rc;
}
rc = qeth_cm_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
return rc;
}
rc = qeth_ulp_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "4err%d", rc);
return rc;
}
rc = qeth_ulp_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
return rc;
}
rc = qeth_alloc_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
return rc;
}
rc = qeth_qdio_establish(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
qeth_free_qdio_queues(card);
return rc;
}
rc = qeth_qdio_activate(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "7err%d", rc);
return rc;
}
rc = qeth_dm_act(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
return rc;
}
return 0;
}
static void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
if (!card->info.mcl_level[0]) {
scnprintf(card->info.mcl_level,
sizeof(card->info.mcl_level),
"%02x%02x",
card->info.mcl_level[2],
card->info.mcl_level[3]);
break;
}
fallthrough;
case QETH_CARD_TYPE_IQD:
if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
card->info.mcl_level[0]];
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
card->info.mcl_level[1]];
card->info.mcl_level[2] = (char) _ebcasc[(__u8)
card->info.mcl_level[2]];
card->info.mcl_level[3] = (char) _ebcasc[(__u8)
card->info.mcl_level[3]];
card->info.mcl_level[QETH_MCL_LENGTH] = 0;
}
break;
default:
memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
}
dev_info(&card->gdev->dev,
"Device is a%s card%s%s%s\nwith link type %s.\n",
qeth_get_cardname(card),
(card->info.mcl_level[0]) ? " (level: " : "",
(card->info.mcl_level[0]) ? card->info.mcl_level : "",
(card->info.mcl_level[0]) ? ")" : "",
qeth_get_cardname_short(card));
}
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
QETH_CARD_TEXT(card, 5, "inwrklst");
list_for_each_entry(entry,
&card->qdio.init_pool.entry_list, init_list) {
qeth_put_buffer_pool_entry(card, entry);
}
}
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
int i, free;
if (list_empty(&card->qdio.in_buf_pool.entry_list))
return NULL;
list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
free = 1;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
free = 0;
break;
}
}
if (free) {
list_del_init(&entry->list);
return entry;
}
}
/* no free buffer in pool so take first one and swap pages */
entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
struct qeth_buffer_pool_entry, list);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
struct page *page = dev_alloc_page();
if (!page)
return NULL;
__free_page(entry->elements[i]);
entry->elements[i] = page;
QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
}
}
list_del_init(&entry->list);
return entry;
}
static int qeth_init_input_buffer(struct qeth_card *card,
struct qeth_qdio_buffer *buf)
{
struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
int i;
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
buf->rx_skb = netdev_alloc_skb(card->dev,
ETH_HLEN +
sizeof(struct ipv6hdr));
if (!buf->rx_skb)
return -ENOMEM;
}
if (!pool_entry) {
pool_entry = qeth_find_free_buffer_pool_entry(card);
if (!pool_entry)
return -ENOBUFS;
buf->pool_entry = pool_entry;
}
/*
* since the buffer is accessed only from the input_tasklet
* there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
* buffers
*/
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
buf->buffer->element[i].addr =
page_to_phys(pool_entry->elements[i]);
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
buf->buffer->element[i].eflags = 0;
buf->buffer->element[i].sflags = 0;
}
return 0;
}
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
struct qeth_qdio_out_q *queue)
{
if (!IS_IQD(card) ||
qeth_iqd_is_mcast_queue(card, queue) ||
card->options.cq == QETH_CQ_ENABLED ||
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
return 1;
return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
static int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
unsigned int i;
int rc;
QETH_CARD_TEXT(card, 2, "initqdqs");
/* inbound queue */
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
memset(&card->rx, 0, sizeof(struct qeth_rx));
qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
for (i = 0; i < rx_bufs; i++) {
rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
if (rc)
return rc;
}
card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
queue->next_buf_to_fill = 0;
queue->do_pack = 0;
queue->prev_hdr = NULL;
queue->coalesced_frames = 0;
queue->bulk_start = 0;
queue->bulk_count = 0;
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
}
return 0;
}
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
qeth_mpc_finalize_cmd(card, iob);
/* override with IPA-specific values: */
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
static void qeth_prepare_ipa_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob, u16 cmd_length)
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
{
struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
unsigned int data_length)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipacmd_hdr *hdr;
data_length += offsetof(struct qeth_ipa_cmd, data);
iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
QETH_IPA_TIMEOUT);
if (!iob)
return NULL;
qeth_prepare_ipa_cmd(card, iob, data_length);
iob->match = qeth_ipa_match_reply;
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
hdr->initiator = IPA_CMD_INITIATOR_HOST;
/* hdr->seqno is set by qeth_send_control_data() */
hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
hdr->rel_adapter_no = (u8) card->dev->dev_port;
hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
hdr->param_count = 1;
hdr->prot_version = prot;
return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
return (cmd->hdr.return_code) ? -EIO : 0;
}
/*
* qeth_send_ipa_cmd() - send an IPA command
*
* See qeth_send_control_data() for explanation of the arguments.
*/
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
unsigned long),
void *reply_param)
{
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
if (card->read_or_write_problem) {
qeth_put_cmd(iob);
return -EIO;
}
if (reply_cb == NULL)
reply_cb = qeth_send_ipa_cmd_cb;
rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
if (rc == -ETIME) {
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
}
return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
static int qeth_send_startlan_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
return -ENETDOWN;
return (cmd->hdr.return_code) ? -EIO : 0;
}
static int qeth_send_startlan(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "strtlan");
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
if (!cmd->hdr.return_code)
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
return cmd->hdr.return_code;
}
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_query_cmds_supp *query_cmd;
QETH_CARD_TEXT(card, 3, "quyadpcb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
if (query_cmd->lan_type & 0x7f) {
if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
return -EPROTONOSUPPORT;
card->info.link_type = query_cmd->lan_type;
QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
card->options.adp.supported = query_cmd->supported_cmds;
return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
enum qeth_ipa_setadp_cmd adp_cmd,
unsigned int data_length)
{
struct qeth_ipacmd_setadpparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
data_length +
offsetof(struct qeth_ipacmd_setadpparms,
data));
if (!iob)
return NULL;
hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
hdr->cmdlength = sizeof(*hdr) + data_length;
hdr->command_code = adp_cmd;
hdr->used_total = 1;
hdr->seq_no = 1;
return iob;
}
static int qeth_query_setadapterparms(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
SETADP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
static int qeth_query_ipassists_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
case IPA_RC_SUCCESS:
break;
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_CARD_TEXT(card, 2, "ipaunsup");
card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
default:
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
CARD_DEVID(card), cmd->hdr.return_code);
return -EIO;
}
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
card->options.ipa4 = cmd->hdr.assists;
else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6 = cmd->hdr.assists;
else
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
CARD_DEVID(card));
return 0;
}
static int qeth_query_ipassists(struct qeth_card *card,
enum qeth_prot_versions prot)
{
int rc;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_query_switch_attributes *attrs;
struct qeth_switch_info *sw_info;
QETH_CARD_TEXT(card, 2, "qswiatcb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
sw_info = (struct qeth_switch_info *)reply->param;
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
sw_info->capabilities = attrs->capabilities;
sw_info->settings = attrs->settings;
QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
sw_info->settings);
return 0;
}
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "qswiattr");
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
return -EOPNOTSUPP;
if (!netif_carrier_ok(card->dev))
return -ENOMEDIUM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
enum qeth_diags_cmds sub_cmd,
unsigned int data_length)
{
struct qeth_ipacmd_diagass *cmd;
struct qeth_cmd_buffer *iob;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
DIAG_HDR_LEN + data_length);
if (!iob)
return NULL;
cmd = &__ipa_cmd(iob)->data.diagass;
cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
cmd->subcmd = sub_cmd;
return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
u16 rc = cmd->hdr.return_code;
if (rc) {
QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
return -EIO;
}
card->info.diagass_support = cmd->data.diagass.ext;
return 0;
}
static int qeth_query_setdiagass(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "qdiagass");
iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
unsigned long info = get_zeroed_page(GFP_KERNEL);
struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
struct ccw_dev_id ccwid;
int level;
tid->chpid = card->info.chpid;
ccw_device_get_id(CARD_RDEV(card), &ccwid);
tid->ssid = ccwid.ssid;
tid->devno = ccwid.devno;
if (!info)
return;
level = stsi(NULL, 0, 0, 0);
if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
tid->lparnr = info222->lpar_number;
if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
}
free_page(info);
}
static int qeth_hw_trap_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
u16 rc = cmd->hdr.return_code;
if (rc) {
QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
return -EIO;
}
return 0;
}
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "diagtrap");
iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.diagass.type = 1;
cmd->data.diagass.action = action;
switch (action) {
case QETH_DIAGS_TRAP_ARM:
cmd->data.diagass.options = 0x0003;
cmd->data.diagass.ext = 0x00010000 +
sizeof(struct qeth_trap_id);
qeth_get_trap_id(card,
(struct qeth_trap_id *)cmd->data.diagass.cdata);
break;
case QETH_DIAGS_TRAP_DISARM:
cmd->data.diagass.options = 0x0001;
break;
case QETH_DIAGS_TRAP_CAPTURE:
break;
}
return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
static int qeth_check_qdio_errors(struct qeth_card *card,
struct qdio_buffer *buf,
unsigned int qdio_error,
const char *dbftext)
{
if (qdio_error) {
QETH_CARD_TEXT(card, 2, dbftext);
QETH_CARD_TEXT_(card, 2, " F15=%02X",
buf->element[15].sflags);
QETH_CARD_TEXT_(card, 2, " F14=%02X",
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
QETH_CARD_STAT_INC(card, rx_fifo_errors);
return 0;
} else
return 1;
}
return 0;
}
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
unsigned int count)
{
struct qeth_qdio_q *queue = card->qdio.in_q;
struct list_head *lh;
int i;
int rc;
int newcount = 0;
/* only requeue at a certain threshold to avoid SIGAs */
if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
for (i = queue->next_buf_to_init;
i < queue->next_buf_to_init + count; ++i) {
if (qeth_init_input_buffer(card,
&queue->bufs[QDIO_BUFNR(i)])) {
break;
} else {
newcount++;
}
}
if (newcount < count) {
			/* we are short on memory, so we switch back to
			 * traditional skb allocation and drop packets */
atomic_set(&card->force_alloc_skb, 3);
count = newcount;
} else {
atomic_add_unless(&card->force_alloc_skb, -1, 0);
}
if (!count) {
i = 0;
list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
i++;
if (i == card->qdio.in_buf_pool.buf_count) {
QETH_CARD_TEXT(card, 2, "qsarbw");
schedule_delayed_work(
&card->buffer_reclaim_work,
QETH_RECLAIM_WORK_TIME);
}
return 0;
}
rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
queue->next_buf_to_init,
count);
		if (rc)
			QETH_CARD_TEXT(card, 2, "qinberr");
queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
count);
return count;
}
return 0;
}
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
struct qeth_card *card = container_of(to_delayed_work(work),
struct qeth_card,
buffer_reclaim_work);
local_bh_disable();
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
}
static void qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
return;
if ((sbalf15 >= 15) && (sbalf15 <= 31))
return;
QETH_CARD_TEXT(card, 1, "lnkfail");
QETH_CARD_TEXT_(card, 1, "%04x %02x",
(u16)qdio_err, (u8)sbalf15);
}
/**
* qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
* @queue: queue to check for packing buffer
*
* Returns number of buffers that were prepared for flush.
*/
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
buffer = queue->bufs[queue->next_buf_to_fill];
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)) {
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
QDIO_BUFNR(queue->next_buf_to_fill + 1);
return 1;
}
return 0;
}
/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_CARD_TEXT(queue->card, 6, "np->pack");
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 1;
}
}
}
/*
* Switches from packing to non-packing mode. If there is a packing
* buffer on the queue this buffer will be prepared to be flushed.
* In that case 1 is returned to inform the caller. If no buffer
* has to be flushed, zero is returned.
*/
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
if (queue->do_pack) {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_CARD_TEXT(queue->card, 6, "pack->np");
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 0;
return qeth_prep_flush_pack_buffer(queue);
}
}
return 0;
}
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
struct qeth_qdio_out_buffer *buf = queue->bufs[index];
struct qeth_card *card = queue->card;
unsigned int frames, usecs;
struct qaob *aob = NULL;
int rc;
int i;
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
struct sk_buff *skb;
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
queue->coalesced_frames += buf->frames;
if (IS_IQD(card)) {
skb_queue_walk(&buf->skb_list, skb)
skb_tx_timestamp(skb);
}
}
if (IS_IQD(card)) {
if (card->options.cq == QETH_CQ_ENABLED &&
!qeth_iqd_is_mcast_queue(card, queue) &&
count == 1) {
if (!buf->aob)
buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
GFP_ATOMIC);
if (buf->aob) {
struct qeth_qaob_priv1 *priv;
aob = buf->aob;
priv = (struct qeth_qaob_priv1 *)&aob->user1;
priv->state = QETH_QAOB_ISSUED;
priv->queue_no = queue->queue_no;
}
}
} else {
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
(QETH_HIGH_WATERMARK_PACK -
QETH_WATERMARK_PACK_FUZZ)) &&
!atomic_read(&queue->set_pci_flags_count)) {
/* it's likely that we'll go to packing
* mode soon */
atomic_inc(&queue->set_pci_flags_count);
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
} else {
if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI any more, so we
				 * have to request one to be sure that the PCI
				 * will fire at some time in the future; then
				 * we can flush packed buffers that might still
				 * be hanging around, which can happen if no
				 * further send was requested by the stack.
				 */
atomic_inc(&queue->set_pci_flags_count);
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
}
QETH_TXQ_STAT_INC(queue, doorbell);
rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
index, count, aob);
switch (rc) {
case 0:
case -ENOBUFS:
/* ignore temporary SIGA errors without busy condition */
/* Fake the TX completion interrupt: */
frames = READ_ONCE(queue->max_coalesced_frames);
usecs = READ_ONCE(queue->coalesce_usecs);
if (frames && queue->coalesced_frames >= frames) {
napi_schedule(&queue->napi);
queue->coalesced_frames = 0;
QETH_TXQ_STAT_INC(queue, coal_frames);
} else if (qeth_use_tx_irqs(card) &&
atomic_read(&queue->used_buffers) >= 32) {
/* Old behaviour carried over from the qdio layer: */
napi_schedule(&queue->napi);
QETH_TXQ_STAT_INC(queue, coal_frames);
} else if (usecs) {
qeth_tx_arm_timer(queue, usecs);
}
break;
default:
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
		/* This must not happen under normal circumstances. If it
		 * does, something is really wrong -> recover. */
qeth_schedule_recovery(queue->card);
}
}
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
queue->prev_hdr = NULL;
queue->bulk_count = 0;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
/*
	 * check if we have to switch to non-packing mode or if
* we have to get a pci flag out on the queue
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)) {
unsigned int index, flush_cnt;
spin_lock(&queue->lock);
index = queue->next_buf_to_fill;
flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
flush_cnt = qeth_prep_flush_pack_buffer(queue);
if (flush_cnt) {
qeth_flush_buffers(queue, index, flush_cnt);
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
}
spin_unlock(&queue->lock);
}
}
static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
int rc;
if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
rc = -1;
goto out;
} else {
if (card->options.cq == cq) {
rc = 0;
goto out;
}
qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
}
out:
return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
unsigned int queue_no = priv->queue_no;
BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
queue_no < card->qdio.no_out_queues)
napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
unsigned int queue, int first_element,
int count)
{
struct qeth_qdio_q *cq = card->qdio.c_q;
int i;
int rc;
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
if (qdio_err) {
netif_tx_stop_all_queues(card->dev);
qeth_schedule_recovery(card);
return;
}
for (i = first_element; i < first_element + count; ++i) {
struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
int e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
unsigned long phys_aob_addr = buffer->element[e].addr;
qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
}
rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
cq->next_buf_to_init, count);
if (rc) {
dev_warn(&card->gdev->dev,
"QDIO reported an error, rc=%i\n", rc);
QETH_CARD_TEXT(card, 2, "qcqherr");
}
cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int qdio_err, int queue,
int first_elem, int count,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
if (qdio_err)
qeth_schedule_recovery(card);
}
static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned int qdio_error, int __queue,
int first_element, int count,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
QETH_CARD_TEXT(card, 2, "achkcond");
netif_tx_stop_all_queues(card->dev);
qeth_schedule_recovery(card);
}
/*
* Note: Function assumes that we have 4 outbound queues.
*/
static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
u8 tos;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
case htons(ETH_P_IPV6):
tos = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
return ~tos >> 6 & 3;
if (tos & IPTOS_MINCOST)
return 3;
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
return 1;
if (tos & IPTOS_LOWDELAY)
return 0;
break;
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
return ~skb->priority >> 1 & 3;
case QETH_PRIO_Q_ING_VLAN:
if (veth->h_vlan_proto == htons(ETH_P_8021Q))
return ~ntohs(veth->h_vlan_TCI) >>
(VLAN_PRIO_SHIFT + 1) & 3;
break;
case QETH_PRIO_Q_ING_FIXED:
return card->qdio.default_out_queue;
default:
break;
}
return card->qdio.default_out_queue;
}
/**
* qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
* @skb: SKB address
*
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
* fragmented part of the SKB. Returns zero for linear SKB.
*/
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
int cnt, elements = 0;
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
elements += qeth_get_elements_for_range(
(addr_t)skb_frag_address(frag),
(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
}
return elements;
}
/**
* qeth_count_elements() - Counts the number of QDIO buffer elements needed
* to transmit an skb.
* @skb: the skb to operate on.
* @data_offset: skip this part of the skb's linear data
*
* Returns the number of pages, and thus QDIO buffer elements, needed to map the
* skb's data (both its linear part and paged fragments).
*/
static unsigned int qeth_count_elements(struct sk_buff *skb,
unsigned int data_offset)
{
unsigned int elements = qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
addr_t start = (addr_t)skb->data + data_offset;
if (start != end)
elements += qeth_get_elements_for_range(start, end);
return elements;
}
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
MAX_TCP_HEADER)
/**
* qeth_add_hw_header() - add a HW header to an skb.
* @queue: TX queue that the skb will be placed on.
* @skb: skb that the HW header should be added to.
* @hdr: double pointer to a qeth_hdr. When returning with >= 0,
* it contains a valid pointer to a qeth_hdr.
* @hdr_len: length of the HW header.
* @proto_len: length of protocol headers that need to be in same page as the
* HW header.
* @elements: returns the required number of buffer elements for this skb.
*
* Returns the pushed length. If the header can't be pushed on
* (eg. because it would cross a page boundary), it is allocated from
* the cache instead and 0 is returned.
* The number of needed buffer elements is returned in @elements.
* Error to create the hdr is indicated by returning with < 0.
*/
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr **hdr,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
const unsigned int contiguous = proto_len ? proto_len : 1;
const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
addr_t start, end;
bool push_ok;
int rc;
check_layout:
start = (addr_t)skb->data - hdr_len;
end = (addr_t)skb->data;
if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
/* Push HW header into same page as first protocol header. */
push_ok = true;
/* ... but TSO always needs a separate element for headers: */
if (skb_is_gso(skb))
__elements = 1 + qeth_count_elements(skb, proto_len);
else
__elements = qeth_count_elements(skb, 0);
} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
/* Push HW header into preceding page, flush with skb->data. */
push_ok = true;
__elements = 1 + qeth_count_elements(skb, 0);
} else {
/* Use header cache, copy protocol headers up. */
push_ok = false;
__elements = 1 + qeth_count_elements(skb, proto_len);
}
/* Compress skb to fit into one IO buffer: */
if (__elements > max_elements) {
if (!skb_is_nonlinear(skb)) {
/* Drop it, no easy way of shrinking it further. */
QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
max_elements, __elements, skb->len);
return -E2BIG;
}
rc = skb_linearize(skb);
if (rc) {
QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
return rc;
}
QETH_TXQ_STAT_INC(queue, skbs_linearized);
/* Linearization changed the layout, re-evaluate: */
goto check_layout;
}
*elements = __elements;
/* Add the header: */
if (push_ok) {
*hdr = skb_push(skb, hdr_len);
return hdr_len;
}
/* Fall back to cache element with known-good alignment: */
if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
return -E2BIG;
*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!*hdr)
return -ENOMEM;
/* Copy protocol headers behind HW header: */
skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
return 0;
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
struct sk_buff *curr_skb,
struct qeth_hdr *curr_hdr)
{
struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
struct qeth_hdr *prev_hdr = queue->prev_hdr;
if (!prev_hdr)
return true;
/* All packets must have the same target: */
if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
eth_hdr(curr_skb)->h_dest) &&
qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
}
return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
/**
* qeth_fill_buffer() - map skb into an output buffer
* @buf: buffer to transport the skb
* @skb: skb to map into the buffer
* @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
*/
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
unsigned int elem_length, cnt;
bool is_first_elem = true;
__skb_queue_tail(&buf->skb_list, skb);
/* build dedicated element for HW Header */
if (hd_len) {
is_first_elem = false;
buffer->element[element].addr = virt_to_phys(hdr);
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
/* HW header is allocated from cache: */
if ((void *)hdr != skb->data)
__set_bit(element, buf->from_kmem_cache);
/* HW header was pushed and is contiguous with linear part: */
else if (length > 0 && !PAGE_ALIGNED(data) &&
(data == (char *)hdr + hd_len))
buffer->element[element].eflags |=
SBAL_EFLAGS_CONTIGUOUS;
element++;
}
/* map linear part into buffer element(s) */
while (length > 0) {
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = virt_to_phys(data);
buffer->element[element].length = elem_length;
length -= elem_length;
if (is_first_elem) {
is_first_elem = false;
if (length || skb_is_nonlinear(skb))
/* skb needs additional elements */
buffer->element[element].eflags =
SBAL_EFLAGS_FIRST_FRAG;
else
buffer->element[element].eflags = 0;
} else {
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
}
data += elem_length;
element++;
}
/* map page frags into buffer element(s) */
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
data = skb_frag_address(frag);
length = skb_frag_size(frag);
while (length > 0) {
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = virt_to_phys(data);
buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
length -= elem_length;
data += elem_length;
element++;
}
}
if (buffer->element[element - 1].eflags)
buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
buf->next_element_to_fill = element;
return element;
}
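/*
 * Usage sketch (illustrative only), based on the call sites in __qeth_xmit()
 * and qeth_do_send_packet() below:
 *
 *	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
 *	buffer->bytes += qdisc_pkt_len(skb);
 *	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 *
 * With hd_len > 0 the HW header gets its own SBAL element (cache-allocated,
 * or pushed for TSO); with hd_len == 0 the header sits at skb->data and is
 * mapped together with the skb's linear part.
 */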
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, unsigned int elements,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
unsigned int bytes = qdisc_pkt_len(skb);
struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
bool flush;
buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check, the wake/stop logic should ensure that we always
* get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
flush = !qeth_iqd_may_bulk(queue, skb, hdr);
if (flush ||
(buffer->next_element_to_fill + elements > queue->max_elements)) {
if (buffer->next_element_to_fill > 0) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->bulk_count++;
}
if (queue->bulk_count >= queue->bulk_max)
flush = true;
if (flush)
qeth_flush_queue(queue);
buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
queue->bulk_count)];
/* Sanity-check again: */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
}
if (buffer->next_element_to_fill == 0 &&
atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
/* If a TX completion happens right _here_ and misses to wake
* the txq, then our re-check below will catch the race.
*/
QETH_TXQ_STAT_INC(queue, stopped);
netif_tx_stop_queue(txq);
stopped = true;
}
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
buffer->bytes += bytes;
buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
queue->prev_hdr = hdr;
flush = __netdev_tx_sent_queue(txq, bytes,
!stopped && netdev_xmit_more());
if (flush || next_element >= queue->max_elements) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->bulk_count++;
if (queue->bulk_count >= queue->bulk_max)
flush = true;
if (flush)
qeth_flush_queue(queue);
}
if (stopped && !qeth_out_queue_is_full(queue))
netif_tx_start_queue(txq);
return 0;
}
static int qeth_do_send_packet(struct qeth_card *card,
struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len,
unsigned int elements_needed)
{
unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
int flush_count = 0;
int do_pack = 0;
int rc = 0;
buffer = queue->bufs[queue->next_buf_to_fill];
/* Just a sanity check, the wake/stop logic should ensure that we always
* get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* check if we need to switch packing state of this queue */
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
/* does packet fit in current buffer? */
if (buffer->next_element_to_fill + elements_needed >
queue->max_elements) {
/* ... no -> set state PRIMED */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill =
QDIO_BUFNR(queue->next_buf_to_fill + 1);
buffer = queue->bufs[queue->next_buf_to_fill];
/* We stepped forward, so sanity-check again: */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
flush_count);
rc = -EBUSY;
goto out;
}
}
}
if (buffer->next_element_to_fill == 0 &&
atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
/* If a TX completion happens right _here_ and misses to wake
* the txq, then our re-check below will catch the race.
*/
QETH_TXQ_STAT_INC(queue, stopped);
netif_tx_stop_queue(txq);
stopped = true;
}
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
buffer->bytes += qdisc_pkt_len(skb);
buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
if (queue->do_pack)
QETH_TXQ_STAT_INC(queue, skbs_pack);
if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
flush_count++;
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
QDIO_BUFNR(queue->next_buf_to_fill + 1);
}
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
out:
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
if (stopped && !qeth_out_queue_is_full(queue))
netif_tx_start_queue(txq);
return rc;
}
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
unsigned int payload_len, struct sk_buff *skb,
unsigned int proto_len)
{
struct qeth_hdr_ext_tso *ext = &hdr->ext;
ext->hdr_tot_len = sizeof(*ext);
ext->imb_hdr_no = 1;
ext->hdr_type = 1;
ext->hdr_version = 1;
ext->hdr_len = 28;
ext->payload_len = payload_len;
ext->mss = skb_shinfo(skb)->gso_size;
ext->dg_hdr_len = proto_len;
}
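/*
 * Illustrative note: qeth_xmit() below invokes this as
 *
 *	qeth_fill_tso_ext((struct qeth_hdr_tso *)hdr,
 *			  frame_len - proto_len, skb, proto_len);
 *
 * where proto_len covers the L2..L4 headers that get replicated into every
 * segment. So payload_len is the TCP payload of the GSO skb, mss comes from
 * skb_shinfo(skb)->gso_size and dg_hdr_len describes the replicated headers.
 */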
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, __be16 proto,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
bool is_tso = skb_is_gso(skb);
unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
unsigned int elements;
int push_len, rc;
if (is_tso) {
hw_hdr_len = sizeof(struct qeth_hdr_tso);
proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
hw_hdr_len = sizeof(struct qeth_hdr);
proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
}
rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
&elements);
if (push_len < 0)
return push_len;
if (is_tso || !push_len) {
/* HW header needs its own buffer element. */
hd_len = hw_hdr_len + proto_len;
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
fill_header(queue, hdr, skb, proto, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
if (IS_IQD(card)) {
rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
hd_len);
} else {
/* TODO: drop skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
spin_lock(&queue->lock);
rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
hd_len, elements);
spin_unlock(&queue->lock);
}
if (rc && !push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
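/*
 * Illustrative summary of the TX paths selected above:
 *
 *	IS_IQD(card):	__qeth_xmit(), which bulks frames with the same
 *			target into one buffer and uses netdev_xmit_more()
 *			to defer flushing the queue.
 *	otherwise:	qeth_do_send_packet() under queue->lock, which may
 *			switch the queue into packing mode.
 *
 * In both cases a cache-allocated HW header (push_len == 0) is freed again
 * when the send fails.
 */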
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_setadpparms *setparms;
QETH_CARD_TEXT(card, 4, "prmadpcb");
setparms = &(cmd->data.setadapterparms);
if (qeth_setadpparms_inspect_rc(cmd)) {
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
return (cmd->hdr.return_code) ? -EIO : 0;
}
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
SET_PROMISC_MODE_OFF;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "setprom");
QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
SETADP_DATA_SIZEOF(mode));
if (!iob)
return;
cmd = __ipa_cmd(iob);
cmd->data.setadapterparms.data.mode = mode;
qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_setadpparms *adp_cmd;
QETH_CARD_TEXT(card, 4, "chgmaccb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
adp_cmd = &cmd->data.setadapterparms;
if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
return -EADDRNOTAVAIL;
if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
!(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
return -EADDRNOTAVAIL;
eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
return 0;
}
int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
SETADP_DATA_SIZEOF(change_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
card->dev->dev_addr);
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
NULL);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_set_access_ctrl *access_ctrl_req;
QETH_CARD_TEXT(card, 4, "setaccb");
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_CARD_TEXT_(card, 2, "rc=%d",
cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
access_ctrl_req->subcmd_code, CARD_DEVID(card),
cmd->data.setadapterparms.hdr.return_code);
switch (qeth_setadpparms_inspect_rc(cmd)) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
dev_info(&card->gdev->dev,
"QDIO data connection isolation is deactivated\n");
else
dev_info(&card->gdev->dev,
"QDIO data connection isolation is activated\n");
return 0;
case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
CARD_DEVID(card));
return 0;
case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
CARD_DEVID(card));
return 0;
case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
return -EOPNOTSUPP;
case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
dev_err(&card->gdev->dev,
"Adapter is dedicated. "
"QDIO data connection isolation not supported\n");
return -EOPNOTSUPP;
case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
dev_err(&card->gdev->dev,
"TSO does not permit QDIO data connection isolation\n");
return -EPERM;
case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
dev_err(&card->gdev->dev, "The adjacent switch port does not "
"support reflective relay mode\n");
return -EOPNOTSUPP;
case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
"enabled at the adjacent switch port");
return -EREMOTEIO;
case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
"at the adjacent switch failed\n");
/* benign error while disabling ISOLATION_MODE_FWD */
return 0;
default:
return -EIO;
}
}
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode)
{
int rc;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
QETH_CARD_TEXT(card, 4, "setacctl");
if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
dev_err(&card->gdev->dev,
"Adapter does not support QDIO data connection isolation\n");
return -EOPNOTSUPP;
}
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
SETADP_DATA_SIZEOF(set_access_ctrl));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
access_ctrl_req->subcmd_code = mode;
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
NULL);
if (rc) {
QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
}
return rc;
}
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qeth_card *card;
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
switch (regnum) {
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
BMSR_100BASE4;
break;
case MII_PHYSID1: /* PHYS ID 1 */
rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
dev->dev_addr[2];
rc = (rc >> 5) & 0xFFFF;
break;
case MII_PHYSID2: /* PHYS ID 2 */
rc = (dev->dev_addr[2] << 10) & 0xFFFF;
break;
case MII_ADVERTISE: /* Advertisement control reg */
rc = ADVERTISE_ALL;
break;
case MII_LPA: /* Link partner ability reg */
rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
LPA_100BASE4 | LPA_LPACK;
break;
case MII_EXPANSION: /* Expansion register */
break;
case MII_DCOUNTER: /* disconnect counter */
break;
case MII_FCSCOUNTER: /* false carrier counter */
break;
case MII_NWAYTEST: /* N-way auto-neg test register */
break;
case MII_RERRCOUNTER: /* rx error counter */
rc = card->stats.rx_length_errors +
card->stats.rx_frame_errors +
card->stats.rx_fifo_errors;
break;
case MII_SREVISION: /* silicon revision */
break;
case MII_RESV1: /* reserved 1 */
break;
case MII_LBRERROR: /* loopback, rx, bypass error */
break;
case MII_PHYADDR: /* physical address */
break;
case MII_RESV2: /* reserved 2 */
break;
case MII_TPISTATUS: /* TPI status for 10mbps */
break;
case MII_NCONFIG: /* network interface config */
break;
default:
break;
}
return rc;
}
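/*
 * Note (illustrative): there is no real MDIO bus behind a qeth interface.
 * The reads above synthesize plausible register values - link up, full
 * duplex, a PHY ID derived from the MAC address - so that SIOCGMIIREG
 * users such as mii-tool get sensible output; see qeth_do_ioctl() below.
 */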
static int qeth_snmp_command_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_arp_query_info *qinfo = reply->param;
struct qeth_ipacmd_setadpparms *adp_cmd;
unsigned int data_len;
void *snmp_data;
QETH_CARD_TEXT(card, 3, "snpcmdcb");
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
return -EIO;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
return -EIO;
}
adp_cmd = &cmd->data.setadapterparms;
data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
if (adp_cmd->hdr.seq_no == 1) {
snmp_data = &adp_cmd->data.snmp;
} else {
snmp_data = &adp_cmd->data.snmp.request;
data_len -= offsetof(struct qeth_snmp_cmd, request);
}
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
return -ENOSPC;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
qinfo->udata_offset += data_len;
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
return 1;
return 0;
}
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
struct qeth_snmp_ureq __user *ureq;
struct qeth_cmd_buffer *iob;
unsigned int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
QETH_CARD_TEXT(card, 3, "snmpcmd");
if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
IS_LAYER3(card))
return -EOPNOTSUPP;
ureq = (struct qeth_snmp_ureq __user *) udata;
if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
get_user(req_len, &ureq->hdr.req_len))
return -EFAULT;
/* Sanitize user input, to avoid overflows in iob size calculation: */
if (req_len > QETH_BUFSIZE)
return -EINVAL;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
if (!iob)
return -ENOMEM;
if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
&ureq->cmd, req_len)) {
qeth_put_cmd(iob);
return -EFAULT;
}
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
qeth_put_cmd(iob);
return -ENOMEM;
}
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
CARD_DEVID(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
kfree(qinfo.udata);
return rc;
}
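/*
 * Illustrative note: the buffer passed in via SIOC_QETH_ADP_SET_SNMP_CONTROL
 * is a struct qeth_snmp_ureq, i.e. a header carrying data_len (size of the
 * reply buffer) and req_len (size of the request that follows). Reply
 * fragments are appended behind that header, one qeth_snmp_command_cb()
 * invocation per fragment, until seq_no reaches used_total.
 */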
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_qoat_priv *priv = reply->param;
int resdatalen;
QETH_CARD_TEXT(card, 3, "qoatcb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
if (resdatalen > (priv->buffer_len - priv->response_len))
return -ENOSPC;
memcpy(priv->buffer + priv->response_len,
&cmd->data.setadapterparms.hdr, resdatalen);
priv->response_len += resdatalen;
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
return 1;
return 0;
}
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
int rc = 0;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct qeth_query_oat *oat_req;
struct qeth_query_oat_data oat_data;
struct qeth_qoat_priv priv;
void __user *tmp;
QETH_CARD_TEXT(card, 3, "qoatcmd");
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
return -EOPNOTSUPP;
if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
return -EFAULT;
priv.buffer_len = oat_data.buffer_len;
priv.response_len = 0;
priv.buffer = vzalloc(oat_data.buffer_len);
if (!priv.buffer)
return -ENOMEM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
SETADP_DATA_SIZEOF(query_oat));
if (!iob) {
rc = -ENOMEM;
goto out_free;
}
cmd = __ipa_cmd(iob);
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = oat_data.command;
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
if (!rc) {
tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
u64_to_user_ptr(oat_data.ptr);
oat_data.response_len = priv.response_len;
if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
copy_to_user(udata, &oat_data, sizeof(oat_data)))
rc = -EFAULT;
}
out_free:
vfree(priv.buffer);
return rc;
}
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_reply *reply_priv,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_link_info *link_info = reply_priv->param;
struct qeth_query_oat_physical_if *phys_if;
struct qeth_query_oat_reply *reply;
QETH_CARD_TEXT(card, 2, "qoatincb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
/* Multi-part reply is unexpected, don't bother: */
if (cmd->data.setadapterparms.hdr.used_total > 1)
return -EINVAL;
/* Expect the reply to start with phys_if data: */
reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
reply->length < sizeof(*reply))
return -EINVAL;
phys_if = &reply->phys_if;
switch (phys_if->speed_duplex) {
case QETH_QOAT_PHYS_SPEED_10M_HALF:
link_info->speed = SPEED_10;
link_info->duplex = DUPLEX_HALF;
break;
case QETH_QOAT_PHYS_SPEED_10M_FULL:
link_info->speed = SPEED_10;
link_info->duplex = DUPLEX_FULL;
break;
case QETH_QOAT_PHYS_SPEED_100M_HALF:
link_info->speed = SPEED_100;
link_info->duplex = DUPLEX_HALF;
break;
case QETH_QOAT_PHYS_SPEED_100M_FULL:
link_info->speed = SPEED_100;
link_info->duplex = DUPLEX_FULL;
break;
case QETH_QOAT_PHYS_SPEED_1000M_HALF:
link_info->speed = SPEED_1000;
link_info->duplex = DUPLEX_HALF;
break;
case QETH_QOAT_PHYS_SPEED_1000M_FULL:
link_info->speed = SPEED_1000;
link_info->duplex = DUPLEX_FULL;
break;
case QETH_QOAT_PHYS_SPEED_10G_FULL:
link_info->speed = SPEED_10000;
link_info->duplex = DUPLEX_FULL;
break;
case QETH_QOAT_PHYS_SPEED_25G_FULL:
link_info->speed = SPEED_25000;
link_info->duplex = DUPLEX_FULL;
break;
case QETH_QOAT_PHYS_SPEED_UNKNOWN:
default:
link_info->speed = SPEED_UNKNOWN;
link_info->duplex = DUPLEX_UNKNOWN;
break;
}
switch (phys_if->media_type) {
case QETH_QOAT_PHYS_MEDIA_COPPER:
link_info->port = PORT_TP;
link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
break;
case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
link_info->port = PORT_FIBRE;
link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
break;
case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
link_info->port = PORT_FIBRE;
link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
break;
default:
link_info->port = PORT_OTHER;
link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
break;
}
return 0;
}
static void qeth_init_link_info(struct qeth_card *card)
{
qeth_default_link_info(card);
/* Get more accurate data via QUERY OAT: */
if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
struct qeth_link_info link_info;
struct qeth_cmd_buffer *iob;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
SETADP_DATA_SIZEOF(query_oat));
if (iob) {
struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
struct qeth_query_oat *oat_req;
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
if (!qeth_send_ipa_cmd(card, iob,
qeth_init_link_info_oat_cb,
&link_info)) {
if (link_info.speed != SPEED_UNKNOWN)
card->info.link_info.speed = link_info.speed;
if (link_info.duplex != DUPLEX_UNKNOWN)
card->info.link_info.duplex = link_info.duplex;
if (link_info.port != PORT_OTHER)
card->info.link_info.port = link_info.port;
if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
card->info.link_info.link_mode = link_info.link_mode;
}
}
}
}
/**
* qeth_vm_request_mac() - Request a hypervisor-managed MAC address
* @card: pointer to a qeth_card
*
 * Return: 0 if a MAC address has been set for the card's netdevice,
 *	   or a non-zero return code for the various error conditions.
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
struct diag26c_mac_resp *response;
struct diag26c_mac_req *request;
int rc;
QETH_CARD_TEXT(card, 2, "vmreqmac");
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
rc = -ENOMEM;
goto out;
}
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
request->devno = card->info.ddev_devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
if (rc)
goto out;
QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
rc = -EIO;
QETH_CARD_TEXT(card, 2, "badresp");
QETH_CARD_HEX(card, 2, &request->resp_buf_len,
sizeof(request->resp_buf_len));
} else if (!is_valid_ether_addr(response->mac)) {
rc = -EINVAL;
QETH_CARD_TEXT(card, 2, "badmac");
QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
} else {
eth_hw_addr_set(card->dev, response->mac);
}
out:
kfree(response);
kfree(request);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
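/*
 * Illustrative note: the DIAG 0x26C "MAC services" call above asks the
 * hypervisor for the MAC address it has configured for this device. The
 * request carries the device number and the expected response version;
 * the reply is rejected unless the version matches and the returned MAC
 * is a valid unicast address.
 */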
static void qeth_determine_capabilities(struct qeth_card *card)
{
struct qeth_channel *channel = &card->data;
struct ccw_device *ddev = channel->ccwdev;
int rc;
int ddev_offline = 0;
QETH_CARD_TEXT(card, 2, "detcapab");
if (!ddev->online) {
ddev_offline = 1;
rc = qeth_start_channel(channel);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
}
}
rc = qeth_read_conf_data(card);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_offline;
}
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
if (rc)
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}
out_offline:
if (ddev_offline == 1)
qeth_stop_channel(channel);
out:
return;
}
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
struct qeth_card_info *info = &card->info;
struct ccw_device *cdev = CARD_DDEV(card);
struct ccw_dev_id dev_id;
QETH_CARD_TEXT(card, 2, "ccwconfd");
ccw_device_get_id(cdev, &dev_id);
info->ddev_devno = dev_id.devno;
info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
!ccw_device_get_iid(cdev, &info->iid) &&
!ccw_device_get_chid(cdev, 0, &info->chid);
info->ssid = dev_id.ssid;
dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
info->chid, info->chpid);
QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
struct qeth_qib_parms *qib_parms = NULL;
struct qdio_initialize init_data;
unsigned int no_input_qs = 1;
unsigned int i;
int rc = 0;
QETH_CARD_TEXT(card, 2, "qdioest");
if (!IS_IQD(card) && !IS_VM_NIC(card)) {
qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
if (!qib_parms)
return -ENOMEM;
qeth_fill_qib_parms(card, qib_parms);
}
in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
if (card->options.cq == QETH_CQ_ENABLED) {
in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
no_input_qs++;
}
for (i = 0; i < card->qdio.no_out_queues; i++)
out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
memset(&init_data, 0, sizeof(struct qdio_initialize));
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
init_data.qib_param_field = (void *)qib_parms;
init_data.no_input_qs = no_input_qs;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
init_data.output_handler = qeth_qdio_output_handler;
init_data.irq_poll = qeth_qdio_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
init_data.no_output_qs);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
goto out;
}
rc = qdio_establish(CARD_DDEV(card), &init_data);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
qdio_free(CARD_DDEV(card));
}
}
switch (card->options.cq) {
case QETH_CQ_ENABLED:
dev_info(&card->gdev->dev, "Completion Queue support enabled");
break;
case QETH_CQ_DISABLED:
dev_info(&card->gdev->dev, "Completion Queue support disabled");
break;
default:
break;
}
out:
kfree(qib_parms);
return rc;
}
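/*
 * Illustrative note: the qdio_initialize block above wires one input queue
 * (plus an optional completion queue), the per-device output queues and the
 * input/output/poll handlers into the QDIO layer. The ALLOCATED ->
 * ESTABLISHED state transition guards against establishing the queues
 * twice; on failure the state is rolled back to QETH_QDIO_ALLOCATED.
 */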
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
unregister_service_level(&card->qeth_service_level);
debugfs_remove_recursive(card->debugfs);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
}
static void qeth_trace_features(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "features");
QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
QETH_CARD_HEX(card, 2, &card->info.diagass_support,
sizeof(card->info.diagass_support));
}
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
.driver_info = QETH_CARD_TYPE_OSX},
#endif
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
static struct ccw_driver qeth_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "qeth",
},
.ids = qeth_ids,
.probe = ccwgroup_probe_ccwdev,
.remove = ccwgroup_remove_ccwdev,
};
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
QETH_CARD_TEXT(card, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
rc = qeth_update_from_chp_desc(card);
if (rc)
return rc;
retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, !IS_IQD(card));
qeth_stop_channel(&card->data);
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
rc = qeth_start_channel(&card->read);
if (rc)
goto retriable;
rc = qeth_start_channel(&card->write);
if (rc)
goto retriable;
rc = qeth_start_channel(&card->data);
if (rc)
goto retriable;
retriable:
if (rc == -ERESTARTSYS) {
QETH_CARD_TEXT(card, 2, "break1");
return rc;
} else if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
if (--retries < 0)
goto out;
else
goto retry;
}
qeth_determine_capabilities(card);
qeth_read_ccw_conf_data(card);
qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
if (rc == -EINTR) {
QETH_CARD_TEXT(card, 2, "break2");
return rc;
} else if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
if (--retries < 0)
goto out;
else
goto retry;
}
rc = qeth_idx_activate_write_channel(card);
if (rc == -EINTR) {
QETH_CARD_TEXT(card, 2, "break3");
return rc;
} else if (rc) {
QETH_CARD_TEXT_(card, 2, "4err%d", rc);
if (--retries < 0)
goto out;
else
goto retry;
}
card->read_or_write_problem = 0;
rc = qeth_mpc_initialize(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out;
}
rc = qeth_send_startlan(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
if (rc == -ENETDOWN) {
dev_warn(&card->gdev->dev, "The LAN is offline\n");
*carrier_ok = false;
} else {
goto out;
}
} else {
*carrier_ok = true;
}
card->options.ipa4.supported = 0;
card->options.ipa6.supported = 0;
card->options.adp.supported = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
if (rc == -ENOMEM)
goto out;
if (qeth_is_supported(card, IPA_IPV6)) {
rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
if (rc == -ENOMEM)
goto out;
}
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
QETH_CARD_TEXT_(card, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
}
qeth_trace_features(card);
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
(card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
card->info.hwtrap = 0;
if (card->options.isolation != ISOLATION_MODE_NONE) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation);
if (rc)
goto out;
}
qeth_init_link_info(card);
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "9err%d", rc);
goto out;
}
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
CARD_DEVID(card), rc);
return rc;
}
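/*
 * Illustrative note: qeth_hardsetup_card() (re)starts the read/write/data
 * channels and performs IDX activation with up to three retries, then runs
 * the MPC initialization, STARTLAN and the initial IPA / adapter-parameter
 * queries. -ERESTARTSYS and -EINTR (e.g. a pending signal) abort without
 * retrying.
 */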
static int qeth_set_online(struct qeth_card *card,
const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
rc = qeth_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
rc = -ENODEV;
goto err_hardsetup;
}
qeth_print_status_message(card);
if (card->dev->reg_state != NETREG_REGISTERED)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
/* let user_space know that device is online */
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
return 0;
err_online:
err_hardsetup:
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
qeth_stop_channel(&card->data);
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
return rc;
}
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
bool resetting)
{
int rc, rc2, rc3;
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
card->info.hwtrap = 1;
}
/* cancel any stalled cmd that might block the rtnl: */
qeth_clear_ipacmd_list(card);
rtnl_lock();
netif_device_detach(card->dev);
netif_carrier_off(card->dev);
rtnl_unlock();
cancel_work_sync(&card->rx_mode_work);
disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
qeth_default_link_info(card);
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
/* let user_space know that device is offline */
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
/* Lock-free, other users will block until we are done. */
disc = card->discipline;
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_offline(card, disc, true);
rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
} else {
qeth_set_offline(card, disc, true);
ccwgroup_set_offline(card->gdev, false);
dev_warn(&card->gdev->dev,
"The qeth device driver failed to recover an error on the device\n");
}
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
}
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct net_device *dev = skb->dev;
if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
"FAKELL", skb->len);
return;
}
if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
ETH_P_IP;
unsigned char tg_addr[ETH_ALEN];
skb_reset_network_header(skb);
switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
case QETH_CAST_MULTICAST:
if (prot == ETH_P_IP)
ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
else
ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
QETH_CARD_STAT_INC(card, rx_multicast);
break;
case QETH_CAST_BROADCAST:
ether_addr_copy(tg_addr, dev->broadcast);
QETH_CARD_STAT_INC(card, rx_multicast);
break;
default:
if (card->options.sniffer)
skb->pkt_type = PACKET_OTHERHOST;
ether_addr_copy(tg_addr, dev->dev_addr);
}
if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
dev_hard_header(skb, dev, prot, tg_addr,
&l3_hdr->next_hop.rx.src_mac, skb->len);
else
dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
skb->len);
}
/* copy VLAN tag from hdr into skb */
if (!card->options.sniffer &&
(l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
l3_hdr->vlan_id :
l3_hdr->next_hop.rx.vlan_id;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
}
#endif
static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
bool uses_frags, bool is_cso)
{
struct napi_struct *napi = &card->napi;
if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
QETH_CARD_STAT_INC(card, rx_skb_csum);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
QETH_CARD_STAT_INC(card, rx_packets);
if (skb_is_nonlinear(skb)) {
QETH_CARD_STAT_INC(card, rx_sg_skbs);
QETH_CARD_STAT_ADD(card, rx_sg_frags,
skb_shinfo(skb)->nr_frags);
}
if (uses_frags) {
napi_gro_frags(napi);
} else {
skb->protocol = eth_type_trans(skb, skb->dev);
napi_gro_receive(napi, skb);
}
}
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
struct page *page = virt_to_page(data);
unsigned int next_frag;
next_frag = skb_shinfo(skb)->nr_frags;
get_page(page);
skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
data_len);
}
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
static int qeth_extract_skb(struct qeth_card *card,
struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
int *__offset)
{
struct qeth_priv *priv = netdev_priv(card->dev);
struct qdio_buffer *buffer = qethbuffer->buffer;
struct napi_struct *napi = &card->napi;
struct qdio_buffer_element *element;
unsigned int linear_len = 0;
bool uses_frags = false;
int offset = *__offset;
bool use_rx_sg = false;
unsigned int headroom;
struct qeth_hdr *hdr;
struct sk_buff *skb;
int skb_len = 0;
bool is_cso;
element = &buffer->element[*element_no];
next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
return -ENODATA;
element++;
offset = 0;
}
hdr = phys_to_virt(element->addr) + offset;
offset += sizeof(*hdr);
skb = NULL;
switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb_len = hdr->hdr.l2.pkt_length;
is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
linear_len = ETH_HLEN;
headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
skb_len = hdr->hdr.l3.length;
is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
if (!IS_LAYER3(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
}
if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
linear_len = ETH_HLEN;
headroom = 0;
break;
}
if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
linear_len = sizeof(struct ipv6hdr);
else
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
default:
if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
else
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
/* Can't determine packet length, drop the whole buffer. */
return -EPROTONOSUPPORT;
}
if (skb_len < linear_len) {
QETH_CARD_STAT_INC(card, rx_dropped_runt);
goto walk_packet;
}
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
(skb_len > READ_ONCE(priv->rx_copybreak) &&
!atomic_read(&card->force_alloc_skb));
if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
if (qethbuffer->rx_skb &&
skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
skb = qethbuffer->rx_skb;
qethbuffer->rx_skb = NULL;
goto use_skb;
}
skb = napi_get_frags(napi);
if (!skb) {
/* -ENOMEM, no point in falling back further. */
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
goto walk_packet;
}
if (skb_tailroom(skb) >= linear_len + headroom) {
uses_frags = true;
goto use_skb;
}
netdev_info_once(card->dev,
"Insufficient linear space in NAPI frags skb, need %u but have %u\n",
linear_len + headroom, skb_tailroom(skb));
/* Shouldn't happen. Don't optimize, fall back to linear skb. */
}
linear_len = skb_len;
skb = napi_alloc_skb(napi, linear_len + headroom);
if (!skb) {
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
goto walk_packet;
}
use_skb:
if (headroom)
skb_reserve(skb, headroom);
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
char *data = phys_to_virt(element->addr) + offset;
skb_len -= data_len;
offset += data_len;
/* Extract data from current element: */
if (skb && data_len) {
if (linear_len) {
unsigned int copy_len;
copy_len = min_t(unsigned int, linear_len,
data_len);
skb_put_data(skb, data, copy_len);
linear_len -= copy_len;
data_len -= copy_len;
data += copy_len;
}
if (data_len)
qeth_create_skb_frag(skb, data, data_len);
}
/* Step forward to next element: */
if (skb_len) {
if (qeth_is_last_sbale(element)) {
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
if (skb) {
if (uses_frags)
napi_free_frags(napi);
else
kfree_skb(skb);
QETH_CARD_STAT_INC(card,
rx_length_errors);
}
return -EMSGSIZE;
}
element++;
offset = 0;
}
}
/* This packet was skipped, go get another one: */
if (!skb)
goto next_packet;
*element_no = element - &buffer->element[0];
*__offset = offset;
#if IS_ENABLED(CONFIG_QETH_L3)
if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
qeth_l3_rebuild_skb(card, skb, hdr);
#endif
qeth_receive_skb(card, skb, uses_frags, is_cso);
return 0;
}
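/*
 * Illustrative note: qeth_extract_skb() walks the SBAL elements of one RX
 * buffer. For each packet it parses the qeth_hdr and then either copies the
 * whole frame into a freshly allocated linear skb (frames up to
 * priv->rx_copybreak, while the completion queue is inactive) or copies only
 * the protocol headers and attaches the remaining element data as page
 * fragments. A return of -ENODATA tells the caller that the buffer holds no
 * further packet.
 */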
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
struct qeth_qdio_buffer *buf, bool *done)
{
unsigned int work_done = 0;
while (budget) {
if (qeth_extract_skb(card, buf, &card->rx.buf_element,
&card->rx.e_offset)) {
*done = true;
break;
}
work_done++;
budget--;
}
return work_done;
}
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
struct qeth_rx *ctx = &card->rx;
unsigned int work_done = 0;
while (budget > 0) {
struct qeth_qdio_buffer *buffer;
unsigned int skbs_done = 0;
bool done = false;
/* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
card->rx.b_count =
qdio_inspect_input_queue(CARD_DDEV(card), 0,
&card->rx.b_index,
&card->rx.qdio_err);
if (card->rx.b_count <= 0) {
card->rx.b_count = 0;
break;
}
}
/* Process one completed RX buffer: */
buffer = &card->qdio.in_q->bufs[card->rx.b_index];
if (!(card->rx.qdio_err &&
qeth_check_qdio_errors(card, buffer->buffer,
card->rx.qdio_err, "qinerr")))
skbs_done = qeth_extract_skbs(card, budget, buffer,
&done);
else
done = true;
work_done += skbs_done;
budget -= skbs_done;
if (done) {
QETH_CARD_STAT_INC(card, rx_bufs);
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
buffer->pool_entry = NULL;
card->rx.b_count--;
ctx->bufs_refill++;
ctx->bufs_refill -= qeth_rx_refill_queue(card,
ctx->bufs_refill);
/* Step forward to next buffer: */
card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
card->rx.buf_element = 0;
card->rx.e_offset = 0;
}
}
return work_done;
}
static void qeth_cq_poll(struct qeth_card *card)
{
unsigned int work_done = 0;
while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
unsigned int start, error;
int completed;
completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
&error);
if (completed <= 0)
return;
qeth_qdio_cq_handler(card, error, 1, start, completed);
work_done += completed;
}
}
int qeth_poll(struct napi_struct *napi, int budget)
{
struct qeth_card *card = container_of(napi, struct qeth_card, napi);
unsigned int work_done;
work_done = qeth_rx_poll(card, budget);
if (qeth_use_tx_irqs(card)) {
struct qeth_qdio_out_q *queue;
unsigned int i;
qeth_for_each_output_queue(card, queue, i) {
if (!qeth_out_queue_is_empty(queue))
napi_schedule(&queue->napi);
}
}
if (card->options.cq == QETH_CQ_ENABLED)
qeth_cq_poll(card);
if (budget) {
struct qeth_rx *ctx = &card->rx;
/* Process any substantial refill backlog: */
ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
if (work_done >= budget)
return work_done;
}
if (napi_complete_done(napi, work_done) &&
qdio_start_irq(CARD_DDEV(card)))
napi_schedule(napi);
return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
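/*
 * Illustrative note: qeth_poll() is the main NAPI handler. It drains
 * completed RX buffers up to the budget, kicks the per-TX-queue NAPI
 * instances when TX completion interrupts are in use, services the optional
 * completion queue and refills the RX queue. When the RX budget is not
 * exhausted it completes NAPI and re-arms the QDIO interrupt via
 * qdio_start_irq(), rescheduling itself if new work raced in.
 */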
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
unsigned int bidx, unsigned int qdio_error,
int budget)
{
struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
u8 sflags = buffer->buffer->element[15].sflags;
struct qeth_card *card = queue->card;
bool error = !!qdio_error;
if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
struct qaob *aob = buffer->aob;
struct qeth_qaob_priv1 *priv;
enum iucv_tx_notify notify;
if (!aob) {
netdev_WARN_ONCE(card->dev,
"Pending TX buffer %#x without QAOB on TX queue %u\n",
bidx, queue->queue_no);
qeth_schedule_recovery(card);
return;
}
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
priv = (struct qeth_qaob_priv1 *)&aob->user1;
/* QAOB hasn't completed yet: */
if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
/* Prepare the queue slot for immediate re-use: */
qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
}
list_add(&buffer->list_entry, &queue->pending_bufs);
/* Skip clearing the buffer: */
return;
}
/* QAOB already completed: */
notify = qeth_compute_cq_notification(aob->aorc, 0);
qeth_notify_skbs(queue, buffer, notify);
error = !!aob->aorc;
memset(aob, 0, sizeof(*aob));
} else if (card->options.cq == QETH_CQ_ENABLED) {
qeth_notify_skbs(queue, buffer,
qeth_compute_cq_notification(sflags, 0));
}
qeth_clear_output_buffer(queue, buffer, error, budget);
}
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
unsigned int queue_no = queue->queue_no;
struct qeth_card *card = queue->card;
struct net_device *dev = card->dev;
unsigned int work_done = 0;
struct netdev_queue *txq;
if (IS_IQD(card))
txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
else
txq = netdev_get_tx_queue(dev, queue_no);
while (1) {
unsigned int start, error, i;
unsigned int packets = 0;
unsigned int bytes = 0;
int completed;
qeth_tx_complete_pending_bufs(card, queue, false, budget);
if (qeth_out_queue_is_empty(queue)) {
napi_complete(napi);
return 0;
}
/* Give the CPU a breather: */
if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
QETH_TXQ_STAT_INC(queue, completion_yield);
if (napi_complete_done(napi, 0))
napi_schedule(napi);
return 0;
}
completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
&start, &error);
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
if (napi_complete_done(napi, 0) &&
!atomic_read(&queue->set_pci_flags_count))
qeth_tx_arm_timer(queue, queue->rescan_usecs);
return 0;
}
for (i = start; i < start + completed; i++) {
struct qeth_qdio_out_buffer *buffer;
unsigned int bidx = QDIO_BUFNR(i);
buffer = queue->bufs[bidx];
packets += buffer->frames;
bytes += buffer->bytes;
qeth_handle_send_error(card, buffer, error);
if (IS_IQD(card))
qeth_iqd_tx_complete(queue, bidx, error, budget);
else
qeth_clear_output_buffer(queue, buffer, error,
budget);
}
atomic_sub(completed, &queue->used_buffers);
work_done += completed;
if (IS_IQD(card))
netdev_tx_completed_queue(txq, packets, bytes);
else
qeth_check_outbound_queue(queue);
/* xmit may have observed the full-condition, but not yet
* stopped the txq. In which case the code below won't trigger.
* So before returning, xmit will re-check the txq's fill level
* and wake it up if needed.
*/
if (netif_tx_queue_stopped(txq) &&
!qeth_out_queue_is_full(queue))
netif_tx_wake_queue(txq);
}
}
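/*
 * Illustrative note: qeth_tx_poll() is the NAPI handler of a single TX
 * queue. It loops until the queue is empty: completed buffers are either
 * handed to qeth_iqd_tx_complete() (which resolves QDIO_ERROR_SLSB_PENDING
 * via the buffer's QAOB) or cleared directly, BQL accounting is updated for
 * IQD devices, and a txq that was stopped on the full condition is woken up
 * once space is available again.
 */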
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
if (!cmd->hdr.return_code)
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
return cmd->hdr.return_code;
}
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipa_caps *caps = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
return -EIO;
caps->supported = cmd->data.setassparms.data.caps.supported;
caps->enabled = cmd->data.setassparms.data.caps.enabled;
return 0;
}
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "defadpcb");
if (cmd->hdr.return_code)
return -EIO;
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
card->options.ipa4.enabled = cmd->hdr.assists.enabled;
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6.enabled = cmd->hdr.assists.enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code,
unsigned int data_length,
enum qeth_prot_versions prot)
{
struct qeth_ipacmd_setassparms *setassparms;
struct qeth_ipacmd_setassparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
data_length +
offsetof(struct qeth_ipacmd_setassparms,
data));
if (!iob)
return NULL;
setassparms = &__ipa_cmd(iob)->data.setassparms;
setassparms->assist_no = ipa_func;
hdr = &setassparms->hdr;
hdr->length = sizeof(*hdr) + data_length;
hdr->command_code = cmd_code;
return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code, u32 *data,
enum qeth_prot_versions prot)
{
unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
if (data)
__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
int x;
for (x = 0; x < QETH_DBF_INFOS; x++) {
debug_unregister(qeth_dbf[x].id);
qeth_dbf[x].id = NULL;
}
}
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
char dbf_txt_buf[32];
va_list args;
if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
static int qeth_register_dbf_views(void)
{
int ret;
int x;
for (x = 0; x < QETH_DBF_INFOS; x++) {
/* register the areas */
qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
qeth_dbf[x].pages,
qeth_dbf[x].areas,
qeth_dbf[x].len);
if (qeth_dbf[x].id == NULL) {
qeth_unregister_dbf_views();
return -ENOMEM;
}
/* register a view */
ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
if (ret) {
qeth_unregister_dbf_views();
return ret;
}
/* set a passing level */
debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
}
return 0;
}
static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
int qeth_setup_discipline(struct qeth_card *card,
enum qeth_discipline_id discipline)
{
int rc;
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
card->discipline = try_then_request_module(
symbol_get(qeth_l3_discipline), "qeth_l3");
break;
case QETH_DISCIPLINE_LAYER2:
card->discipline = try_then_request_module(
symbol_get(qeth_l2_discipline), "qeth_l2");
break;
default:
break;
}
mutex_unlock(&qeth_mod_mutex);
if (!card->discipline) {
dev_err(&card->gdev->dev, "There is no kernel module to "
"support discipline %d\n", discipline);
return -EINVAL;
}
rc = card->discipline->setup(card->gdev);
if (rc) {
if (discipline == QETH_DISCIPLINE_LAYER2)
symbol_put(qeth_l2_discipline);
else
symbol_put(qeth_l3_discipline);
card->discipline = NULL;
return rc;
}
card->options.layer = discipline;
return 0;
}
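/*
 * Illustrative note: the discipline (layer 2 vs. layer 3 operation) lives in
 * the qeth_l2 / qeth_l3 modules. try_then_request_module() first attempts
 * symbol_get() on an already-loaded module and only then triggers a module
 * load; if the discipline's setup() fails, the symbol reference is dropped
 * again so the module can be unloaded.
 */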
void qeth_remove_discipline(struct qeth_card *card)
{
card->discipline->remove(card->gdev);
if (IS_LAYER2(card))
symbol_put(qeth_l2_discipline);
else
symbol_put(qeth_l3_discipline);
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
card->discipline = NULL;
}
static const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
};
#define DBF_NAME_LEN 20
struct qeth_dbf_entry {
char dbf_name[DBF_NAME_LEN];
debug_info_t *dbf_info;
struct list_head dbf_list;
};
static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);
static debug_info_t *qeth_get_dbf_entry(char *name)
{
struct qeth_dbf_entry *entry;
debug_info_t *rc = NULL;
mutex_lock(&qeth_dbf_list_mutex);
list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
if (strcmp(entry->dbf_name, name) == 0) {
rc = entry->dbf_info;
break;
}
}
mutex_unlock(&qeth_dbf_list_mutex);
return rc;
}
static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
struct qeth_dbf_entry *new_entry;
card->debug = debug_register(name, 2, 1, 8);
if (!card->debug) {
QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
goto err;
}
if (debug_register_view(card->debug, &debug_hex_ascii_view))
goto err_dbg;
new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
if (!new_entry)
goto err_dbg;
	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
new_entry->dbf_info = card->debug;
mutex_lock(&qeth_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qeth_dbf_list);
mutex_unlock(&qeth_dbf_list_mutex);
return 0;
err_dbg:
debug_unregister(card->debug);
err:
return -ENOMEM;
}
static void qeth_clear_dbf_list(void)
{
struct qeth_dbf_entry *entry, *tmp;
mutex_lock(&qeth_dbf_list_mutex);
list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
list_del(&entry->dbf_list);
debug_unregister(entry->dbf_info);
kfree(entry);
}
mutex_unlock(&qeth_dbf_list_mutex);
}
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
struct net_device *dev;
struct qeth_priv *priv;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
ether_setup, QETH_MAX_OUT_QUEUES, 1);
break;
case QETH_CARD_TYPE_OSM:
dev = alloc_etherdev(sizeof(*priv));
break;
default:
dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
if (!dev)
return NULL;
priv = netdev_priv(dev);
priv->rx_copybreak = QETH_RX_COPYBREAK;
priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
dev->min_mtu = 576;
/* initialized when device first goes online: */
dev->max_mtu = 0;
dev->mtu = 0;
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
dev->ethtool_ops = &qeth_ethtool_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card))
dev->features |= NETIF_F_SG;
return dev;
}
struct net_device *qeth_clone_netdev(struct net_device *orig)
{
struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
if (!clone)
return NULL;
clone->dev_port = orig->dev_port;
return clone;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
struct device *dev;
int rc;
enum qeth_discipline_id enforced_disc;
char dbf_name[DBF_NAME_LEN];
QETH_DBF_TEXT(SETUP, 2, "probedev");
dev = &gdev->dev;
if (!get_device(dev))
return -ENODEV;
QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
card = qeth_alloc_card(gdev);
if (!card) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
rc = -ENOMEM;
goto err_dev;
}
scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
dev_name(&gdev->dev));
card->debug = qeth_get_dbf_entry(dbf_name);
if (!card->debug) {
rc = qeth_add_dbf_entry(card, dbf_name);
if (rc)
goto err_card;
}
qeth_setup_card(card);
card->dev = qeth_alloc_netdev(card);
if (!card->dev) {
rc = -ENOMEM;
goto err_card;
}
qeth_determine_capabilities(card);
qeth_set_blkt_defaults(card);
card->qdio.in_q = qeth_alloc_qdio_queue();
if (!card->qdio.in_q) {
rc = -ENOMEM;
goto err_rx_queue;
}
card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
goto err_chp_desc;
gdev->dev.groups = qeth_dev_groups;
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
case QETH_DISCIPLINE_UNDETERMINED:
gdev->dev.type = &qeth_generic_devtype;
break;
default:
card->info.layer_enforced = true;
/* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_setup_discipline(card, enforced_disc);
if (rc)
goto err_setup_disc;
break;
}
return 0;
err_setup_disc:
err_chp_desc:
qeth_free_qdio_queue(card->qdio.in_q);
err_rx_queue:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
err_dev:
put_device(dev);
return rc;
}
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
QETH_CARD_TEXT(card, 2, "removedv");
mutex_lock(&card->discipline_mutex);
if (card->discipline)
qeth_remove_discipline(card);
mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
qeth_free_qdio_queue(card->qdio.in_q);
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
}
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_discipline_id def_discipline;
mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
rc = qeth_setup_discipline(card, def_discipline);
if (rc)
goto err;
}
rc = qeth_set_online(card, card->discipline);
err:
mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
mutex_lock(&card->discipline_mutex);
rc = qeth_set_offline(card, card->discipline, false);
mutex_unlock(&card->discipline_mutex);
return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
qdio_free(CARD_DDEV(card));
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
int err;
err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
buf);
return err ? err : count;
}
static DRIVER_ATTR_WO(group);
static struct attribute *qeth_drv_attrs[] = {
&driver_attr_group.attr,
NULL,
};
static struct attribute_group qeth_drv_attr_group = {
.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
&qeth_drv_attr_group,
NULL,
};
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.driver = {
.groups = qeth_drv_attr_groups,
.owner = THIS_MODULE,
.name = "qeth",
},
.ccw_driver = &qeth_ccw_driver,
.setup = qeth_core_probe_device,
.remove = qeth_core_remove_device,
.set_online = qeth_core_set_online,
.set_offline = qeth_core_set_offline,
.shutdown = qeth_core_shutdown,
};
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, data);
break;
case SIOC_QETH_GET_CARD_TYPE:
if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
!IS_VM_NIC(card))
return 1;
return 0;
case SIOC_QETH_QUERY_OAT:
rc = qeth_query_oat_command(card, data);
break;
default:
rc = -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct mii_ioctl_data *mii_data;
int rc = 0;
switch (cmd) {
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
break;
case SIOCGMIIREG:
mii_data = if_mii(rq);
if (mii_data->phy_id != 0)
rc = -EINVAL;
else
mii_data->val_out = qeth_mdio_read(dev,
mii_data->phy_id, mii_data->reg_num);
break;
default:
return -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
u32 *features = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
return -EIO;
*features = cmd->data.setassparms.data.flags_32bit;
return 0;
}
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
enum qeth_prot_versions prot)
{
return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
NULL, prot);
}
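/* Enable checksum offload via a three-step IPA sequence: START to query the
 * supported features, ENABLE with the required feature bits, then verify the
 * returned caps. Any failure rolls the assist back via qeth_set_csum_off().
 */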
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
enum qeth_prot_versions prot, u8 *lp2lp)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_caps caps;
u32 features;
int rc;
/* some L3 HW requires combined L3+L4 csum offload: */
if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
cstype == IPA_OUTBOUND_CHECKSUM)
required_features |= QETH_IPA_CHECKSUM_IP_HDR;
iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
prot);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
if (rc)
return rc;
if ((required_features & features) != required_features) {
qeth_set_csum_off(card, cstype, prot);
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
SETASS_DATA_SIZEOF(flags_32bit),
prot);
if (!iob) {
qeth_set_csum_off(card, cstype, prot);
return -ENOMEM;
}
if (features & QETH_IPA_CHECKSUM_LP2LP)
required_features |= QETH_IPA_CHECKSUM_LP2LP;
__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
qeth_set_csum_off(card, cstype, prot);
return rc;
}
if (!qeth_ipa_caps_supported(&caps, required_features) ||
!qeth_ipa_caps_enabled(&caps, required_features)) {
qeth_set_csum_off(card, cstype, prot);
return -EOPNOTSUPP;
}
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
if (lp2lp)
*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
enum qeth_prot_versions prot, u8 *lp2lp)
{
return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
qeth_set_csum_off(card, cstype, prot);
}
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_tso_start_data *tso_data = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
return -EIO;
tso_data->mss = cmd->data.setassparms.data.tso.mss;
tso_data->supported = cmd->data.setassparms.data.tso.supported;
return 0;
}
static int qeth_set_tso_off(struct qeth_card *card,
enum qeth_prot_versions prot)
{
return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_STOP, NULL, prot);
}
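/* Same START -> ENABLE -> verify-caps sequence as for checksum offload,
 * rolled back via qeth_set_tso_off() on any failure.
 */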
static int qeth_set_tso_on(struct qeth_card *card,
enum qeth_prot_versions prot)
{
struct qeth_tso_start_data tso_data;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_caps caps;
int rc;
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0, prot);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
if (rc)
return rc;
if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
qeth_set_tso_off(card, prot);
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_ENABLE,
SETASS_DATA_SIZEOF(caps), prot);
if (!iob) {
qeth_set_tso_off(card, prot);
return -ENOMEM;
}
/* enable TSO capability */
__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
QETH_IPA_LARGE_SEND_TCP;
rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
qeth_set_tso_off(card, prot);
return rc;
}
if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
!qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
qeth_set_tso_off(card, prot);
return -EOPNOTSUPP;
}
dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
tso_data.mss);
return 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
enum qeth_prot_versions prot)
{
return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
int rc_ipv6;
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
QETH_PROT_IPV4, NULL);
if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
/* none or only the IPv4 Offload Assist is available, so rc_ipv4 is the result */
return rc_ipv4;
rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
QETH_PROT_IPV6, NULL);
if (on)
/* enable: success if any Assist is active */
return (rc_ipv6) ? rc_ipv4 : 0;
/* disable: failure if any Assist is still active */
return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
*/
void qeth_enable_hw_features(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t features;
features = dev->features;
/* force-off any feature that might need an IPA sequence.
* netdev_update_features() will restart them.
*/
dev->features &= ~dev->hw_features;
/* toggle VLAN filter, so that VIDs are re-programmed: */
if (IS_LAYER2(card) && IS_VM_NIC(card)) {
dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
netdev_update_features(dev);
if (features != dev->features)
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
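/* Compare the requested feature changes with what was actually applied: once
 * none of the offloads that are restricted for local next-hops is active
 * anymore, drop the cached local addresses for that IP version.
 */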
static void qeth_check_restricted_features(struct qeth_card *card,
netdev_features_t changed,
netdev_features_t actual)
{
netdev_features_t ipv6_features = NETIF_F_TSO6;
netdev_features_t ipv4_features = NETIF_F_TSO;
if (!card->info.has_lp2lp_cso_v6)
ipv6_features |= NETIF_F_IPV6_CSUM;
if (!card->info.has_lp2lp_cso_v4)
ipv4_features |= NETIF_F_IP_CSUM;
if ((changed & ipv6_features) && !(actual & ipv6_features))
qeth_flush_local_addrs6(card);
if ((changed & ipv4_features) && !(actual & ipv4_features))
qeth_flush_local_addrs4(card);
}
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t changed = dev->features ^ features;
int rc = 0;
QETH_CARD_TEXT(card, 2, "setfeat");
QETH_CARD_HEX(card, 2, &features, sizeof(features));
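/* Toggle each changed offload via its IPA assist. If a toggle fails, clear
 * its bit from 'changed' so that only the successful changes are committed
 * to dev->features below.
 */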
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
&card->info.has_lp2lp_cso_v4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
if (changed & NETIF_F_IPV6_CSUM) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
&card->info.has_lp2lp_cso_v6);
if (rc)
changed ^= NETIF_F_IPV6_CSUM;
}
if (changed & NETIF_F_RXCSUM) {
rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
if (rc)
changed ^= NETIF_F_RXCSUM;
}
if (changed & NETIF_F_TSO) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_TSO;
}
if (changed & NETIF_F_TSO6) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
QETH_PROT_IPV6);
if (rc)
changed ^= NETIF_F_TSO6;
}
qeth_check_restricted_features(card, dev->features ^ features,
dev->features ^ changed);
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
return 0;
/* something went wrong. save changed features and return error */
dev->features ^= changed;
return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
netdev_features_t qeth_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT(card, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
features &= ~NETIF_F_IPV6_CSUM;
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
QETH_CARD_HEX(card, 2, &features, sizeof(features));
return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
/* Traffic with local next-hop is not eligible for some offloads: */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
netdev_features_t restricted = 0;
if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
restricted |= NETIF_F_ALL_TSO;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
if (!card->info.has_lp2lp_cso_v4)
restricted |= NETIF_F_IP_CSUM;
if (restricted && qeth_next_hop_is_local_v4(card, skb))
features &= ~restricted;
break;
case htons(ETH_P_IPV6):
if (!card->info.has_lp2lp_cso_v6)
restricted |= NETIF_F_IPV6_CSUM;
if (restricted && qeth_next_hop_is_local_v6(card, skb))
features &= ~restricted;
break;
default:
break;
}
}
/* GSO segmentation builds skbs with
* a (small) linear part for the headers, and
* page frags for the data.
* Compared to a linear skb, the header-only part consumes an
* additional buffer element. This reduces buffer utilization, and
* hurts throughput. So compress small segments into one element.
*/
if (netif_needs_gso(skb, features)) {
/* match skb_segment(): */
unsigned int doffset = skb->data - skb_mac_header(skb);
unsigned int hsize = skb_shinfo(skb)->gso_size;
unsigned int hroom = skb_headroom(skb);
/* linearize only if resulting skb allocations are order-0: */
if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
features &= ~NETIF_F_SG;
}
return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
unsigned int i;
QETH_CARD_TEXT(card, 5, "getstat");
stats->rx_packets = card->stats.rx_packets;
stats->rx_bytes = card->stats.rx_bytes;
stats->rx_errors = card->stats.rx_length_errors +
card->stats.rx_frame_errors +
card->stats.rx_fifo_errors;
stats->rx_dropped = card->stats.rx_dropped_nomem +
card->stats.rx_dropped_notsupp +
card->stats.rx_dropped_runt;
stats->multicast = card->stats.rx_multicast;
stats->rx_length_errors = card->stats.rx_length_errors;
stats->rx_frame_errors = card->stats.rx_frame_errors;
stats->rx_fifo_errors = card->stats.rx_fifo_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
stats->tx_packets += queue->stats.tx_packets;
stats->tx_bytes += queue->stats.tx_bytes;
stats->tx_errors += queue->stats.tx_errors;
stats->tx_dropped += queue->stats.tx_dropped;
}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
#define TC_IQD_UCAST 0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
unsigned int ucast_txqs)
{
unsigned int prio;
/* IQD requires mcast traffic to be placed on a dedicated queue, and
* qeth_iqd_select_queue() deals with this.
* For unicast traffic, we defer the queue selection to the stack.
* By installing a trivial prio map that spans over only the unicast
* queues, we can encourage the stack to spread the ucast traffic evenly
* without selecting the mcast queue.
*/
/* One traffic class, spanning over all active ucast queues: */
netdev_set_num_tc(dev, 1);
netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
QETH_IQD_MIN_UCAST_TXQ);
/* Map all priorities to this traffic class: */
for (prio = 0; prio <= TC_BITMASK; prio++)
netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
struct net_device *dev = card->dev;
int rc;
/* Per netif_setup_tc(), adjust the mapping first: */
if (IS_IQD(card))
qeth_iqd_set_prio_tc_map(dev, count - 1);
rc = netif_set_real_num_tx_queues(dev, count);
if (rc && IS_IQD(card))
qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
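/* On IQD, non-unicast traffic always goes to the dedicated mcast TX queue;
 * unicast traffic is spread over the remaining queues and never ends up on
 * the mcast queue.
 */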
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev)
{
u16 txq;
if (cast_type != RTN_UNICAST)
return QETH_IQD_MCAST_TXQ;
if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
return QETH_IQD_MIN_UCAST_TXQ;
txq = netdev_pick_tx(dev, skb, sb_dev);
return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct qeth_card *card = dev->ml_priv;
if (qeth_uses_tx_prio_queueing(card))
return qeth_get_priority_queue(card, skb);
return netdev_pick_tx(dev, skb, sb_dev);
}
EXPORT_SYMBOL_GPL(qeth_osa_select_queue);
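/* Bring the interface up: mark the data channel as up, start all TX queues
 * and arm the RX NAPI instance plus one TX NAPI instance per output queue.
 */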
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
unsigned int i;
QETH_CARD_TEXT(card, 4, "qethopen");
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
local_bh_disable();
qeth_for_each_output_queue(card, queue, i) {
netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
napi_enable(&queue->napi);
napi_schedule(&queue->napi);
}
napi_enable(&card->napi);
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
int qeth_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
unsigned int i;
QETH_CARD_TEXT(card, 4, "qethstop");
napi_disable(&card->napi);
cancel_delayed_work_sync(&card->buffer_reclaim_work);
qdio_stop_irq(CARD_DDEV(card));
/* Quiesce the NAPI instances: */
qeth_for_each_output_queue(card, queue, i)
napi_disable(&queue->napi);
/* Stop .ndo_start_xmit, might still access queue->napi. */
netif_tx_disable(dev);
qeth_for_each_output_queue(card, queue, i) {
del_timer_sync(&queue->timer);
/* Queues may get re-allocated, so remove the NAPIs. */
netif_napi_del(&queue->napi);
}
return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
static int __init qeth_core_init(void)
{
int rc;
pr_info("loading core functions\n");
qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
rc = qeth_register_dbf_views();
if (rc)
goto dbf_err;
qeth_core_root_dev = root_device_register("qeth");
rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
goto register_err;
qeth_core_header_cache =
kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
0, NULL);
if (!qeth_core_header_cache) {
rc = -ENOMEM;
goto slab_err;
}
qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
if (!qeth_qdio_outbuf_cache) {
rc = -ENOMEM;
goto cqslab_err;
}
qeth_qaob_cache = kmem_cache_create("qeth_qaob",
sizeof(struct qaob),
sizeof(struct qaob),
0, NULL);
if (!qeth_qaob_cache) {
rc = -ENOMEM;
goto qaob_err;
}
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
goto ccw_err;
rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
if (rc)
goto ccwgroup_err;
return 0;
ccwgroup_err:
ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
kmem_cache_destroy(qeth_core_header_cache);
slab_err:
root_device_unregister(qeth_core_root_dev);
register_err:
qeth_unregister_dbf_views();
dbf_err:
debugfs_remove_recursive(qeth_debugfs_root);
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
static void __exit qeth_core_exit(void)
{
qeth_clear_dbf_list();
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
kmem_cache_destroy(qeth_qaob_cache);
kmem_cache_destroy(qeth_qdio_outbuf_cache);
kmem_cache_destroy(qeth_core_header_cache);
root_device_unregister(qeth_core_root_dev);
qeth_unregister_dbf_views();
debugfs_remove_recursive(qeth_debugfs_root);
pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <[email protected]>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/qeth_core_main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2018
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ethtool.h>
#include "qeth_core.h"
#define QETH_TXQ_STAT(_name, _stat) { \
.name = _name, \
.offset = offsetof(struct qeth_out_q_stats, _stat) \
}
#define QETH_CARD_STAT(_name, _stat) { \
.name = _name, \
.offset = offsetof(struct qeth_card_stats, _stat) \
}
struct qeth_stats {
char name[ETH_GSTRING_LEN];
unsigned int offset;
};
static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("IO buffers", bufs),
QETH_TXQ_STAT("IO buffer elements", buf_elements),
QETH_TXQ_STAT("packed IO buffers", bufs_pack),
QETH_TXQ_STAT("skbs", tx_packets),
QETH_TXQ_STAT("packed skbs", skbs_pack),
QETH_TXQ_STAT("SG skbs", skbs_sg),
QETH_TXQ_STAT("HW csum skbs", skbs_csum),
QETH_TXQ_STAT("TSO skbs", skbs_tso),
QETH_TXQ_STAT("linearized skbs", skbs_linearized),
QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
QETH_TXQ_STAT("Queue stopped", stopped),
QETH_TXQ_STAT("Doorbell", doorbell),
QETH_TXQ_STAT("IRQ for frames", coal_frames),
QETH_TXQ_STAT("Completion IRQ", completion_irq),
QETH_TXQ_STAT("Completion yield", completion_yield),
QETH_TXQ_STAT("Completion timer", completion_timer),
};
static const struct qeth_stats card_stats[] = {
QETH_CARD_STAT("rx0 IO buffers", rx_bufs),
QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum),
QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt),
};
#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
#define CARD_STATS_LEN ARRAY_SIZE(card_stats)
static void qeth_add_stat_data(u64 **dst, void *src,
const struct qeth_stats stats[],
unsigned int size)
{
unsigned int i;
char *stat;
for (i = 0; i < size; i++) {
stat = (char *)src + stats[i].offset;
**dst = *(u64 *)stat;
(*dst)++;
}
}
static void qeth_add_stat_strings(u8 **data, const char *prefix,
const struct qeth_stats stats[],
unsigned int size)
{
unsigned int i;
for (i = 0; i < size; i++)
ethtool_sprintf(data, "%s%s", prefix, stats[i].name);
}
static int qeth_get_sset_count(struct net_device *dev, int stringset)
{
struct qeth_card *card = dev->ml_priv;
switch (stringset) {
case ETH_SS_STATS:
return CARD_STATS_LEN +
card->qdio.no_out_queues * TXQ_STATS_LEN;
default:
return -EINVAL;
}
}
static void qeth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qeth_card *card = dev->ml_priv;
unsigned int i;
qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++)
qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats,
txq_stats, TXQ_STATS_LEN);
}
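/* Store the new TX coalescing parameters; if the queue is live and not empty,
 * re-arm the completion timer so the new interval takes effect right away.
 */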
static void __qeth_set_coalesce(struct net_device *dev,
struct qeth_qdio_out_q *queue,
struct ethtool_coalesce *coal)
{
WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
if (coal->tx_coalesce_usecs &&
netif_running(dev) &&
!qeth_out_queue_is_empty(queue))
qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
}
static int qeth_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
unsigned int i;
if (!IS_IQD(card))
return -EOPNOTSUPP;
if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
return -EINVAL;
qeth_for_each_output_queue(card, queue, i)
__qeth_set_coalesce(dev, queue, coal);
return 0;
}
static void qeth_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct qeth_card *card = dev->ml_priv;
param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
param->rx_pending = card->qdio.in_buf_pool.buf_count;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
param->tx_pending = QDIO_MAX_BUFFERS_PER_Q;
}
static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qeth_card *card = dev->ml_priv;
char prefix[ETH_GSTRING_LEN] = "";
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
qeth_add_stat_strings(&data, prefix, card_stats,
CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++) {
scnprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
qeth_add_stat_strings(&data, prefix, txq_stats,
TXQ_STATS_LEN);
}
break;
default:
WARN_ON(1);
break;
}
}
static void qeth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct qeth_card *card = dev->ml_priv;
strscpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
scnprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
static void qeth_get_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
struct qeth_card *card = dev->ml_priv;
channels->max_rx = dev->num_rx_queues;
channels->max_tx = card->qdio.no_out_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = dev->real_num_rx_queues;
channels->tx_count = dev->real_num_tx_queues;
channels->other_count = 0;
channels->combined_count = 0;
}
static int qeth_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
struct qeth_priv *priv = netdev_priv(dev);
struct qeth_card *card = dev->ml_priv;
int rc;
if (channels->rx_count == 0 || channels->tx_count == 0)
return -EINVAL;
if (channels->tx_count > card->qdio.no_out_queues)
return -EINVAL;
/* Prio-queueing needs all TX queues: */
if (qeth_uses_tx_prio_queueing(card))
return -EPERM;
if (IS_IQD(card)) {
if (channels->tx_count < QETH_IQD_MIN_TXQ)
return -EINVAL;
/* Reject downgrade while running. It could push displaced
* ucast flows onto txq0, which is reserved for mcast.
*/
if (netif_running(dev) &&
channels->tx_count < dev->real_num_tx_queues)
return -EPERM;
}
rc = qeth_set_real_num_tx_queues(card, channels->tx_count);
if (!rc)
priv->tx_wanted_queues = channels->tx_count;
return rc;
}
static int qeth_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct qeth_card *card = dev->ml_priv;
if (!IS_IQD(card))
return -EOPNOTSUPP;
return ethtool_op_get_ts_info(dev, info);
}
static int qeth_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna, void *data)
{
struct qeth_priv *priv = netdev_priv(dev);
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = priv->rx_copybreak;
return 0;
default:
return -EOPNOTSUPP;
}
}
static int qeth_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct qeth_priv *priv = netdev_priv(dev);
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
WRITE_ONCE(priv->rx_copybreak, *(u32 *)data);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
struct ethtool_coalesce *coal)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
if (!IS_IQD(card))
return -EOPNOTSUPP;
if (__queue >= card->qdio.no_out_queues)
return -EINVAL;
queue = card->qdio.out_qs[__queue];
coal->tx_coalesce_usecs = queue->coalesce_usecs;
coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
return 0;
}
static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal)
{
struct qeth_card *card = dev->ml_priv;
if (!IS_IQD(card))
return -EOPNOTSUPP;
if (queue >= card->qdio.no_out_queues)
return -EINVAL;
if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
return -EINVAL;
__qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
return 0;
}
/* Helper function to fill 'advertising' and 'supported', which are the same.
 * Autoneg and full-duplex are supported and advertised unconditionally.
 * Always advertise and support all speeds up to the specified one, and only
 * the specified port type.
 */
static void qeth_set_ethtool_link_modes(struct ethtool_link_ksettings *cmd,
enum qeth_link_mode link_mode)
{
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
switch (cmd->base.port) {
case PORT_TP:
ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
switch (cmd->base.speed) {
case SPEED_10000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10000baseT_Full);
fallthrough;
case SPEED_1000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
1000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
1000baseT_Half);
fallthrough;
case SPEED_100:
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Half);
fallthrough;
case SPEED_10:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, supported,
10baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10baseT_Half);
break;
default:
break;
}
break;
case PORT_FIBRE:
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
switch (cmd->base.speed) {
case SPEED_25000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
25000baseSR_Full);
break;
case SPEED_10000:
if (link_mode == QETH_LINK_MODE_FIBRE_LONG) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseLR_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10000baseLR_Full);
} else if (link_mode == QETH_LINK_MODE_FIBRE_SHORT) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10000baseSR_Full);
}
break;
case SPEED_1000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseX_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
1000baseX_Full);
break;
default:
break;
}
break;
default:
break;
}
}
static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
QETH_CARD_TEXT(card, 4, "ethtglks");
cmd->base.speed = card->info.link_info.speed;
cmd->base.duplex = card->info.link_info.duplex;
cmd->base.port = card->info.link_info.port;
cmd->base.autoneg = AUTONEG_ENABLE;
cmd->base.phy_address = 0;
cmd->base.mdio_support = 0;
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
}
const struct ethtool_ops qeth_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_link = ethtool_op_get_link,
.set_coalesce = qeth_set_coalesce,
.get_ringparam = qeth_get_ringparam,
.get_strings = qeth_get_strings,
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
.get_channels = qeth_get_channels,
.set_channels = qeth_set_channels,
.get_ts_info = qeth_get_ts_info,
.get_tunable = qeth_get_tunable,
.set_tunable = qeth_set_tunable,
.get_per_queue_coalesce = qeth_get_per_queue_coalesce,
.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
| linux-master | drivers/s390/net/qeth_ethtool.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV network driver
*
* Copyright IBM Corp. 2001, 2009
*
* Author(s):
* Original netiucv driver:
* Fritz Elfert ([email protected], [email protected])
* Sysfs integration and all bugs therein:
* Cornelia Huck ([email protected])
* PM functions:
* Ursula Braun ([email protected])
*
* Documentation used:
* the source of the original IUCV driver by:
* Stefan Hegewald <[email protected]>
* Hartmut Penner <[email protected]>
* Denis Joseph Barrow ([email protected],[email protected])
* Martin Schwidefsky ([email protected])
* Alan Altmark ([email protected]) Sept. 2000
*/
#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include "fsm.h"
MODULE_AUTHOR
("(C) 2001 IBM Corporation by Fritz Elfert ([email protected])");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/*
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3
#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2
#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3
#define IUCV_DBF_TEXT(name,level,text) \
do { \
debug_text_event(iucv_dbf_##name,level,text); \
} while (0)
#define IUCV_DBF_HEX(name,level,addr,len) \
do { \
debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
} while (0)
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
if (debug_level_enabled(iucv_dbf_##name, level)) { \
char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
sprintf(__buf, text); \
debug_text_event(iucv_dbf_##name, level, __buf); \
put_cpu_var(iucv_dbf_txt_buf); \
} \
} while (0)
#define IUCV_DBF_SPRINTF(name,level,text...) \
do { \
debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
debug_sprintf_event(iucv_dbf_trace, level, text ); \
} while (0)
/*
* some more debug stuff
*/
#define PRINTK_HEADER " iucv: " /* for debugging */
static struct device_driver netiucv_driver = {
.owner = THIS_MODULE,
.name = "netiucv",
.bus = &iucv_bus,
};
/*
* Per connection profiling data
*/
struct connection_profile {
unsigned long maxmulti;
unsigned long maxcqueue;
unsigned long doios_single;
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
unsigned long send_stamp;
unsigned long tx_pending;
unsigned long tx_max_pending;
};
/*
* Representation of one iucv connection
*/
struct iucv_connection {
struct list_head list;
struct iucv_path *path;
struct sk_buff *rx_buff;
struct sk_buff *tx_buff;
struct sk_buff_head collect_queue;
struct sk_buff_head commit_queue;
spinlock_t collect_lock;
int collect_len;
int max_buffsize;
fsm_timer timer;
fsm_instance *fsm;
struct net_device *netdev;
struct connection_profile prof;
char userid[9];
char userdata[17];
};
/*
* Linked list of all connection structs.
*/
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);
/*
* Representation of event-data for the
* connection state machine.
*/
struct iucv_event {
struct iucv_connection *conn;
void *data;
};
/*
* Private part of the network device structure
*/
struct netiucv_priv {
struct net_device_stats stats;
unsigned long tbusy;
fsm_instance *fsm;
struct iucv_connection *conn;
struct device *dev;
};
/*
* Link level header for a packet.
*/
struct ll_header {
u16 next;
};
#define NETIUCV_HDRLEN (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC 5000
/*
* Compatibility macros for busy handling
* of network devices.
*/
static void netiucv_clear_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
clear_bit(0, &priv->tbusy);
netif_wake_queue(dev);
}
static int netiucv_test_and_set_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
return test_and_set_bit(0, &priv->tbusy);
}
static u8 iucvMagic_ascii[16] = {
0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};
static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
/*
* Convert an iucv userId to its printable
* form (strip whitespace at end).
*
* @param An iucv userId
*
* @returns The printable string (static data!!)
*/
static char *netiucv_printname(char *name, int len)
{
static char tmp[17];
char *p = tmp;
memcpy(tmp, name, len);
tmp[len] = '\0';
while (*p && ((p - tmp) < len) && (!isspace(*p)))
p++;
*p = '\0';
return tmp;
}
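/* Build a printable "userid.userdata" string for messages; if userdata still
 * holds the default iucvMagic, only the userid is shown.
 */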
static char *netiucv_printuser(struct iucv_connection *conn)
{
static char tmp_uid[9];
static char tmp_udat[17];
static char buf[100];
if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
tmp_uid[8] = '\0';
tmp_udat[16] = '\0';
memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
memcpy(tmp_udat, conn->userdata, 16);
EBCASC(tmp_udat, 16);
memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
return buf;
} else
return netiucv_printname(conn->userid, 8);
}
/*
* States of the interface statemachine.
*/
enum dev_states {
DEV_STATE_STOPPED,
DEV_STATE_STARTWAIT,
DEV_STATE_STOPWAIT,
DEV_STATE_RUNNING,
/*
* MUST always be the last element!
*/
NR_DEV_STATES
};
static const char *dev_state_names[] = {
"Stopped",
"StartWait",
"StopWait",
"Running",
};
/*
* Events of the interface statemachine.
*/
enum dev_events {
DEV_EVENT_START,
DEV_EVENT_STOP,
DEV_EVENT_CONUP,
DEV_EVENT_CONDOWN,
/*
* MUST always be the last element!
*/
NR_DEV_EVENTS
};
static const char *dev_event_names[] = {
"Start",
"Stop",
"Connection up",
"Connection down",
};
/*
* Events of the connection statemachine
*/
enum conn_events {
/*
* Events, representing callbacks from
* lowlevel iucv layer
*/
CONN_EVENT_CONN_REQ,
CONN_EVENT_CONN_ACK,
CONN_EVENT_CONN_REJ,
CONN_EVENT_CONN_SUS,
CONN_EVENT_CONN_RES,
CONN_EVENT_RX,
CONN_EVENT_TXDONE,
/*
* Events, representing errors return codes from
* calls to lowlevel iucv layer
*/
/*
* Event, representing timer expiry.
*/
CONN_EVENT_TIMER,
/*
* Events, representing commands from upper levels.
*/
CONN_EVENT_START,
CONN_EVENT_STOP,
/*
* MUST always be the last element!
*/
NR_CONN_EVENTS,
};
static const char *conn_event_names[] = {
"Remote connection request",
"Remote connection acknowledge",
"Remote connection reject",
"Connection suspended",
"Connection resumed",
"Data received",
"Data sent",
"Timer",
"Start",
"Stop",
};
/*
* States of the connection statemachine.
*/
enum conn_states {
/*
* Connection not assigned to any device,
* initial state, invalid
*/
CONN_STATE_INVALID,
/*
* Userid assigned but not operating
*/
CONN_STATE_STOPPED,
/*
* Connection registered,
* no connection request sent yet,
* no connection request received
*/
CONN_STATE_STARTWAIT,
/*
* Connection registered and connection request sent,
* no acknowledge and no connection request received yet.
*/
CONN_STATE_SETUPWAIT,
/*
* Connection up and running idle
*/
CONN_STATE_IDLE,
/*
* Data sent, awaiting CONN_EVENT_TXDONE
*/
CONN_STATE_TX,
/*
* Error during registration.
*/
CONN_STATE_REGERR,
/*
* Error during connection setup.
*/
CONN_STATE_CONNERR,
/*
* MUST be always the last element!!
*/
NR_CONN_STATES,
};
static const char *conn_state_names[] = {
"Invalid",
"Stopped",
"StartWait",
"SetupWait",
"Idle",
"TX",
"Terminating",
"Registration error",
"Connect error",
};
/*
* Debug Facility Stuff
*/
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
static void iucv_unregister_dbf_views(void)
{
debug_unregister(iucv_dbf_setup);
debug_unregister(iucv_dbf_data);
debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
IUCV_DBF_SETUP_PAGES,
IUCV_DBF_SETUP_NR_AREAS,
IUCV_DBF_SETUP_LEN);
iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
IUCV_DBF_DATA_PAGES,
IUCV_DBF_DATA_NR_AREAS,
IUCV_DBF_DATA_LEN);
iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
IUCV_DBF_TRACE_PAGES,
IUCV_DBF_TRACE_NR_AREAS,
IUCV_DBF_TRACE_LEN);
if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
(iucv_dbf_trace == NULL)) {
iucv_unregister_dbf_views();
return -ENOMEM;
}
debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
return 0;
}
/*
* Callback-wrappers, called from lowlevel iucv layer.
*/
static void netiucv_callback_rx(struct iucv_path *path,
struct iucv_message *msg)
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
ev.conn = conn;
ev.data = msg;
fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}
static void netiucv_callback_txdone(struct iucv_path *path,
struct iucv_message *msg)
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
ev.conn = conn;
ev.data = msg;
fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
struct iucv_connection *conn = path->private;
fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
u8 *ipuser)
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
static char tmp_user[9];
static char tmp_udat[17];
int rc;
rc = -EINVAL;
memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
memcpy(tmp_udat, ipuser, 16);
EBCASC(tmp_udat, 16);
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(conn, &iucv_connection_list, list) {
if (strncmp(ipvmid, conn->userid, 8) ||
strncmp(ipuser, conn->userdata, 16))
continue;
/* Found a matching connection for this path. */
conn->path = path;
ev.conn = conn;
ev.data = path;
fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
rc = 0;
}
IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
tmp_user, netiucv_printname(tmp_udat, 16));
read_unlock_bh(&iucv_connection_rwlock);
return rc;
}
static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
{
struct iucv_connection *conn = path->private;
fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}
static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
{
struct iucv_connection *conn = path->private;
fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}
static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
{
struct iucv_connection *conn = path->private;
fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
/*
* NOP action for statemachines
*/
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
* Actions of the connection statemachine
*/
/*
* netiucv_unpack_skb
* @conn: The connection where this skb has been received.
* @pskb: The received skb.
*
* Unpack a just received skb and hand it over to upper layers.
* Helper function for conn_action_rx.
*/
static void netiucv_unpack_skb(struct iucv_connection *conn,
struct sk_buff *pskb)
{
struct net_device *dev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(dev);
u16 offset = 0;
skb_put(pskb, NETIUCV_HDRLEN);
pskb->dev = dev;
pskb->ip_summed = CHECKSUM_NONE;
pskb->protocol = cpu_to_be16(ETH_P_IP);
while (1) {
struct sk_buff *skb;
struct ll_header *header = (struct ll_header *) pskb->data;
if (!header->next)
break;
skb_pull(pskb, NETIUCV_HDRLEN);
header->next -= offset;
offset += header->next;
header->next -= NETIUCV_HDRLEN;
if (skb_tailroom(pskb) < header->next) {
IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
header->next, skb_tailroom(pskb));
return;
}
skb_put(pskb, header->next);
skb_reset_mac_header(pskb);
skb = dev_alloc_skb(pskb->len);
if (!skb) {
IUCV_DBF_TEXT(data, 2,
"Out of memory in netiucv_unpack_skb\n");
privptr->stats.rx_dropped++;
return;
}
skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
pskb->len);
skb_reset_mac_header(skb);
skb->dev = pskb->dev;
skb->protocol = pskb->protocol;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
privptr->stats.rx_packets++;
privptr->stats.rx_bytes += skb->len;
netif_rx(skb);
skb_pull(pskb, header->next);
skb_put(pskb, NETIUCV_HDRLEN);
}
}
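/* A message is pending on the IUCV path: receive it into the connection's
 * rx buffer and unpack the contained packets. Oversized messages and data
 * for unlinked connections are rejected.
 */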
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
struct iucv_message *msg = ev->data;
struct netiucv_priv *privptr = netdev_priv(conn->netdev);
int rc;
IUCV_DBF_TEXT(trace, 4, __func__);
if (!conn->netdev) {
iucv_message_reject(conn->path, msg);
IUCV_DBF_TEXT(data, 2,
"Received data for unlinked connection\n");
return;
}
if (msg->length > conn->max_buffsize) {
iucv_message_reject(conn->path, msg);
privptr->stats.rx_dropped++;
IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
msg->length, conn->max_buffsize);
return;
}
conn->rx_buff->data = conn->rx_buff->head;
skb_reset_tail_pointer(conn->rx_buff);
conn->rx_buff->len = 0;
rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
msg->length, NULL);
if (rc || msg->length < 5) {
privptr->stats.rx_errors++;
IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
return;
}
netiucv_unpack_skb(conn, conn->rx_buff);
}
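/* Send completion: release the skb whose transmission was just confirmed,
 * then pack everything that piled up on the collect queue into the tx buffer
 * and send it as a single multi-packet IUCV message.
 */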
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
struct iucv_message *msg = ev->data;
struct iucv_message txmsg;
struct netiucv_priv *privptr = NULL;
u32 single_flag = msg->tag;
u32 txbytes = 0;
u32 txpackets = 0;
u32 stat_maxcq = 0;
struct sk_buff *skb;
unsigned long saveflags;
struct ll_header header;
int rc;
IUCV_DBF_TEXT(trace, 4, __func__);
if (!conn || !conn->netdev) {
IUCV_DBF_TEXT(data, 2,
"Send confirmation for unlinked connection\n");
return;
}
privptr = netdev_priv(conn->netdev);
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
refcount_dec(&skb->users);
if (privptr) {
privptr->stats.tx_packets++;
privptr->stats.tx_bytes +=
(skb->len - NETIUCV_HDRLEN
- NETIUCV_HDRLEN);
}
dev_kfree_skb_any(skb);
}
}
conn->tx_buff->data = conn->tx_buff->head;
skb_reset_tail_pointer(conn->tx_buff);
conn->tx_buff->len = 0;
spin_lock_irqsave(&conn->collect_lock, saveflags);
while ((skb = skb_dequeue(&conn->collect_queue))) {
header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
skb_copy_from_linear_data(skb,
skb_put(conn->tx_buff, skb->len),
skb->len);
txbytes += skb->len;
txpackets++;
stat_maxcq++;
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
if (conn->collect_len > conn->prof.maxmulti)
conn->prof.maxmulti = conn->collect_len;
conn->collect_len = 0;
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
if (conn->tx_buff->len == 0) {
fsm_newstate(fi, CONN_STATE_IDLE);
return;
}
header.next = 0;
skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
conn->prof.send_stamp = jiffies;
txmsg.class = 0;
txmsg.tag = 0;
rc = iucv_message_send(conn->path, &txmsg, 0, 0,
conn->tx_buff->data, conn->tx_buff->len);
conn->prof.doios_multi++;
conn->prof.txlen += conn->tx_buff->len;
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
if (rc) {
conn->prof.tx_pending--;
fsm_newstate(fi, CONN_STATE_IDLE);
if (privptr)
privptr->stats.tx_errors += txpackets;
IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
} else {
if (privptr) {
privptr->stats.tx_packets += txpackets;
privptr->stats.tx_bytes += txbytes;
}
if (stat_maxcq > conn->prof.maxcqueue)
conn->prof.maxcqueue = stat_maxcq;
}
}
static struct iucv_handler netiucv_handler = {
.path_pending = netiucv_callback_connreq,
.path_complete = netiucv_callback_connack,
.path_severed = netiucv_callback_connrej,
.path_quiesced = netiucv_callback_connsusp,
.path_resumed = netiucv_callback_connres,
.message_pending = netiucv_callback_rx,
.message_complete = netiucv_callback_txdone,
};
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
struct iucv_path *path = ev->data;
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
int rc;
IUCV_DBF_TEXT(trace, 3, __func__);
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
path->flags = 0;
rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
}
fsm_newstate(fi, CONN_STATE_IDLE);
netdev->tx_queue_len = conn->path->msglim;
fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
struct iucv_path *path = ev->data;
IUCV_DBF_TEXT(trace, 3, __func__);
iucv_path_sever(path, NULL);
}
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
netdev->tx_queue_len = conn->path->msglim;
fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
iucv_path_sever(conn->path, conn->userdata);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
iucv_path_sever(conn->path, conn->userdata);
dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
"connection\n", netiucv_printuser(conn));
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
int rc;
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
/*
* We must set the state before calling iucv_connect because the
* callback handler could be called at any point after the connection
* request is sent
*/
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
netdev->name, netiucv_printuser(conn));
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
NULL, conn->userdata, conn);
switch (rc) {
case 0:
netdev->tx_queue_len = conn->path->msglim;
fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
CONN_EVENT_TIMER, conn);
return;
case 11:
dev_warn(privptr->dev,
"The IUCV device failed to connect to z/VM guest %s\n",
netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
dev_warn(privptr->dev,
"The IUCV device failed to connect to the peer on z/VM"
" guest %s\n", netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
dev_err(privptr->dev,
"Connecting the IUCV device would exceed the maximum"
" number of IUCV connections\n");
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 14:
dev_err(privptr->dev,
"z/VM guest %s has too many IUCV connections"
" to connect with the IUCV device\n",
netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
dev_err(privptr->dev,
"The IUCV device cannot connect to a z/VM guest with no"
" IUCV authorization\n");
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
default:
dev_err(privptr->dev,
"Connecting the IUCV device failed with error %d\n",
rc);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
}
IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
kfree(conn->path);
conn->path = NULL;
}
static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(q))) {
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
netiucv_purge_skb_queue(&conn->commit_queue);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
netdev->name, conn->userid);
}
static const fsm_node conn_fsm[] = {
{ CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
{ CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
{ CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
{ CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
{ CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
{ CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
{ CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
{ CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
{ CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
{ CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
{ CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
{ CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
};
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
/*
* Actions for interface - statemachine.
*/
/*
* dev_action_start
* @fi: An instance of an interface statemachine.
* @event: The event, just happened.
* @arg: Generic pointer, cast from struct net_device * upon call.
*
* Startup connection by sending CONN_EVENT_START to it.
*/
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, DEV_STATE_STARTWAIT);
fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
/*
* Shutdown connection by sending CONN_EVENT_STOP to it.
*
* @param fi An instance of an interface statemachine.
* @param event The event, just happened.
* @param arg Generic pointer, cast from struct net_device * upon call.
*/
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct netiucv_priv *privptr = netdev_priv(dev);
struct iucv_event ev;
IUCV_DBF_TEXT(trace, 3, __func__);
ev.conn = privptr->conn;
fsm_newstate(fi, DEV_STATE_STOPWAIT);
fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
/*
* Called from connection statemachine
* when a connection is up and running.
*
* @param fi An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
*/
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __func__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(privptr->dev,
"The IUCV device has been connected"
" successfully to %s\n",
netiucv_printuser(privptr->conn));
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
case DEV_STATE_STOPWAIT:
IUCV_DBF_TEXT(data, 2,
"dev_action_connup: in DEV_STATE_STOPWAIT\n");
break;
}
}
/*
* Called from connection statemachine
* when a connection has been shutdown.
*
* @param fi An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg Generic pointer, cast from struct net_device * upon call.
*/
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
IUCV_DBF_TEXT(trace, 3, __func__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
fsm_newstate(fi, DEV_STATE_STARTWAIT);
break;
case DEV_STATE_STOPWAIT:
fsm_newstate(fi, DEV_STATE_STOPPED);
IUCV_DBF_TEXT(setup, 3, "connection is down\n");
break;
}
}
static const fsm_node dev_fsm[] = {
{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
{ DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
};
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
/*
* Transmit a packet.
* This is a helper function for netiucv_tx().
*
* @param conn Connection to be used for sending.
* @param skb Pointer to struct sk_buff of packet to send.
* The linklevel header has already been set up
* by netiucv_tx().
*
 * @return 0 on success, -ERRNO on failure.
*/
static int netiucv_transmit_skb(struct iucv_connection *conn,
struct sk_buff *skb)
{
struct iucv_message msg;
unsigned long saveflags;
struct ll_header header;
int rc;
if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
int l = skb->len + NETIUCV_HDRLEN;
spin_lock_irqsave(&conn->collect_lock, saveflags);
if (conn->collect_len + l >
(conn->max_buffsize - NETIUCV_HDRLEN)) {
rc = -EBUSY;
IUCV_DBF_TEXT(data, 2,
"EBUSY from netiucv_transmit_skb\n");
} else {
refcount_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
rc = 0;
}
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
} else {
struct sk_buff *nskb = skb;
/*
		 * Copy the skb to a newly allocated skb in lowmem only if the
		 * data is located above 2G in memory or the tailroom is < 2.
*/
unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
NETIUCV_HDRLEN)) >> 31;
int copied = 0;
if (hi || (skb_tailroom(skb) < 2)) {
nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
rc = -ENOMEM;
return rc;
} else {
skb_reserve(nskb, NETIUCV_HDRLEN);
skb_put_data(nskb, skb->data, skb->len);
}
copied = 1;
}
/*
* skb now is below 2G and has enough room. Add headers.
*/
header.next = nskb->len + NETIUCV_HDRLEN;
memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
header.next = 0;
skb_put_data(nskb, &header, NETIUCV_HDRLEN);
fsm_newstate(conn->fsm, CONN_STATE_TX);
conn->prof.send_stamp = jiffies;
msg.tag = 1;
msg.class = 0;
rc = iucv_message_send(conn->path, &msg, 0, 0,
nskb->data, nskb->len);
conn->prof.doios_single++;
conn->prof.txlen += skb->len;
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
if (rc) {
struct netiucv_priv *privptr;
fsm_newstate(conn->fsm, CONN_STATE_IDLE);
conn->prof.tx_pending--;
privptr = netdev_priv(conn->netdev);
if (privptr)
privptr->stats.tx_errors++;
if (copied)
dev_kfree_skb(nskb);
else {
/*
* Remove our headers. They get added
* again on retransmit.
*/
skb_pull(skb, NETIUCV_HDRLEN);
skb_trim(skb, skb->len - NETIUCV_HDRLEN);
}
IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
} else {
if (copied)
dev_kfree_skb(skb);
refcount_inc(&nskb->users);
skb_queue_tail(&conn->commit_queue, nskb);
}
}
return rc;
}
/*
* Interface API for upper network layers
*/
/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
* @param dev Pointer to interface struct.
*
* @return 0 on success, -ERRNO on failure. (Never fails.)
*/
static int netiucv_open(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
fsm_event(priv->fsm, DEV_EVENT_START, dev);
return 0;
}
/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
* @param dev Pointer to interface struct.
*
* @return 0 on success, -ERRNO on failure. (Never fails.)
*/
static int netiucv_close(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
return 0;
}
/*
* Start transmission of a packet.
* Called from generic network device layer.
*/
static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netiucv_priv *privptr = netdev_priv(dev);
int rc;
IUCV_DBF_TEXT(trace, 4, __func__);
/*
* Some sanity checks ...
*/
if (skb == NULL) {
IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
privptr->stats.tx_dropped++;
return NETDEV_TX_OK;
}
if (skb_headroom(skb) < NETIUCV_HDRLEN) {
IUCV_DBF_TEXT(data, 2,
"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
return NETDEV_TX_OK;
}
/*
	 * If the connection is not running, drop the packet and
	 * account it as a carrier error.
*/
if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
privptr->stats.tx_errors++;
privptr->stats.tx_carrier_errors++;
return NETDEV_TX_OK;
}
if (netiucv_test_and_set_busy(dev)) {
IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
return NETDEV_TX_BUSY;
}
netif_trans_update(dev);
rc = netiucv_transmit_skb(privptr->conn, skb);
netiucv_clear_busy(dev);
return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*
* netiucv_stats
* @dev: Pointer to interface struct.
*
* Returns interface statistics of a device.
*
* Returns pointer to stats struct of this interface.
*/
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return &priv->stats;
}
/*
* attributes in sysfs
*/
static ssize_t user_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
static int netiucv_check_user(const char *buf, size_t count, char *username,
char *userdata)
{
const char *p;
int i;
p = strchr(buf, '.');
if ((p && ((count > 26) ||
((p - buf) > 8) ||
(buf + count - p > 18))) ||
(!p && (count > 9))) {
IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
if (isalnum(*p) || *p == '$') {
username[i] = toupper(*p);
continue;
}
if (*p == '\n')
/* trailing lf, grr */
break;
IUCV_DBF_TEXT_(setup, 2,
"conn_write: invalid character %02x\n", *p);
return -EINVAL;
}
while (i < 8)
username[i++] = ' ';
username[8] = '\0';
if (*p == '.') {
p++;
for (i = 0; i < 16 && *p; i++, p++) {
if (*p == '\n')
break;
userdata[i] = toupper(*p);
}
while (i > 0 && i < 16)
userdata[i++] = ' ';
} else
memcpy(userdata, iucvMagic_ascii, 16);
userdata[16] = '\0';
ASCEBC(userdata, 16);
return 0;
}
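/*
 * Example of the parsing above (the peer names are made-up samples):
 * a write of "lnxpeer.mydata\n" yields the username "LNXPEER "
 * (uppercased, space-padded to 8 characters) and the userdata
 * "MYDATA          " (uppercased, space-padded to 16 characters and
 * then converted to EBCDIC); a write of "lnxpeer\n" without a '.'
 * keeps the default iucvMagic_ascii userdata.
 */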
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->conn->netdev;
char username[9];
char userdata[17];
int rc;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
rc = netiucv_check_user(buf, count, username, userdata);
if (rc)
return rc;
if (memcmp(username, priv->conn->userid, 9) &&
(ndev->flags & (IFF_UP | IFF_RUNNING))) {
/* username changed while the interface is active. */
IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
return -EPERM;
}
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
if (!strncmp(username, cp->userid, 9) &&
!strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
read_unlock_bh(&iucv_connection_rwlock);
IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
"already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
memcpy(priv->conn->userdata, userdata, 17);
return count;
}
static DEVICE_ATTR(user, 0644, user_show, user_write);
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->conn->netdev;
unsigned int bs1;
int rc;
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= 39)
return -EINVAL;
rc = kstrtouint(buf, 0, &bs1);
if (rc == -EINVAL) {
IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
buf);
return -EINVAL;
}
if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
IUCV_DBF_TEXT_(setup, 2,
"buffer_write: buffer size %d too large\n",
bs1);
return -EINVAL;
}
if ((ndev->flags & IFF_RUNNING) &&
(bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
IUCV_DBF_TEXT_(setup, 2,
"buffer_write: buffer size %d too small\n",
bs1);
return -EINVAL;
}
if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
IUCV_DBF_TEXT_(setup, 2,
"buffer_write: buffer size %d too small\n",
bs1);
return -EINVAL;
}
priv->conn->max_buffsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
return count;
}
static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
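/*
 * Constraints enforced by buffer_write() above: the value must be at
 * most NETIUCV_BUFSIZE_MAX, at least 576 + 2 * NETIUCV_HDRLEN, and,
 * while the interface is running, at least mtu + NETIUCV_HDRLEN + 2.
 * When the interface is down, the MTU is re-derived as the buffer
 * size minus 2 * NETIUCV_HDRLEN.
 */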
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
static ssize_t conn_fsm_show (struct device *dev,
struct device_attribute *attr, char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
static ssize_t maxmulti_show (struct device *dev,
struct device_attribute *attr, char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
static ssize_t maxmulti_write (struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxmulti = 0;
return count;
}
static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxcqueue = 0;
return count;
}
static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.doios_single = 0;
return count;
}
static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
priv->conn->prof.doios_multi = 0;
return count;
}
static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.txlen = 0;
return count;
}
static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_time = 0;
return count;
}
static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_pending = 0;
return count;
}
static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_max_pending = 0;
return count;
}
static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
static struct attribute *netiucv_attrs[] = {
&dev_attr_buffer.attr,
&dev_attr_user.attr,
NULL,
};
static struct attribute_group netiucv_attr_group = {
.attrs = netiucv_attrs,
};
static struct attribute *netiucv_stat_attrs[] = {
&dev_attr_device_fsm_state.attr,
&dev_attr_connection_fsm_state.attr,
&dev_attr_max_tx_buffer_used.attr,
&dev_attr_max_chained_skbs.attr,
&dev_attr_tx_single_write_ops.attr,
&dev_attr_tx_multi_write_ops.attr,
&dev_attr_netto_bytes.attr,
&dev_attr_max_tx_io_time.attr,
&dev_attr_tx_pending.attr,
&dev_attr_tx_max_pending.attr,
NULL,
};
static struct attribute_group netiucv_stat_attr_group = {
.name = "stats",
.attrs = netiucv_stat_attrs,
};
static const struct attribute_group *netiucv_attr_groups[] = {
&netiucv_stat_attr_group,
&netiucv_attr_group,
NULL,
};
static int netiucv_register_device(struct net_device *ndev)
{
struct netiucv_priv *priv = netdev_priv(ndev);
struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
int ret;
IUCV_DBF_TEXT(trace, 3, __func__);
if (dev) {
dev_set_name(dev, "net%s", ndev->name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->groups = netiucv_attr_groups;
/*
* The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating,
		 * but legitimate.)
*/
dev->release = (void (*)(struct device *))kfree;
dev->driver = &netiucv_driver;
} else
return -ENOMEM;
ret = device_register(dev);
if (ret) {
put_device(dev);
return ret;
}
priv->dev = dev;
dev_set_drvdata(dev, priv);
return 0;
}
static void netiucv_unregister_device(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __func__);
device_unregister(dev);
}
/*
* Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
*/
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
char *username,
char *userdata)
{
struct iucv_connection *conn;
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn)
goto out;
skb_queue_head_init(&conn->collect_queue);
skb_queue_head_init(&conn->commit_queue);
spin_lock_init(&conn->collect_lock);
conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
conn->netdev = dev;
conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
if (!conn->rx_buff)
goto out_conn;
conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
if (!conn->tx_buff)
goto out_rx;
conn->fsm = init_fsm("netiucvconn", conn_state_names,
conn_event_names, NR_CONN_STATES,
NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
GFP_KERNEL);
if (!conn->fsm)
goto out_tx;
fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
if (userdata)
memcpy(conn->userdata, userdata, 17);
if (username) {
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
}
write_lock_bh(&iucv_connection_rwlock);
list_add_tail(&conn->list, &iucv_connection_list);
write_unlock_bh(&iucv_connection_rwlock);
return conn;
out_tx:
kfree_skb(conn->tx_buff);
out_rx:
kfree_skb(conn->rx_buff);
out_conn:
kfree(conn);
out:
return NULL;
}
/*
* Release a connection structure and remove it from the
* list of netiucv connections.
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
write_unlock_bh(&iucv_connection_rwlock);
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
netiucv_purge_skb_queue(&conn->commit_queue);
kfree_fsm(conn->fsm);
kfree_skb(conn->rx_buff);
kfree_skb(conn->tx_buff);
}
/*
* Release everything of a net device.
*/
static void netiucv_free_netdevice(struct net_device *dev)
{
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __func__);
if (!dev)
return;
if (privptr) {
if (privptr->conn)
netiucv_remove_connection(privptr->conn);
if (privptr->fsm)
kfree_fsm(privptr->fsm);
privptr->conn = NULL; privptr->fsm = NULL;
/* privptr gets freed by free_netdev() */
}
}
/*
* Initialize a net device. (Called from kernel in alloc_netdev())
*/
static const struct net_device_ops netiucv_netdev_ops = {
.ndo_open = netiucv_open,
.ndo_stop = netiucv_close,
.ndo_get_stats = netiucv_stats,
.ndo_start_xmit = netiucv_tx,
};
static void netiucv_setup_netdevice(struct net_device *dev)
{
dev->mtu = NETIUCV_MTU_DEFAULT;
dev->min_mtu = 576;
dev->max_mtu = NETIUCV_MTU_MAX;
dev->needs_free_netdev = true;
dev->priv_destructor = netiucv_free_netdevice;
dev->hard_header_len = NETIUCV_HDRLEN;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->netdev_ops = &netiucv_netdev_ops;
}
/*
* Allocate and initialize everything of a net device.
*/
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
struct netiucv_priv *privptr;
struct net_device *dev;
dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
NET_NAME_UNKNOWN, netiucv_setup_netdevice);
if (!dev)
return NULL;
rtnl_lock();
if (dev_alloc_name(dev, dev->name) < 0)
goto out_netdev;
privptr = netdev_priv(dev);
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
if (!privptr->fsm)
goto out_netdev;
privptr->conn = netiucv_new_connection(dev, username, userdata);
if (!privptr->conn) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
goto out_fsm;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
return dev;
out_fsm:
kfree_fsm(privptr->fsm);
out_netdev:
rtnl_unlock();
free_netdev(dev);
return NULL;
}
static ssize_t connection_store(struct device_driver *drv, const char *buf,
size_t count)
{
char username[9];
char userdata[17];
int rc;
struct net_device *dev;
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
rc = netiucv_check_user(buf, count, username, userdata);
if (rc)
return rc;
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
if (!strncmp(username, cp->userid, 9) &&
!strncmp(userdata, cp->userdata, 17)) {
read_unlock_bh(&iucv_connection_rwlock);
IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
"already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
dev = netiucv_init_netdevice(username, userdata);
if (!dev) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
}
rc = netiucv_register_device(dev);
if (rc) {
rtnl_unlock();
IUCV_DBF_TEXT_(setup, 2,
"ret %d from netiucv_register_device\n", rc);
goto out_free_ndev;
}
/* sysfs magic */
priv = netdev_priv(dev);
SET_NETDEV_DEV(dev, priv->dev);
rc = register_netdevice(dev);
rtnl_unlock();
if (rc)
goto out_unreg;
dev_info(priv->dev, "The IUCV interface to %s has been established "
"successfully\n",
netiucv_printuser(priv->conn));
return count;
out_unreg:
netiucv_unregister_device(priv->dev);
out_free_ndev:
netiucv_free_netdevice(dev);
return rc;
}
static DRIVER_ATTR_WO(connection);
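/*
 * Illustrative use of the "connection" driver attribute (the sysfs
 * path assumes the netiucv driver sits on the iucv bus, and the peer
 * user ID is a made-up example):
 *
 *	echo LNXPEER > /sys/bus/iucv/drivers/netiucv/connection
 *
 * On success a new iucv<n> interface for that z/VM guest is created.
 */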
static ssize_t remove_store(struct device_driver *drv, const char *buf,
size_t count)
{
struct iucv_connection *cp;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
char name[IFNAMSIZ];
const char *p;
int i;
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= IFNAMSIZ)
count = IFNAMSIZ - 1;
for (i = 0, p = buf; i < count && *p; i++, p++) {
if (*p == '\n' || *p == ' ')
/* trailing lf, grr */
break;
name[i] = *p;
}
name[i] = '\0';
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
ndev = cp->netdev;
priv = netdev_priv(ndev);
dev = priv->dev;
if (strncmp(name, ndev->name, count))
continue;
read_unlock_bh(&iucv_connection_rwlock);
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
dev_warn(dev, "The IUCV device is connected"
" to %s and cannot be removed\n",
priv->conn->userid);
IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EPERM;
}
unregister_netdev(ndev);
netiucv_unregister_device(dev);
return count;
}
read_unlock_bh(&iucv_connection_rwlock);
IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
return -EINVAL;
}
static DRIVER_ATTR_WO(remove);
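/*
 * Illustrative counterpart for removal (the interface must not be UP
 * or RUNNING; the name is a made-up example, same path assumption as
 * above):
 *
 *	echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 */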
static struct attribute * netiucv_drv_attrs[] = {
&driver_attr_connection.attr,
&driver_attr_remove.attr,
NULL,
};
static struct attribute_group netiucv_drv_attr_group = {
.attrs = netiucv_drv_attrs,
};
static const struct attribute_group *netiucv_drv_attr_groups[] = {
&netiucv_drv_attr_group,
NULL,
};
static void netiucv_banner(void)
{
pr_info("driver initialized\n");
}
static void __exit netiucv_exit(void)
{
struct iucv_connection *cp;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
IUCV_DBF_TEXT(trace, 3, __func__);
while (!list_empty(&iucv_connection_list)) {
cp = list_entry(iucv_connection_list.next,
struct iucv_connection, list);
ndev = cp->netdev;
priv = netdev_priv(ndev);
dev = priv->dev;
unregister_netdev(ndev);
netiucv_unregister_device(dev);
}
driver_unregister(&netiucv_driver);
iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
pr_info("driver unloaded\n");
return;
}
static int __init netiucv_init(void)
{
int rc;
rc = iucv_register_dbf_views();
if (rc)
goto out;
rc = iucv_register(&netiucv_handler, 1);
if (rc)
goto out_dbf;
IUCV_DBF_TEXT(trace, 3, __func__);
netiucv_driver.groups = netiucv_drv_attr_groups;
rc = driver_register(&netiucv_driver);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
goto out_iucv;
}
netiucv_banner();
return rc;
out_iucv:
iucv_unregister(&netiucv_handler, 1);
out_dbf:
iucv_unregister_dbf_views();
out:
return rc;
}
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/net/netiucv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/list.h>
#include <linux/rwsem.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
switch (card->state) {
case CARD_STATE_DOWN:
return sysfs_emit(buf, "DOWN\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
return sysfs_emit(buf, "UP (LAN %s)\n",
netif_carrier_ok(card->dev) ?
"ONLINE" : "OFFLINE");
return sysfs_emit(buf, "SOFTSETUP\n");
default:
return sysfs_emit(buf, "UNKNOWN\n");
}
}
static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%02X\n", card->info.chpid);
}
static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", netdev_name(card->dev));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", qeth_get_cardname_short(card));
}
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
static const char *qeth_get_bufsize_str(struct qeth_card *card)
{
if (card->qdio.in_buf_size == 16384)
return "16k";
else if (card->qdio.in_buf_size == 24576)
return "24k";
else if (card->qdio.in_buf_size == 32768)
return "32k";
else if (card->qdio.in_buf_size == 40960)
return "40k";
else
return "64k";
}
static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", qeth_get_bufsize_str(card));
}
static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->dev->dev_port);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
unsigned int portno, limit;
int rc = 0;
rc = kstrtouint(buf, 16, &portno);
if (rc)
return rc;
if (portno > QETH_MAX_PORTNO)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
if (portno > limit) {
rc = -EINVAL;
goto out;
}
card->dev->dev_port = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
static ssize_t qeth_dev_portname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "no portname required\n");
}
static ssize_t qeth_dev_portname_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
dev_warn_once(&card->gdev->dev,
"portname is deprecated and is ignored\n");
return count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
qeth_dev_portname_store);
static ssize_t qeth_dev_prioqing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sysfs_emit(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
return sysfs_emit(buf, "%s\n", "by type of service");
case QETH_PRIO_Q_ING_SKB:
return sysfs_emit(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
return sysfs_emit(buf, "%s\n", "by VLAN headers");
case QETH_PRIO_Q_ING_FIXED:
return sysfs_emit(buf, "always queue %i\n",
card->qdio.default_out_queue);
default:
return sysfs_emit(buf, "disabled\n");
}
}
static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
if (IS_IQD(card) || IS_VM_NIC(card))
return -EOPNOTSUPP;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
	/* Devices that provide only a single output queue (e.g. 1920
	 * devices) cannot do priority queueing.
	 */
if (card->qdio.no_out_queues == 1) {
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
rc = -EPERM;
goto out;
}
if (sysfs_streq(buf, "prio_queueing_prec")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_skb")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_tos")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
if (IS_LAYER3(card)) {
rc = -EOPNOTSUPP;
goto out;
}
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 0;
} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 1;
} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else
rc = -EINVAL;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
qeth_dev_prioqing_store);
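/*
 * Strings accepted by qeth_dev_prioqing_store() above:
 * "prio_queueing_prec", "prio_queueing_tos", "prio_queueing_skb",
 * "prio_queueing_vlan" (layer 2 only), "no_prio_queueing" and
 * "no_prio_queueing:0" .. "no_prio_queueing:3". Illustrative use
 * (the bus ID is a placeholder):
 *
 *	echo no_prio_queueing:2 > \
 *		/sys/bus/ccwgroup/devices/0.0.f500/priority_queueing
 */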
static ssize_t qeth_dev_bufcnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
unsigned int cnt;
int rc = 0;
rc = kstrtouint(buf, 10, &cnt);
if (rc)
return rc;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);
rc = qeth_resize_buffer_pool(card, cnt);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
qeth_dev_bufcnt_store);
static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
bool reset;
int rc;
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
if (reset)
rc = qeth_schedule_recovery(card);
return rc ? rc : count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_qdio_out_q *queue;
unsigned int i;
bool reset;
int rc;
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
if (reset) {
memset(&card->stats, 0, sizeof(card->stats));
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
if (!queue)
break;
memset(&queue->stats, 0, sizeof(queue->stats));
}
}
return count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
qeth_dev_performance_stats_store);
static ssize_t qeth_dev_layer2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->options.layer);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
struct net_device *ndev;
enum qeth_discipline_id newdis;
unsigned int input;
int rc;
rc = kstrtouint(buf, 16, &input);
if (rc)
return rc;
switch (input) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
case 1:
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
return -EINVAL;
}
mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
if (card->options.layer == newdis)
goto out;
if (card->info.layer_enforced) {
/* fixed layer, can't switch */
rc = -EOPNOTSUPP;
goto out;
}
if (card->discipline) {
/* start with a new, pristine netdevice: */
ndev = qeth_clone_netdev(card->dev);
if (!ndev) {
rc = -ENOMEM;
goto out;
}
qeth_remove_discipline(card);
free_netdev(card->dev);
card->dev = ndev;
}
rc = qeth_setup_discipline(card, newdis);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
qeth_dev_layer2_store);
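/*
 * As implemented above, writing 0 selects the layer 3 discipline and
 * 1 selects layer 2; the switch is only allowed while the card is
 * DOWN and the layer is not enforced by the adapter. Illustrative use
 * (placeholder bus ID):
 *
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/layer2
 */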
#define ATTR_QETH_ISOLATION_NONE ("none")
#define ATTR_QETH_ISOLATION_FWD ("forward")
#define ATTR_QETH_ISOLATION_DROP ("drop")
static ssize_t qeth_dev_isolation_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_NONE);
case ISOLATION_MODE_FWD:
return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_FWD);
case ISOLATION_MODE_DROP:
return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_DROP);
default:
return sysfs_emit(buf, "%s\n", "N/A");
}
}
static ssize_t qeth_dev_isolation_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
mutex_lock(&card->conf_mutex);
if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
goto out;
}
/* parse input into isolation mode */
if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) {
isolation = ISOLATION_MODE_NONE;
} else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) {
isolation = ISOLATION_MODE_FWD;
} else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) {
isolation = ISOLATION_MODE_DROP;
} else {
rc = -EINVAL;
goto out;
}
if (qeth_card_hw_is_reachable(card))
rc = qeth_setadpparms_set_access_ctrl(card, isolation);
if (!rc)
WRITE_ONCE(card->options.isolation, isolation);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
qeth_dev_isolation_store);
static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_switch_info sw_info;
int rc = 0;
if (!qeth_card_hw_is_reachable(card))
return sysfs_emit(buf, "n/a\n");
rc = qeth_query_switch_attributes(card, &sw_info);
if (rc)
return rc;
if (!sw_info.capabilities)
rc = sysfs_emit(buf, "unknown");
if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
rc = sysfs_emit(buf,
(sw_info.settings & QETH_SWITCH_FORW_802_1 ?
"[802.1]" : "802.1"));
if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
rc += sysfs_emit_at(buf, rc,
(sw_info.settings &
QETH_SWITCH_FORW_REFL_RELAY ?
" [rr]" : " rr"));
rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
static DEVICE_ATTR(switch_attrs, 0444,
qeth_dev_switch_attrs_show, NULL);
static ssize_t qeth_hw_trap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (card->info.hwtrap)
return sysfs_emit(buf, "arm\n");
else
return sysfs_emit(buf, "disarm\n");
}
static ssize_t qeth_hw_trap_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
int state = 0;
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card))
state = 1;
if (sysfs_streq(buf, "arm") && !card->info.hwtrap) {
if (state) {
if (qeth_is_diagass_supported(card,
QETH_DIAGS_CMD_TRAP)) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
if (!rc)
card->info.hwtrap = 1;
} else
rc = -EINVAL;
} else
card->info.hwtrap = 1;
} else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) {
if (state) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
if (!rc)
card->info.hwtrap = 0;
} else
card->info.hwtrap = 0;
} else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap)
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
else
rc = -EINVAL;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
qeth_hw_trap_store);
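/*
 * Values accepted by qeth_hw_trap_store() above: "arm" and "disarm"
 * toggle the hardware trap facility (talking to the card only when it
 * is reachable and QETH_DIAGS_CMD_TRAP is supported), while "trap"
 * captures a trap on a reachable, armed card.
 */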
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
unsigned int input;
int rc;
rc = kstrtouint(buf, 10, &input);
if (rc)
return rc;
if (input > max_value)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN)
rc = -EPERM;
else
*value = input;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.time_total, 5000);
}
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet, 1000);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
qeth_dev_blkt_inter_store);
static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet_jumbo, 1000);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
qeth_dev_blkt_inter_jumbo_store);
static struct attribute *qeth_blkt_device_attrs[] = {
&dev_attr_total.attr,
&dev_attr_inter.attr,
&dev_attr_inter_jumbo.attr,
NULL,
};
static const struct attribute_group qeth_dev_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
static struct attribute *qeth_dev_extended_attrs[] = {
&dev_attr_inbuf_size.attr,
&dev_attr_portno.attr,
&dev_attr_portname.attr,
&dev_attr_priority_queueing.attr,
&dev_attr_performance_stats.attr,
&dev_attr_layer2.attr,
&dev_attr_isolation.attr,
&dev_attr_hw_trap.attr,
&dev_attr_switch_attrs.attr,
NULL,
};
static const struct attribute_group qeth_dev_extended_group = {
.attrs = qeth_dev_extended_attrs,
};
static struct attribute *qeth_dev_attrs[] = {
&dev_attr_state.attr,
&dev_attr_chpid.attr,
&dev_attr_if_name.attr,
&dev_attr_card_type.attr,
&dev_attr_buffer_count.attr,
&dev_attr_recover.attr,
NULL,
};
static const struct attribute_group qeth_dev_group = {
.attrs = qeth_dev_attrs,
};
const struct attribute_group *qeth_dev_groups[] = {
&qeth_dev_group,
&qeth_dev_extended_group,
&qeth_dev_blkt_group,
NULL,
};
| linux-master | drivers/s390/net/qeth_core_sys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004, 2007
* Authors: Belinda Thompson ([email protected])
* Andy Richter ([email protected])
* Peter Tiedemann ([email protected])
*/
/*
This module exports functions to be used by CCS:
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
EXPORT_SYMBOL(ctc_mpc_flow_control);
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/idals.h>
#include "ctcm_main.h"
#include "ctcm_mpc.h"
#include "ctcm_fsms.h"
static const struct xid2 init_xid = {
.xid2_type_id = XID_FM2,
.xid2_len = 0x45,
.xid2_adj_id = 0,
.xid2_rlen = 0x31,
.xid2_resv1 = 0,
.xid2_flag1 = 0,
.xid2_fmtt = 0,
.xid2_flag4 = 0x80,
.xid2_resv2 = 0,
.xid2_tgnum = 0,
.xid2_sender_id = 0,
.xid2_flag2 = 0,
.xid2_option = XID2_0,
.xid2_resv3 = "\x00",
.xid2_resv4 = 0,
.xid2_dlc_type = XID2_READ_SIDE,
.xid2_resv5 = 0,
.xid2_mpc_flag = 0,
.xid2_resv6 = 0,
.xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35),
};
static const struct th_header thnorm = {
.th_seg = 0x00,
.th_ch_flag = TH_IS_XID,
.th_blk_flag = TH_DATA_IS_XID,
.th_is_xid = 0x01,
.th_seq_num = 0x00000000,
};
static const struct th_header thdummy = {
.th_seg = 0x00,
.th_ch_flag = 0x00,
.th_blk_flag = TH_DATA_IS_XID,
.th_is_xid = 0x01,
.th_seq_num = 0x00000000,
};
/*
* Definition of one MPC group
*/
/*
* Compatibility macros for busy handling
* of network devices.
*/
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
/*
* MPC Group state machine actions (static prototypes)
*/
static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
static int mpc_validate_xid(struct mpcg_info *mpcginfo);
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
#ifdef DEBUGDATA
/*-------------------------------------------------------------------*
* Dump buffer format *
* *
*--------------------------------------------------------------------*/
void ctcmpc_dumpit(char *buf, int len)
{
__u32 ct, sw, rm, dup;
char *ptr, *rptr;
char tbuf[82], tdup[82];
char addr[22];
char boff[12];
char bhex[82], duphex[82];
char basc[40];
sw = 0;
rptr = ptr = buf;
rm = 16;
duphex[0] = 0x00;
dup = 0;
for (ct = 0; ct < len; ct++, ptr++, rptr++) {
if (sw == 0) {
scnprintf(addr, sizeof(addr), "%16.16llx", (__u64)rptr);
scnprintf(boff, sizeof(boff), "%4.4X", (__u32)ct);
bhex[0] = '\0';
basc[0] = '\0';
}
if ((sw == 4) || (sw == 12))
strcat(bhex, " ");
if (sw == 8)
strcat(bhex, " ");
scnprintf(tbuf, sizeof(tbuf), "%2.2llX", (__u64)*ptr);
tbuf[2] = '\0';
strcat(bhex, tbuf);
if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
basc[sw] = *ptr;
else
basc[sw] = '.';
basc[sw+1] = '\0';
sw++;
rm--;
if (sw != 16)
continue;
if ((strcmp(duphex, bhex)) != 0) {
if (dup != 0) {
scnprintf(tdup, sizeof(tdup),
"Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n",
tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
dup = 0;
strcpy(duphex, bhex);
} else
dup++;
sw = 0;
rm = 16;
} /* endfor */
if (sw != 0) {
for ( ; rm > 0; rm--, sw++) {
if ((sw == 4) || (sw == 12))
strcat(bhex, " ");
if (sw == 8)
strcat(bhex, " ");
strcat(bhex, " ");
strcat(basc, " ");
}
if (dup != 0) {
scnprintf(tdup, sizeof(tdup),
"Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
} else {
if (dup >= 1) {
scnprintf(tdup, sizeof(tdup),
"Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
if (dup != 0) {
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
}
}
return;
} /* end of ctcmpc_dumpit */
#endif
#ifdef DEBUGDATA
/*
* Dump header and first 16 bytes of an sk_buff for debugging purposes.
*
* skb The sk_buff to dump.
* offset Offset relative to skb-data, where to start the dump.
*/
void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{
__u8 *p = skb->data;
struct th_header *header;
struct pdu *pheader;
int bl = skb->len;
int i;
if (p == NULL)
return;
p += offset;
header = (struct th_header *)p;
ctcm_pr_debug("dump:\n");
ctcm_pr_debug("skb len=%d \n", skb->len);
if (skb->len > 2) {
switch (header->th_ch_flag) {
case TH_HAS_PDU:
break;
case 0x00:
case TH_IS_XID:
if ((header->th_blk_flag == TH_DATA_IS_XID) &&
(header->th_is_xid == 0x01))
goto dumpth;
case TH_SWEEP_REQ:
goto dumpth;
case TH_SWEEP_RESP:
goto dumpth;
default:
break;
}
pheader = (struct pdu *)p;
ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
pheader->pdu_offset, pheader->pdu_offset);
ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag);
ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq);
goto dumpdata;
dumpth:
ctcm_pr_debug("th->seg : %02x\n", header->th_seg);
ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag);
ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
ctcm_pr_debug("th->type : %s\n",
(header->th_is_xid) ? "DATA" : "XID");
ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num);
}
dumpdata:
if (bl > 32)
bl = 32;
ctcm_pr_debug("data: ");
for (i = 0; i < bl; i++)
ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
ctcm_pr_debug("\n");
}
#endif
static struct net_device *ctcmpc_get_dev(int port_num)
{
char device[20];
struct net_device *dev;
struct ctcm_priv *priv;
scnprintf(device, sizeof(device), "%s%i", MPC_DEVICE_NAME, port_num);
dev = __dev_get_by_name(&init_net, device);
if (dev == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s: Device not found by name: %s",
CTCM_FUNTAIL, device);
return NULL;
}
priv = dev->ml_priv;
if (priv == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): dev->ml_priv is NULL",
CTCM_FUNTAIL, device);
return NULL;
}
if (priv->mpcg == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): priv->mpcg is NULL",
CTCM_FUNTAIL, device);
return NULL;
}
return dev;
}
/*
* ctc_mpc_alloc_channel
* (exported interface)
*
* Device Initialization :
* ACTPATH driven IO operations
*/
int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
{
struct net_device *dev;
struct mpc_group *grp;
struct ctcm_priv *priv;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return 1;
priv = dev->ml_priv;
grp = priv->mpcg;
grp->allochanfunc = callback;
grp->port_num = port_num;
grp->port_persist = 1;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_INOP:
/* Group is in the process of terminating */
grp->alloc_called = 1;
break;
case MPCG_STATE_RESET:
/* MPC Group will transition to state */
/* MPCG_STATE_XID2INITW iff the minimum number */
/* of 1 read and 1 write channel have successfully*/
/* activated */
/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
if (callback)
grp->send_qllc_disc = 1;
fallthrough;
case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
if (callback)
ctcm_open(dev);
fsm_event(priv->fsm, DEV_EVENT_START, dev);
break;
case MPCG_STATE_READY:
/* XID exchanges completed after PORT was activated */
/* Link station already active */
/* Maybe timing issue...retry callback */
grp->allocchan_callback_retries++;
if (grp->allocchan_callback_retries < 4) {
if (grp->allochanfunc)
grp->allochanfunc(grp->port_num,
grp->group_max_buflen);
} else {
/* there are problems...bail out */
/* there may be a state mismatch so restart */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->allocchan_callback_retries = 0;
}
break;
}
return 0;
}
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
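/*
 * Minimal sketch of how an exploiter of the exported MPC interface
 * might allocate the channels for a port (the callback name and port
 * number are made-up examples):
 *
 *	static void my_alloc_done(int port_num, int max_buflen)
 *	{
 *		// channels are active; max_buflen is the group buffer size
 *	}
 *	...
 *	rc = ctc_mpc_alloc_channel(0, my_alloc_done);
 *
 * A non-zero return value means the mpc<port> device (or its MPC
 * group) could not be looked up.
 */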
/*
* ctc_mpc_establish_connectivity
* (exported interface)
*/
void ctc_mpc_establish_connectivity(int port_num,
void (*callback)(int, int, int))
{
struct net_device *dev;
struct mpc_group *grp;
struct ctcm_priv *priv;
struct channel *rch, *wch;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
rch = priv->channel[CTCM_READ];
wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
grp->estconnfunc = callback;
grp->port_num = port_num;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_READY:
/* XID exchanges completed after PORT was activated */
/* Link station already active */
/* Maybe timing issue...retry callback */
fsm_deltimer(&grp->timer);
grp->estconn_callback_retries++;
if (grp->estconn_callback_retries < 4) {
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
}
} else {
/* there are problems...bail out */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->estconn_callback_retries = 0;
}
break;
case MPCG_STATE_INOP:
case MPCG_STATE_RESET:
/* MPC Group is not ready to start XID - min num of */
/* 1 read and 1 write channel have not been acquired*/
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): REJECTED - inactive channels",
CTCM_FUNTAIL, dev->name);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
break;
case MPCG_STATE_XID2INITW:
/* alloc channel was called but no XID exchange */
/* has occurred. initiate xside XID exchange */
/* make sure yside XID0 processing has not started */
if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): ABORT - PASSIVE XID",
CTCM_FUNTAIL, dev->name);
break;
}
grp->send_qllc_disc = 1;
fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
fsm_deltimer(&grp->timer);
fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
MPCG_EVENT_TIMER, dev);
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
if ((rch->in_mpcgroup) &&
(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): RX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, rch->id);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
goto done;
}
if ((wch->in_mpcgroup) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): WX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, wch->id);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
goto done;
}
break;
case MPCG_STATE_XID0IOWAIT:
/* already in active XID negotiations */
default:
break;
}
done:
CTCM_PR_DEBUG("Exit %s()\n", __func__);
return;
}
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
/*
* ctc_mpc_dealloc_ch
* (exported interface)
*/
void ctc_mpc_dealloc_ch(int port_num)
{
struct net_device *dev;
struct ctcm_priv *priv;
struct mpc_group *grp;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
"%s: %s: refcount = %d\n",
CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
fsm_deltimer(&priv->restart_timer);
grp->channels_terminating = 0;
fsm_deltimer(&grp->timer);
grp->allochanfunc = NULL;
grp->estconnfunc = NULL;
grp->port_persist = 0;
grp->send_qllc_disc = 0;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
ctcm_close(dev);
return;
}
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
/*
* ctc_mpc_flow_control
* (exported interface)
*/
void ctc_mpc_flow_control(int port_num, int flowc)
{
struct ctcm_priv *priv;
struct mpc_group *grp;
struct net_device *dev;
struct channel *rch;
int mpcg_state;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc);
rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) {
case 1:
if (mpcg_state == MPCG_STATE_FLOWC)
break;
if (mpcg_state == MPCG_STATE_READY) {
if (grp->flow_off_called == 1)
grp->flow_off_called = 0;
else
fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
break;
}
break;
case 0:
if (mpcg_state == MPCG_STATE_FLOWC) {
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* ensure any data that has accumulated */
			/* on the io_queue will now be sent */
tasklet_schedule(&rch->ch_tasklet);
}
/* possible race condition */
if (mpcg_state == MPCG_STATE_READY) {
grp->flow_off_called = 1;
break;
}
break;
}
}
EXPORT_SYMBOL(ctc_mpc_flow_control);
static int mpc_send_qllc_discontact(struct net_device *);
/*
* helper function of ctcmpc_unpack_skb
*/
static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
grp->sweep_rsp_pend_num--;
if ((grp->sweep_req_pend_num == 0) &&
(grp->sweep_rsp_pend_num == 0)) {
fsm_deltimer(&ch->sweep_timer);
grp->in_sweep = 0;
rch->th_seq_num = 0x00;
ch->th_seq_num = 0x00;
ctcm_clear_busy_do(dev);
}
return;
}
/*
* helper function of mpc_rcvd_sweep_req
* which is a helper of ctcmpc_unpack_skb
*/
static void ctcmpc_send_sweep_resp(struct channel *rch)
{
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct th_sweep *header;
struct sk_buff *sweep_skb;
struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
if (sweep_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): sweep_skb allocation ERROR\n",
CTCM_FUNTAIL, rch->id);
goto done;
}
header = skb_put_zero(sweep_skb, TH_SWEEP_LENGTH);
header->th.th_ch_flag = TH_SWEEP_RESP;
header->sw.th_last_seq = ch->th_seq_num;
netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
return;
done:
grp->in_sweep = 0;
ctcm_clear_busy_do(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
/*
* helper function of ctcmpc_unpack_skb
*/
static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
" %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
ctcm_test_and_set_busy(dev);
grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
}
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
grp->sweep_req_pend_num--;
ctcmpc_send_sweep_resp(ch);
kfree(mpcginfo);
return;
}
/*
* MPC Group Station FSM definitions
*/
static const char *mpcg_event_names[] = {
[MPCG_EVENT_INOP] = "INOP Condition",
[MPCG_EVENT_DISCONC] = "Discontact Received",
[MPCG_EVENT_XID0DO] = "Channel Active - Start XID",
[MPCG_EVENT_XID2] = "XID2 Received",
[MPCG_EVENT_XID2DONE] = "XID0 Complete",
[MPCG_EVENT_XID7DONE] = "XID7 Complete",
[MPCG_EVENT_TIMER] = "XID Setup Timer",
[MPCG_EVENT_DOIO] = "XID DoIO",
};
static const char *mpcg_state_names[] = {
[MPCG_STATE_RESET] = "Reset",
[MPCG_STATE_INOP] = "INOP",
[MPCG_STATE_XID2INITW] = "Passive XID- XID0 Pending Start",
[MPCG_STATE_XID2INITX] = "Passive XID- XID0 Pending Complete",
[MPCG_STATE_XID7INITW] = "Passive XID- XID7 Pending P1 Start",
[MPCG_STATE_XID7INITX] = "Passive XID- XID7 Pending P2 Complete",
[MPCG_STATE_XID0IOWAIT] = "Active XID- XID0 Pending Start",
[MPCG_STATE_XID0IOWAIX] = "Active XID- XID0 Pending Complete",
[MPCG_STATE_XID7INITI] = "Active XID- XID7 Pending Start",
[MPCG_STATE_XID7INITZ] = "Active XID- XID7 Pending Complete ",
[MPCG_STATE_XID7INITF] = "XID - XID7 Complete ",
[MPCG_STATE_FLOWC] = "FLOW CONTROL ON",
[MPCG_STATE_READY] = "READY",
};
/*
* The MPC Group Station FSM
* state/event -> action table
*/
static const fsm_node mpcg_fsm[] = {
{ MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
{ MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
};
static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
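/*
 * Each fsm_node above names a (state, event) pair and the action run when
 * fsm_event() is posted in that state; pairs without an entry simply have
 * no action attached.  The table and the name arrays are consumed by
 * init_fsm(), as done in ctcmpc_init_mpc_group() further below:
 *
 *	grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
 *			    MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
 *			    mpcg_fsm_len, GFP_KERNEL);
 */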
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
fsm_deltimer(&grp->timer);
if (grp->saved_xid2->xid2_flag2 == 0x40) {
priv->xid->xid2_flag2 = 0x00;
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 1,
grp->group_max_buflen);
grp->estconnfunc = NULL;
} else if (grp->allochanfunc)
grp->send_qllc_disc = 1;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): fails",
CTCM_FUNTAIL, dev->name);
return;
}
grp->port_persist = 1;
grp->out_of_sequence = 0;
grp->estconn_called = 0;
tasklet_hi_schedule(&grp->mpc_tasklet2);
return;
}
/*
* helper of ctcm_init_netdevice
* CTCM_PROTO_MPC only
*/
void mpc_group_ready(unsigned long adev)
{
struct net_device *dev = (struct net_device *)adev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = NULL;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
"%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */
ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
ch->collect_len = 0;
spin_unlock(&ch->collect_lock);
}
ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
ctcm_clear_busy(dev);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
} else if (grp->allochanfunc) {
grp->allochanfunc(grp->port_num, grp->group_max_buflen);
}
grp->send_qllc_disc = 1;
grp->changed_side = 0;
return;
}
/*
* Increment the MPC Group Active Channel Counts
* helper of dev_action (called from channel fsm)
*/
void mpc_channel_action(struct channel *ch, int direction, int action)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++;
grp->active_channels[direction]++;
grp->outstanding_xid2++;
ch->in_mpcgroup = 1;
if (ch->xid_skb != NULL)
dev_kfree_skb_any(ch->xid_skb);
ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
GFP_ATOMIC | GFP_DMA);
if (ch->xid_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Couldn't alloc ch xid_skb\n",
CTCM_FUNTAIL, dev->name);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
ch->xid_skb_data = ch->xid_skb->data;
ch->xid_th = (struct th_header *)ch->xid_skb->data;
skb_put(ch->xid_skb, TH_HEADER_LENGTH);
ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
skb_put(ch->xid_skb, XID2_LENGTH);
ch->xid_id = skb_tail_pointer(ch->xid_skb);
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
skb_put_data(ch->xid_skb, grp->xid_skb->data,
grp->xid_skb->len);
ch->xid->xid2_dlc_type =
((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE);
if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
fsm_newstate(ch->fsm, CH_XID0_PENDING);
if ((grp->active_channels[CTCM_READ] > 0) &&
(grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
"%s: %s: MPC GROUP CHANNELS ACTIVE\n",
__func__, dev->name);
}
} else if ((action == MPC_CHANNEL_REMOVE) &&
(ch->in_mpcgroup == 1)) {
ch->in_mpcgroup = 0;
grp->num_channel_paths--;
grp->active_channels[direction]--;
if (ch->xid_skb != NULL)
dev_kfree_skb_any(ch->xid_skb);
ch->xid_skb = NULL;
if (grp->channels_terminating)
goto done;
if (((grp->active_channels[CTCM_READ] == 0) &&
(grp->active_channels[CTCM_WRITE] > 0))
|| ((grp->active_channels[CTCM_WRITE] == 0) &&
(grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
done:
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
/*
* Unpack a just received skb and hand it over to
* upper layers.
* special MPC version of unpack_skb.
*
* ch The channel where this skb has been received.
* pskb The received skb.
*/
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct pdu *curr_pdu;
struct mpcg_info *mpcginfo;
struct th_header *header = NULL;
struct th_sweep *sweep = NULL;
int pdu_last_seen = 0;
__u32 new_len;
struct sk_buff *skb;
int skblen;
int sendrc = 0;
CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
__func__, dev->name, smp_processor_id(), ch->id);
header = (struct th_header *)pskb->data;
if ((header->th_seg == 0) &&
(header->th_ch_flag == 0) &&
(header->th_blk_flag == 0) &&
(header->th_seq_num == 0))
/* nothing for us */ goto done;
CTCM_PR_DBGDATA("%s: th_header\n", __func__);
CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
pskb->dev = dev;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
skb_pull(pskb, TH_HEADER_LENGTH);
if (likely(header->th_ch_flag == TH_HAS_PDU)) {
CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
(header->th_seq_num != ch->th_seq_num + 1) &&
(ch->th_seq_num != 0))) {
/* This is NOT the next segment *
* we are not the correct race winner *
* go away and let someone else win *
* BUT..this only applies if xid negot *
* is done *
*/
grp->out_of_sequence += 1;
__skb_push(pskb, TH_HEADER_LENGTH);
skb_queue_tail(&ch->io_queue, pskb);
CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
"got:%08x\n", __func__,
ch->th_seq_num + 1, header->th_seq_num);
return;
}
grp->out_of_sequence = 0;
ch->th_seq_num = header->th_seq_num;
CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
__func__, ch->th_seq_num);
if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
goto done;
while ((pskb->len > 0) && !pdu_last_seen) {
curr_pdu = (struct pdu *)pskb->data;
CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
__func__, pskb->len);
skb_pull(pskb, PDU_HEADER_LENGTH);
if (curr_pdu->pdu_flag & PDU_LAST)
pdu_last_seen = 1;
if (curr_pdu->pdu_flag & PDU_CNTL)
pskb->protocol = htons(ETH_P_SNAP);
else
pskb->protocol = htons(ETH_P_SNA_DIX);
if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Dropping packet with "
"illegal siize %d",
CTCM_FUNTAIL, dev->name, pskb->len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto done;
}
skb_reset_mac_header(pskb);
new_len = curr_pdu->pdu_offset;
CTCM_PR_DBGDATA("%s: new_len: %04x \n",
__func__, new_len);
if ((new_len == 0) || (new_len > pskb->len)) {
/* should never happen */
/* pskb len must be hosed...bail out */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): non valid pdu_offset: %04x",
/* "data may be lost", */
CTCM_FUNTAIL, dev->name, new_len);
goto done;
}
skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
if (!skb) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): MEMORY allocation error",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
skb_put_data(skb, pskb->data, new_len);
skb_reset_mac_header(skb);
skb->dev = pskb->dev;
skb->protocol = pskb->protocol;
skb->ip_summed = CHECKSUM_UNNECESSARY;
*((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
ch->pdu_seq++;
if (do_debug_data) {
ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
__func__, ch->pdu_seq);
ctcm_pr_debug("%s: skb:%0lx "
"skb len: %d \n", __func__,
(unsigned long)skb, skb->len);
ctcm_pr_debug("%s: up to 32 bytes "
"of pdu_data sent\n", __func__);
ctcmpc_dump32((char *)skb->data, skb->len);
}
skblen = skb->len;
sendrc = netif_rx(skb);
priv->stats.rx_packets++;
priv->stats.rx_bytes += skblen;
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
mpcginfo = kmalloc(sizeof(struct mpcg_info), GFP_ATOMIC);
if (mpcginfo == NULL)
goto done;
mpcginfo->ch = ch;
mpcginfo->th = header;
mpcginfo->skb = pskb;
CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
__func__);
/* it's a sweep? */
sweep = (struct th_sweep *)pskb->data;
mpcginfo->sweep = sweep;
if (header->th_ch_flag == TH_SWEEP_REQ)
mpc_rcvd_sweep_req(mpcginfo);
else if (header->th_ch_flag == TH_SWEEP_RESP)
mpc_rcvd_sweep_resp(mpcginfo);
else if (header->th_blk_flag == TH_DATA_IS_XID) {
struct xid2 *thisxid = (struct xid2 *)pskb->data;
skb_pull(pskb, XID2_LENGTH);
mpcginfo->xid = thisxid;
fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
} else if (header->th_blk_flag == TH_DISCONTACT)
fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
else if (header->th_seq_num != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): control pkt expected\n",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
/* mpcginfo only used for non-data transfers */
if (do_debug_data)
ctcmpc_dump_skb(pskb, -8);
}
kfree(mpcginfo);
}
done:
dev_kfree_skb_any(pskb);
if (sendrc == NET_RX_DROP) {
dev_warn(&dev->dev,
"The network backlog for %s is exceeded, "
"package dropped\n", __func__);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
__func__, dev->name, ch, ch->id);
}
/*
* tasklet helper for mpc's skb unpacking.
*
* ch The channel to work on.
* Allow flow control back pressure to occur here.
* Throttling back channel can result in excessive
* channel inactivity and system deact of channel
*/
void ctcmpc_bh(unsigned long thischan)
{
struct channel *ch = (struct channel *)thischan;
struct sk_buff *skb;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
dev->name, smp_processor_id(), __func__, ch->id);
/* caller has requested driver to throttle back */
while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
(skb = skb_dequeue(&ch->io_queue))) {
ctcmpc_unpack_skb(ch, skb);
if (grp->out_of_sequence > 20) {
/* assume data loss has occurred if */
/* missing seq_num for extended */
/* period of time */
grp->out_of_sequence = 0;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
break;
}
if (skb == skb_peek(&ch->io_queue))
break;
}
CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
__func__, dev->name, ch, ch->id);
return;
}
/*
* MPC Group Initializations
*/
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
{
struct mpc_group *grp;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"Enter %s(%p)", CTCM_FUNTAIL, priv);
grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
if (grp == NULL)
return NULL;
grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
mpcg_fsm_len, GFP_KERNEL);
if (grp->fsm == NULL) {
kfree(grp);
return NULL;
}
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
fsm_settimer(grp->fsm, &grp->timer);
grp->xid_skb =
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
if (grp->xid_skb == NULL) {
kfree_fsm(grp->fsm);
kfree(grp);
return NULL;
}
/* base xid for all channels in group */
grp->xid_skb_data = grp->xid_skb->data;
grp->xid_th = (struct th_header *)grp->xid_skb->data;
skb_put_data(grp->xid_skb, &thnorm, TH_HEADER_LENGTH);
grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
skb_put_data(grp->xid_skb, &init_xid, XID2_LENGTH);
grp->xid->xid2_adj_id = jiffies | 0xfff00000;
grp->xid->xid2_sender_id = jiffies;
grp->xid_id = skb_tail_pointer(grp->xid_skb);
skb_put_data(grp->xid_skb, "VTAM", 4);
grp->rcvd_xid_skb =
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
if (grp->rcvd_xid_skb == NULL) {
kfree_fsm(grp->fsm);
dev_kfree_skb(grp->xid_skb);
kfree(grp);
return NULL;
}
grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
grp->saved_xid2 = NULL;
priv->xid = grp->xid;
priv->mpcg = grp;
return grp;
}
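/*
 * Layout of the group's base xid_skb built above:
 *
 *	| struct th_header | struct xid2 | "VTAM" |
 *	  TH_HEADER_LENGTH   XID2_LENGTH   4 bytes
 *
 * Per-channel copies of this buffer are made in mpc_channel_action() via
 * skb_put_data(), after which only xid2_dlc_type (and xid2_buf_len on the
 * write side) are adjusted per channel.
 */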
/*
* The MPC Group Station FSM
*/
/*
* MPC Group Station FSM actions
* CTCM_PROTO_MPC only
*/
/*
* NOP action for statemachines
*/
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
* invoked when the device transitions to dev_stopped
* MPC will stop each individual channel if a single XID failure
* occurs, or will initiate stopping all channels if a GROUP
* level failure occurs.
*/
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv;
struct mpc_group *grp;
struct channel *wch;
CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
priv = dev->ml_priv;
grp = priv->mpcg;
grp->flow_off_called = 0;
fsm_deltimer(&grp->timer);
if (grp->channels_terminating)
return;
grp->channels_terminating = 1;
grp->saved_state = fsm_getstate(grp->fsm);
fsm_newstate(grp->fsm, MPCG_STATE_INOP);
if (grp->saved_state > MPCG_STATE_XID7INITF)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): MPC GROUP INOPERATIVE",
CTCM_FUNTAIL, dev->name);
if ((grp->saved_state != MPCG_STATE_RESET) ||
/* dealloc_channel has been called */
(grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
wch = priv->channel[CTCM_WRITE];
switch (grp->saved_state) {
case MPCG_STATE_RESET:
case MPCG_STATE_INOP:
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID2INITX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID7INITF:
break;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
default:
tasklet_hi_schedule(&wch->ch_disc_tasklet);
}
grp->xid2_tgnum = 0;
grp->group_max_buflen = 0; /*min of all received */
grp->outstanding_xid2 = 0;
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
grp->xidnogood = 0;
grp->changed_side = 0;
grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
skb_reset_tail_pointer(grp->rcvd_xid_skb);
grp->rcvd_xid_skb->len = 0;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
if (grp->send_qllc_disc == 1) {
grp->send_qllc_disc = 0;
mpc_send_qllc_discontact(dev);
}
/* DO NOT issue DEV_EVENT_STOP directly out of this code */
/* This can result in INOP of VTAM PU due to halting of */
/* outstanding IO which causes a sense to be returned */
/* Only about 3 senses are allowed and then IOS/VTAM will*/
/* become unreachable without manual intervention */
if ((grp->port_persist == 1) || (grp->alloc_called)) {
grp->alloc_called = 0;
fsm_deltimer(&priv->restart_timer);
fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
if (grp->saved_state > MPCG_STATE_XID7INITF)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
"%s(%s): MPC GROUP RECOVERY SCHEDULED",
CTCM_FUNTAIL, dev->name);
} else {
fsm_deltimer(&priv->restart_timer);
fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
CTCM_FUNTAIL, dev->name);
}
}
/*
* Handle mpc group action timeout.
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*
* fi An instance of an mpc_group fsm.
* event The event, just happened.
* arg Generic pointer, casted from net_device * upon call.
*/
static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv;
struct mpc_group *grp;
struct channel *wch;
struct channel *rch;
priv = dev->ml_priv;
grp = priv->mpcg;
wch = priv->channel[CTCM_WRITE];
rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
/* Unless there is outstanding IO on the */
/* channel just return and wait for ATTN */
/* interrupt to begin XID negotiations */
if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
break;
fallthrough;
default:
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s: dev=%s exit",
CTCM_FUNTAIL, dev->name);
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev;
struct ctcm_priv *priv;
struct mpc_group *grp;
if (ch) {
dev = ch->netdev;
if (dev) {
priv = dev->ml_priv;
if (priv) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s: %s: %s\n",
CTCM_FUNTAIL, dev->name, ch->id);
grp = priv->mpcg;
grp->send_qllc_disc = 1;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
}
}
return;
}
/*
* MPC Group Station - not part of FSM
* CTCM_PROTO_MPC only
* called from add_channel in ctcm_main.c
*/
void mpc_action_send_discontact(unsigned long thischan)
{
int rc;
struct channel *ch = (struct channel *)thischan;
unsigned long saveflags = 0;
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
rc = ccw_device_start(ch->cdev, &ch->ccw[15], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
ctcm_ccw_check_rc(ch, rc, (char *)__func__);
}
return;
}
/*
* helper function of mpc FSM
* CTCM_PROTO_MPC only
* mpc_action_rcvd_xid7
*/
static int mpc_validate_xid(struct mpcg_info *mpcginfo)
{
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct xid2 *xid = mpcginfo->xid;
int rc = 0;
__u64 our_id = 0;
__u64 their_id = 0;
int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
if (xid == NULL) {
rc = 1;
/* XID REJECTED: xid == NULL */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid = NULL",
CTCM_FUNTAIL, ch->id);
goto done;
}
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/*the received direction should be the opposite of ours */
if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): r/w channel pairing mismatch",
CTCM_FUNTAIL, ch->id);
goto done;
}
if (xid->xid2_dlc_type == XID2_READ_SIDE) {
CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
grp->group_max_buflen, xid->xid2_buf_len);
if (grp->group_max_buflen == 0 || grp->group_max_buflen >
xid->xid2_buf_len - len)
grp->group_max_buflen = xid->xid2_buf_len - len;
}
if (grp->saved_xid2 == NULL) {
grp->saved_xid2 =
(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
skb_put_data(grp->rcvd_xid_skb, xid, XID2_LENGTH);
grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
skb_reset_tail_pointer(grp->rcvd_xid_skb);
grp->rcvd_xid_skb->len = 0;
/* convert two 32 bit numbers into 1 64 bit for id compare */
our_id = (__u64)priv->xid->xid2_adj_id;
our_id = our_id << 32;
our_id = our_id + priv->xid->xid2_sender_id;
their_id = (__u64)xid->xid2_adj_id;
their_id = their_id << 32;
their_id = their_id + xid->xid2_sender_id;
/* lower id assume the xside role */
if (our_id < their_id) {
grp->roll = XSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): WE HAVE LOW ID - TAKE XSIDE",
CTCM_FUNTAIL, ch->id);
} else {
grp->roll = YSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
CTCM_FUNTAIL, ch->id);
}
} else {
if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
rc = 3;
/* XID REJECTED: xid flag byte4 mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid flag byte4 mismatch",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_flag2 == 0x40) {
rc = 4;
/* XID REJECTED - xid NOGOOD */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid NOGOOD",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
rc = 5;
/* XID REJECTED - Adjacent Station ID Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Adjacent Station ID Mismatch",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
rc = 6;
/* XID REJECTED - Sender Address Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Sender Address Mismatch",
CTCM_FUNTAIL, ch->id);
}
}
done:
if (rc) {
dev_warn(&dev->dev,
"The XID used in the MPC protocol is not valid, "
"rc = %d\n", rc);
priv->xid->xid2_flag2 = 0x40;
grp->saved_xid2->xid2_flag2 = 0x40;
}
return rc;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
{
struct channel *ch = arg;
int rc = 0;
int gotlock = 0;
unsigned long saveflags = 0; /* avoids compiler warning with
spin_unlock_irqrestore */
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
if (ctcm_checkalloc_buffer(ch))
goto done;
/*
* skb data-buffer referencing:
*/
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
/* result of the previous 3 statements is NOT always
* already set after ctcm_checkalloc_buffer
* because of possible reuse of the trans_skb
*/
memset(ch->trans_skb->data, 0, 16);
ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
/* check is main purpose here: */
skb_put(ch->trans_skb, TH_HEADER_LENGTH);
ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
/* check is main purpose here: */
skb_put(ch->trans_skb, XID2_LENGTH);
ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
/* cleanup back to startpoint */
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
/* non-checking rewrite of above skb data-buffer referencing: */
/*
memset(ch->trans_skb->data, 0, 16);
ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
*/
ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[8].count = 0;
ch->ccw[8].cda = 0x00;
if (!(ch->xid_th && ch->xid && ch->xid_id))
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
"%s(%s): xid_th=%p, xid=%p, xid_id=%p",
CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
if (side == XSIDE) {
/* mpc_action_xside_xid */
if (ch->xid_th == NULL)
goto done;
ch->ccw[9].cmd_code = CCW_CMD_WRITE;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
ch->ccw[9].cda = virt_to_phys(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[10].cmd_code = CCW_CMD_WRITE;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
ch->ccw[10].cda = virt_to_phys(ch->xid);
ch->ccw[11].cmd_code = CCW_CMD_READ;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
ch->ccw[12].cmd_code = CCW_CMD_READ;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
ch->ccw[13].cmd_code = CCW_CMD_READ;
ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
} else { /* side == YSIDE : mpc_action_yside_xid */
ch->ccw[9].cmd_code = CCW_CMD_READ;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
ch->ccw[10].cmd_code = CCW_CMD_READ;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
if (ch->xid_th == NULL)
goto done;
ch->ccw[11].cmd_code = CCW_CMD_WRITE;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
ch->ccw[11].cda = virt_to_phys(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[12].cmd_code = CCW_CMD_WRITE;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
ch->ccw[12].cda = virt_to_phys(ch->xid);
if (ch->xid_id == NULL)
goto done;
ch->ccw[13].cmd_code = CCW_CMD_WRITE;
ch->ccw[13].cda = virt_to_phys(ch->xid_id);
}
ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[13].count = 4;
ch->ccw[14].cmd_code = CCW_CMD_NOOP;
ch->ccw[14].flags = CCW_FLAG_SLI;
ch->ccw[14].count = 0;
ch->ccw[14].cda = 0;
CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
CTCM_D3_DUMP((char *)ch->xid_id, 4);
if (!in_hardirq()) {
/* Such conditional locking is a known problem for
* sparse because it is not statically deterministic.
* Warnings should be ignored here. */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
gotlock = 1;
}
fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
rc = ccw_device_start(ch->cdev, &ch->ccw[8], 0, 0xff, 0);
if (gotlock) /* see remark above about conditional locking */
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
ctcm_ccw_check_rc(ch, rc,
(side == XSIDE) ? "x-side XID" : "y-side XID");
}
done:
CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
__func__, ch, ch->id);
return;
}
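/*
 * CCW program summary for mpc_action_side_xid(), starting at ccw[8]
 * (whose command code is filled in by mpc_action_doxid0()):
 *
 *	slot	x-side (low ID)		y-side (high ID)
 *	 [9]	WRITE th_header		READ  th_header
 *	[10]	WRITE xid2		READ  xid2
 *	[11]	READ  th_header		WRITE th_header
 *	[12]	READ  xid2		WRITE xid2
 *	[13]	READ  4-byte id		WRITE 4-byte id ("VTAM")
 *	[14]	NOOP			NOOP
 *
 * The side that drew the lower ID (XSIDE, see mpc_validate_xid()) writes
 * its XID first and then reads the partner's; the y-side mirrors that.
 */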
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
{
mpc_action_side_xid(fsm, arg, XSIDE);
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
{
mpc_action_side_xid(fsm, arg, YSIDE);
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
if (ch->xid == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): ch->xid == NULL",
CTCM_FUNTAIL, dev->name);
return;
}
fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
ch->xid->xid2_option = XID2_0;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID2INITX:
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
break;
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID0IOWAIX:
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
break;
}
fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = NULL;
int direction;
int send = 0;
if (priv)
grp = priv->mpcg;
if (grp == NULL)
return;
for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
thisxid->xid2_option = XID2_7;
send = 0;
/* xid7 phase 1 */
if (grp->outstanding_xid7_p2 > 0) {
if (grp->roll == YSIDE) {
if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
skb_put_data(ch->xid_skb, &thdummy,
TH_HEADER_LENGTH);
send = 1;
}
} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
skb_put_data(ch->xid_skb, &thnorm,
TH_HEADER_LENGTH);
send = 1;
}
} else {
/* xid7 phase 2 */
if (grp->roll == YSIDE) {
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
skb_put_data(ch->xid_skb, &thnorm,
TH_HEADER_LENGTH);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
send = 1;
}
} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
skb_put_data(ch->xid_skb, &thdummy,
TH_HEADER_LENGTH);
send = 1;
}
}
if (send)
fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
}
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
grp->outstanding_xid7, grp->outstanding_xid7_p2);
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
fsm_newstate(ch->fsm, CH_XID7_PENDING);
grp->outstanding_xid2--;
grp->outstanding_xid7++;
grp->outstanding_xid7_p2++;
/* must change state before validating xid to */
/* properly handle interim interrupts received*/
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID0IOWAIT:
fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID2INITX:
if (grp->outstanding_xid2 == 0) {
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
}
break;
case MPCG_STATE_XID0IOWAIX:
if (grp->outstanding_xid2 == 0) {
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
}
break;
}
CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
grp->outstanding_xid7, grp->outstanding_xid7_p2);
CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
__func__, ch->id,
fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
__func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);
grp->outstanding_xid7--;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID7INITI:
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID7INITW:
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID7INITX:
if (grp->outstanding_xid7 == 0) {
if (grp->outstanding_xid7_p2 > 0) {
grp->outstanding_xid7 =
grp->outstanding_xid7_p2;
grp->outstanding_xid7_p2 = 0;
} else
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
break;
}
mpc_validate_xid(mpcginfo);
break;
}
return;
}
/*
* mpc_action helper of an MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static int mpc_send_qllc_discontact(struct net_device *dev)
{
struct sk_buff *skb;
struct qllc *qllcptr;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
__func__, mpcg_state_names[grp->saved_state]);
switch (grp->saved_state) {
/*
* establish conn callback function is
* preferred method to report failure
*/
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID2INITX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
break;
}
fallthrough;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
skb = __dev_alloc_skb(sizeof(struct qllc), GFP_ATOMIC);
if (skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb allocation error",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
return -ENOMEM;
}
qllcptr = skb_put(skb, sizeof(struct qllc));
qllcptr->qllc_address = 0xcc;
qllcptr->qllc_commands = 0x03;
if (skb_headroom(skb) < 4) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb_headroom error",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
*((__u32 *)skb_push(skb, 4)) =
priv->channel[CTCM_READ]->pdu_seq;
priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
__func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on
receiving side */
priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP);
skb->ip_summed = CHECKSUM_UNNECESSARY;
CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
netif_rx(skb);
break;
default:
break;
}
return 0;
}
/* --- This is the END my friend --- */
| linux-master | drivers/s390/net/ctcm_mpc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ISM driver for s390.
*
* Copyright IBM Corp. 2018
*/
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
#include "ism.h"
MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");
#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"
static const struct pci_device_id ism_device_table[] = {
{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
static const struct smcd_ops ism_ops;
#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
/* a list for fast mapping */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
struct ism_dev_list {
struct list_head list;
struct mutex mutex; /* protects ism device list */
};
static struct ism_dev_list ism_dev_list = {
.list = LIST_HEAD_INIT(ism_dev_list.list),
.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};
static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
unsigned long flags;
spin_lock_irqsave(&ism->lock, flags);
ism->subs[client->id] = client;
spin_unlock_irqrestore(&ism->lock, flags);
}
int ism_register_client(struct ism_client *client)
{
struct ism_dev *ism;
int i, rc = -ENOSPC;
mutex_lock(&ism_dev_list.mutex);
mutex_lock(&clients_lock);
for (i = 0; i < MAX_CLIENTS; ++i) {
if (!clients[i]) {
clients[i] = client;
client->id = i;
if (i == max_client)
max_client++;
rc = 0;
break;
}
}
mutex_unlock(&clients_lock);
if (i < MAX_CLIENTS) {
/* initialize with all devices that we got so far */
list_for_each_entry(ism, &ism_dev_list.list, list) {
ism->priv[i] = NULL;
client->add(ism);
ism_setup_forwarding(client, ism);
}
}
mutex_unlock(&ism_dev_list.mutex);
return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);
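/*
 * Usage sketch, not taken from this file: an ISM client provides a
 * struct ism_client (declared in the ISM headers) with the callbacks this
 * driver invokes above - add, remove, handle_event and handle_irq - and
 * registers it.  The callback names below are hypothetical:
 *
 *	static struct ism_client my_client = {
 *		.name		= "my_ism_user",
 *		.add		= my_add,
 *		.remove		= my_remove,
 *		.handle_event	= my_event,
 *		.handle_irq	= my_irq,
 *	};
 *
 *	if (ism_register_client(&my_client))
 *		... no free client slot, -ENOSPC ...
 */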
int ism_unregister_client(struct ism_client *client)
{
struct ism_dev *ism;
unsigned long flags;
int rc = 0;
mutex_lock(&ism_dev_list.mutex);
list_for_each_entry(ism, &ism_dev_list.list, list) {
spin_lock_irqsave(&ism->lock, flags);
/* Stop forwarding IRQs and events */
ism->subs[client->id] = NULL;
for (int i = 0; i < ISM_NR_DMBS; ++i) {
if (ism->sba_client_arr[i] == client->id) {
WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
__func__, client->name);
rc = -EBUSY;
goto err_reg_dmb;
}
}
spin_unlock_irqrestore(&ism->lock, flags);
}
mutex_unlock(&ism_dev_list.mutex);
mutex_lock(&clients_lock);
clients[client->id] = NULL;
if (client->id + 1 == max_client)
max_client--;
mutex_unlock(&clients_lock);
return rc;
err_reg_dmb:
spin_unlock_irqrestore(&ism->lock, flags);
mutex_unlock(&ism_dev_list.mutex);
return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
__ism_write_cmd(ism, req, 0, sizeof(*req));
WRITE_ONCE(resp->ret, ISM_ERROR);
__ism_read_cmd(ism, resp, 0, sizeof(*resp));
if (resp->ret) {
debug_text_event(ism_debug_info, 0, "cmd failure");
debug_event(ism_debug_info, 0, resp, sizeof(*resp));
goto out;
}
__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
return resp->ret;
}
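/*
 * Note on ism_cmd() above: the request payload (req + 1) is copied into
 * the command area before the header, resp->ret is pre-set to ISM_ERROR,
 * and the response header is read back before the payload so that
 * resp->len is known when the remainder is fetched.  Callers therefore see
 * either the firmware return code or ISM_ERROR if nothing was written back.
 */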
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
union ism_cmd_simple cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = cmd_code;
cmd.request.hdr.len = sizeof(cmd.request);
return ism_cmd(ism, &cmd);
}
static int query_info(struct ism_dev *ism)
{
union ism_qi cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_QUERY_INFO;
cmd.request.hdr.len = sizeof(cmd.request);
if (ism_cmd(ism, &cmd))
goto out;
debug_text_event(ism_debug_info, 3, "query info");
debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
return 0;
}
static int register_sba(struct ism_dev *ism)
{
union ism_reg_sba cmd;
dma_addr_t dma_handle;
struct ism_sba *sba;
sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
GFP_KERNEL);
if (!sba)
return -ENOMEM;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_REG_SBA;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.sba = dma_handle;
if (ism_cmd(ism, &cmd)) {
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
return -EIO;
}
ism->sba = sba;
ism->sba_dma_addr = dma_handle;
return 0;
}
static int register_ieq(struct ism_dev *ism)
{
union ism_reg_ieq cmd;
dma_addr_t dma_handle;
struct ism_eq *ieq;
ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
GFP_KERNEL);
if (!ieq)
return -ENOMEM;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_REG_IEQ;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.ieq = dma_handle;
cmd.request.len = sizeof(*ieq);
if (ism_cmd(ism, &cmd)) {
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
return -EIO;
}
ism->ieq = ieq;
ism->ieq_idx = -1;
ism->ieq_dma_addr = dma_handle;
return 0;
}
static int unregister_sba(struct ism_dev *ism)
{
int ret;
if (!ism->sba)
return 0;
ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
ism->sba, ism->sba_dma_addr);
ism->sba = NULL;
ism->sba_dma_addr = 0;
return 0;
}
static int unregister_ieq(struct ism_dev *ism)
{
int ret;
if (!ism->ieq)
return 0;
ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
ism->ieq, ism->ieq_dma_addr);
ism->ieq = NULL;
ism->ieq_dma_addr = 0;
return 0;
}
static int ism_read_local_gid(struct ism_dev *ism)
{
union ism_read_gid cmd;
int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_READ_GID;
cmd.request.hdr.len = sizeof(cmd.request);
ret = ism_cmd(ism, &cmd);
if (ret)
goto out;
ism->local_gid = cmd.response.gid;
out:
return ret;
}
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
u32 vid)
{
union ism_query_rgid cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_QUERY_RGID;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.vlan_valid = vid_valid;
cmd.request.vlan_id = vid;
return ism_cmd(ism, &cmd);
}
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
dmb->cpu_addr, dmb->dma_addr);
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
unsigned long bit;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
if (!dmb->sba_idx) {
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
return -ENOSPC;
dmb->sba_idx = bit;
}
if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
&dmb->dma_addr,
GFP_KERNEL | __GFP_NOWARN |
__GFP_NOMEMALLOC | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
return dmb->cpu_addr ? 0 : -ENOMEM;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
struct ism_client *client)
{
union ism_reg_dmb cmd;
unsigned long flags;
int ret;
ret = ism_alloc_dmb(ism, dmb);
if (ret)
goto out;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_REG_DMB;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.dmb = dmb->dma_addr;
cmd.request.dmb_len = dmb->dmb_len;
cmd.request.sba_idx = dmb->sba_idx;
cmd.request.vlan_valid = dmb->vlan_valid;
cmd.request.vlan_id = dmb->vlan_id;
cmd.request.rgid = dmb->rgid;
ret = ism_cmd(ism, &cmd);
if (ret) {
ism_free_dmb(ism, dmb);
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
spin_lock_irqsave(&ism->lock, flags);
ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
spin_unlock_irqrestore(&ism->lock, flags);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);
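/*
 * Usage sketch, values illustrative only: a registered client fills in the
 * request fields of a struct ism_dmb and calls ism_register_dmb(); on
 * success cpu_addr, dma_addr and dmb_tok are filled in by the driver.
 *
 *	struct ism_dmb dmb = {
 *		.dmb_len	= 16384,	(size of buffer to allocate)
 *		.sba_idx	= 0,		(0 = let the driver pick a slot)
 *		.vlan_valid	= 0,
 *		.rgid		= remote_gid,	(peer GID, caller-provided)
 *	};
 *
 *	ret = ism_register_dmb(ism, &dmb, &my_client);
 *	...
 *	ism_unregister_dmb(ism, &dmb);		(also frees the DMA buffer)
 */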
int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
union ism_unreg_dmb cmd;
unsigned long flags;
int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_UNREG_DMB;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.dmb_tok = dmb->dmb_tok;
spin_lock_irqsave(&ism->lock, flags);
ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
spin_unlock_irqrestore(&ism->lock, flags);
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
goto out;
ism_free_dmb(ism, dmb);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);
static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.vlan_id = vlan_id;
return ism_cmd(ism, &cmd);
}
static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.vlan_id = vlan_id;
return ism_cmd(ism, &cmd);
}
static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
union ism_sig_ieq cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.trigger_irq = trigger_irq;
cmd.request.event_code = event_code;
cmd.request.info = info;
return ism_cmd(ism, &cmd);
}
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
return min(boundary - (start & (boundary - 1)), len);
}
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
unsigned int offset, void *data, unsigned int size)
{
unsigned int bytes;
u64 dmb_req;
int ret;
while (size) {
bytes = max_bytes(offset, size, PAGE_SIZE);
dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
offset);
ret = __ism_move(ism, dmb_req, data, bytes);
if (ret)
return ret;
size -= bytes;
data += bytes;
offset += bytes;
}
return 0;
}
EXPORT_SYMBOL_GPL(ism_move);
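/*
 * Worked example for the chunking above: max_bytes() returns how far a
 * transfer may proceed before crossing the next 'boundary'-aligned
 * address, so __ism_move() is never asked to cross a page.  With
 * offset = 0x0f00, size = 0x0400 and PAGE_SIZE = 0x1000:
 *
 *	1st pass: max_bytes(0x0f00, 0x0400, 0x1000) = 0x0100  (up to page end)
 *	2nd pass: max_bytes(0x1000, 0x0300, 0x1000) = 0x0300  (remainder)
 *
 * The signal flag 'sf' is only applied to the final chunk
 * (size == bytes ? sf : 0).
 */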
static struct ism_systemeid SYSTEM_EID = {
.seid_string = "IBM-SYSZ-ISMSEID00000000",
.serial_number = "0000",
.type = "0000",
};
static void ism_create_system_eid(void)
{
struct cpuid id;
u16 ident_tail;
char tmp[5];
get_cpu_id(&id);
ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
snprintf(tmp, 5, "%04X", ident_tail);
memcpy(&SYSTEM_EID.serial_number, tmp, 4);
snprintf(tmp, 5, "%04X", id.machine);
memcpy(&SYSTEM_EID.type, tmp, 4);
}
u8 *ism_get_seid(void)
{
return SYSTEM_EID.seid_string;
}
EXPORT_SYMBOL_GPL(ism_get_seid);
static u16 ism_get_chid(struct ism_dev *ism)
{
if (!ism || !ism->pdev)
return 0;
return to_zpci(ism->pdev)->pchid;
}
static void ism_handle_event(struct ism_dev *ism)
{
struct ism_event *entry;
struct ism_client *clt;
int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
ism->ieq_idx = 0;
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
for (i = 0; i < max_client; ++i) {
clt = ism->subs[i];
if (clt)
clt->handle_event(ism, entry);
}
}
}
static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
unsigned long bit, end;
unsigned long *bv;
u16 dmbemask;
u8 client_id;
bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
spin_lock(&ism->lock);
ism->sba->s = 0;
barrier();
for (bit = 0;;) {
bit = find_next_bit_inv(bv, end, bit);
if (bit >= end)
break;
clear_bit_inv(bit, bv);
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
client_id = ism->sba_client_arr[bit];
if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
continue;
ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
}
if (ism->sba->e) {
ism->sba->e = 0;
barrier();
ism_handle_event(ism);
}
spin_unlock(&ism->lock);
return IRQ_HANDLED;
}
static u64 ism_get_local_gid(struct ism_dev *ism)
{
return ism->local_gid;
}
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
int i, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
if (!ism->sba_client_arr)
goto free_vectors;
memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
goto free_client_arr;
ret = register_sba(ism);
if (ret)
goto free_irq;
ret = register_ieq(ism);
if (ret)
goto unreg_sba;
ret = ism_read_local_gid(ism);
if (ret)
goto unreg_ieq;
if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
/* hardware is V2 capable */
ism_create_system_eid();
mutex_lock(&ism_dev_list.mutex);
mutex_lock(&clients_lock);
for (i = 0; i < max_client; ++i) {
if (clients[i]) {
clients[i]->add(ism);
ism_setup_forwarding(clients[i], ism);
}
}
mutex_unlock(&clients_lock);
list_add(&ism->list, &ism_dev_list.list);
mutex_unlock(&ism_dev_list.mutex);
query_info(ism);
return 0;
unreg_ieq:
unregister_ieq(ism);
unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
return ret;
}
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ism_dev *ism;
int ret;
ism = kzalloc(sizeof(*ism), GFP_KERNEL);
if (!ism)
return -ENOMEM;
spin_lock_init(&ism->lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
device_initialize(&ism->dev);
dev_set_name(&ism->dev, dev_name(&pdev->dev));
ret = device_add(&ism->dev);
if (ret)
goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
goto err;
ret = pci_request_mem_regions(pdev, DRV_NAME);
if (ret)
goto err_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto err_resource;
dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
ret = ism_dev_init(ism);
if (ret)
goto err_resource;
return 0;
err_resource:
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
err:
device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
kfree(ism);
return ret;
}
static void ism_dev_exit(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
unsigned long flags;
int i;
spin_lock_irqsave(&ism->lock, flags);
for (i = 0; i < max_client; ++i)
ism->subs[i] = NULL;
spin_unlock_irqrestore(&ism->lock, flags);
mutex_lock(&ism_dev_list.mutex);
mutex_lock(&clients_lock);
for (i = 0; i < max_client; ++i) {
if (clients[i])
clients[i]->remove(ism);
}
mutex_unlock(&clients_lock);
if (SYSTEM_EID.serial_number[0] != '0' ||
SYSTEM_EID.type[0] != '0')
ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
free_irq(pci_irq_vector(pdev, 0), ism);
kfree(ism->sba_client_arr);
pci_free_irq_vectors(pdev);
list_del_init(&ism->list);
mutex_unlock(&ism_dev_list.mutex);
}
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
ism_dev_exit(ism);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
kfree(ism);
}
static struct pci_driver ism_driver = {
.name = DRV_NAME,
.id_table = ism_device_table,
.probe = ism_probe,
.remove = ism_remove,
};
static int __init ism_init(void)
{
int ret;
ism_debug_info = debug_register("ism", 2, 1, 16);
if (!ism_debug_info)
return -ENODEV;
memset(clients, 0, sizeof(clients));
max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
debug_unregister(ism_debug_info);
return ret;
}
static void __exit ism_exit(void)
{
pci_unregister_driver(&ism_driver);
debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);
/*************************** SMC-D Implementation *****************************/
#if IS_ENABLED(CONFIG_SMC)
static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
u32 vid)
{
return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
}
static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
struct ism_client *client)
{
return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}
static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}
static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
return ism_add_vlan_id(smcd->priv, vlan_id);
}
static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
return ism_del_vlan_id(smcd->priv, vlan_id);
}
static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}
static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
}
static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size)
{
return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}
static int smcd_supports_v2(void)
{
return SYSTEM_EID.serial_number[0] != '0' ||
SYSTEM_EID.type[0] != '0';
}
static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
return ism_get_local_gid(smcd->priv);
}
static u16 smcd_get_chid(struct smcd_dev *smcd)
{
return ism_get_chid(smcd->priv);
}
static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
struct ism_dev *ism = dev->priv;
return &ism->dev;
}
static const struct smcd_ops ism_ops = {
.query_remote_gid = smcd_query_rgid,
.register_dmb = smcd_register_dmb,
.unregister_dmb = smcd_unregister_dmb,
.add_vlan_id = smcd_add_vlan_id,
.del_vlan_id = smcd_del_vlan_id,
.set_vlan_required = smcd_set_vlan_required,
.reset_vlan_required = smcd_reset_vlan_required,
.signal_event = smcd_signal_ieq,
.move_data = smcd_move,
.supports_v2 = smcd_supports_v2,
.get_system_eid = ism_get_seid,
.get_local_gid = smcd_get_local_gid,
.get_chid = smcd_get_chid,
.get_dev = smcd_get_dev,
};
const struct smcd_ops *ism_get_smcd_ops(void)
{
return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
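/*
 * Usage sketch (added comment; the actual call site lives in the SMC-D
 * code outside this file): a consumer is expected to fetch the table via
 * ism_get_smcd_ops() and go through the wrappers above, e.g.
 *
 *	const struct smcd_ops *ops = ism_get_smcd_ops();
 *	u16 chid = ops->get_chid(smcd);	/- forwards to ism_get_chid(smcd->priv)
 *
 * The smcd/ism split keeps struct smcd_dev generic while its priv field
 * points at the underlying struct ism_dev.
 */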
#endif
| linux-master | drivers/s390/net/ism_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive DVD Module
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_dev.h"
#include "hmcdrv_cache.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Copyright 2013 IBM Corporation");
MODULE_DESCRIPTION("HMC drive DVD access");
/*
* module parameter 'cachesize'
*/
static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
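/*
 * Example (added; the exact module name is an assumption based on the
 * "hmcdrv" component name): the cache size could be set at load time with
 * something like "modprobe hmcdrv cachesize=2097152". S_IRUGO makes the
 * value readable, but not writable, through sysfs.
 */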
/**
* hmcdrv_mod_init() - module init function
*/
static int __init hmcdrv_mod_init(void)
{
int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
if (rc)
return rc;
rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
if (rc)
return rc;
rc = hmcdrv_dev_init();
if (rc)
hmcdrv_cache_shutdown();
return rc;
}
/**
* hmcdrv_mod_exit() - module exit function
*/
static void __exit hmcdrv_mod_exit(void)
{
hmcdrv_dev_exit();
hmcdrv_cache_shutdown();
}
module_init(hmcdrv_mod_init);
module_exit(hmcdrv_mod_exit);
| linux-master | drivers/s390/char/hmcdrv_mod.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - core functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <[email protected]>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include "raw3270.h"
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/device.h>
#include <linux/mutex.h>
struct class *class3270;
EXPORT_SYMBOL(class3270);
/* The main 3270 data structure. */
struct raw3270 {
struct list_head list;
struct ccw_device *cdev;
int minor;
int model, rows, cols;
int old_model, old_rows, old_cols;
unsigned int state;
unsigned long flags;
struct list_head req_queue; /* Request queue. */
struct list_head view_list; /* List of available views. */
struct raw3270_view *view; /* Active view. */
struct timer_list timer; /* Device timer. */
unsigned char *ascebc; /* ascii -> ebcdic table */
struct raw3270_view init_view;
struct raw3270_request init_reset;
struct raw3270_request init_readpart;
struct raw3270_request init_readmod;
unsigned char init_data[256];
struct work_struct resize_work;
};
/* raw3270->state */
#define RAW3270_STATE_INIT 0 /* Initial state */
#define RAW3270_STATE_RESET 1 /* Reset command is pending */
#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */
#define RAW3270_STATE_READMOD 3 /* Read partition is pending */
#define RAW3270_STATE_READY 4 /* Device is usable by views */
/* raw3270->flags */
#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DEFINE_MUTEX(raw3270_mutex);
/* List of 3270 devices. */
static LIST_HEAD(raw3270_devices);
/*
* Flag to indicate if the driver has been registered. Some operations
* like waiting for the end of i/o need to be done differently as long
* as the kernel is still starting up (console support).
*/
static int raw3270_registered;
/* Module parameters */
static bool tubxcorrect;
module_param(tubxcorrect, bool, 0);
/*
* Wait queue for device init/delete, view delete.
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
EXPORT_SYMBOL(raw3270_wait_queue);
static void __raw3270_disconnect(struct raw3270 *rp);
/*
* Encode array for 12 bit 3270 addresses.
*/
static unsigned char raw3270_ebcgraf[64] = {
0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
static inline int raw3270_state_ready(struct raw3270 *rp)
{
return rp->state == RAW3270_STATE_READY;
}
void raw3270_buffer_address(struct raw3270 *rp, char *cp, int x, int y)
{
int addr;
if (x < 0)
x = max_t(int, 0, rp->view->cols + x);
if (y < 0)
y = max_t(int, 0, rp->view->rows + y);
addr = (y * rp->view->cols) + x;
if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
cp[0] = (addr >> 8) & 0x3f;
cp[1] = addr & 0xff;
} else {
cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
cp[1] = raw3270_ebcgraf[addr & 0x3f];
}
}
EXPORT_SYMBOL(raw3270_buffer_address);
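/*
 * Worked example (added for illustration, not part of the original code),
 * assuming a 24x80 model-2 view: raw3270_buffer_address(rp, cp, 5, 2)
 * computes addr = 2 * 80 + 5 = 165. With 12-bit addressing the bytes are
 * cp[0] = raw3270_ebcgraf[(165 >> 6) & 0x3f] and
 * cp[1] = raw3270_ebcgraf[165 & 0x3f]; with 14-bit addressing they are
 * (165 >> 8) & 0x3f and 165 & 0xff.
 */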
/*
* Allocate a new 3270 ccw request
*/
struct raw3270_request *raw3270_request_alloc(size_t size)
{
struct raw3270_request *rq;
/* Allocate request structure */
rq = kzalloc(sizeof(*rq), GFP_KERNEL | GFP_DMA);
if (!rq)
return ERR_PTR(-ENOMEM);
/* alloc output buffer. */
if (size > 0) {
rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
if (!rq->buffer) {
kfree(rq);
return ERR_PTR(-ENOMEM);
}
}
rq->size = size;
INIT_LIST_HEAD(&rq->list);
/*
* Setup ccw.
*/
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
}
EXPORT_SYMBOL(raw3270_request_alloc);
/*
* Free 3270 ccw request
*/
void raw3270_request_free(struct raw3270_request *rq)
{
kfree(rq->buffer);
kfree(rq);
}
EXPORT_SYMBOL(raw3270_request_free);
/*
* Reset request to initial state.
*/
int raw3270_request_reset(struct raw3270_request *rq)
{
if (WARN_ON_ONCE(!list_empty(&rq->list)))
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
return 0;
}
EXPORT_SYMBOL(raw3270_request_reset);
/*
* Set command code to ccw of a request.
*/
void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
{
rq->ccw.cmd_code = cmd;
}
EXPORT_SYMBOL(raw3270_request_set_cmd);
/*
* Add data fragment to output buffer.
*/
int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
{
if (size + rq->ccw.count > rq->size)
return -E2BIG;
memcpy(rq->buffer + rq->ccw.count, data, size);
rq->ccw.count += size;
return 0;
}
EXPORT_SYMBOL(raw3270_request_add_data);
/*
* Set address/length pair to ccw of a request.
*/
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
rq->ccw.cda = __pa(data);
rq->ccw.count = size;
}
EXPORT_SYMBOL(raw3270_request_set_data);
/*
* Set idal buffer to ccw of a request.
*/
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
rq->ccw.cda = __pa(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
EXPORT_SYMBOL(raw3270_request_set_idal);
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
*/
static int __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
struct raw3270_request *rq)
{
rq->view = view;
raw3270_get_view(view);
if (list_empty(&rp->req_queue) &&
!test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* No other requests are on the queue. Start this one. */
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long)rq, 0, 0);
if (rq->rc) {
raw3270_put_view(view);
return rq->rc;
}
}
list_add_tail(&rq->list, &rp->req_queue);
return 0;
}
int raw3270_view_active(struct raw3270_view *view)
{
struct raw3270 *rp = view->dev;
return rp && rp->view == view;
}
int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
unsigned long flags;
struct raw3270 *rp;
int rc;
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rp = view->dev;
if (!rp || rp->view != view)
rc = -EACCES;
else if (!raw3270_state_ready(rp))
rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
return rc;
}
EXPORT_SYMBOL(raw3270_start);
int raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq,
int cmd, void *data, size_t len)
{
int rc;
rc = raw3270_request_reset(rq);
if (rc)
return rc;
raw3270_request_set_cmd(rq, cmd);
rc = raw3270_request_add_data(rq, data, len);
if (rc)
return rc;
return raw3270_start(view, rq);
}
EXPORT_SYMBOL(raw3270_start_request);
int raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
int rc;
rp = view->dev;
if (!rp || rp->view != view)
rc = -EACCES;
else if (!raw3270_state_ready(rp))
rc = -EBUSY;
else
rc = __raw3270_start(rp, view, rq);
return rc;
}
EXPORT_SYMBOL(raw3270_start_locked);
int raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
rp = view->dev;
rq->view = view;
raw3270_get_view(view);
list_add_tail(&rq->list, &rp->req_queue);
return 0;
}
EXPORT_SYMBOL(raw3270_start_irq);
/*
* 3270 interrupt routine, called from the ccw_device layer
*/
static void raw3270_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct raw3270 *rp;
struct raw3270_view *view;
struct raw3270_request *rq;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
rq = (struct raw3270_request *)intparm;
view = rq ? rq->view : rp->view;
if (!IS_ERR(irb)) {
/* Handle CE-DE-UE and subsequent UDE */
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP))
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Handle disconnected devices */
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
if (rp->state > RAW3270_STATE_RESET)
__raw3270_disconnect(rp);
}
/* Call interrupt handler of the view */
if (view)
view->fn->intv(view, rq, irb);
}
if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
/* Device busy, do not start I/O */
return;
if (rq && !list_empty(&rq->list)) {
/* The request completed, remove from queue and do callback. */
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
/*
* Try to start each request on request queue until one is
* started successfully.
*/
while (!list_empty(&rp->req_queue)) {
rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
(unsigned long)rq, 0, 0);
if (rq->rc == 0)
break;
/* Start failed. Remove request and do callback. */
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
}
/*
* To determine the size of the 3270 device we need to do:
* 1) send a 'read partition' data stream to the device
* 2) wait for the attn interrupt that precedes the query reply
* 3) do a read modified to get the query reply
* To make things worse we have to cope with intervention
* required (3270 device switched to 'stand-by') and command
* rejects (old devices that can't do 'read partition').
*/
struct raw3270_ua { /* Query Reply structure for Usable Area */
struct { /* Usable Area Query Reply Base */
short l; /* Length of this structured field */
char sfid; /* 0x81 if Query Reply */
char qcode; /* 0x81 if Usable Area */
char flags0;
char flags1;
short w; /* Width of usable area */
short h; /* Height of usable area */
char units; /* 0x00:in; 0x01:mm */
int xr;
int yr;
char aw;
char ah;
short buffsz; /* Character buffer size, bytes */
char xmin;
char ymin;
char xmax;
char ymax;
} __packed uab;
struct { /* Alternate Usable Area Self-Defining Parameter */
char l; /* Length of this Self-Defining Parm */
char sdpid; /* 0x02 if Alternate Usable Area */
char res;
char auaid; /* 0x01 is Id for the A U A */
short wauai; /* Width of AUAi */
short hauai; /* Height of AUAi */
char auaunits; /* 0x00:in, 0x01:mm */
int auaxr;
int auayr;
char awauai;
char ahauai;
} __packed aua;
} __packed;
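/*
 * Orientation note (added, not part of the original source): the three
 * steps listed above map onto raw3270_writesf_readpart() (write the
 * 'read partition' structured field), raw3270_init_irq() plus
 * raw3270_read_modified() (attention interrupt followed by 'read
 * modified') and raw3270_size_device(), which parses the query reply
 * laid out by struct raw3270_ua.
 */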
static void raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
struct ccw_dev_id dev_id;
struct diag210 diag_data;
struct diag8c diag8c_data;
ccw_device_get_id(rp->cdev, &dev_id);
rc = diag8c(&diag8c_data, &dev_id);
if (!rc) {
rp->model = 2;
rp->rows = diag8c_data.height;
rp->cols = diag8c_data.width;
if (diag8c_data.flags & 1)
set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
return;
}
diag_data.vrdcdvno = dev_id.devno;
diag_data.vrdclen = sizeof(struct diag210);
rc = diag210(&diag_data);
model = diag_data.vrdccrmd;
/* Use default model 2 if the size could not be detected */
if (rc || model < 2 || model > 5)
model = 2;
switch (model) {
case 2:
rp->model = model;
rp->rows = 24;
rp->cols = 80;
break;
case 3:
rp->model = model;
rp->rows = 32;
rp->cols = 80;
break;
case 4:
rp->model = model;
rp->rows = 43;
rp->cols = 80;
break;
case 5:
rp->model = model;
rp->rows = 27;
rp->cols = 132;
break;
}
}
static void raw3270_size_device(struct raw3270 *rp, char *init_data)
{
struct raw3270_ua *uap;
/* Got a Query Reply */
uap = (struct raw3270_ua *)(init_data + 1);
/* Paranoia check. */
if (init_data[0] != 0x88 || uap->uab.qcode != 0x81) {
/* Couldn't detect size. Use default model 2. */
rp->model = 2;
rp->rows = 24;
rp->cols = 80;
return;
}
/* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h;
rp->cols = uap->uab.w;
/* Check for 14 bit addressing */
if ((uap->uab.flags0 & 0x0d) == 0x01)
set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
/* Check for Alternate Usable Area */
if (uap->uab.l == sizeof(struct raw3270_ua) &&
uap->aua.sdpid == 0x02) {
rp->rows = uap->aua.hauai;
rp->cols = uap->aua.wauai;
}
/* Try to find a model. */
rp->model = 0;
if (rp->rows == 24 && rp->cols == 80)
rp->model = 2;
if (rp->rows == 32 && rp->cols == 80)
rp->model = 3;
if (rp->rows == 43 && rp->cols == 80)
rp->model = 4;
if (rp->rows == 27 && rp->cols == 132)
rp->model = 5;
}
static void raw3270_resize_work(struct work_struct *work)
{
struct raw3270 *rp = container_of(work, struct raw3270, resize_work);
struct raw3270_view *view;
/* Notify views about new size */
list_for_each_entry(view, &rp->view_list, list) {
if (view->fn->resize)
view->fn->resize(view, rp->model, rp->rows, rp->cols,
rp->old_model, rp->old_rows, rp->old_cols);
}
rp->old_cols = rp->cols;
rp->old_rows = rp->rows;
rp->old_model = rp->model;
/* Setup processing done, now activate a view */
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
if (view->fn->activate(view) == 0)
break;
rp->view = NULL;
}
}
static void raw3270_size_device_done(struct raw3270 *rp)
{
rp->view = NULL;
rp->state = RAW3270_STATE_READY;
schedule_work(&rp->resize_work);
}
void raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
{
struct raw3270 *rp = rq->view->dev;
raw3270_size_device(rp, data);
raw3270_size_device_done(rp);
}
EXPORT_SYMBOL(raw3270_read_modified_cb);
static void raw3270_read_modified(struct raw3270 *rp)
{
if (rp->state != RAW3270_STATE_W4ATTN)
return;
/* Use 'read modified' to get the result of a read partition. */
memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
memset(&rp->init_data, 0, sizeof(rp->init_data));
rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp->init_readmod.ccw.count = sizeof(rp->init_data);
rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data);
rp->init_readmod.callback = raw3270_read_modified_cb;
rp->init_readmod.callback_data = rp->init_data;
rp->state = RAW3270_STATE_READMOD;
raw3270_start_irq(&rp->init_view, &rp->init_readmod);
}
static void raw3270_writesf_readpart(struct raw3270 *rp)
{
static const unsigned char wbuf[] = {
0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81
};
/* Store 'read partition' data stream to init_data */
memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
memset(&rp->init_data, 0, sizeof(rp->init_data));
memcpy(&rp->init_data, wbuf, sizeof(wbuf));
rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp->init_readpart.ccw.count = sizeof(wbuf);
rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data);
rp->state = RAW3270_STATE_W4ATTN;
raw3270_start_irq(&rp->init_view, &rp->init_readpart);
}
/*
* Device reset
*/
static void raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
{
struct raw3270 *rp = rq->view->dev;
if (rp->state != RAW3270_STATE_RESET)
return;
if (rq->rc) {
/* Reset command failed. */
rp->state = RAW3270_STATE_INIT;
} else if (MACHINE_IS_VM) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
} else {
raw3270_writesf_readpart(rp);
}
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
}
static int __raw3270_reset_device(struct raw3270 *rp)
{
int rc;
/* Check if reset is already pending */
if (rp->init_reset.view)
return -EBUSY;
/* Store reset data stream to init_data/init_reset */
rp->init_data[0] = TW_KR;
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp->init_reset.ccw.count = 1;
rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data);
rp->init_reset.callback = raw3270_reset_device_cb;
rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
if (rc == 0 && rp->state == RAW3270_STATE_INIT)
rp->state = RAW3270_STATE_RESET;
return rc;
}
static int raw3270_reset_device(struct raw3270 *rp)
{
unsigned long flags;
int rc;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rc = __raw3270_reset_device(rp);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
int raw3270_reset(struct raw3270_view *view)
{
struct raw3270 *rp;
int rc;
rp = view->dev;
if (!rp || rp->view != view)
rc = -EACCES;
else if (!raw3270_state_ready(rp))
rc = -EBUSY;
else
rc = raw3270_reset_device(view->dev);
return rc;
}
EXPORT_SYMBOL(raw3270_reset);
static void __raw3270_disconnect(struct raw3270 *rp)
{
struct raw3270_request *rq;
struct raw3270_view *view;
rp->state = RAW3270_STATE_INIT;
rp->view = &rp->init_view;
/* Cancel all queued requests */
while (!list_empty(&rp->req_queue)) {
rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
view = rq->view;
rq->rc = -EACCES;
list_del_init(&rq->list);
if (rq->callback)
rq->callback(rq, rq->callback_data);
raw3270_put_view(view);
}
/* Start from scratch */
__raw3270_reset_device(rp);
}
static void raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
struct irb *irb)
{
struct raw3270 *rp;
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
rq->rc = -EOPNOTSUPP;
else
rq->rc = -EIO;
}
}
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
/* Queue read modified after attention interrupt */
rp = view->dev;
raw3270_read_modified(rp);
}
}
static struct raw3270_fn raw3270_init_fn = {
.intv = raw3270_init_irq
};
/*
* Setup new 3270 device.
*/
static int raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp,
char *ascebc)
{
struct list_head *l;
struct raw3270 *tmp;
int minor;
memset(rp, 0, sizeof(struct raw3270));
/* Copy ebcdic -> ascii translation table. */
memcpy(ascebc, _ascebc, 256);
if (tubxcorrect) {
/* correct brackets and circumflex */
ascebc['['] = 0xad;
ascebc[']'] = 0xbd;
ascebc['^'] = 0xb0;
}
rp->ascebc = ascebc;
/* Set defaults. */
rp->rows = 24;
rp->cols = 80;
rp->old_rows = rp->rows;
rp->old_cols = rp->cols;
INIT_LIST_HEAD(&rp->req_queue);
INIT_LIST_HEAD(&rp->view_list);
rp->init_view.dev = rp;
rp->init_view.fn = &raw3270_init_fn;
rp->view = &rp->init_view;
INIT_WORK(&rp->resize_work, raw3270_resize_work);
/*
* Add device to list and find the smallest unused minor
* number for it. Note: there is no device with minor 0,
* see special case for fs3270.c:fs3270_open().
*/
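/*
 * Worked example (added): if existing devices hold the minors
 * RAW3270_FIRSTMINOR, RAW3270_FIRSTMINOR + 1 and RAW3270_FIRSTMINOR + 3,
 * the loop below stops at the first gap and assigns
 * RAW3270_FIRSTMINOR + 2 to the new device, keeping the list sorted.
 */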
mutex_lock(&raw3270_mutex);
/* Keep the list sorted. */
minor = RAW3270_FIRSTMINOR;
rp->minor = -1;
list_for_each(l, &raw3270_devices) {
tmp = list_entry(l, struct raw3270, list);
if (tmp->minor > minor) {
rp->minor = minor;
__list_add(&rp->list, l->prev, l);
break;
}
minor++;
}
if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
rp->minor = minor;
list_add_tail(&rp->list, &raw3270_devices);
}
mutex_unlock(&raw3270_mutex);
/* No free minor number? Then give up. */
if (rp->minor == -1)
return -EUSERS;
rp->cdev = cdev;
dev_set_drvdata(&cdev->dev, rp);
cdev->handler = raw3270_irq;
return 0;
}
#ifdef CONFIG_TN3270_CONSOLE
/* Tentative definition - see below for actual definition. */
static struct ccw_driver raw3270_ccw_driver;
static inline int raw3270_state_final(struct raw3270 *rp)
{
return rp->state == RAW3270_STATE_INIT ||
rp->state == RAW3270_STATE_READY;
}
/*
* Setup 3270 device configured as console.
*/
struct raw3270 __init *raw3270_setup_console(void)
{
struct ccw_device *cdev;
unsigned long flags;
struct raw3270 *rp;
char *ascebc;
int rc;
cdev = ccw_device_create_console(&raw3270_ccw_driver);
if (IS_ERR(cdev))
return ERR_CAST(cdev);
rp = kzalloc(sizeof(*rp), GFP_KERNEL | GFP_DMA);
ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
rc = ccw_device_enable_console(cdev);
if (rc) {
ccw_device_destroy_console(cdev);
return ERR_PTR(rc);
}
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
do {
__raw3270_reset_device(rp);
while (!raw3270_state_final(rp)) {
ccw_device_wait_idle(rp->cdev);
barrier();
}
} while (rp->state != RAW3270_STATE_READY);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rp;
}
void raw3270_wait_cons_dev(struct raw3270 *rp)
{
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
ccw_device_wait_idle(rp->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
#endif
/*
* Create a 3270 device structure.
*/
static struct raw3270 *raw3270_create_device(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
int rc;
rp = kzalloc(sizeof(*rp), GFP_KERNEL | GFP_DMA);
if (!rp)
return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL);
if (!ascebc) {
kfree(rp);
return ERR_PTR(-ENOMEM);
}
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc) {
kfree(rp->ascebc);
kfree(rp);
rp = ERR_PTR(rc);
}
/* Get reference to ccw_device structure. */
get_device(&cdev->dev);
return rp;
}
/*
* This helper just validates that it is safe to activate a
* view in the panic() context, due to locking restrictions.
*/
int raw3270_view_lock_unavailable(struct raw3270_view *view)
{
struct raw3270 *rp = view->dev;
if (!rp)
return -ENODEV;
if (spin_is_locked(get_ccwdev_lock(rp->cdev)))
return -EBUSY;
return 0;
}
static int raw3270_assign_activate_view(struct raw3270 *rp, struct raw3270_view *view)
{
rp->view = view;
return view->fn->activate(view);
}
static int __raw3270_activate_view(struct raw3270 *rp, struct raw3270_view *view)
{
struct raw3270_view *oldview = NULL, *nv;
int rc;
if (rp->view == view)
return 0;
if (!raw3270_state_ready(rp))
return -EBUSY;
if (rp->view && rp->view->fn->deactivate) {
oldview = rp->view;
oldview->fn->deactivate(oldview);
}
rc = raw3270_assign_activate_view(rp, view);
if (!rc)
return 0;
/* Didn't work. Try to reactivate the old view. */
if (oldview) {
rc = raw3270_assign_activate_view(rp, oldview);
if (!rc)
return 0;
}
/* Didn't work as well. Try any other view. */
list_for_each_entry(nv, &rp->view_list, list) {
if (nv == view || nv == oldview)
continue;
rc = raw3270_assign_activate_view(rp, nv);
if (!rc)
break;
rp->view = NULL;
}
return rc;
}
/*
* Activate a view.
*/
int raw3270_activate_view(struct raw3270_view *view)
{
struct raw3270 *rp;
unsigned long flags;
int rc;
rp = view->dev;
if (!rp)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
rc = __raw3270_activate_view(rp, view);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
EXPORT_SYMBOL(raw3270_activate_view);
/*
* Deactivate current view.
*/
void raw3270_deactivate_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
rp = view->dev;
if (!rp)
return;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view) {
view->fn->deactivate(view);
rp->view = NULL;
/* Move deactivated view to end of list. */
list_del_init(&view->list);
list_add_tail(&view->list, &rp->view_list);
/* Try to activate another view. */
if (raw3270_state_ready(rp)) {
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
if (view->fn->activate(view) == 0)
break;
rp->view = NULL;
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
EXPORT_SYMBOL(raw3270_deactivate_view);
/*
* Add view to device with minor "minor".
*/
int raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn,
int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
int rc;
if (minor <= 0)
return -ENODEV;
mutex_lock(&raw3270_mutex);
rc = -ENODEV;
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
atomic_set(&view->ref_count, 2);
view->dev = rp;
view->fn = fn;
view->model = rp->model;
view->rows = rp->rows;
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
lockdep_set_subclass(&view->lock, subclass);
list_add(&view->list, &rp->view_list);
rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
mutex_unlock(&raw3270_mutex);
return rc;
}
EXPORT_SYMBOL(raw3270_add_view);
/*
* Find specific view of device with minor "minor".
*/
struct raw3270_view *raw3270_find_view(struct raw3270_fn *fn, int minor)
{
struct raw3270 *rp;
struct raw3270_view *view, *tmp;
unsigned long flags;
mutex_lock(&raw3270_mutex);
view = ERR_PTR(-ENODEV);
list_for_each_entry(rp, &raw3270_devices, list) {
if (rp->minor != minor)
continue;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
list_for_each_entry(tmp, &rp->view_list, list) {
if (tmp->fn == fn) {
raw3270_get_view(tmp);
view = tmp;
break;
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
break;
}
mutex_unlock(&raw3270_mutex);
return view;
}
EXPORT_SYMBOL(raw3270_find_view);
/*
* Remove view from device and free view structure via call to view->fn->free.
*/
void raw3270_del_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
struct raw3270_view *nv;
rp = view->dev;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
if (rp->view == view) {
view->fn->deactivate(view);
rp->view = NULL;
}
list_del_init(&view->list);
if (!rp->view && raw3270_state_ready(rp)) {
/* Try to activate another view. */
list_for_each_entry(nv, &rp->view_list, list) {
if (nv->fn->activate(nv) == 0) {
rp->view = nv;
break;
}
}
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
/* Wait for reference counter to drop to zero. */
atomic_dec(&view->ref_count);
wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
if (view->fn->free)
view->fn->free(view);
}
EXPORT_SYMBOL(raw3270_del_view);
/*
* Remove a 3270 device structure.
*/
static void raw3270_delete_device(struct raw3270 *rp)
{
struct ccw_device *cdev;
/* Remove from device chain. */
mutex_lock(&raw3270_mutex);
list_del_init(&rp->list);
mutex_unlock(&raw3270_mutex);
/* Disconnect from ccw_device. */
cdev = rp->cdev;
rp->cdev = NULL;
dev_set_drvdata(&cdev->dev, NULL);
cdev->handler = NULL;
/* Put ccw_device structure. */
put_device(&cdev->dev);
/* Now free raw3270 structure. */
kfree(rp->ascebc);
kfree(rp);
}
static int raw3270_probe(struct ccw_device *cdev)
{
return 0;
}
/*
* Additional attributes for a 3270 device
*/
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->model);
}
static DEVICE_ATTR_RO(model);
static ssize_t rows_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->rows);
}
static DEVICE_ATTR_RO(rows);
static ssize_t
columns_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->cols);
}
static DEVICE_ATTR_RO(columns);
static struct attribute *raw3270_attrs[] = {
&dev_attr_model.attr,
&dev_attr_rows.attr,
&dev_attr_columns.attr,
NULL,
};
static const struct attribute_group raw3270_attr_group = {
.attrs = raw3270_attrs,
};
static int raw3270_create_attributes(struct raw3270 *rp)
{
return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
}
/*
* Notifier for device addition/removal
*/
static LIST_HEAD(raw3270_notifier);
int raw3270_register_notifier(struct raw3270_notifier *notifier)
{
struct raw3270 *rp;
mutex_lock(&raw3270_mutex);
list_add_tail(¬ifier->list, &raw3270_notifier);
list_for_each_entry(rp, &raw3270_devices, list)
notifier->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
}
EXPORT_SYMBOL(raw3270_register_notifier);
void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
{
struct raw3270 *rp;
mutex_lock(&raw3270_mutex);
list_for_each_entry(rp, &raw3270_devices, list)
notifier->destroy(rp->minor);
list_del(¬ifier->list);
mutex_unlock(&raw3270_mutex);
}
EXPORT_SYMBOL(raw3270_unregister_notifier);
/*
* Set 3270 device online.
*/
static int raw3270_set_online(struct ccw_device *cdev)
{
struct raw3270_notifier *np;
struct raw3270 *rp;
int rc;
rp = raw3270_create_device(cdev);
if (IS_ERR(rp))
return PTR_ERR(rp);
rc = raw3270_create_attributes(rp);
if (rc)
goto failure;
raw3270_reset_device(rp);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->create(rp->minor);
mutex_unlock(&raw3270_mutex);
return 0;
failure:
raw3270_delete_device(rp);
return rc;
}
/*
* Remove 3270 device structure.
*/
static void raw3270_remove(struct ccw_device *cdev)
{
unsigned long flags;
struct raw3270 *rp;
struct raw3270_view *v;
struct raw3270_notifier *np;
rp = dev_get_drvdata(&cdev->dev);
/*
* _remove is the opposite of _probe; it's probe that
* should set up rp. raw3270_remove gets entered for
* devices even if they haven't been varied online.
* Thus, rp may validly be NULL here.
*/
if (!rp)
return;
sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
/* Deactivate current view and remove all views. */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (rp->view) {
if (rp->view->fn->deactivate)
rp->view->fn->deactivate(rp->view);
rp->view = NULL;
}
while (!list_empty(&rp->view_list)) {
v = list_entry(rp->view_list.next, struct raw3270_view, list);
if (v->fn->release)
v->fn->release(v);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
raw3270_del_view(v);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_lock(&raw3270_mutex);
list_for_each_entry(np, &raw3270_notifier, list)
np->destroy(rp->minor);
mutex_unlock(&raw3270_mutex);
/* Reset 3270 device. */
raw3270_reset_device(rp);
/* And finally remove it. */
raw3270_delete_device(rp);
}
/*
* Set 3270 device offline.
*/
static int raw3270_set_offline(struct ccw_device *cdev)
{
struct raw3270 *rp;
rp = dev_get_drvdata(&cdev->dev);
if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
return -EBUSY;
raw3270_remove(cdev);
return 0;
}
static struct ccw_device_id raw3270_id[] = {
{ CCW_DEVICE(0x3270, 0) },
{ CCW_DEVICE(0x3271, 0) },
{ CCW_DEVICE(0x3272, 0) },
{ CCW_DEVICE(0x3273, 0) },
{ CCW_DEVICE(0x3274, 0) },
{ CCW_DEVICE(0x3275, 0) },
{ CCW_DEVICE(0x3276, 0) },
{ CCW_DEVICE(0x3277, 0) },
{ CCW_DEVICE(0x3278, 0) },
{ CCW_DEVICE(0x3279, 0) },
{ CCW_DEVICE(0x3174, 0) },
{ /* end of list */ },
};
static struct ccw_driver raw3270_ccw_driver = {
.driver = {
.name = "3270",
.owner = THIS_MODULE,
},
.ids = raw3270_id,
.probe = &raw3270_probe,
.remove = &raw3270_remove,
.set_online = &raw3270_set_online,
.set_offline = &raw3270_set_offline,
.int_class = IRQIO_C70,
};
static int raw3270_init(void)
{
struct raw3270 *rp;
int rc;
if (raw3270_registered)
return 0;
raw3270_registered = 1;
rc = ccw_driver_register(&raw3270_ccw_driver);
if (rc == 0) {
/* Create attributes for early (= console) device. */
mutex_lock(&raw3270_mutex);
class3270 = class_create("3270");
list_for_each_entry(rp, &raw3270_devices, list) {
get_device(&rp->cdev->dev);
raw3270_create_attributes(rp);
}
mutex_unlock(&raw3270_mutex);
}
return rc;
}
static void raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
class_destroy(class3270);
}
MODULE_LICENSE("GPL");
module_init(raw3270_init);
module_exit(raw3270_exit);
| linux-master | drivers/s390/char/raw3270.c |
// SPDX-License-Identifier: GPL-2.0
/*
* driver: reading from and writing to system console on S/390 via SCLP
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
/*
* The room for the SCCB (only for writing) is not equal to a page's size
* (as it is specified as the maximum size in the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
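/*
 * Illustrative note (added): assuming a 4 KiB PAGE_SIZE this leaves
 * 4096 - sizeof(struct sclp_buffer) bytes for the SCCB itself, because
 * sclp_make_buffer() below parks the struct sclp_buffer bookkeeping at
 * the very end of the same page.
 */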
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK,
};
/*
* Setup a sclp write buffer. Gets a page as input (4K) and returns
* a pointer to a struct sclp_buffer structure that is located at the
* end of the input page. This reduces the buffer space by a few
* bytes but simplifies things.
*/
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
struct sclp_buffer *buffer;
struct sccb_header *sccb;
sccb = (struct sccb_header *) page;
/*
* We keep the struct sclp_buffer structure at the end
* of the sccb page.
*/
buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
buffer->sccb = sccb;
buffer->retry_count = 0;
buffer->messages = 0;
buffer->char_sum = 0;
buffer->current_line = NULL;
buffer->current_length = 0;
buffer->columns = columns;
buffer->htab = htab;
/* initialize sccb */
memset(sccb, 0, sizeof(struct sccb_header));
sccb->length = sizeof(struct sccb_header);
return buffer;
}
/*
* Return a pointer to the original page that has been used to create
* the buffer.
*/
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
return buffer->sccb;
}
/*
* Initialize a new message at the end of the provided buffer with
* enough room for max_len characters. Return 0 on success.
*/
static int
sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
{
struct sccb_header *sccb;
struct msg_buf *msg;
struct mdb *mdb;
struct go *go;
struct mto *mto;
int msg_size;
/* max size of new message including message text */
msg_size = sizeof(struct msg_buf) + max_len;
/* check if current buffer sccb can contain the mto */
sccb = buffer->sccb;
if ((MAX_SCCB_ROOM - sccb->length) < msg_size)
return -ENOMEM;
msg = (struct msg_buf *)((addr_t) sccb + sccb->length);
memset(msg, 0, sizeof(struct msg_buf));
msg->header.length = sizeof(struct msg_buf);
msg->header.type = EVTYP_MSG;
mdb = &msg->mdb;
mdb->header.length = sizeof(struct mdb);
mdb->header.type = 1;
mdb->header.tag = 0xD4C4C240; /* ebcdic "MDB " */
mdb->header.revision_code = 1;
go = &mdb->go;
go->length = sizeof(struct go);
go->type = 1;
mto = &mdb->mto;
mto->length = sizeof(struct mto);
mto->type = 4; /* message text object */
mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
/* set pointer to first byte after struct mto. */
buffer->current_msg = msg;
buffer->current_line = (char *) (mto + 1);
buffer->current_length = 0;
return 0;
}
/*
* Finalize message initialized by sclp_initialize_mto(),
* updating the sizes of MTO, enclosing MDB, event buffer and SCCB.
*/
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
struct sccb_header *sccb;
struct msg_buf *msg;
/*
* update values of sizes
* (SCCB, Event(Message) Buffer, Message Data Block)
*/
sccb = buffer->sccb;
msg = buffer->current_msg;
msg->header.length += buffer->current_length;
msg->mdb.header.length += buffer->current_length;
msg->mdb.mto.length += buffer->current_length;
sccb->length += msg->header.length;
/*
* count number of buffered messages (= number of Message Text
* Objects) and number of buffered characters
* for the SCCB currently used for buffering, and in total
*/
buffer->messages++;
buffer->char_sum += buffer->current_length;
buffer->current_line = NULL;
buffer->current_length = 0;
buffer->current_msg = NULL;
}
/*
* processing of a message including escape characters,
* returns number of characters written to the output sccb
* ("processed" means that it is not guaranteed that the characters have already
* been sent to the SCLP but that it will be done at least next time the SCLP
* is not busy)
*/
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
int spaces, i_msg;
int rc;
/*
* parse msg for escape sequences (\t,\v ...) and put the formatted
* msg into an mto (created by sclp_initialize_mto).
*
* We have to do this work ourselves because there is no support for
* these characters on the native machine and only partial support
* under VM (Why does VM interpret \n but the native machine doesn't ?)
*
* Depending on i/o-control setting the message is always written
* immediately or we wait for a final new line maybe coming with the
* next message. Besides we avoid a buffer overrun by writing its
* content.
*
* RESTRICTIONS:
*
* \r and \b work within one line because we are not able to modify
* previous output that has already been accepted by the SCLP.
*
* \t combined with following \r is not correctly represented because
* \t is expanded to some spaces but \r does not know about a
* previous \t and decreases the current position by one column.
* This keeps the implementation slim and quick.
*/
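/*
 * Worked example (added for illustration): writing "ab\tc\n" with
 * columns = 80 and htab = 8 produces a single MTO. 'a' and 'b' are
 * converted via sclp_ascebc(), the '\t' pads the line with EBCDIC blanks
 * (0x40) up to the next htab boundary, 'c' follows, and the '\n'
 * finalizes the MTO through sclp_finalize_mto().
 */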
for (i_msg = 0; i_msg < count; i_msg++) {
switch (msg[i_msg]) {
case '\n': /* new line, line feed (ASCII) */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer, 0);
if (rc)
return i_msg;
}
sclp_finalize_mto(buffer);
break;
case '\a': /* bell, one for several times */
/* set SCLP sound alarm bit in General Object */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
buffer->current_msg->mdb.go.general_msg_flags |=
GNRLMSGFLGS_SNDALRM;
break;
case '\t': /* horizontal tabulator */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
/* "go to (next htab-boundary + 1, same line)" */
do {
if (buffer->current_length >= buffer->columns)
break;
/* ok, add a blank */
*buffer->current_line++ = 0x40;
buffer->current_length++;
} while (buffer->current_length % buffer->htab);
break;
case '\f': /* form feed */
case '\v': /* vertical tabulator */
/* "go to (actual column, actual line + 1)" */
/* = new line, leading spaces */
if (buffer->current_line != NULL) {
spaces = buffer->current_length;
sclp_finalize_mto(buffer);
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
memset(buffer->current_line, 0x40, spaces);
buffer->current_line += spaces;
buffer->current_length = spaces;
} else {
/* on an empty line this is the same as \n */
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
sclp_finalize_mto(buffer);
}
break;
case '\b': /* backspace */
/* "go to (actual column - 1, actual line)" */
/* decrement counter indicating position, */
/* do not remove last character */
if (buffer->current_line != NULL &&
buffer->current_length > 0) {
buffer->current_length--;
buffer->current_line--;
}
break;
case 0x00: /* end of string */
/* transfer current line to SCCB */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* skip the rest of the message including the 0 byte */
i_msg = count - 1;
break;
default: /* no escape character */
/* do not output unprintable characters */
if (!isprint(msg[i_msg]))
break;
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
buffer->current_length++;
break;
}
/* check if current mto is full */
if (buffer->current_line != NULL &&
buffer->current_length >= buffer->columns)
sclp_finalize_mto(buffer);
}
/* return number of processed characters */
return i_msg;
}
/*
* Return the number of free bytes in the sccb
*/
int
sclp_buffer_space(struct sclp_buffer *buffer)
{
struct sccb_header *sccb;
int count;
sccb = buffer->sccb;
count = MAX_SCCB_ROOM - sccb->length;
if (buffer->current_line != NULL)
count -= sizeof(struct msg_buf) + buffer->current_length;
return count;
}
/*
* Return number of characters in buffer
*/
unsigned int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
unsigned int count;
count = buffer->char_sum;
if (buffer->current_line != NULL)
count += buffer->current_length;
return count;
}
/*
* called by sclp_console_init and/or sclp_tty_init
*/
int
sclp_rw_init(void)
{
static int init_done = 0;
int rc;
if (init_done)
return 0;
rc = sclp_register(&sclp_rw_event);
if (rc == 0)
init_done = 1;
return rc;
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* second half of Write Event Data-function that has to be done after
* interruption indicating completion of Service Call.
*/
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
int rc;
struct sclp_buffer *buffer;
struct sccb_header *sccb;
buffer = (struct sclp_buffer *) data;
sccb = buffer->sccb;
if (request->status == SCLP_REQ_FAILED) {
if (buffer->callback != NULL)
buffer->callback(buffer, -EIO);
return;
}
/* check SCLP response code and choose suitable action */
switch (sccb->response_code) {
case 0x0020 :
/* Normal completion, buffer processed, message(s) sent */
rc = 0;
break;
case 0x0340: /* Contained SCLP equipment check */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* not all buffers were processed */
sccb->response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
} else
rc = 0;
break;
case 0x0040: /* SCLP equipment check */
case 0x05f0: /* Target resource in improper state */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* retry request */
sccb->response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
break;
default:
if (sccb->response_code == 0x71f0)
rc = -ENOMEM;
else
rc = -EINVAL;
break;
}
if (buffer->callback != NULL)
buffer->callback(buffer, rc);
}
/*
* Setup the request structure in the struct sclp_buffer to do SCLP Write
* Event Data and pass the request to the core SCLP loop. Return zero on
* success, non-zero otherwise.
*/
int
sclp_emit_buffer(struct sclp_buffer *buffer,
void (*callback)(struct sclp_buffer *, int))
{
/* add current line if there is one */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* Are there messages in the output buffer ? */
if (buffer->messages == 0)
return -EIO;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
buffer->request.callback_data = buffer;
buffer->request.sccb = buffer->sccb;
buffer->callback = callback;
return sclp_add_request(&buffer->request);
}
| linux-master | drivers/s390/char/sclp_rw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <[email protected]>
* Michael Holzheu <[email protected]>
* Tuan Ngo-Anh <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
#include "tape_class.h"
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
* file operation structure for tape character frontend
*/
static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif
static const struct file_operations tape_fops =
{
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
.unlocked_ioctl = tapechar_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tapechar_compat_ioctl,
#endif
.open = tapechar_open,
.release = tapechar_release,
.llseek = no_llseek,
};
static int tapechar_major = TAPECHAR_MAJOR;
/*
* This function is called for every new tapedevice
*/
int
tapechar_setup_device(struct tape_device * device)
{
char device_name[20];
sprintf(device_name, "ntibm%i", device->first_minor / 2);
device->nt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor),
&tape_fops,
device_name,
"non-rewinding"
);
device_name[0] = 'r';
device->rt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor + 1),
&tape_fops,
device_name,
"rewinding"
);
return 0;
}
void
tapechar_cleanup_device(struct tape_device *device)
{
unregister_tape_dev(&device->cdev->dev, device->rt);
device->rt = NULL;
unregister_tape_dev(&device->cdev->dev, device->nt);
device->nt = NULL;
}
static int
tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
{
struct idal_buffer *new;
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == block_size)
return 0;
if (block_size > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
block_size, MAX_BLOCKSIZE);
return -EINVAL;
}
/* The current idal buffer is not correct. Allocate a new one. */
new = idal_buffer_alloc(block_size, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
return 0;
}
/*
* Tape device read function
*/
static ssize_t
tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
int rc;
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
/*
* If the tape isn't terminated yet, do it now. And since we then
* are at the end of the tape there wouldn't be anything to read
* anyway, so we return immediately.
*/
if (device->required_tapemarks) {
return tape_std_terminate_write(device);
}
/* Find out block size to use */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:read smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
} else {
block_size = count;
}
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
/* Let the discipline build the ccw chain. */
request = device->discipline->read_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
/* Execute it. */
rc = tape_do_io(device, request);
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
rc = -EFAULT;
}
tape_free_request(request);
return rc;
}
/*
* Tape device write function
*/
static ssize_t
tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
size_t written;
int nblocks;
int i, rc;
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:write smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
nblocks = count / block_size;
} else {
block_size = count;
nblocks = 1;
}
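/*
 * Example (added): with a fixed block_size of 4096 and count = 12288 the
 * loop below performs three 4 KiB writes; with block_size 0 the whole
 * request goes out as a single block of count bytes.
 */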
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
/* Let the discipline build the ccw chain. */
request = device->discipline->write_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
rc = 0;
written = 0;
for (i = 0; i < nblocks; i++) {
/* Copy data from user space to idal buffer. */
if (idal_buffer_from_user(device->char_data.idal_buf,
data, block_size)) {
rc = -EFAULT;
break;
}
rc = tape_do_io(device, request);
if (rc)
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
data += block_size;
}
tape_free_request(request);
if (rc == -ENOSPC) {
/*
* Ok, the device has no more space. It has NOT written
* the block.
*/
if (device->discipline->process_eov)
device->discipline->process_eov(device);
if (written > 0)
rc = 0;
}
/*
* After doing a write we always need two tapemarks to correctly
* terminate the tape (one to terminate the file, the second to
* flag the end of recorded data).
* Since process_eov positions the tape in front of the written
* tapemark it doesn't hurt to write two marks again.
*/
if (!rc)
device->required_tapemarks = 2;
return rc ? rc : written;
}
/*
* Character frontend tape device open function.
*/
static int
tapechar_open (struct inode *inode, struct file *filp)
{
struct tape_device *device;
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
imajor(file_inode(filp)),
iminor(file_inode(filp)));
if (imajor(file_inode(filp)) != tapechar_major)
return -ENODEV;
minor = iminor(file_inode(filp));
device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
return PTR_ERR(device);
}
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
stream_open(inode, filp);
} else
tape_put_device(device);
return rc;
}
/*
* Character frontend tape device release function.
*/
static int
tapechar_release(struct inode *inode, struct file *filp)
{
struct tape_device *device;
DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
device = (struct tape_device *) filp->private_data;
/*
* If this is the rewinding tape minor then rewind. In that case we
* write all required tapemarks. Otherwise only one to terminate the
* file.
*/
if ((iminor(inode) & 1) != 0) {
if (device->required_tapemarks)
tape_std_terminate_write(device);
tape_mtop(device, MTREW, 1);
} else {
if (device->required_tapemarks > 1) {
if (tape_mtop(device, MTWEOF, 1) == 0)
device->required_tapemarks--;
}
}
if (device->char_data.idal_buf != NULL) {
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = NULL;
}
tape_release(device);
filp->private_data = NULL;
tape_put_device(device);
return 0;
}
/*
* Tape device io controls.
*/
static int
__tapechar_ioctl(struct tape_device *device,
unsigned int no, void __user *data)
{
int rc;
if (no == MTIOCTOP) {
struct mtop op;
if (copy_from_user(&op, data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
/*
* Operations that change tape position should write final
* tapemarks.
*/
switch (op.mt_op) {
case MTFSF:
case MTBSF:
case MTFSR:
case MTBSR:
case MTREW:
case MTOFFL:
case MTEOM:
case MTRETEN:
case MTBSFM:
case MTFSFM:
case MTSEEK:
if (device->required_tapemarks)
tape_std_terminate_write(device);
}
rc = tape_mtop(device, op.mt_op, op.mt_count);
if (op.mt_op == MTWEOF && rc == 0) {
if (op.mt_count > device->required_tapemarks)
device->required_tapemarks = 0;
else
device->required_tapemarks -= op.mt_count;
}
return rc;
}
if (no == MTIOCPOS) {
/* MTIOCPOS: query the tape position. */
struct mtpos pos;
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
pos.mt_blkno = rc;
return put_user_mtpos(data, &pos);
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
struct mtget get;
memset(&get, 0, sizeof(get));
get.mt_type = MT_ISUNKNOWN;
get.mt_resid = 0 /* device->devstat.rescnt */;
get.mt_dsreg =
((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
& MT_ST_BLKSIZE_MASK);
/* FIXME: mt_gstat, mt_erreg, mt_fileno */
get.mt_gstat = 0;
get.mt_erreg = 0;
get.mt_fileno = 0;
get.mt_gstat = device->tape_generic_status;
if (device->medium_state == MS_LOADED) {
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
if (rc == 0)
get.mt_gstat |= GMT_BOT(~0);
get.mt_blkno = rc;
}
return put_user_mtget(data, &get);
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
return device->discipline->ioctl_fn(device, no, (unsigned long)data);
}
static long
tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device;
long rc;
DBF_EVENT(6, "TCHAR:ioct\n");
device = (struct tape_device *) filp->private_data;
mutex_lock(&device->mutex);
rc = __tapechar_ioctl(device, no, (void __user *)data);
mutex_unlock(&device->mutex);
return rc;
}
#ifdef CONFIG_COMPAT
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
long rc;
if (no == MTIOCPOS32)
no = MTIOCPOS;
else if (no == MTIOCGET32)
no = MTIOCGET;
mutex_lock(&device->mutex);
rc = __tapechar_ioctl(device, no, compat_ptr(data));
mutex_unlock(&device->mutex);
return rc;
}
#endif /* CONFIG_COMPAT */
/*
* Initialize character device frontend.
*/
int
tapechar_init (void)
{
dev_t dev;
if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
return -1;
tapechar_major = MAJOR(dev);
return 0;
}
/*
* cleanup
*/
void
tapechar_exit(void)
{
unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
}
| linux-master | drivers/s390/char/tape_char.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for reading z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2004, 2009
*
* Author: Gerald Schaefer <[email protected]>
*/
#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT 0x40
#define MON_SERVICE "*MONITOR"
#define MON_IN_USE 0x01
#define MON_MSGLIM 255
static char mon_dcss_name[9] = "MONDCSS\0";
struct mon_msg {
u32 pos;
u32 mca_offset;
struct iucv_message msg;
char msglim_reached;
char replied_msglim;
};
struct mon_private {
struct iucv_path *path;
struct mon_msg *msg_array[MON_MSGLIM];
unsigned int write_index;
unsigned int read_index;
atomic_t msglim_count;
atomic_t read_ready;
atomic_t iucv_connected;
atomic_t iucv_severed;
};
static unsigned long mon_in_use = 0;
static unsigned long mon_dcss_start;
static unsigned long mon_dcss_end;
static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
static u8 user_data_connect[16] = {
/* Version code, must be 0x01 for shared mode */
0x01,
/* what to collect */
MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
/* DCSS name in EBCDIC, 8 bytes padded with blanks */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
/******************************************************************************
* helper functions *
*****************************************************************************/
/*
 * Create the 8-byte EBCDIC DCSS segment name from
 * an ASCII name, including blank padding
*/
static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
int i;
for (i = 0; i < 8; i++) {
if (ascii_name[i] == '\0')
break;
ebcdic_name[i] = toupper(ascii_name[i]);
}
for (; i < 8; i++)
ebcdic_name[i] = ' ';
ASCEBC(ebcdic_name, 8);
}
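/*
 * Example (illustrative): dcss_mkname("mondcss", name) first builds the
 * blank-padded, upper-cased ASCII string "MONDCSS ", which ASCEBC() then
 * converts in place to EBCDIC for the *MONITOR connect parameter below.
 */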
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg;
}
static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg[4];
}
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
return *((u8 *)__va(mon_mca_start(monmsg)) + monmsg->mca_offset + index);
}
static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 4));
}
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 8));
}
static int mon_check_mca(struct mon_msg *monmsg)
{
if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
(mon_rec_start(monmsg) < mon_dcss_start) ||
(mon_rec_end(monmsg) > mon_dcss_end) ||
(mon_mca_type(monmsg, 0) == 0) ||
(mon_mca_size(monmsg) % 12 != 0) ||
(mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
(mon_mca_end(monmsg) > mon_dcss_end) ||
(mon_mca_start(monmsg) < mon_dcss_start) ||
((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
return -EINVAL;
return 0;
}
static int mon_send_reply(struct mon_msg *monmsg,
struct mon_private *monpriv)
{
int rc;
rc = iucv_message_reply(monpriv->path, &monmsg->msg,
IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count);
if (likely(!monmsg->msglim_reached)) {
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
} else
monmsg->replied_msglim = 1;
if (rc) {
pr_err("Reading monitor data failed with rc=%i\n", rc);
return -EIO;
}
return 0;
}
static void mon_free_mem(struct mon_private *monpriv)
{
int i;
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
}
static struct mon_private *mon_alloc_mem(void)
{
int i;
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return NULL;
for (i = 0; i < MON_MSGLIM; i++) {
monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
GFP_KERNEL);
if (!monpriv->msg_array[i]) {
mon_free_mem(monpriv);
return NULL;
}
}
return monpriv;
}
static inline void mon_next_mca(struct mon_msg *monmsg)
{
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return;
monmsg->mca_offset += 12;
monmsg->pos = 0;
}
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
struct mon_msg *monmsg;
if (!atomic_read(&monpriv->read_ready))
return NULL;
monmsg = monpriv->msg_array[monpriv->read_index];
if (unlikely(monmsg->replied_msglim)) {
monmsg->replied_msglim = 0;
monmsg->msglim_reached = 0;
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
return ERR_PTR(-EOVERFLOW);
}
return monmsg;
}
/******************************************************************************
* IUCV handler *
*****************************************************************************/
static void mon_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
struct mon_private *monpriv = path->private;
atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue);
}
static void mon_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
struct mon_private *monpriv = path->private;
pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
ipuser[0]);
iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue);
wake_up_interruptible(&mon_read_wait_queue);
}
static void mon_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct mon_private *monpriv = path->private;
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
pr_warn("The read queue for monitor data is full\n");
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
}
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
atomic_inc(&monpriv->read_ready);
wake_up_interruptible(&mon_read_wait_queue);
}
static struct iucv_handler monreader_iucv_handler = {
.path_complete = mon_iucv_path_complete,
.path_severed = mon_iucv_path_severed,
.message_pending = mon_iucv_message_pending,
};
/******************************************************************************
* file operations *
*****************************************************************************/
static int mon_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
int rc;
/*
* only one user allowed
*/
rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
goto out;
rc = -ENOMEM;
monpriv = mon_alloc_mem();
if (!monpriv)
goto out_use;
/*
* Connect to *MONITOR service
*/
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out_priv;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
rc = -EIO;
goto out_path;
}
/*
* Wait for connection confirmation
*/
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed)) {
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
rc = -EIO;
goto out_path;
}
filp->private_data = monpriv;
return nonseekable_open(inode, filp);
out_path:
iucv_path_free(monpriv->path);
out_priv:
mon_free_mem(monpriv);
out_use:
clear_bit(MON_IN_USE, &mon_in_use);
out:
return rc;
}
static int mon_close(struct inode *inode, struct file *filp)
{
int rc, i;
struct mon_private *monpriv = filp->private_data;
/*
* Close IUCV connection and unregister
*/
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
clear_bit(MON_IN_USE, &mon_in_use);
return 0;
}
static ssize_t mon_read(struct file *filp, char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
struct mon_msg *monmsg;
int ret;
u32 mce_start;
monmsg = mon_next_message(monpriv);
if (IS_ERR(monmsg))
return PTR_ERR(monmsg);
if (!monmsg) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(mon_read_wait_queue,
atomic_read(&monpriv->read_ready) ||
atomic_read(&monpriv->iucv_severed));
if (ret)
return ret;
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return -EIO;
monmsg = monpriv->msg_array[monpriv->read_index];
}
if (!monmsg->pos)
monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
if (mon_check_mca(monmsg))
goto reply;
/* read monitor control element (12 bytes) first */
mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
count = min(count, (size_t) mce_start + 12 - monmsg->pos);
ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos == mce_start + 12)
monmsg->pos = mon_rec_start(monmsg);
goto out_copy;
}
/* read records */
if (monmsg->pos <= mon_rec_end(monmsg)) {
count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+ 1);
ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos > mon_rec_end(monmsg))
mon_next_mca(monmsg);
goto out_copy;
}
reply:
ret = mon_send_reply(monmsg, monpriv);
return ret;
out_copy:
*ppos += count;
return count;
}
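#if 0
/*
 * Hedged user-space sketch (illustrative only, not compiled into the
 * driver): draining *MONITOR data from /dev/monreader. Buffer size and
 * error handling are assumptions; a real reader would parse the 12-byte
 * monitor control elements and the records that follow them.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static void monreader_example(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/dev/monreader", O_RDONLY);
	if (fd < 0)
		return;
	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			continue;	/* process n bytes of monitor data */
		if (n == 0)
			continue;	/* message consumed, reply was sent */
		if (errno == EOVERFLOW)
			continue;	/* message(s) were lost, keep reading */
		break;			/* I/O error or signal */
	}
	close(fd);
}
#endif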
static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
poll_wait(filp, &mon_read_wait_queue, p);
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return EPOLLERR;
if (atomic_read(&monpriv->read_ready))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static const struct file_operations mon_fops = {
.owner = THIS_MODULE,
.open = &mon_open,
.release = &mon_close,
.read = &mon_read,
.poll = &mon_poll,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monreader",
.fops = &mon_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/******************************************************************************
* module init/exit *
*****************************************************************************/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
pr_err("The z/VM *MONITOR record device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
}
/*
* Register with IUCV and connect to *MONITOR service
*/
rc = iucv_register(&monreader_iucv_handler, 1);
if (rc) {
pr_err("The z/VM *MONITOR record device driver failed to "
"register with IUCV\n");
return rc;
}
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
goto out_iucv;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
goto out_iucv;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
rc = misc_register(&mon_dev);
if (rc < 0 )
goto out;
return 0;
out:
segment_unload(mon_dcss_name);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
}
static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
misc_deregister(&mon_dev);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}
module_init(mon_init);
module_exit(mon_exit);
module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
"service, max. 8 chars. Default is MONDCSS");
MODULE_AUTHOR("Gerald Schaefer <[email protected]>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
"monitor service records.");
MODULE_LICENSE("GPL");
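/*
 * Example (illustrative): "modprobe monreader mondcss=MYDCSS" makes the
 * driver use DCSS MYDCSS instead of the default MONDCSS; the segment name
 * is limited to 8 characters.
 */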
| linux-master | drivers/s390/char/monreader.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP "store data in absolute storage"
*
* Copyright IBM Corp. 2003, 2013
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "sclp_sdias"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"
#include "sclp_rw.h"
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
#define SDIAS_RETRIES 300
static struct debug_info *sdias_dbf;
static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
static struct sdias_sccb *sclp_sdias_sccb;
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
static DECLARE_COMPLETION(evbuf_done);
static DEFINE_MUTEX(sdias_mutex);
/*
* Called by SCLP base when read event data has been completed (async mode only)
*/
static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
{
memcpy(&sdias_evbuf, evbuf,
min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
complete(&evbuf_done);
TRACE("sclp_sdias_receiver_fn done\n");
}
/*
* Called by SCLP base when sdias event has been accepted
*/
static void sdias_callback(struct sclp_req *request, void *data)
{
complete(&evbuf_accepted);
TRACE("callback done\n");
}
static int sdias_sclp_send(struct sclp_req *req)
{
struct sdias_sccb *sccb = sclp_sdias_sccb;
int retries;
int rc;
for (retries = SDIAS_RETRIES; retries; retries--) {
TRACE("add request\n");
rc = sclp_add_request(req);
if (rc) {
/* not initiated, wait some time and retry */
set_current_state(TASK_INTERRUPTIBLE);
TRACE("add request failed: rc = %i\n",rc);
schedule_timeout(msecs_to_jiffies(500));
continue;
}
/* initiated, wait for completion of service call */
wait_for_completion(&evbuf_accepted);
if (req->status == SCLP_REQ_FAILED) {
TRACE("sclp request failed\n");
continue;
}
/* if not accepted, retry */
if (!(sccb->evbuf.hdr.flags & 0x80)) {
TRACE("sclp request failed: flags=%x\n",
sccb->evbuf.hdr.flags);
continue;
}
/*
* for the sync interface the response is in the initial sccb
*/
if (!sclp_sdias_register.receiver_fn) {
memcpy(&sdias_evbuf, &sccb->evbuf, sizeof(sdias_evbuf));
TRACE("sync request done\n");
return 0;
}
/* otherwise we wait for completion */
wait_for_completion(&evbuf_done);
TRACE("request done\n");
return 0;
}
return -EIO;
}
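/*
 * Note on the two completion paths above: in synchronous mode (no
 * receiver_fn registered) the SCLP response is read straight from the
 * request's SCCB, while in asynchronous mode it arrives later through
 * sclp_sdias_receiver_fn() and the evbuf_done completion.
 */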
/*
* Get number of blocks (4K) available in the HSA
*/
int sclp_sdias_blk_count(void)
{
struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
sccb->hdr.length = sizeof(*sccb);
sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb->evbuf.hdr.type = EVTYP_SDIAS;
sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb->evbuf.event_id = 4712;
sccb->evbuf.dbs = 1;
request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
if (sccb->hdr.response_code != 0x0020) {
TRACE("send failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
switch (sdias_evbuf.event_status) {
case 0:
rc = sdias_evbuf.blk_cnt;
break;
default:
pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
rc = -EIO;
goto out;
}
TRACE("%i blocks\n", rc);
out:
mutex_unlock(&sdias_mutex);
return rc;
}
/*
* Copy from HSA to absolute storage (not reentrant):
*
* @dest : Address of buffer where data should be copied
* @start_blk: Start Block (beginning with 1)
* @nr_blks : Number of 4K blocks to copy
*
* Return Value: 0 : Requested 'number' of blocks of data copied
* <0: ERROR - negative event status
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
sccb->hdr.length = sizeof(*sccb);
sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb->evbuf.hdr.type = EVTYP_SDIAS;
sccb->evbuf.hdr.flags = 0;
sccb->evbuf.event_qual = SDIAS_EQ_STORE_DATA;
sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb->evbuf.event_id = 4712;
sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64;
sccb->evbuf.event_status = 0;
sccb->evbuf.blk_cnt = nr_blks;
sccb->evbuf.asa = __pa(dest);
sccb->evbuf.fbn = start_blk;
sccb->evbuf.lbn = 0;
sccb->evbuf.dbs = 1;
request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
if (sccb->hdr.response_code != 0x0020) {
TRACE("copy failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
switch (sdias_evbuf.event_status) {
case SDIAS_EVSTATE_ALL_STORED:
TRACE("all stored\n");
break;
case SDIAS_EVSTATE_PART_STORED:
TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
break;
case SDIAS_EVSTATE_NO_DATA:
TRACE("no data\n");
fallthrough;
default:
pr_err("Error from SCLP while copying hsa. Event status = %x\n",
sdias_evbuf.event_status);
rc = -EIO;
}
out:
mutex_unlock(&sdias_mutex);
return rc;
}
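/*
 * Usage sketch (illustrative): callers such as zcore copy the HSA one 4K
 * block at a time, e.g. sclp_sdias_copy(page_buf, blk, 1), checking the
 * return value before consuming the page; block numbering starts at 1.
 */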
static int __init sclp_sdias_register_check(void)
{
int rc;
rc = sclp_register(&sclp_sdias_register);
if (rc)
return rc;
if (sclp_sdias_blk_count() == 0) {
sclp_unregister(&sclp_sdias_register);
return -ENODEV;
}
return 0;
}
static int __init sclp_sdias_init_sync(void)
{
TRACE("Try synchronous mode\n");
sclp_sdias_register.receive_mask = 0;
sclp_sdias_register.receiver_fn = NULL;
return sclp_sdias_register_check();
}
static int __init sclp_sdias_init_async(void)
{
TRACE("Try asynchronous mode\n");
sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
return sclp_sdias_register_check();
}
int __init sclp_sdias_init(void)
{
if (!is_ipl_type_dump())
return 0;
sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
BUG_ON(!sclp_sdias_sccb);
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
if (sclp_sdias_init_sync() == 0)
goto out;
if (sclp_sdias_init_async() == 0)
goto out;
TRACE("init failed\n");
free_page((unsigned long) sclp_sdias_sccb);
return -ENODEV;
out:
TRACE("init done\n");
return 0;
}
| linux-master | drivers/s390/char/sclp_sdias.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI/NVMe disks (zfcp/nvme dump).
*
* For more information please refer to Documentation/arch/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
#include <asm/maccess.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
enum arch_id {
ARCH_S390 = 0,
ARCH_S390X = 1,
};
struct ipib_info {
unsigned long ipib;
u32 checksum;
} __attribute__((packed));
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
static unsigned long os_info_flags;
static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
* Copy memory from HSA to iterator (not reentrant):
*
* @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
size_t bytes, copied, res = 0;
unsigned long offset;
if (!hsa_available)
return 0;
mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
break;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
copied = copy_to_iter(hsa_buf + offset, bytes, iter);
count -= copied;
src += copied;
res += copied;
if (copied < bytes)
break;
}
mutex_unlock(&hsa_buf_mutex);
return res;
}
/*
* Copy memory from HSA to kernel memory (not reentrant):
*
* @dest: Kernel or user buffer where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
struct iov_iter iter;
struct kvec kvec;
kvec.iov_base = dst;
kvec.iov_len = count;
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
if (memcpy_hsa_iter(&iter, src, count) < count)
return -EIO;
return 0;
}
static int __init init_cpu_info(void)
{
struct save_area *sa;
/* get info for boot cpu from lowcore, stored in the HSA */
sa = save_area_boot_cpu();
if (!sa)
return -ENOMEM;
if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
TRACE("could not copy from HSA\n");
return -EIO;
}
save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
return 0;
}
/*
* Release the HSA
*/
static void release_hsa(void)
{
diag308(DIAG308_REL_HSA, NULL);
hsa_available = 0;
}
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
if (zcore_ipl_block) {
diag308(DIAG308_SET, zcore_ipl_block);
if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
diag308(DIAG308_LOAD_CLEAR, NULL);
/* Use special diag308 subcode for CCW normal ipl */
if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
else
diag308(DIAG308_LOAD_NORMAL, NULL);
}
return count;
}
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
return stream_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
return 0;
}
static const struct file_operations zcore_reipl_fops = {
.owner = THIS_MODULE,
.write = zcore_reipl_write,
.open = zcore_reipl_open,
.release = zcore_reipl_release,
.llseek = no_llseek,
};
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
static char str[18];
if (hsa_available)
snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
else
snprintf(str, sizeof(str), "0\n");
return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
}
static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
char value;
if (*ppos != 0)
return -EPIPE;
if (copy_from_user(&value, buf, 1))
return -EFAULT;
if (value != '0')
return -EINVAL;
release_hsa();
return count;
}
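/*
 * Example (illustrative): with debugfs mounted at /sys/kernel/debug, a dump
 * tool can release the HSA by writing the single character '0' to
 * /sys/kernel/debug/zcore/hsa once the HSA content has been saved; any
 * other value is rejected with -EINVAL.
 */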
static const struct file_operations zcore_hsa_fops = {
.owner = THIS_MODULE,
.write = zcore_hsa_write,
.read = zcore_hsa_read,
.open = nonseekable_open,
.llseek = no_llseek,
};
static int __init check_sdias(void)
{
if (!sclp.hsa_size) {
TRACE("Could not determine HSA size\n");
return -ENODEV;
}
return 0;
}
/*
* Provide IPL parameter information block from either HSA or memory
* for future reipl
*/
static int __init zcore_reipl_init(void)
{
struct os_info_entry *entry;
struct ipib_info ipib_info;
unsigned long os_info_addr;
struct os_info *os_info;
int rc;
rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
if (rc)
return rc;
if (ipib_info.ipib == 0)
return 0;
zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
if (!zcore_ipl_block)
return -ENOMEM;
if (ipib_info.ipib < sclp.hsa_size)
rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
PAGE_SIZE);
else
rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE);
if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) zcore_ipl_block);
zcore_ipl_block = NULL;
}
/*
* Read the bit-flags field from os_info flags entry.
* Return zero even for os_info read or entry checksum errors in order
* to continue dump processing, considering that os_info could be
* corrupted on the panicked system.
*/
os_info = (void *)__get_free_page(GFP_KERNEL);
if (!os_info)
return -ENOMEM;
rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
if (rc)
goto out;
if (os_info_addr < sclp.hsa_size)
rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
else
rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
if (rc || os_info_csum(os_info) != os_info->csum)
goto out;
entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
if (entry->addr && entry->size) {
if (entry->addr < sclp.hsa_size)
rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
else
rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
os_info_flags = 0;
}
out:
free_page((unsigned long)os_info);
return 0;
}
static int zcore_reboot_and_on_panic_handler(struct notifier_block *self,
unsigned long event,
void *data)
{
if (hsa_available)
release_hsa();
return NOTIFY_OK;
}
static struct notifier_block zcore_reboot_notifier = {
.notifier_call = zcore_reboot_and_on_panic_handler,
/* we need to be notified before reipl and kdump */
.priority = INT_MAX,
};
static struct notifier_block zcore_on_panic_notifier = {
.notifier_call = zcore_reboot_and_on_panic_handler,
/* we need to be notified before reipl and kdump */
.priority = INT_MAX,
};
static int __init zcore_init(void)
{
unsigned char arch;
int rc;
if (!is_ipl_type_dump())
return -ENODATA;
if (oldmem_data.start)
return -ENODATA;
zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
debug_register_view(zcore_dbf, &debug_sprintf_view);
debug_set_level(zcore_dbf, 6);
if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
TRACE("type: fcp\n");
TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
} else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
TRACE("type: nvme\n");
TRACE("fid: %x\n", ipl_info.data.nvme.fid);
TRACE("nsid: %x\n", ipl_info.data.nvme.nsid);
} else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) {
TRACE("type: eckd\n");
TRACE("devno: %x\n", ipl_info.data.eckd.dev_id.devno);
TRACE("ssid: %x\n", ipl_info.data.eckd.dev_id.ssid);
}
rc = sclp_sdias_init();
if (rc)
goto fail;
rc = check_sdias();
if (rc)
goto fail;
hsa_available = 1;
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc)
goto fail;
if (arch == ARCH_S390) {
pr_alert("The 64-bit dump tool cannot be used for a "
"32-bit system\n");
rc = -EINVAL;
goto fail;
}
pr_alert("The dump process started for a 64-bit operating system\n");
rc = init_cpu_info();
if (rc)
goto fail;
rc = zcore_reipl_init();
if (rc)
goto fail;
zcore_dir = debugfs_create_dir("zcore" , NULL);
zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
NULL, &zcore_reipl_fops);
zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
NULL, &zcore_hsa_fops);
register_reboot_notifier(&zcore_reboot_notifier);
atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier);
return 0;
fail:
diag308(DIAG308_REL_HSA, NULL);
return rc;
}
subsys_initcall(zcore_init);
| linux-master | drivers/s390/char/zcore.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ebcdic keycode functions for s390 console drivers
*
* S390 version
* Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky ([email protected]),
*/
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/uaccess.h>
#include "keyboard.h"
/*
* Handler Tables.
*/
#define K_HANDLERS\
k_self, k_fn, k_spec, k_ignore,\
k_dead, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore
typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };
/* maximum values each key_handler can handle */
static const int kbd_max_vals[] = {
255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
static const unsigned char ret_diacr[NR_DEAD] = {
'`', /* dead_grave */
'\'', /* dead_acute */
'^', /* dead_circumflex */
	'~', /* dead_tilde */
'"', /* dead_diaeresis */
',', /* dead_cedilla */
'_', /* dead_macron */
'U', /* dead_breve */
'.', /* dead_abovedot */
'*', /* dead_abovering */
'=', /* dead_doubleacute */
'c', /* dead_caron */
'k', /* dead_ogonek */
'i', /* dead_iota */
'#', /* dead_voiced_sound */
'o', /* dead_semivoiced_sound */
'!', /* dead_belowdot */
'?', /* dead_hook */
'+', /* dead_horn */
'-', /* dead_stroke */
')', /* dead_abovecomma */
'(', /* dead_abovereversedcomma */
':', /* dead_doublegrave */
'n', /* dead_invertedbreve */
';', /* dead_belowcomma */
'$', /* dead_currency */
'@', /* dead_greek */
};
/*
* Alloc/free of kbd_data structures.
*/
struct kbd_data *
kbd_alloc(void) {
struct kbd_data *kbd;
int i;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
goto out;
kbd->key_maps = kzalloc(sizeof(ebc_key_maps), GFP_KERNEL);
if (!kbd->key_maps)
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
if (ebc_key_maps[i]) {
kbd->key_maps[i] = kmemdup(ebc_key_maps[i],
sizeof(u_short) * NR_KEYS,
GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
}
}
kbd->func_table = kzalloc(sizeof(ebc_func_table), GFP_KERNEL);
if (!kbd->func_table)
goto out_maps;
for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) {
if (ebc_func_table[i]) {
kbd->func_table[i] = kstrdup(ebc_func_table[i],
GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
}
}
kbd->fn_handler =
kcalloc(NR_FN_HANDLER, sizeof(fn_handler_fn *), GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
kbd->accent_table = kmemdup(ebc_accent_table,
sizeof(struct kbdiacruc) * MAX_DIACR,
GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
kbd->accent_table_size = ebc_accent_table_size;
return kbd;
out_fn_handler:
kfree(kbd->fn_handler);
out_func:
for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
out_maps:
for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
out_kbd:
kfree(kbd);
out:
return NULL;
}
void
kbd_free(struct kbd_data *kbd)
{
int i;
kfree(kbd->accent_table);
kfree(kbd->fn_handler);
for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
kfree(kbd);
}
/*
* Generate ascii -> ebcdic translation table from kbd_data.
*/
void
kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ascebc, 0x40, 256);
for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
k = ((i & 1) << 7) + j;
keysym = keymap[j];
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ascebc[KVAL(keysym)] = k;
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ascebc[ret_diacr[KVAL(keysym)]] = k;
}
}
}
#if 0
/*
* Generate ebcdic -> ascii translation table from kbd_data.
*/
void
kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ebcasc, ' ', 256);
for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
keysym = keymap[j];
k = ((i & 1) << 7) + j;
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ebcasc[k] = KVAL(keysym);
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ebcasc[k] = ret_diacr[KVAL(keysym)];
}
}
}
#endif
/*
* We have a combining character DIACR here, followed by the character CH.
* If the combination occurs in the table, return the corresponding value.
* Otherwise, if CH is a space or equals DIACR, return DIACR.
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
static unsigned int
handle_diacr(struct kbd_data *kbd, unsigned int ch)
{
int i, d;
d = kbd->diacr;
kbd->diacr = 0;
for (i = 0; i < kbd->accent_table_size; i++) {
if (kbd->accent_table[i].diacr == d &&
kbd->accent_table[i].base == ch)
return kbd->accent_table[i].result;
}
if (ch == ' ' || ch == d)
return d;
kbd_put_queue(kbd->port, d);
return ch;
}
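/*
 * Example (illustrative, assuming an accent table entry with
 * diacr = '\'', base = 'e', result = 0xe9): a dead acute followed by 'e'
 * yields 0xe9, a dead acute followed by a space yields the plain '\''
 * itself, and any other character queues the '\'' and is then delivered
 * unchanged.
 */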
/*
* Handle dead key.
*/
static void
k_dead(struct kbd_data *kbd, unsigned char value)
{
value = ret_diacr[value];
kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
}
/*
* Normal character handler.
*/
static void
k_self(struct kbd_data *kbd, unsigned char value)
{
if (kbd->diacr)
value = handle_diacr(kbd, value);
kbd_put_queue(kbd->port, value);
}
/*
* Special key handlers
*/
static void
k_ignore(struct kbd_data *kbd, unsigned char value)
{
}
/*
* Function key handler.
*/
static void
k_fn(struct kbd_data *kbd, unsigned char value)
{
if (kbd->func_table[value])
kbd_puts_queue(kbd->port, kbd->func_table[value]);
}
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
if (value >= NR_FN_HANDLER)
return;
if (kbd->fn_handler[value])
kbd->fn_handler[value](kbd);
}
/*
* Put utf8 character to tty flip buffer.
* UTF-8 is defined for words of up to 31 bits,
* but we need only 16 bits here
*/
static void
to_utf8(struct tty_port *port, ushort c)
{
if (c < 0x80)
/* 0******* */
kbd_put_queue(port, c);
else if (c < 0x800) {
/* 110***** 10****** */
kbd_put_queue(port, 0xc0 | (c >> 6));
kbd_put_queue(port, 0x80 | (c & 0x3f));
} else {
/* 1110**** 10****** 10****** */
kbd_put_queue(port, 0xe0 | (c >> 12));
kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f));
kbd_put_queue(port, 0x80 | (c & 0x3f));
}
}
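/*
 * Example (illustrative): c = 0x0041 ('A') is queued as the single byte
 * 0x41, c = 0x00e9 becomes the two bytes 0xc3 0xa9, and c = 0x20ac (the
 * euro sign) becomes the three bytes 0xe2 0x82 0xac.
 */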
/*
* Process keycode.
*/
void
kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
{
unsigned short keysym;
unsigned char type, value;
if (!kbd)
return;
if (keycode >= 384)
keysym = kbd->key_maps[5][keycode - 384];
else if (keycode >= 256)
keysym = kbd->key_maps[4][keycode - 256];
else if (keycode >= 128)
keysym = kbd->key_maps[1][keycode - 128];
else
keysym = kbd->key_maps[0][keycode];
type = KTYP(keysym);
if (type >= 0xf0) {
type -= 0xf0;
if (type == KT_LETTER)
type = KT_LATIN;
value = KVAL(keysym);
#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
handle_sysrq(value);
return;
}
if (value == '-') {
kbd->sysrq = K(KT_LATIN, '-');
return;
}
/* Incomplete sysrq sequence. */
(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
kbd->sysrq = 0;
} else if ((type == KT_LATIN && value == '^') ||
(type == KT_DEAD && ret_diacr[value] == '^')) {
kbd->sysrq = K(type, value);
return;
}
#endif
(*k_handler[type])(kbd, value);
} else
to_utf8(kbd->port, keysym);
}
/*
* Ioctl stuff.
*/
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
unsigned long kb_index, kb_table;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
kb_index = (unsigned long) tmp.kb_index;
#if NR_KEYS < 256
if (kb_index >= NR_KEYS)
return -EINVAL;
#endif
kb_table = (unsigned long) tmp.kb_table;
#if MAX_NR_KEYMAPS < 256
if (kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
kb_table = array_index_nospec(kb_table , MAX_NR_KEYMAPS);
#endif
switch (cmd) {
case KDGKBENT:
key_map = kbd->key_maps[kb_table];
if (key_map) {
val = U(key_map[kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
val = (kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
key_map = kbd->key_maps[kb_table];
if (key_map) {
kbd->key_maps[kb_table] = NULL;
kfree(key_map);
}
break;
}
if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
return -EINVAL;
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
if (!(key_map = kbd->key_maps[kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
kbd->key_maps[kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
ov = U(key_map[kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
* Attention Key.
*/
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
key_map[kb_index] = U(tmp.kb_value);
break;
}
return 0;
}
static int
do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
int cmd, int perm)
{
unsigned char kb_func;
char *p;
int len;
/* Get u_kbs->kb_func. */
if (get_user(kb_func, &u_kbs->kb_func))
return -EFAULT;
#if MAX_NR_FUNC < 256
if (kb_func >= MAX_NR_FUNC)
return -EINVAL;
#endif
switch (cmd) {
case KDGKBSENT:
p = kbd->func_table[kb_func];
if (p) {
len = strlen(p);
if (len >= sizeof(u_kbs->kb_string))
len = sizeof(u_kbs->kb_string) - 1;
if (copy_to_user(u_kbs->kb_string, p, len))
return -EFAULT;
} else
len = 0;
if (put_user('\0', u_kbs->kb_string + len))
return -EFAULT;
break;
case KDSKBSENT:
if (!perm)
return -EPERM;
p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
if (IS_ERR(p))
return PTR_ERR(p);
kfree(kbd->func_table[kb_func]);
kbd->func_table[kb_func] = p;
break;
}
return 0;
}
int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
{
struct tty_struct *tty;
void __user *argp;
unsigned int ct;
int perm;
argp = (void __user *)arg;
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
*/
tty = tty_port_tty_get(kbd->port);
/* FIXME this test is pretty racy */
perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
tty_kref_put(tty);
switch (cmd) {
case KDGKBTYPE:
return put_user(KB_101, (char __user *)argp);
case KDGKBENT:
case KDSKBENT:
return do_kdsk_ioctl(kbd, argp, cmd, perm);
case KDGKBSENT:
case KDSKBSENT:
return do_kdgkb_ioctl(kbd, argp, cmd, perm);
case KDGKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (put_user(kbd->accent_table_size, &a->kb_cnt))
return -EFAULT;
for (i = 0; i < kbd->accent_table_size; i++) {
diacr.diacr = kbd->accent_table[i].diacr;
diacr.base = kbd->accent_table[i].base;
diacr.result = kbd->accent_table[i].result;
if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
return -EFAULT;
}
return 0;
}
case KDGKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
ct = kbd->accent_table_size;
if (put_user(ct, &a->kb_cnt))
return -EFAULT;
if (copy_to_user(a->kbdiacruc, kbd->accent_table,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
case KDSKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
for (i = 0; i < ct; i++) {
if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
return -EFAULT;
kbd->accent_table[i].diacr = diacr.diacr;
kbd->accent_table[i].base = diacr.base;
kbd->accent_table[i].result = diacr.result;
}
return 0;
}
case KDSKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
if (copy_from_user(kbd->accent_table, a->kbdiacruc,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
default:
return -ENOIOCTLCMD;
}
}
EXPORT_SYMBOL(kbd_ioctl);
EXPORT_SYMBOL(kbd_ascebc);
EXPORT_SYMBOL(kbd_free);
EXPORT_SYMBOL(kbd_alloc);
EXPORT_SYMBOL(kbd_keycode);
| linux-master | drivers/s390/char/keyboard.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 crypto adapter related sclp functions.
*
* Copyright IBM Corp. 2020
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sclp.h>
#include "sclp.h"
#define SCLP_CMDW_CONFIGURE_AP 0x001f0001
#define SCLP_CMDW_DECONFIGURE_AP 0x001e0001
struct ap_cfg_sccb {
struct sccb_header header;
} __packed;
static int do_ap_configure(sclp_cmdw_t cmd, u32 apid)
{
struct ap_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_AP_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct ap_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
cmd |= (apid & 0xFF) << 8;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020: case 0x0120: case 0x0440: case 0x0450:
break;
default:
pr_warn("configure AP adapter %u failed: cmd=0x%08x response=0x%04x\n",
apid, cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_ap_configure(u32 apid)
{
return do_ap_configure(SCLP_CMDW_CONFIGURE_AP, apid);
}
EXPORT_SYMBOL(sclp_ap_configure);
int sclp_ap_deconfigure(u32 apid)
{
return do_ap_configure(SCLP_CMDW_DECONFIGURE_AP, apid);
}
EXPORT_SYMBOL(sclp_ap_deconfigure);
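/*
 * Example (illustrative): sclp_ap_configure(0x0a) merges the AP id into
 * the command word, 0x001f0001 | (0x0a << 8) = 0x001f0a01, and issues it
 * as a synchronous SCLP request; sclp_ap_deconfigure() works the same way
 * with the 0x001e0001 base command.
 */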
| linux-master | drivers/s390/char/sclp_ap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <[email protected]>
*
 * z/VM's CP offers the possibility to issue commands via the diagnose code 8.
 * This driver implements a character device that issues these commands and
 * returns CP's answer.
*
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/cma.h>
#include <linux/mm.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/vmcp.h>
struct vmcp_session {
char *response;
unsigned int bufsize;
unsigned int cma_alloc : 1;
int resp_size;
int resp_code;
struct mutex mutex;
};
static debug_info_t *vmcp_debug;
static unsigned long vmcp_cma_size __initdata = CONFIG_VMCP_CMA_SIZE * 1024 * 1024;
static struct cma *vmcp_cma;
static int __init early_parse_vmcp_cma(char *p)
{
if (!p)
return 1;
vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
return 0;
}
early_param("vmcp_cma", early_parse_vmcp_cma);
void __init vmcp_cma_reserve(void)
{
if (!MACHINE_IS_VM)
return;
cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma);
}
static void vmcp_response_alloc(struct vmcp_session *session)
{
struct page *page = NULL;
int nr_pages, order;
order = get_order(session->bufsize);
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
/*
* For anything below order 3 allocations rely on the buddy
* allocator. If such low-order allocations can't be handled
* anymore the system won't work anyway.
*/
if (order > 2)
page = cma_alloc(vmcp_cma, nr_pages, 0, false);
if (page) {
session->response = (char *)page_to_virt(page);
session->cma_alloc = 1;
return;
}
session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
}
static void vmcp_response_free(struct vmcp_session *session)
{
int nr_pages, order;
struct page *page;
if (!session->response)
return;
order = get_order(session->bufsize);
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
if (session->cma_alloc) {
page = virt_to_page(session->response);
cma_release(vmcp_cma, page, nr_pages);
session->cma_alloc = 0;
} else {
free_pages((unsigned long)session->response, order);
}
session->response = NULL;
}
static int vmcp_open(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
session = kmalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
session->bufsize = PAGE_SIZE;
session->response = NULL;
session->resp_size = 0;
mutex_init(&session->mutex);
file->private_data = session;
return nonseekable_open(inode, file);
}
static int vmcp_release(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
session = file->private_data;
file->private_data = NULL;
vmcp_response_free(session);
kfree(session);
return 0;
}
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
ssize_t ret;
size_t size;
struct vmcp_session *session;
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
if (!session->response) {
mutex_unlock(&session->mutex);
return 0;
}
size = min_t(size_t, session->resp_size, session->bufsize);
ret = simple_read_from_buffer(buff, count, ppos,
session->response, size);
mutex_unlock(&session->mutex);
return ret;
}
static ssize_t
vmcp_write(struct file *file, const char __user *buff, size_t count,
loff_t *ppos)
{
char *cmd;
struct vmcp_session *session;
if (count > 240)
return -EINVAL;
cmd = memdup_user_nul(buff, count);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex)) {
kfree(cmd);
return -ERESTARTSYS;
}
if (!session->response)
vmcp_response_alloc(session);
if (!session->response) {
mutex_unlock(&session->mutex);
kfree(cmd);
return -ENOMEM;
}
debug_text_event(vmcp_debug, 1, cmd);
session->resp_size = cpcmd(cmd, session->response, session->bufsize,
&session->resp_code);
mutex_unlock(&session->mutex);
kfree(cmd);
*ppos = 0; /* reset the file pointer after a command */
return count;
}
/*
 * These ioctls are available, as the semantics of the diagnose 8 call
 * do not fit very well into a Linux call. Diagnose X'08' is described in
 * CP Programming Services SC24-6084-00.
 *
 * VMCP_GETCODE: gives the CP return code back to user space
 * VMCP_GETSIZE: gives the size of the last response back to user space
 * VMCP_SETBUF: sets the response buffer for the next write call. Diagnose 8
 * expects adjacent pages in real storage and, to make matters worse, we
 * don't know the size of the response. Therefore we default to PAGE_SIZE and
 * let userspace change the response size if it expects a bigger response.
*/
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
int ret = -ENOTTY;
int __user *argp;
session = file->private_data;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (int __user *)arg;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
ret = put_user(session->resp_code, argp);
break;
case VMCP_SETBUF:
vmcp_response_free(session);
ret = get_user(session->bufsize, argp);
if (ret)
session->bufsize = PAGE_SIZE;
if (!session->bufsize || get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
ret = -EINVAL;
}
break;
case VMCP_GETSIZE:
ret = put_user(session->resp_size, argp);
break;
default:
break;
}
mutex_unlock(&session->mutex);
return ret;
}
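#if 0
/*
 * Hedged user-space sketch (illustrative only, not compiled into the
 * driver): issuing a CP command through /dev/vmcp. The command string and
 * buffer size are assumptions; opening the device requires CAP_SYS_ADMIN
 * and commands longer than 240 bytes are rejected.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/vmcp.h>

static int vmcp_example(void)
{
	char buf[4096];
	int fd, cprc;
	ssize_t n;

	fd = open("/dev/vmcp", O_RDWR);
	if (fd < 0)
		return -1;
	if (write(fd, "QUERY USERS", strlen("QUERY USERS")) < 0) {
		close(fd);
		return -1;
	}
	/* CP return code of the command just issued */
	if (ioctl(fd, VMCP_GETCODE, &cprc) == 0)
		printf("CP rc=%d\n", cprc);
	/* response text, at most the current buffer size (PAGE_SIZE default) */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
#endif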
static const struct file_operations vmcp_fops = {
.owner = THIS_MODULE,
.open = vmcp_open,
.release = vmcp_release,
.read = vmcp_read,
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
.compat_ioctl = vmcp_ioctl,
.llseek = no_llseek,
};
static struct miscdevice vmcp_dev = {
.name = "vmcp",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vmcp_fops,
};
static int __init vmcp_init(void)
{
int ret;
if (!MACHINE_IS_VM)
return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
return -ENOMEM;
ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
if (ret) {
debug_unregister(vmcp_debug);
return ret;
}
ret = misc_register(&vmcp_dev);
if (ret)
debug_unregister(vmcp_debug);
return ret;
}
device_initcall(vmcp_init);
| linux-master | drivers/s390/char/vmcp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
* Author(s): Martin Schwidefsky <[email protected]>
*/
#define KMSG_COMPONENT "sclp_ocf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#define OCF_LENGTH_HMC_NETWORK 8UL
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
static struct kset *ocf_kset;
static void sclp_ocf_change_notify(struct work_struct *work)
{
kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
}
/* Handler for OCF event. Look for the CPC image name. */
static void sclp_ocf_handler(struct evbuf_header *evbuf)
{
struct gds_vector *v;
struct gds_subvector *sv, *netid, *cpc;
size_t size;
/* Find the 0x9f00 block. */
v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
0x9f00);
if (!v)
return;
/* Find the 0x9f22 block inside the 0x9f00 block. */
v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
if (!v)
return;
/* Find the 0x81 block inside the 0x9f22 block. */
sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
if (!sv)
return;
/* Find the 0x01 block inside the 0x81 block. */
netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
/* Find the 0x02 block inside the 0x81 block. */
cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
/* Copy network name and cpc name. */
spin_lock(&sclp_ocf_lock);
if (netid) {
size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
memcpy(hmc_network, netid + 1, size);
EBCASC(hmc_network, size);
hmc_network[size] = 0;
}
if (cpc) {
size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
memset(cpc_name, 0, OCF_LENGTH_CPC_NAME);
memcpy(cpc_name, cpc + 1, size);
}
spin_unlock(&sclp_ocf_lock);
schedule_work(&sclp_ocf_change_work);
}
static struct sclp_register sclp_ocf_event = {
.receive_mask = EVTYP_OCF_MASK,
.receiver_fn = sclp_ocf_handler,
};
void sclp_ocf_cpc_name_copy(char *dst)
{
spin_lock_irq(&sclp_ocf_lock);
memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME);
spin_unlock_irq(&sclp_ocf_lock);
}
EXPORT_SYMBOL(sclp_ocf_cpc_name_copy);
static ssize_t cpc_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char name[OCF_LENGTH_CPC_NAME + 1];
sclp_ocf_cpc_name_copy(name);
name[OCF_LENGTH_CPC_NAME] = 0;
EBCASC(name, OCF_LENGTH_CPC_NAME);
return snprintf(page, PAGE_SIZE, "%s\n", name);
}
static struct kobj_attribute cpc_name_attr =
__ATTR(cpc_name, 0444, cpc_name_show, NULL);
static ssize_t hmc_network_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
spin_lock_irq(&sclp_ocf_lock);
rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
static struct kobj_attribute hmc_network_attr =
__ATTR(hmc_network, 0444, hmc_network_show, NULL);
static struct attribute *ocf_attrs[] = {
&cpc_name_attr.attr,
&hmc_network_attr.attr,
NULL,
};
static const struct attribute_group ocf_attr_group = {
.attrs = ocf_attrs,
};
static int __init ocf_init(void)
{
int rc;
INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
if (!ocf_kset)
return -ENOMEM;
rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
if (rc) {
kset_unregister(ocf_kset);
return rc;
}
return sclp_register(&sclp_ocf_event);
}
device_initcall(ocf_init);
| linux-master | drivers/s390/char/sclp_ocf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <[email protected]>
* Michael Ernst <[email protected]>
*/
#define KMSG_COMPONENT "sclp_cpi"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_cpi_sys.h"
#define CPI_LENGTH_NAME 8
#define CPI_LENGTH_LEVEL 16
static DEFINE_MUTEX(sclp_cpi_mutex);
struct cpi_evbuf {
struct evbuf_header header;
u8 id_format;
u8 reserved0;
u8 system_type[CPI_LENGTH_NAME];
u64 reserved1;
u8 system_name[CPI_LENGTH_NAME];
u64 reserved2;
u64 system_level;
u64 reserved3;
u8 sysplex_name[CPI_LENGTH_NAME];
u8 reserved4[16];
} __attribute__((packed));
struct cpi_sccb {
struct sccb_header header;
struct cpi_evbuf cpi_evbuf;
} __attribute__((packed));
static struct sclp_register sclp_cpi_event = {
.send_mask = EVTYP_CTLPROGIDENT_MASK,
};
static char system_name[CPI_LENGTH_NAME + 1];
static char sysplex_name[CPI_LENGTH_NAME + 1];
static char system_type[CPI_LENGTH_NAME + 1];
static u64 system_level;
static void set_data(char *field, char *data)
{
memset(field, ' ', CPI_LENGTH_NAME);
memcpy(field, data, strlen(data));
sclp_ascebc_str(field, CPI_LENGTH_NAME);
}
static void cpi_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static struct sclp_req *cpi_prepare_req(void)
{
struct sclp_req *req;
struct cpi_sccb *sccb;
struct cpi_evbuf *evb;
req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
kfree(req);
return ERR_PTR(-ENOMEM);
}
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
evb = &sccb->cpi_evbuf;
/* set system type */
set_data(evb->system_type, system_type);
/* set system name */
set_data(evb->system_name, system_name);
/* set system level */
evb->system_level = system_level;
/* set sysplex name */
set_data(evb->sysplex_name, sysplex_name);
/* prepare request data structure presented to SCLP driver */
req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req->sccb = sccb;
req->status = SCLP_REQ_FILLED;
req->callback = cpi_callback;
return req;
}
static void cpi_free_req(struct sclp_req *req)
{
free_page((unsigned long) req->sccb);
kfree(req);
}
static int cpi_req(void)
{
struct completion completion;
struct sclp_req *req;
int rc;
int response;
rc = sclp_register(&sclp_cpi_event);
if (rc)
goto out;
if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
rc = -EOPNOTSUPP;
goto out_unregister;
}
req = cpi_prepare_req();
if (IS_ERR(req)) {
rc = PTR_ERR(req);
goto out_unregister;
}
init_completion(&completion);
req->callback_data = &completion;
/* Add request to sclp queue */
rc = sclp_add_request(req);
if (rc)
goto out_free_req;
wait_for_completion(&completion);
if (req->status != SCLP_REQ_DONE) {
pr_warn("request failed (status=0x%02x)\n", req->status);
rc = -EIO;
goto out_free_req;
}
response = ((struct cpi_sccb *) req->sccb)->header.response_code;
if (response != 0x0020) {
pr_warn("request failed with response code 0x%x\n", response);
rc = -EIO;
}
out_free_req:
cpi_free_req(req);
out_unregister:
sclp_unregister(&sclp_cpi_event);
out:
return rc;
}
static int check_string(const char *attr, const char *str)
{
size_t len;
size_t i;
len = strlen(str);
if ((len > 0) && (str[len - 1] == '\n'))
len--;
if (len > CPI_LENGTH_NAME)
return -EINVAL;
for (i = 0; i < len ; i++) {
if (isalpha(str[i]) || isdigit(str[i]) ||
strchr("$@# ", str[i]))
continue;
return -EINVAL;
}
return 0;
}
static void set_string(char *attr, const char *value)
{
size_t len;
size_t i;
len = strlen(value);
if ((len > 0) && (value[len - 1] == '\n'))
len--;
for (i = 0; i < CPI_LENGTH_NAME; i++) {
if (i < len)
attr[i] = toupper(value[i]);
else
attr[i] = ' ';
}
}
static ssize_t system_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_name_attr =
__ATTR(system_name, 0644, system_name_show, system_name_store);
static ssize_t sysplex_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t sysplex_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("sysplex_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(sysplex_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute sysplex_name_attr =
__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
static ssize_t system_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_type", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_type, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_type_attr =
__ATTR(system_type, 0644, system_type_show, system_type_store);
static ssize_t system_level_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
unsigned long long level;
mutex_lock(&sclp_cpi_mutex);
level = system_level;
mutex_unlock(&sclp_cpi_mutex);
return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
}
static ssize_t system_level_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
unsigned long long level;
char *endp;
level = simple_strtoull(buf, &endp, 16);
if (endp == buf)
return -EINVAL;
if (*endp == '\n')
endp++;
if (*endp)
return -EINVAL;
mutex_lock(&sclp_cpi_mutex);
system_level = level;
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_level_attr =
__ATTR(system_level, 0644, system_level_show, system_level_store);
static ssize_t set_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
if (rc)
return rc;
return len;
}
static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
static struct attribute *cpi_attrs[] = {
&system_name_attr.attr,
&sysplex_name_attr.attr,
&system_type_attr.attr,
&system_level_attr.attr,
&set_attr.attr,
NULL,
};
static struct attribute_group cpi_attr_group = {
.attrs = cpi_attrs,
};
static struct kset *cpi_kset;
int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
const u64 level)
{
int rc;
rc = check_string("system_name", system);
if (rc)
return rc;
rc = check_string("sysplex_name", sysplex);
if (rc)
return rc;
rc = check_string("system_type", type);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, system);
set_string(sysplex_name, sysplex);
set_string(system_type, type);
system_level = level;
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
EXPORT_SYMBOL(sclp_cpi_set_data);
static int __init cpi_init(void)
{
int rc;
cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
if (!cpi_kset)
return -ENOMEM;
rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
if (rc)
kset_unregister(cpi_kset);
return rc;
}
__initcall(cpi_init);
| linux-master | drivers/s390/char/sclp_cpi_sys.c |
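For illustration, a minimal hypothetical module calling the exported sclp_cpi_set_data() helper from the file above might look like the sketch below. The header path, module boilerplate and identification strings are assumptions, not code from the kernel tree; each string must pass the check_string() rules above (letters, digits and "$@# ", at most CPI_LENGTH_NAME characters) and is uppercased by set_string().

#include <linux/module.h>
#include "sclp_cpi_sys.h"	/* assumed to declare sclp_cpi_set_data() */

static int __init cpi_example_init(void)
{
	/* placeholder names; sclp_cpi_set_data() validates and uppercases them */
	return sclp_cpi_set_data("LINUX01", "PLEX01", "LINUX", 0x1ULL);
}

static void __exit cpi_example_exit(void)
{
}

module_init(cpi_example_init);
module_exit(cpi_example_exit);
MODULE_LICENSE("GPL");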
// SPDX-License-Identifier: GPL-2.0
/*
* PCI I/O adapter configuration related functions.
*
* Copyright IBM Corp. 2016
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <asm/sclp.h>
#include "sclp.h"
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_ATYPE_PCI 2
#define SCLP_ERRNOTIFY_AQ_RESET 0
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
struct err_notify_evbuf {
struct evbuf_header header;
u8 action;
u8 atype;
u32 fh;
u32 fid;
u8 data[];
} __packed;
struct err_notify_sccb {
struct sccb_header header;
struct err_notify_evbuf evbuf;
} __packed;
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_ATYPE_PCI;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
static void sclp_pci_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static int sclp_pci_check_report(struct zpci_report_error_header *report)
{
if (report->version != 1)
return -EINVAL;
switch (report->action) {
case SCLP_ERRNOTIFY_AQ_RESET:
case SCLP_ERRNOTIFY_AQ_REPAIR:
case SCLP_ERRNOTIFY_AQ_INFO_LOG:
break;
default:
return -EINVAL;
}
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
return -EINVAL;
return 0;
}
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct err_notify_sccb *sccb;
struct sclp_req req;
int ret;
ret = sclp_pci_check_report(report);
if (ret)
return ret;
mutex_lock(&sclp_pci_mutex);
ret = sclp_register(&sclp_pci_event);
if (ret)
goto out_unlock;
if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
ret = -EOPNOTSUPP;
goto out_unregister;
}
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
ret = -ENOMEM;
goto out_unregister;
}
memset(&req, 0, sizeof(req));
req.callback_data = &completion;
req.callback = sclp_pci_callback;
req.command = SCLP_CMDW_WRITE_EVENT_DATA;
req.status = SCLP_REQ_FILLED;
req.sccb = sccb;
sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
sccb->evbuf.action = report->action;
sccb->evbuf.atype = SCLP_ATYPE_PCI;
sccb->evbuf.fh = fh;
sccb->evbuf.fid = fid;
memcpy(sccb->evbuf.data, report->data, report->length);
ret = sclp_add_request(&req);
if (ret)
goto out_free_req;
wait_for_completion(&completion);
if (req.status != SCLP_REQ_DONE) {
pr_warn("request failed (status=0x%02x)\n",
req.status);
ret = -EIO;
goto out_free_req;
}
if (sccb->header.response_code != 0x0020) {
pr_warn("request failed with response code 0x%x\n",
sccb->header.response_code);
ret = -EIO;
}
out_free_req:
free_page((unsigned long) sccb);
out_unregister:
sclp_unregister(&sclp_pci_event);
out_unlock:
mutex_unlock(&sclp_pci_mutex);
return ret;
}
| linux-master | drivers/s390/char/sclp_pci.c |
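As a hedged usage sketch for sclp_pci_report() above: only the header fields that sclp_pci_check_report() inspects (version, action, length) and the trailing data[] area are touched. The exact definition of struct zpci_report_error_header comes from the s390 arch headers and is assumed here, as are the function handle and function id passed in.

#include <linux/slab.h>
#include <asm/sclp.h>	/* assumed to declare sclp_pci_report() */

static int example_report_pci_error(u32 fh, u32 fid)
{
	struct zpci_report_error_header *report;
	size_t payload = 16;	/* must stay below PAGE_SIZE - sizeof(struct err_notify_sccb) */
	int rc;

	report = kzalloc(sizeof(*report) + payload, GFP_KERNEL);
	if (!report)
		return -ENOMEM;
	report->version = 1;	/* only version 1 passes sclp_pci_check_report() */
	report->action = 2;	/* SCLP_ERRNOTIFY_AQ_INFO_LOG in the driver above */
	report->length = payload;
	/* report->data[] (assumed trailing flexible array) is already zeroed by kzalloc() */
	rc = sclp_pci_report(report, fh, fid);
	kfree(report);
	return rc;
}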
// SPDX-License-Identifier: GPL-2.0
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include "keyboard.h"
u_short ebc_plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *ebc_key_maps[MAX_NR_KEYMAPS] = {
ebc_plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int ebc_keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but they who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char ebc_func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *ebc_funcbufptr = ebc_func_buf;
int ebc_funcbufsize = sizeof(ebc_func_buf);
int ebc_funcbufleft; /* space left */
char *ebc_func_table[MAX_NR_FUNC] = {
ebc_func_buf + 0,
ebc_func_buf + 5,
ebc_func_buf + 10,
ebc_func_buf + 15,
ebc_func_buf + 20,
ebc_func_buf + 25,
ebc_func_buf + 31,
ebc_func_buf + 37,
ebc_func_buf + 43,
ebc_func_buf + 49,
ebc_func_buf + 55,
ebc_func_buf + 61,
ebc_func_buf + 67,
ebc_func_buf + 73,
ebc_func_buf + 79,
ebc_func_buf + 85,
ebc_func_buf + 91,
ebc_func_buf + 97,
ebc_func_buf + 103,
ebc_func_buf + 109,
NULL,
};
struct kbdiacruc ebc_accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int ebc_accent_table_size = 4;
| linux-master | drivers/s390/char/defkeymap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DIAGNOSE X'2C4' instruction based HMC FTP services, useable on z/VM
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/diag.h>
#include "hmcdrv_ftp.h"
#include "diag_ftp.h"
/* DIAGNOSE X'2C4' return codes in Ry */
#define DIAG_FTP_RET_OK 0 /* HMC FTP started successfully */
#define DIAG_FTP_RET_EBUSY 4 /* HMC FTP service currently busy */
#define DIAG_FTP_RET_EIO 8 /* HMC FTP service I/O error */
/* and an artificial extension */
#define DIAG_FTP_RET_EPERM 2 /* HMC FTP service privilege error */
/* FTP service status codes (after INTR at guest real location 133) */
#define DIAG_FTP_STAT_OK 0U /* request completed successfully */
#define DIAG_FTP_STAT_PGCC 4U /* program check condition */
#define DIAG_FTP_STAT_PGIOE 8U /* paging I/O error */
#define DIAG_FTP_STAT_TIMEOUT 12U /* timeout */
#define DIAG_FTP_STAT_EBASE 16U /* base of error codes from SCLP */
#define DIAG_FTP_STAT_LDFAIL (DIAG_FTP_STAT_EBASE + 1U) /* failed */
#define DIAG_FTP_STAT_LDNPERM (DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
#define DIAG_FTP_STAT_LDRUNS (DIAG_FTP_STAT_EBASE + 3U) /* runs */
#define DIAG_FTP_STAT_LDNRUNS (DIAG_FTP_STAT_EBASE + 4U) /* not runs */
/**
* struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
* @bufaddr: real buffer address (at 4k boundary)
* @buflen: length of buffer
* @offset: dir/file offset
* @intparm: interruption parameter (unused)
* @transferred: bytes transferred
* @fsize: file size, filled on GET
* @failaddr: failing address
* @spare: padding
* @fident: file name - ASCII
*/
struct diag_ftp_ldfpl {
u64 bufaddr;
u64 buflen;
u64 offset;
u64 intparm;
u64 transferred;
u64 fsize;
u64 failaddr;
u64 spare;
u8 fident[HMCDRV_FTP_FIDENT_MAX];
} __packed;
static DECLARE_COMPLETION(diag_ftp_rx_complete);
static int diag_ftp_subcode;
/**
* diag_ftp_handler() - FTP services IRQ handler
* @extirq: external interrupt (sub-) code
* @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
* @param64: unused (for 64-bit interrupt parameters)
*/
static void diag_ftp_handler(struct ext_code extirq,
unsigned int param32,
unsigned long param64)
{
if ((extirq.subcode >> 8) != 8)
return; /* not a FTP services sub-code */
inc_irq_stat(IRQEXT_FTP);
diag_ftp_subcode = extirq.subcode & 0xffU;
complete(&diag_ftp_rx_complete);
}
/**
* diag_ftp_2c4() - DIAGNOSE X'2C4' service call
* @fpl: pointer to prepared LDFPL
* @cmd: FTP command to be executed
*
* Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
* @fpl and FTP function code @cmd. In case of an error the function does
* nothing and returns a (negative) error code.
*
* Notes:
* 1. This function only initiates a transfer, so the caller must wait
* for completion (asynchronous execution).
* 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
* 3. fpl->bufaddr must be a real address, 4k aligned
*/
static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
enum hmcdrv_ftp_cmdid cmd)
{
int rc;
diag_stat_inc(DIAG_STAT_X2C4);
asm volatile(
" diag %[addr],%[cmd],0x2c4\n"
"0: j 2f\n"
"1: la %[rc],%[err]\n"
"2:\n"
EX_TABLE(0b, 1b)
: [rc] "=d" (rc), "+m" (*fpl)
: [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
[err] "i" (DIAG_FTP_RET_EPERM)
: "cc");
switch (rc) {
case DIAG_FTP_RET_OK:
return 0;
case DIAG_FTP_RET_EBUSY:
return -EBUSY;
case DIAG_FTP_RET_EPERM:
return -EPERM;
case DIAG_FTP_RET_EIO:
default:
return -EIO;
}
}
/**
* diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
* @ftp: pointer to FTP command specification
* @fsize: return of file size (or NULL if undesirable)
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure locking.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
struct diag_ftp_ldfpl *ldfpl;
ssize_t len;
#ifdef DEBUG
unsigned long start_jiffies;
pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
ftp->fname, ftp->len);
start_jiffies = jiffies;
#endif
init_completion(&diag_ftp_rx_complete);
ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ldfpl) {
len = -ENOMEM;
goto out;
}
len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
if (len < 0) {
len = -EINVAL;
goto out_free;
}
ldfpl->transferred = 0;
ldfpl->fsize = 0;
ldfpl->offset = ftp->ofs;
ldfpl->buflen = ftp->len;
ldfpl->bufaddr = virt_to_phys(ftp->buf);
len = diag_ftp_2c4(ldfpl, ftp->id);
if (len)
goto out_free;
/*
* There is no way to cancel the running diag X'2C4', the code
* needs to wait unconditionally until the transfer is complete.
*/
wait_for_completion(&diag_ftp_rx_complete);
#ifdef DEBUG
pr_debug("completed DIAG X'2C4' after %lu ms\n",
(jiffies - start_jiffies) * 1000 / HZ);
pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
#endif
switch (diag_ftp_subcode) {
case DIAG_FTP_STAT_OK: /* success */
len = ldfpl->transferred;
if (fsize)
*fsize = ldfpl->fsize;
break;
case DIAG_FTP_STAT_LDNPERM:
len = -EPERM;
break;
case DIAG_FTP_STAT_LDRUNS:
len = -EBUSY;
break;
case DIAG_FTP_STAT_LDFAIL:
len = -ENOENT; /* no such file or media */
break;
default:
len = -EIO;
break;
}
out_free:
free_page((unsigned long) ldfpl);
out:
return len;
}
/**
* diag_ftp_startup() - startup of FTP services, when running on z/VM
*
* Return: 0 on success, else a (negative) error code
*/
int diag_ftp_startup(void)
{
int rc;
rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
if (rc)
return rc;
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
return 0;
}
/**
* diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
*/
void diag_ftp_shutdown(void)
{
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
}
| linux-master | drivers/s390/char/diag_ftp.c |
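A usage sketch for diag_ftp_cmd() above, with the caveats that the struct hmcdrv_ftp_cmdspec field names are inferred from their use in that function, that HMCDRV_FTP_GET and the file name are assumptions about hmcdrv_ftp.h and the HMC media, and that the caller must provide its own serialization since the function is not reentrant.

#include <linux/mm.h>
#include "hmcdrv_ftp.h"	/* assumed to define struct hmcdrv_ftp_cmdspec and the command ids */

static ssize_t example_hmc_get(void)
{
	struct hmcdrv_ftp_cmdspec ftp;
	size_t fsize = 0;
	ssize_t len;
	void *buf;

	buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;
	ftp.id = HMCDRV_FTP_GET;	/* assumed "get file" command id */
	ftp.fname = "version.txt";	/* placeholder file name on the HMC media */
	ftp.ofs = 0;
	ftp.buf = buf;			/* diag_ftp_cmd() passes virt_to_phys(buf) to the LDFPL */
	ftp.len = PAGE_SIZE;
	len = diag_ftp_cmd(&ftp, &fsize);
	free_page((unsigned long) buf);
	return len;
}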
// SPDX-License-Identifier: GPL-2.0
/*
* core function to access sclp interface
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>
#include "sclp.h"
#define SCLP_HEADER "sclp: "
struct sclp_trace_entry {
char id[4] __nonstring;
u32 a;
u64 b;
};
#define SCLP_TRACE_ENTRY_SIZE sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE 128
#define SCLP_TRACE_EVENT_MAX_SIZE 64
/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
&debug_hex_ascii_view);
/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;
/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;
/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);
/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;
/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
bool sclp_console_drop = true;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;
/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
struct sclp_trace_entry e;
memset(&e, 0, sizeof(e));
strncpy(e.id, id, sizeof(e.id));
e.a = a;
e.b = b;
debug_event(&sclp_debug, prio, &e, sizeof(e));
if (err)
debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}
static inline int no_zeroes_len(void *data, int len)
{
char *d = data;
/* Minimize trace area usage by not tracing trailing zeroes. */
while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
len--;
return len;
}
static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
if (errlen)
debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}
static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
/* Full SCCB tracing if debug level is set to max. */
if (sclp_debug.level == DEBUG_MAX_LEVEL)
return len;
/* Minimal tracing for console writes. */
if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
(evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
limit = SCLP_TRACE_ENTRY_SIZE;
return min(len, limit);
}
static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
sclp_cmdw_t cmd, struct sccb_header *sccb,
bool err)
{
sclp_trace(prio, id, a, b, err);
if (sccb) {
sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
err ? sccb->length : 0);
}
}
static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
struct evbuf_header *evbuf, bool err)
{
sclp_trace(prio, id, a, b, err);
sclp_trace_bin(prio + 1, evbuf,
min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
err ? evbuf->length : 0);
}
static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
bool err)
{
struct sccb_header *sccb = req->sccb;
union {
struct {
u16 status;
u16 response;
u16 timeout;
u16 start_count;
};
u64 b;
} summary;
summary.status = req->status;
summary.response = sccb ? sccb->response_code : 0;
summary.timeout = (u16)req->queue_timeout;
summary.start_count = (u16)req->start_count;
sclp_trace(prio, id, __pa(sccb), summary.b, err);
}
static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
struct sclp_register *reg)
{
struct {
u64 receive;
u64 send;
} d;
d.receive = reg->receive_mask;
d.send = reg->send_mask;
sclp_trace(prio, id, a, b, false);
sclp_trace_bin(prio, &d, sizeof(d), 0);
}
static int __init sclp_setup_console_pages(char *str)
{
int pages, rc;
rc = kstrtoint(str, 0, &pages);
if (!rc && pages >= SCLP_CONSOLE_PAGES)
sclp_console_pages = pages;
return 1;
}
__setup("sclp_con_pages=", sclp_setup_console_pages);
static int __init sclp_setup_console_drop(char *str)
{
return kstrtobool(str, &sclp_console_drop) == 0;
}
__setup("sclp_con_drop=", sclp_setup_console_drop);
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
sclp_running_state_idle,
sclp_running_state_running,
sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;
/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
sclp_reading_state_idle,
sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;
/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
sclp_activation_state_active,
sclp_activation_state_deactivating,
sclp_activation_state_inactive,
sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;
/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
sclp_mask_state_idle,
sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3
/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL 10
#define SCLP_RETRY_INTERVAL 30
static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
static void
__sclp_queue_read_req(void)
{
if (sclp_reading_state == sclp_reading_state_idle) {
sclp_reading_state = sclp_reading_state_reading;
__sclp_make_read_req();
/* Add request to head of queue */
list_add(&sclp_read_req.list, &sclp_req_queue);
}
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
del_timer(&sclp_request_timer);
sclp_request_timer.function = cb;
sclp_request_timer.expires = jiffies + time;
add_timer(&sclp_request_timer);
}
static void sclp_request_timeout_restart(struct timer_list *unused)
{
sclp_request_timeout(true);
}
static void sclp_request_timeout_normal(struct timer_list *unused)
{
sclp_request_timeout(false);
}
/* Request timeout handler. Restart the request queue. If force_restart,
* force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
unsigned long flags;
/* TMO: A timeout occurred (a=force_restart) */
sclp_trace(2, "TMO", force_restart, 0, true);
spin_lock_irqsave(&sclp_lock, flags);
if (force_restart) {
if (sclp_running_state == sclp_running_state_running) {
/* Break running state and queue NOP read event request
* to get a defined interface state. */
__sclp_queue_read_req();
sclp_running_state = sclp_running_state_idle;
}
} else {
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
sclp_request_timeout_normal);
}
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_process_queue();
}
/*
* Returns the expire value in jiffies of the next pending request timeout,
* if any. Needs to be called with sclp_lock.
*/
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
unsigned long expires_next = 0;
struct sclp_req *req;
list_for_each_entry(req, &sclp_req_queue, list) {
if (!req->queue_expires)
continue;
if (!expires_next ||
(time_before(req->queue_expires, expires_next)))
expires_next = req->queue_expires;
}
return expires_next;
}
/*
* Returns expired request, if any, and removes it from the list.
*/
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
unsigned long flags, now;
struct sclp_req *req;
spin_lock_irqsave(&sclp_lock, flags);
now = jiffies;
/* Don't need list_for_each_safe because we break out after list_del */
list_for_each_entry(req, &sclp_req_queue, list) {
if (!req->queue_expires)
continue;
if (time_before_eq(req->queue_expires, now)) {
if (req->status == SCLP_REQ_QUEUED) {
req->status = SCLP_REQ_QUEUED_TIMEOUT;
list_del(&req->list);
goto out;
}
}
}
req = NULL;
out:
spin_unlock_irqrestore(&sclp_lock, flags);
return req;
}
/*
* Timeout handler for queued requests. Removes request from list and
* invokes callback. This timer can be set per request in situations where
* waiting too long would be harmful to the system, e.g. during SE reboot.
*/
static void sclp_req_queue_timeout(struct timer_list *unused)
{
unsigned long flags, expires_next;
struct sclp_req *req;
do {
req = __sclp_req_queue_remove_expired_req();
if (req) {
/* RQTM: Request timed out (a=sccb, b=summary) */
sclp_trace_req(2, "RQTM", req, true);
}
if (req && req->callback)
req->callback(req, req->callback_data);
} while (req);
spin_lock_irqsave(&sclp_lock, flags);
expires_next = __sclp_req_queue_find_next_timeout();
if (expires_next)
mod_timer(&sclp_queue_timer, expires_next);
spin_unlock_irqrestore(&sclp_lock, flags);
}
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
static u64 srvc_count;
int rc;
/* SRV1: Service call about to be issued (a=command, b=sccb address) */
sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
rc = sclp_service_call(command, sccb);
/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
if (rc == 0)
active_cmd = command;
return rc;
}
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
int rc;
if (sclp_running_state != sclp_running_state_idle)
return 0;
del_timer(&sclp_request_timer);
rc = sclp_service_call_trace(req->command, req->sccb);
req->start_count++;
if (rc == 0) {
/* Successfully started request */
req->status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
sclp_request_timeout_restart);
return 0;
} else if (rc == -EBUSY) {
/* Try again later */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
sclp_request_timeout_normal);
return 0;
}
/* Request failed */
req->status = SCLP_REQ_FAILED;
return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
struct sclp_req *req;
int rc;
unsigned long flags;
spin_lock_irqsave(&sclp_lock, flags);
if (sclp_running_state != sclp_running_state_idle) {
spin_unlock_irqrestore(&sclp_lock, flags);
return;
}
del_timer(&sclp_request_timer);
while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
rc = __sclp_start_request(req);
if (rc == 0)
break;
/* Request failed */
if (req->start_count > 1) {
/* Cannot abort already submitted request - could still
* be active at the SCLP */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
sclp_request_timeout_normal);
break;
}
/* Post-processing for aborted request */
list_del(&req->list);
/* RQAB: Request aborted (a=sccb, b=summary) */
sclp_trace_req(2, "RQAB", req, true);
if (req->callback) {
spin_unlock_irqrestore(&sclp_lock, flags);
req->callback(req, req->callback_data);
spin_lock_irqsave(&sclp_lock, flags);
}
}
spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
if (req == &sclp_init_req)
return 1;
if (sclp_init_state != sclp_init_state_initialized)
return 0;
if (sclp_activation_state != sclp_activation_state_active)
return 0;
return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_lock, flags);
if (!__sclp_can_add_request(req)) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EIO;
}
/* RQAD: Request was added (a=sccb, b=caller) */
sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);
req->status = SCLP_REQ_QUEUED;
req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue);
rc = 0;
if (req->queue_timeout) {
req->queue_expires = jiffies + req->queue_timeout * HZ;
if (!timer_pending(&sclp_queue_timer) ||
time_after(sclp_queue_timer.expires, req->queue_expires))
mod_timer(&sclp_queue_timer, req->queue_expires);
} else
req->queue_expires = 0;
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
}
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
EXPORT_SYMBOL(sclp_add_request);
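/*
 * Illustrative sketch (hypothetical, not part of the original driver): the
 * asynchronous request pattern that callers of sclp_add_request() typically
 * follow, compare cpi_req() and sclp_pci_report() elsewhere in this directory.
 * The command word and SCCB contents are placeholders prepared by the caller;
 * <linux/completion.h> is assumed to be reachable here.
 */
static void example_request_done(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}

static int __maybe_unused example_issue_request(sclp_cmdw_t cmd, void *sccb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct sclp_req req = {
		.command = cmd,
		.sccb = sccb,	/* page-sized GFP_KERNEL | GFP_DMA buffer */
		.status = SCLP_REQ_FILLED,
		.callback = example_request_done,
		.callback_data = &done,
	};
	int rc;

	rc = sclp_add_request(&req);
	if (rc)
		return rc;
	wait_for_completion(&done);
	return req.status == SCLP_REQ_DONE ? 0 : -EIO;
}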
/* Dispatch events found in request buffer to registered listeners. Return 0
* if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
unsigned long flags;
struct evbuf_header *evbuf;
struct list_head *l;
struct sclp_register *reg;
int offset;
int rc;
spin_lock_irqsave(&sclp_lock, flags);
rc = 0;
for (offset = sizeof(struct sccb_header); offset < sccb->length;
offset += evbuf->length) {
evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
/* Check for malformed hardware response */
if (evbuf->length == 0)
break;
/* Search for event handler */
reg = NULL;
list_for_each(l, &sclp_reg_list) {
reg = list_entry(l, struct sclp_register, list);
if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
break;
else
reg = NULL;
}
/* EVNT: Event callback (b=receiver) */
sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
evbuf, !reg);
if (reg && reg->receiver_fn) {
spin_unlock_irqrestore(&sclp_lock, flags);
reg->receiver_fn(evbuf);
spin_lock_irqsave(&sclp_lock, flags);
} else if (reg == NULL)
rc = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
unsigned long flags;
struct sccb_header *sccb;
sccb = (struct sccb_header *) req->sccb;
if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
sccb->response_code == 0x220))
sclp_dispatch_evbufs(sccb);
spin_lock_irqsave(&sclp_lock, flags);
sclp_reading_state = sclp_reading_state_idle;
spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
struct sccb_header *sccb;
sccb = (struct sccb_header *) sclp_read_sccb;
clear_page(sccb);
memset(&sclp_read_req, 0, sizeof(struct sclp_req));
sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
sclp_read_req.status = SCLP_REQ_QUEUED;
sclp_read_req.start_count = 0;
sclp_read_req.callback = sclp_read_cb;
sclp_read_req.sccb = sccb;
sccb->length = PAGE_SIZE;
sccb->function_code = 0;
sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
* NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
struct list_head *l;
struct sclp_req *req;
list_for_each(l, &sclp_req_queue) {
req = list_entry(l, struct sclp_req, list);
if (sccb == __pa(req->sccb))
return req;
}
return NULL;
}
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
struct evbuf_header *evbuf;
u16 response;
if (!sccb)
return true;
/* Check SCCB response. */
response = sccb->response_code & 0xff;
if (response != 0x10 && response != 0x20)
return false;
/* Check event-processed flag on outgoing events. */
if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
evbuf = (struct evbuf_header *)(sccb + 1);
if (!(evbuf->flags & 0x80))
return false;
}
return true;
}
/* Handler for external interruption. Perform request post-processing.
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct sclp_req *req;
u32 finished_sccb;
u32 evbuf_pending;
inc_irq_stat(IRQEXT_SCP);
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
/* INT: Interrupt received (a=intparm, b=cmd) */
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
(struct sccb_header *)__va(finished_sccb),
!ok_response(finished_sccb, active_cmd));
if (finished_sccb) {
del_timer(&sclp_request_timer);
sclp_running_state = sclp_running_state_reset_pending;
req = __sclp_find_req(finished_sccb);
if (req) {
/* Request post-processing */
list_del(&req->list);
req->status = SCLP_REQ_DONE;
/* RQOK: Request success (a=sccb, b=summary) */
sclp_trace_req(2, "RQOK", req, false);
if (req->callback) {
spin_unlock(&sclp_lock);
req->callback(req, req->callback_data);
spin_lock(&sclp_lock);
}
} else {
/* UNEX: Unexpected SCCB completion (a=sccb address) */
sclp_trace(0, "UNEX", finished_sccb, 0, true);
}
sclp_running_state = sclp_running_state_idle;
active_cmd = 0;
}
if (evbuf_pending &&
sclp_activation_state == sclp_activation_state_active)
__sclp_queue_read_req();
spin_unlock(&sclp_lock);
sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
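/*
 * Bit 51 of the s390 TOD clock advances once per microsecond, so one second
 * corresponds to roughly 2^32 TOD units; shifting whole seconds left by 32 is
 * therefore only an approximation, which is sufficient for the coarse timeout
 * comparison in sclp_sync_wait() below.
 */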
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
return (u64) (jiffies / HZ) << 32;
}
/* Wait until a currently running request has finished. Note: while this function
* is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
unsigned long long old_tick;
unsigned long flags;
unsigned long cr0, cr0_sync;
static u64 sync_count;
u64 timeout;
int irq_context;
/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
/* We'll be disabling timer interrupts, so we need a custom timeout
* mechanism */
timeout = 0;
if (timer_pending(&sclp_request_timer)) {
/* Get timeout TOD value */
timeout = get_tod_clock_fast() +
sclp_tod_from_jiffies(sclp_request_timer.expires -
jiffies);
}
local_irq_save(flags);
/* Prevent bottom half from executing once we force interrupts open */
irq_context = in_interrupt();
if (!irq_context)
local_bh_disable();
/* Enable service-signal interruption, disable timer interrupts */
old_tick = local_tick_disable();
trace_hardirqs_on();
__ctl_store(cr0, 0, 0);
cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync |= 1UL << (63 - 54);
__ctl_load(cr0_sync, 0, 0);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
local_irq_disable();
__ctl_load(cr0, 0, 0);
if (!irq_context)
_local_bh_enable();
local_tick_enable(old_tick);
local_irq_restore(flags);
/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
struct list_head *l;
struct sclp_register *reg;
unsigned long flags;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
do {
spin_lock_irqsave(&sclp_lock, flags);
reg = NULL;
list_for_each(l, &sclp_reg_list) {
reg = list_entry(l, struct sclp_register, list);
receive_mask = reg->send_mask & sclp_receive_mask;
send_mask = reg->receive_mask & sclp_send_mask;
if (reg->sclp_receive_mask != receive_mask ||
reg->sclp_send_mask != send_mask) {
reg->sclp_receive_mask = receive_mask;
reg->sclp_send_mask = send_mask;
break;
} else
reg = NULL;
}
spin_unlock_irqrestore(&sclp_lock, flags);
if (reg && reg->state_change_fn) {
/* STCG: State-change callback (b=callback) */
sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
false);
reg->state_change_fn(reg);
}
} while (reg);
}
struct sclp_statechangebuf {
struct evbuf_header header;
u8 validity_sclp_active_facility_mask : 1;
u8 validity_sclp_receive_mask : 1;
u8 validity_sclp_send_mask : 1;
u8 validity_read_data_function_mask : 1;
u16 _zeros : 12;
u16 mask_length;
u64 sclp_active_facility_mask;
u8 masks[2 * 1021 + 4]; /* variable length */
/*
* u8 sclp_receive_mask[mask_length];
* u8 sclp_send_mask[mask_length];
* u32 read_data_function_mask;
*/
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
unsigned long flags;
struct sclp_statechangebuf *scbuf;
BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
scbuf = (struct sclp_statechangebuf *) evbuf;
spin_lock_irqsave(&sclp_lock, flags);
if (scbuf->validity_sclp_receive_mask)
sclp_receive_mask = sccb_get_recv_mask(scbuf);
if (scbuf->validity_sclp_send_mask)
sclp_send_mask = sccb_get_send_mask(scbuf);
spin_unlock_irqrestore(&sclp_lock, flags);
if (scbuf->validity_sclp_active_facility_mask)
sclp.facilities = scbuf->sclp_active_facility_mask;
sclp_dispatch_state_change();
}
static struct sclp_register sclp_state_change_event = {
.receive_mask = EVTYP_STATECHANGE_MASK,
.receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
* Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
struct list_head *l;
struct sclp_register *t;
*receive_mask = 0;
*send_mask = 0;
list_for_each(l, &sclp_reg_list) {
t = list_entry(l, struct sclp_register, list);
*receive_mask |= t->receive_mask;
*send_mask |= t->send_mask;
}
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
unsigned long flags;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
int rc;
/* REG: Event listener registered (b=caller) */
sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
rc = sclp_init();
if (rc)
return rc;
spin_lock_irqsave(&sclp_lock, flags);
/* Check event mask for collisions */
__sclp_get_mask(&receive_mask, &send_mask);
if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EBUSY;
}
/* Trigger initial state change callback */
reg->sclp_receive_mask = 0;
reg->sclp_send_mask = 0;
list_add(&reg->list, &sclp_reg_list);
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
if (rc) {
spin_lock_irqsave(&sclp_lock, flags);
list_del(&reg->list);
spin_unlock_irqrestore(&sclp_lock, flags);
}
return rc;
}
EXPORT_SYMBOL(sclp_register);
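/*
 * Illustrative sketch (hypothetical, not part of the original driver): the
 * shape of a typical event listener.  The event-type mask is a placeholder;
 * each mask bit may be claimed by at most one listener, otherwise
 * sclp_register() returns -EBUSY as the collision check above shows.
 */
static void example_receiver_fn(struct evbuf_header *evbuf)
{
	/* called without sclp_lock held; evbuf->type and evbuf->length describe the event */
}

static struct sclp_register example_listener __maybe_unused = {
	.receive_mask = EVTYP_CTLPROGIDENT_MASK,	/* placeholder mask for illustration */
	.receiver_fn = example_receiver_fn,
	/* .state_change_fn may be set to learn when the active masks change */
};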
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
unsigned long flags;
/* UREG: Event listener unregistered (b=caller) */
sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
spin_lock_irqsave(&sclp_lock, flags);
list_del(&reg->list);
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_init_mask(1);
}
EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
* remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
struct evbuf_header *evbuf;
int unprocessed;
u16 remaining;
evbuf = (struct evbuf_header *) (sccb + 1);
unprocessed = 0;
remaining = sccb->length - sizeof(struct sccb_header);
while (remaining > 0) {
remaining -= evbuf->length;
if (evbuf->flags & 0x80) {
sccb->length -= evbuf->length;
memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
remaining);
} else {
unprocessed++;
evbuf = (struct evbuf_header *)
((addr_t) evbuf + evbuf->length);
}
}
return unprocessed;
}
EXPORT_SYMBOL(sclp_remove_processed);
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
struct init_sccb *sccb = sclp_init_sccb;
clear_page(sccb);
memset(&sclp_init_req, 0, sizeof(struct sclp_req));
sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
sclp_init_req.status = SCLP_REQ_FILLED;
sclp_init_req.start_count = 0;
sclp_init_req.callback = NULL;
sclp_init_req.callback_data = NULL;
sclp_init_req.sccb = sccb;
sccb->header.length = sizeof(*sccb);
if (sclp_mask_compat_mode)
sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
else
sccb->mask_length = sizeof(sccb_mask_t);
sccb_set_recv_mask(sccb, receive_mask);
sccb_set_send_mask(sccb, send_mask);
sccb_set_sclp_recv_mask(sccb, 0);
sccb_set_sclp_send_mask(sccb, 0);
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
* requested by registered listeners. Use zero mask otherwise. Return 0 on
* success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
unsigned long flags;
struct init_sccb *sccb = sclp_init_sccb;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
int retry;
int rc;
unsigned long wait;
spin_lock_irqsave(&sclp_lock, flags);
/* Check if interface is in appropriate state */
if (sclp_mask_state != sclp_mask_state_idle) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EBUSY;
}
if (sclp_activation_state == sclp_activation_state_inactive) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EINVAL;
}
sclp_mask_state = sclp_mask_state_initializing;
/* Determine mask */
if (calculate)
__sclp_get_mask(&receive_mask, &send_mask);
else {
receive_mask = 0;
send_mask = 0;
}
rc = -EIO;
for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
/* Prepare request */
__sclp_make_init_req(receive_mask, send_mask);
spin_unlock_irqrestore(&sclp_lock, flags);
if (sclp_add_request(&sclp_init_req)) {
/* Try again later */
wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
while (time_before(jiffies, wait))
sclp_sync_wait();
spin_lock_irqsave(&sclp_lock, flags);
continue;
}
while (sclp_init_req.status != SCLP_REQ_DONE &&
sclp_init_req.status != SCLP_REQ_FAILED)
sclp_sync_wait();
spin_lock_irqsave(&sclp_lock, flags);
if (sclp_init_req.status == SCLP_REQ_DONE &&
sccb->header.response_code == 0x20) {
/* Successful request */
if (calculate) {
sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
sclp_send_mask = sccb_get_sclp_send_mask(sccb);
} else {
sclp_receive_mask = 0;
sclp_send_mask = 0;
}
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_dispatch_state_change();
spin_lock_irqsave(&sclp_lock, flags);
rc = 0;
break;
}
}
sclp_mask_state = sclp_mask_state_idle;
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
* events will no longer be dispatched. Return 0 on success, non-zero
* otherwise. */
int
sclp_deactivate(void)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_lock, flags);
/* Deactivate can only be called when active */
if (sclp_activation_state != sclp_activation_state_active) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EINVAL;
}
sclp_activation_state = sclp_activation_state_deactivating;
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(0);
spin_lock_irqsave(&sclp_lock, flags);
if (rc == 0)
sclp_activation_state = sclp_activation_state_inactive;
else
sclp_activation_state = sclp_activation_state_active;
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
* requests will be accepted, events will be dispatched again. Return 0 on
* success, non-zero otherwise. */
int
sclp_reactivate(void)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_lock, flags);
/* Reactivate can only be called when inactive */
if (sclp_activation_state != sclp_activation_state_inactive) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EINVAL;
}
sclp_activation_state = sclp_activation_state_activating;
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
spin_lock_irqsave(&sclp_lock, flags);
if (rc == 0)
sclp_activation_state = sclp_activation_state_active;
else
sclp_activation_state = sclp_activation_state_inactive;
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
* request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
u32 finished_sccb;
inc_irq_stat(IRQEXT_SCP);
finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
return;
if (finished_sccb != __pa(sclp_init_sccb))
panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
finished_sccb);
spin_lock(&sclp_lock);
if (sclp_running_state == sclp_running_state_running) {
sclp_init_req.status = SCLP_REQ_DONE;
sclp_running_state = sclp_running_state_idle;
}
spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
unsigned long flags;
spin_lock_irqsave(&sclp_lock, flags);
if (sclp_running_state == sclp_running_state_running) {
sclp_init_req.status = SCLP_REQ_FAILED;
sclp_running_state = sclp_running_state_idle;
}
spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
* available and there are no pending requests from a previous instance.
* Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
struct init_sccb *sccb;
unsigned long flags;
int retry;
int rc;
spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */
rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
__sclp_make_init_req(0, 0);
sccb = (struct init_sccb *) sclp_init_req.sccb;
rc = sclp_service_call_trace(sclp_init_req.command, sccb);
if (rc == -EIO)
break;
sclp_init_req.status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
sclp_check_timeout);
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
rc = -EBUSY;
if (sclp_init_req.status == SCLP_REQ_DONE) {
if (sccb->header.response_code == 0x20) {
rc = 0;
break;
} else if (sccb->header.response_code == 0x74f0) {
if (!sclp_mask_compat_mode) {
sclp_mask_compat_mode = true;
retry = 0;
}
}
}
}
unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
* events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
sclp_deactivate();
return NOTIFY_DONE;
}
static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
};
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
return sysfs_emit(buf, "%i\n", sclp_console_pages);
}
static DRIVER_ATTR_RO(con_pages);
static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
{
int rc;
rc = kstrtobool(buf, &sclp_console_drop);
return rc ?: count;
}
static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
return sysfs_emit(buf, "%i\n", sclp_console_drop);
}
static DRIVER_ATTR_RW(con_drop);
static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
return sysfs_emit(buf, "%lu\n", sclp_console_full);
}
static DRIVER_ATTR_RO(con_full);
static struct attribute *sclp_drv_attrs[] = {
&driver_attr_con_pages.attr,
&driver_attr_con_drop.attr,
&driver_attr_con_full.attr,
NULL,
};
static struct attribute_group sclp_drv_attr_group = {
.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
&sclp_drv_attr_group,
NULL,
};
static struct platform_driver sclp_pdrv = {
.driver = {
.name = "sclp",
.groups = sclp_drv_attr_groups,
},
};
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
static int
sclp_init(void)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&sclp_lock, flags);
/* Check for previous or running initialization */
if (sclp_init_state != sclp_init_state_uninitialized)
goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
/* Set up variables */
list_add(&sclp_state_change_event.list, &sclp_reg_list);
timer_setup(&sclp_request_timer, NULL, 0);
timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
spin_lock_irqsave(&sclp_lock, flags);
if (rc)
goto fail_init_state_uninitialized;
/* Register reboot handler */
rc = register_reboot_notifier(&sclp_reboot_notifier);
if (rc)
goto fail_init_state_uninitialized;
/* Register interrupt handler */
rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
if (rc)
goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
sclp_init_state = sclp_init_state_uninitialized;
free_page((unsigned long) sclp_read_sccb);
free_page((unsigned long) sclp_init_sccb);
fail_unlock:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
static __init int sclp_initcall(void)
{
int rc;
rc = platform_driver_register(&sclp_pdrv);
if (rc)
return rc;
return sclp_init();
}
arch_initcall(sclp_initcall);
| linux-master | drivers/s390/char/sclp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007,2012
*
* Author(s): Peter Oberparleiter <[email protected]>
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
#include "sclp.h"
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
return sclp_sync_request_timeout(cmd, sccb, 0);
}
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
struct completion completion;
struct sclp_req *request;
int rc;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
if (timeout)
request->queue_timeout = timeout;
request->command = cmd;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
request->callback = sclp_sync_callback;
request->callback_data = &completion;
init_completion(&completion);
/* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
/* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
cmd, request->status);
rc = -EIO;
}
out:
kfree(request);
return rc;
}
/*
* CPU configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
int _sclp_get_core_info(struct sclp_core_info *info)
{
int rc;
int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
struct read_cpu_info_sccb *sccb;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
if (!sccb)
return -ENOMEM;
sccb->header.length = length;
sccb->header.control_mask[2] = 0x80;
rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warn("readcpuinfo failed (response=0x%04x)\n",
sccb->header.response_code);
rc = -EIO;
goto out;
}
sclp_fill_core_info(info, sccb);
out:
free_pages((unsigned long) sccb, get_order(length));
return rc;
}
struct cpu_configure_sccb {
struct sccb_header header;
} __attribute__((packed, aligned(8)));
static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
int rc;
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
* This is not going to cross a page boundary since we force
* kmalloc to have a minimum alignment of 8 bytes on s390.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
kfree(sccb);
return rc;
}
int sclp_core_configure(u8 core)
{
return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}
int sclp_core_deconfigure(u8 core)
{
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
int arch_get_memory_phys_device(unsigned long start_pfn)
{
if (!sclp.rzm)
return 0;
return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
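/* Convert a 1-based storage increment number to its starting physical address. */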
static unsigned long long rn2addr(u16 rn)
{
return (unsigned long long) (rn - 1) * sclp.rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
struct assign_storage_sccb *sccb;
int rc;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
cmd, sccb->header.response_code, rn);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
unsigned long long start;
int rc;
rc = do_assign_storage(0x000d0001, rn);
if (rc)
return rc;
start = rn2addr(rn);
storage_key_init_range(start, start + sclp.rzm);
return 0;
}
static int sclp_unassign_storage(u16 rn)
{
return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[];
} __packed;
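/*
 * Attach the storage element identified by @id. Increments that the response
 * reports as already assigned are unassigned again here, presumably so that
 * they can be assigned by the memory notifier when the corresponding memory
 * block is set online.
 */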
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
int rc;
int i;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->header.function_code = 0x40;
rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (sccb->entries[i])
sclp_unassign_storage(sccb->entries[i] >> 16);
}
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
int online)
{
struct memory_increment *incr;
unsigned long long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
if (start > istart + sclp.rzm - 1)
continue;
if (online)
rc |= sclp_assign_storage(incr->rn);
else
sclp_unassign_storage(incr->rn);
if (rc == 0)
incr->standby = online ? 0 : 1;
}
return rc ? -EIO : 0;
}
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
struct memory_increment *incr;
unsigned long istart;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (end - 1 < istart)
continue;
if (start > istart + sclp.rzm - 1)
continue;
if (incr->standby)
return true;
}
return false;
}
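/*
 * Memory hotplug notifier: storage elements that are not yet attached are
 * attached first. Standby increments covering a block are assigned when the
 * block is about to go online and unassigned again when it goes offline or
 * onlining is cancelled. Blocks containing standby memory may not be set
 * offline.
 */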
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long start, size;
struct memory_notify *arg;
unsigned char id;
int rc = 0;
arg = data;
start = arg->start_pfn << PAGE_SHIFT;
size = arg->nr_pages << PAGE_SHIFT;
mutex_lock(&sclp_mem_mutex);
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
sclp_attach_storage(id);
switch (action) {
case MEM_GOING_OFFLINE:
/*
 * We do not allow setting memory blocks offline that contain
* standby memory. This is done to simplify the "memory online"
* case.
*/
if (contains_standby_increment(start, start + size))
rc = -EPERM;
break;
case MEM_ONLINE:
case MEM_CANCEL_OFFLINE:
break;
case MEM_GOING_ONLINE:
rc = sclp_mem_change_state(start, size, 1);
break;
case MEM_CANCEL_ONLINE:
sclp_mem_change_state(start, size, 0);
break;
case MEM_OFFLINE:
sclp_mem_change_state(start, size, 0);
break;
default:
rc = -EINVAL;
break;
}
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
static void __init align_to_block_size(unsigned long long *start,
unsigned long long *size,
unsigned long long alignment)
{
unsigned long long start_align, size_align;
start_align = roundup(*start, alignment);
size_align = rounddown(*start + *size, alignment) - start_align;
pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
*start, size_align >> 20, *size >> 20);
*start = start_align;
*size = size_align;
}
static void __init add_memory_merged(u16 rn)
{
unsigned long long start, size, addr, block_size;
static u16 first_rn, num;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
return;
}
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long) num * sclp.rzm;
if (start >= ident_map_size)
goto skip_add;
if (start + size > ident_map_size)
size = ident_map_size - start;
block_size = memory_block_size_bytes();
align_to_block_size(&start, &size, block_size);
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
add_memory(0, addr, block_size, MHP_NONE);
skip_add:
first_rn = rn;
num = 1;
}
static void __init sclp_add_standby_memory(void)
{
struct memory_increment *incr;
list_for_each_entry(incr, &sclp_mem_list, list)
if (incr->standby)
add_memory_merged(incr->rn);
add_memory_merged(0);
}
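/*
 * Insert a storage increment into sclp_mem_list, which is kept sorted by
 * increment number. Assigned increments are inserted at their reported
 * position; unassigned (standby) increments fill the first gap in the
 * numbering and are dropped if the resulting number would exceed sclp.rnmax.
 */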
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
struct list_head *prev;
u16 last_rn;
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
if (!new_incr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
if (assigned && incr->rn > rn)
break;
if (!assigned && incr->rn - last_rn > 1)
break;
last_rn = incr->rn;
prev = &incr->list;
}
if (!assigned)
new_incr->rn = last_rn + 1;
if (new_incr->rn > sclp.rnmax) {
kfree(new_incr);
return;
}
list_add(&new_incr->list, prev);
}
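/*
 * Query all storage elements for assigned and standby increments, build
 * sclp_mem_list from the results, register the memory hotplug notifier and
 * add the detected standby memory.
 */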
static int __init sclp_detect_standby_memory(void)
{
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
if (oldmem_data.start) /* No standby memory in kdump mode */
return 0;
if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0010:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 0, 1);
}
break;
case 0x0310:
break;
case 0x0410:
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 1, 1);
}
break;
default:
rc = -EIO;
break;
}
if (!rc)
sclp_max_storage_id = sccb->max_id;
}
if (rc || list_empty(&sclp_mem_list))
goto out;
for (i = 1; i <= sclp.rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
sclp_add_standby_memory();
out:
free_page((unsigned long) sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Channel path configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
struct chp_cfg_sccb {
struct sccb_header header;
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
case 0x0440:
case 0x0450:
break;
default:
pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
/**
* sclp_chp_configure - perform configure channel-path sclp command
* @chpid: channel-path ID
*
 * Perform the configure channel-path SCLP command for the specified chpid.
* Return 0 after command successfully finished, non-zero otherwise.
*/
int sclp_chp_configure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
* sclp_chp_deconfigure - perform deconfigure channel-path sclp command
* @chpid: channel-path ID
*
 * Perform the deconfigure channel-path SCLP command for the specified chpid
* and wait for completion. On success return 0. Return non-zero otherwise.
*/
int sclp_chp_deconfigure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
struct chp_info_sccb {
struct sccb_header header;
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
*
* Perform read channel-path information sclp command and wait for completion.
* On success, store channel-path information in @info and return 0. Return
* non-zero otherwise.
*/
int sclp_chp_read_info(struct sclp_chp_info *info)
{
struct chp_info_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warn("read channel-path info failed (response=0x%04x)\n",
sccb->header.response_code);
rc = -EIO;
goto out;
}
memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
free_page((unsigned long) sccb);
return rc;
}
| linux-master | drivers/s390/char/sclp_cmd.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * character device driver for reading z/VM system service records,
 * Version 1.0
 *
 * Copyright IBM Corp. 2004, 2009
* Author(s): Xenia Tkatschow <[email protected]>
* Stefan Weinhuber <[email protected]>
*
*/
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
MODULE_AUTHOR
("(C) 2004 IBM Corporation by Xenia Tkatschow ([email protected])\n"
" Stefan Weinhuber ([email protected])");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
"system service records.");
MODULE_LICENSE("GPL");
/*
* The size of the buffer for iucv data transfer is one page,
* but in addition to the data we read from iucv we also
* place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
* one page.
*/
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
* The elements that are concurrently accessed by bottom halves are
* connection_established, iucv_path_severed, local_interrupt_buffer
* and receive_ready. The first three can be protected by
* priv_lock. receive_ready is atomic, so it can be incremented and
* decremented without holding a lock.
* The variable dev_in_use needs to be protected by the lock, since
* it's a flag used by open to make sure that the device is opened only
* by one user at the same time.
*/
struct vmlogrdr_priv_t {
char system_service[8];
char internal_name[8];
char recording_name[8];
struct iucv_path *path;
int connection_established;
int iucv_path_severed;
struct iucv_message local_interrupt_buffer;
atomic_t receive_ready;
int minor_num;
char * buffer;
char * current_position;
int remaining;
ulong residual_length;
int buffer_free;
int dev_in_use; /* 1: already opened, 0: not opened*/
spinlock_t priv_lock;
struct device *device;
struct device *class_device;
int autorecording;
int autopurge;
};
/*
* File operation structure for vmlogrdr devices
*/
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
size_t count, loff_t * ppos);
static const struct file_operations vmlogrdr_fops = {
.owner = THIS_MODULE,
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
.llseek = no_llseek,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
struct iucv_message *);
static struct iucv_handler vmlogrdr_iucv_handler = {
.path_complete = vmlogrdr_iucv_path_complete,
.path_severed = vmlogrdr_iucv_path_severed,
.message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
* pointer to system service private structure
* minor number 0 --> logrec
* minor number 1 --> account
* minor number 2 --> symptom
*/
static struct vmlogrdr_priv_t sys_ser[] = {
{ .system_service = "*LOGREC ",
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*ACCOUNT",
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*SYMPTOM",
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
.autorecording = 1,
.autopurge = 1,
}
};
#define MAXMINOR ARRAY_SIZE(sys_ser)
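/* "EOR" fence appended after each record; sizeof(FENCE) includes the NUL. */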
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
struct vmlogrdr_priv_t * logptr = path->private;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
}
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
struct vmlogrdr_priv_t * logptr = path->private;
u8 reason = (u8) ipuser[8];
pr_err("vmlogrdr: connection severed with reason %i\n", reason);
iucv_path_sever(path, NULL);
kfree(path);
logptr->path = NULL;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 0;
logptr->iucv_path_severed = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
/* just in case we're sleeping waiting for a record */
wake_up_interruptible(&read_wait_queue);
}
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct vmlogrdr_priv_t * logptr = path->private;
/*
* This function is the bottom half so it should be quick.
* Copy the external interrupt data into our local eib and increment
* the usage count
*/
spin_lock(&logptr->priv_lock);
memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
atomic_inc(&logptr->receive_ready);
spin_unlock(&logptr->priv_lock);
wake_up_interruptible(&read_wait_queue);
}
static int vmlogrdr_get_recording_class_AB(void)
{
static const char cp_command[] = "QUERY COMMAND RECORDING ";
char cp_response[80];
char *tail;
int len,i;
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
len = strnlen(cp_response,sizeof(cp_response));
// now the parsing
tail=strnchr(cp_response,len,'=');
if (!tail)
return 0;
tail++;
if (!strncmp("ANY",tail,3))
return 1;
if (!strncmp("NONE",tail,4))
return 0;
/*
 * expect a comma-separated list of classes here; if one of them
 * is A or B return 1, otherwise 0
*/
for (i=tail-cp_response; i<len; i++)
if ( cp_response[i]=='A' || cp_response[i]=='B' )
return 1;
return 0;
}
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
int action, int purge)
{
char cp_command[80];
char cp_response[160];
char *onoff, *qid_string;
int rc;
onoff = ((action == 1) ? "ON" : "OFF");
qid_string = ((recording_class_AB == 1) ? " QID * " : "");
/*
 * The recording command needs to be called with option QID
 * for guests that have privilege classes A or B.
 * Purging has to be done as a separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
*/
if (purge && (action == 1)) {
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
logptr->recording_name,
onoff,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
/* The recording command will usually answer with 'Command complete'
* on success, but when the specific service was never connected
* before then there might be an additional informational message
* 'HCPCRC8072I Recording entry not found' before the
 * 'Command complete'. So I use strstr rather than strncmp.
*/
if (strstr(cp_response,"Command complete"))
rc = 0;
else
rc = -EIO;
/*
* If we turn recording off, we have to purge any remaining records
* afterwards, as a large number of queued records may impact z/VM
* performance.
*/
if (purge && (action == 0)) {
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
return rc;
}
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
int dev_num = 0;
struct vmlogrdr_priv_t * logptr = NULL;
int connect_rc = 0;
int ret;
dev_num = iminor(inode);
if (dev_num >= MAXMINOR)
return -ENODEV;
logptr = &sys_ser[dev_num];
/*
 * only allow the device to be opened for blocking reads
*/
if (filp->f_flags & O_NONBLOCK)
return -EOPNOTSUPP;
/* Be sure this device hasn't already been opened */
spin_lock_bh(&logptr->priv_lock);
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
return -EBUSY;
}
logptr->dev_in_use = 1;
logptr->connection_established = 0;
logptr->iucv_path_severed = 0;
atomic_set(&logptr->receive_ready, 0);
logptr->buffer_free = 1;
spin_unlock_bh(&logptr->priv_lock);
/* set the file options */
filp->private_data = logptr;
/* start recording for this service*/
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
if (ret)
pr_warn("vmlogrdr: failed to start recording automatically\n");
}
/* create connection to the system service */
logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
if (!logptr->path)
goto out_dev;
connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
logptr->system_service, NULL, NULL,
logptr);
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
logptr->system_service, connect_rc);
goto out_path;
}
/* We've issued the connect and now we must wait for a
 * ConnectionComplete or ConnectionSevered interrupt
* before we can continue to process.
*/
wait_event(conn_wait_queue, (logptr->connection_established)
|| (logptr->iucv_path_severed));
if (logptr->iucv_path_severed)
goto out_record;
nonseekable_open(inode, filp);
return 0;
out_record:
if (logptr->autorecording)
vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
kfree(logptr->path); /* kfree(NULL) is ok. */
logptr->path = NULL;
out_dev:
logptr->dev_in_use = 0;
return -EIO;
}
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
int ret;
struct vmlogrdr_priv_t * logptr = filp->private_data;
iucv_path_sever(logptr->path, NULL);
kfree(logptr->path);
logptr->path = NULL;
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
pr_warn("vmlogrdr: failed to stop recording automatically\n");
}
logptr->dev_in_use = 0;
return 0;
}
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
int rc, *temp;
/* we need to keep track of two data sizes here:
* The number of bytes we need to receive from iucv and
* the total number of bytes we actually write into the buffer.
*/
int user_data_count, iucv_data_count;
char * buffer;
if (atomic_read(&priv->receive_ready)) {
spin_lock_bh(&priv->priv_lock);
if (priv->residual_length){
/* receive second half of a record */
iucv_data_count = priv->residual_length;
user_data_count = 0;
buffer = priv->buffer;
} else {
/* receive a new record:
* We need to return the total length of the record
* + size of FENCE in the first 4 bytes of the buffer.
*/
iucv_data_count = priv->local_interrupt_buffer.length;
user_data_count = sizeof(int);
temp = (int*)priv->buffer;
*temp= iucv_data_count + sizeof(FENCE);
buffer = priv->buffer + sizeof(int);
}
/*
* If the record is bigger than our buffer, we receive only
* a part of it. We can get the rest later.
*/
if (iucv_data_count > NET_BUFFER_SIZE)
iucv_data_count = NET_BUFFER_SIZE;
rc = iucv_message_receive(priv->path,
&priv->local_interrupt_buffer,
0, buffer, iucv_data_count,
&priv->residual_length);
spin_unlock_bh(&priv->priv_lock);
/* An rc of 5 indicates that the record was bigger than
* the buffer, which is OK for us. A 9 indicates that the
 * record was purged before we could receive it.
*/
if (rc == 5)
rc = 0;
if (rc == 9)
atomic_set(&priv->receive_ready, 0);
} else {
rc = 1;
}
if (!rc) {
priv->buffer_free = 0;
user_data_count += iucv_data_count;
priv->current_position = priv->buffer;
if (priv->residual_length == 0){
/* the whole record has been captured,
* now add the fence */
atomic_dec(&priv->receive_ready);
buffer = priv->buffer + user_data_count;
memcpy(buffer, FENCE, sizeof(FENCE));
user_data_count += sizeof(FENCE);
}
priv->remaining = user_data_count;
}
return rc;
}
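/*
 * Blocking read: refill the record buffer via IUCV whenever it has been fully
 * consumed, then copy at most the remainder of the current record to user
 * space. The buffer is marked free again once the whole record, including the
 * fence, has been handed out.
 */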
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
size_t count, loff_t * ppos)
{
int rc;
struct vmlogrdr_priv_t * priv = filp->private_data;
while (priv->buffer_free) {
rc = vmlogrdr_receive_data(priv);
if (rc) {
rc = wait_event_interruptible(read_wait_queue,
atomic_read(&priv->receive_ready));
if (rc)
return rc;
}
}
/* copy only up to end of record */
if (count > priv->remaining)
count = priv->remaining;
if (copy_to_user(data, priv->current_position, count))
return -EFAULT;
*ppos += count;
priv->current_position += count;
priv->remaining -= count;
/* if all data has been transferred, set buffer free */
if (priv->remaining == 0)
priv->buffer_free = 1;
return count;
}
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autopurge=0;
break;
case '1':
priv->autopurge=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autopurge);
}
static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
vmlogrdr_autopurge_store);
static ssize_t vmlogrdr_purge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
char cp_command[80];
char cp_response[80];
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
if (buf[0] != '1')
return -EINVAL;
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
/*
* The recording command needs to be called with option QID
 * for guests that have privilege classes A or B.
* Other guests will not recognize the command and we have to
* issue the same command without the QID parameter.
*/
if (recording_class_AB)
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE QID * ",
priv->recording_name);
else
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE ",
priv->recording_name);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
return count;
}
static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autorecording=0;
break;
case '1':
priv->autorecording=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autorecording_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autorecording);
}
static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
vmlogrdr_autorecording_store);
static ssize_t vmlogrdr_recording_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret;
switch (buf[0]) {
case '0':
ret = vmlogrdr_recording(priv,0,0);
break;
case '1':
ret = vmlogrdr_recording(priv,1,0);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
else
return count;
}
static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
static ssize_t recording_status_show(struct device_driver *driver, char *buf)
{
static const char cp_command[] = "QUERY RECORDING ";
int len;
cpcmd(cp_command, buf, 4096, NULL);
len = strlen(buf);
return len;
}
static DRIVER_ATTR_RO(recording_status);
static struct attribute *vmlogrdr_drv_attrs[] = {
&driver_attr_recording_status.attr,
NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
&vmlogrdr_drv_attr_group,
NULL,
};
static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_autopurge.attr,
&dev_attr_purge.attr,
&dev_attr_autorecording.attr,
&dev_attr_recording.attr,
NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
&vmlogrdr_attr_group,
NULL,
};
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.groups = vmlogrdr_drv_attr_groups,
};
static int vmlogrdr_register_driver(void)
{
int ret;
/* Register with iucv driver */
ret = iucv_register(&vmlogrdr_iucv_handler, 1);
if (ret)
goto out;
ret = driver_register(&vmlogrdr_driver);
if (ret)
goto out_iucv;
vmlogrdr_class = class_create("vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
goto out_driver;
}
return 0;
out_driver:
driver_unregister(&vmlogrdr_driver);
out_iucv:
iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
return ret;
}
static void vmlogrdr_unregister_driver(void)
{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
struct device *dev;
int ret;
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (dev) {
dev_set_name(dev, "%s", priv->internal_name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
dev->groups = vmlogrdr_attr_groups;
dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
 * module has been unloaded. Its _only_ task is to
* free the struct. Therefore, we specify kfree()
* directly here. (Probably a little bit obfuscating
 * but legitimate ...).
*/
dev->release = (void (*)(struct device *))kfree;
} else
return -ENOMEM;
ret = device_register(dev);
if (ret) {
put_device(dev);
return ret;
}
priv->class_device = device_create(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
device_unregister(dev);
return ret;
}
priv->device = dev;
return 0;
}
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
device_unregister(priv->device);
priv->device=NULL;
}
return 0;
}
static int vmlogrdr_register_cdev(dev_t dev)
{
int rc = 0;
vmlogrdr_cdev = cdev_alloc();
if (!vmlogrdr_cdev) {
return -ENOMEM;
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
if (!rc)
return 0;
// cleanup: cdev is not fully registered, no cdev_del here!
kobject_put(&vmlogrdr_cdev->kobj);
vmlogrdr_cdev=NULL;
return rc;
}
static void vmlogrdr_cleanup(void)
{
int i;
if (vmlogrdr_cdev) {
cdev_del(vmlogrdr_cdev);
vmlogrdr_cdev=NULL;
}
for (i=0; i < MAXMINOR; ++i ) {
vmlogrdr_unregister_device(&sys_ser[i]);
free_page((unsigned long)sys_ser[i].buffer);
}
vmlogrdr_unregister_driver();
if (vmlogrdr_major) {
unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
vmlogrdr_major=0;
}
}
static int __init vmlogrdr_init(void)
{
int rc;
int i;
dev_t dev;
if (! MACHINE_IS_VM) {
pr_err("not running under VM, driver not loaded.\n");
return -ENODEV;
}
recording_class_AB = vmlogrdr_get_recording_class_AB();
rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
if (rc)
return rc;
vmlogrdr_major = MAJOR(dev);
rc=vmlogrdr_register_driver();
if (rc)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
}
sys_ser[i].current_position = sys_ser[i].buffer;
rc=vmlogrdr_register_device(&sys_ser[i]);
if (rc)
break;
}
if (rc)
goto cleanup;
rc = vmlogrdr_register_cdev(dev);
if (rc)
goto cleanup;
return 0;
cleanup:
vmlogrdr_cleanup();
return rc;
}
static void __exit vmlogrdr_exit(void)
{
vmlogrdr_cleanup();
return;
}
module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);
| linux-master | drivers/s390/char/vmlogrdr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP early driver
*
* Copyright IBM Corp. 2013
*/
#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
#include <linux/memblock.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/facility.h>
#include "sclp_sdias.h"
#include "sclp.h"
static struct sclp_ipl_info sclp_ipl_info;
struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
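/*
 * Parse the cached read-info SCCB and populate the global sclp capability
 * structure as well as the saved IPL information.
 */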
static void __init sclp_early_facilities_detect(void)
{
struct sclp_core_entry *cpue;
struct read_info_sccb *sccb;
u16 boot_cpu_address, cpu;
sccb = sclp_early_get_info();
if (!sccb)
return;
sclp.facilities = sccb->facilities;
sclp.has_sprp = !!(sccb->fac84 & 0x02);
sclp.has_core_type = !!(sccb->fac84 & 0x01);
sclp.has_gsls = !!(sccb->fac85 & 0x80);
sclp.has_64bscao = !!(sccb->fac116 & 0x80);
sclp.has_cmma = !!(sccb->fac116 & 0x40);
sclp.has_esca = !!(sccb->fac116 & 0x08);
sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
sclp.has_aisii = !!(sccb->fac118 & 0x40);
sclp.has_aeni = !!(sccb->fac118 & 0x20);
sclp.has_aisi = !!(sccb->fac118 & 0x10);
sclp.has_zpci_lsi = !!(sccb->fac118 & 0x01);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
sclp.has_iplcc = !!(sccb->byte_134 & 0x02);
}
if (sccb->cpuoff > 137) {
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000);
}
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
sclp.ibc = sccb->ibc;
if (sccb->hamaxpow && sccb->hamaxpow < 64)
sclp.hamax = (1UL << sccb->hamaxpow) - 1;
else
sclp.hamax = U64_MAX;
if (!sccb->hcpua) {
if (MACHINE_IS_VM)
sclp.max_cores = 64;
else
sclp.max_cores = sccb->ncpurl;
} else {
sclp.max_cores = sccb->hcpua + 1;
}
boot_cpu_address = stap();
cpue = (void *)sccb + sccb->cpuoff;
for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
if (boot_cpu_address != cpue->core_id)
continue;
sclp.has_siif = cpue->siif;
sclp.has_sigpif = cpue->sigpif;
sclp.has_sief2 = cpue->sief2;
sclp.has_gpere = cpue->gpere;
sclp.has_ib = cpue->ib;
sclp.has_cei = cpue->cei;
sclp.has_skey = cpue->skey;
break;
}
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
if (sccb->fac91 & 0x2)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
if (sccb->hsa_size)
sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE;
sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
sclp.hmfai = sccb->hmfai;
sclp.has_dirq = !!(sccb->cpudirq & 0x80);
}
/*
* This function will be called after sclp_early_facilities_detect(), which gets
* called from early.c code. The sclp_early_facilities_detect() function retrieves
* and saves the IPL information.
*/
void __init sclp_early_get_ipl_info(struct sclp_ipl_info *info)
{
*info = sclp_ipl_info;
}
int __init sclp_early_get_core_info(struct sclp_core_info *info)
{
struct read_cpu_info_sccb *sccb;
int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
int rc = 0;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
sccb = memblock_alloc_low(length, PAGE_SIZE);
if (!sccb)
return -ENOMEM;
memset(sccb, 0, length);
sccb->header.length = length;
sccb->header.control_mask[2] = 0x80;
if (sclp_early_cmd(SCLP_CMDW_READ_CPU_INFO, sccb)) {
rc = -EIO;
goto out;
}
if (sccb->header.response_code != 0x0010) {
rc = -EIO;
goto out;
}
sclp_fill_core_info(info, sccb);
out:
memblock_free(sccb, length);
return rc;
}
static void __init sclp_early_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
return;
if (sclp_early_con_check_vt220(sccb))
sclp.has_vt220 = 1;
if (sclp_early_con_check_linemode(sccb))
sclp.has_linemode = 1;
}
void __init __no_sanitize_address sclp_early_adjust_va(void)
{
sclp_early_sccb = __va((unsigned long)sclp_early_sccb);
}
void __init sclp_early_detect(void)
{
void *sccb = sclp_early_sccb;
sclp_early_facilities_detect();
/*
* Turn off SCLP event notifications. Also save remote masks in the
* sccb. These are sufficient to detect sclp console capabilities.
*/
sclp_early_set_event_mask(sccb, 0, 0);
sclp_early_console_detect(sccb);
}
| linux-master | drivers/s390/char/sclp_early.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SE/HMC Drive (Read) Cache Functions
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#define HMCDRV_CACHE_TIMEOUT 30 /* aging timeout in seconds */
/**
* struct hmcdrv_cache_entry - file cache (only used on read/dir)
* @id: FTP command ID
* @content: kernel-space buffer, 4k aligned
* @len: size of @content cache (0 if caching disabled)
* @ofs: start of content within file (-1 if no cached content)
* @fname: file name
* @fsize: file size
* @timeout: cache timeout in jiffies
*
* Notice that the first three members (id, fname, fsize) are cached on all
* read/dir requests. But content is cached only under some preconditions.
* Uncached content is signalled by a negative value of @ofs.
*/
struct hmcdrv_cache_entry {
enum hmcdrv_ftp_cmdid id;
char fname[HMCDRV_FTP_FIDENT_MAX];
size_t fsize;
loff_t ofs;
unsigned long timeout;
void *content;
size_t len;
};
static int hmcdrv_cache_order; /* cache allocated page order */
static struct hmcdrv_cache_entry hmcdrv_cache_file = {
.fsize = SIZE_MAX,
.ofs = -1,
.len = 0,
.fname = {'\0'}
};
/**
* hmcdrv_cache_get() - looks for file data/content in read cache
* @ftp: pointer to FTP command specification
*
* Return: number of bytes read from cache or a negative number if nothing
* in content cache (for the file/cmd specified in @ftp)
*/
static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
{
loff_t pos; /* position in cache (signed) */
ssize_t len;
if ((ftp->id != hmcdrv_cache_file.id) ||
strcmp(hmcdrv_cache_file.fname, ftp->fname))
return -1;
if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
return 0;
if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
time_after(jiffies, hmcdrv_cache_file.timeout))
return -1;
/* there seems to be cached content - calculate the maximum number
* of bytes that can be returned (regarding file size and offset)
*/
len = hmcdrv_cache_file.fsize - ftp->ofs;
if (len > ftp->len)
len = ftp->len;
/* check if the requested chunk falls into our cache (which starts
* at offset 'hmcdrv_cache_file.ofs' in the file of interest)
*/
pos = ftp->ofs - hmcdrv_cache_file.ofs;
if ((pos >= 0) &&
((pos + len) <= hmcdrv_cache_file.len)) {
memcpy(ftp->buf,
hmcdrv_cache_file.content + pos,
len);
pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
hmcdrv_cache_file.fname, len,
hmcdrv_cache_file.fsize);
return len;
}
return -1;
}
/**
* hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
* @ftp: pointer to FTP command specification
* @func: FTP transfer function to be used
*
* Return: number of bytes read/written or a (negative) error code
*/
static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
hmcdrv_cache_ftpfunc func)
{
ssize_t len;
/* only cache content if the read/dir cache really exists
* (hmcdrv_cache_file.len > 0), is large enough to handle the
* request (hmcdrv_cache_file.len >= ftp->len) and there is a need
* to do so (ftp->len > 0)
*/
if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
/* because the cache is not located at ftp->buf, we have to
* assemble a new HMC drive FTP cmd specification (pointing
* to our cache, and using the increased size)
*/
struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
cftp.buf = hmcdrv_cache_file.content; /* and update */
cftp.len = hmcdrv_cache_file.len; /* buffer data */
len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
if (len > 0) {
pr_debug("caching %zd bytes content for '%s'\n",
len, ftp->fname);
if (len > ftp->len)
len = ftp->len;
hmcdrv_cache_file.ofs = ftp->ofs;
hmcdrv_cache_file.timeout = jiffies +
HMCDRV_CACHE_TIMEOUT * HZ;
memcpy(ftp->buf, hmcdrv_cache_file.content, len);
}
} else {
len = func(ftp, &hmcdrv_cache_file.fsize);
hmcdrv_cache_file.ofs = -1; /* invalidate content */
}
if (len > 0) {
/* cache some file info (FTP command, file name and file
* size) unconditionally
*/
strscpy(hmcdrv_cache_file.fname, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
hmcdrv_cache_file.id = ftp->id;
pr_debug("caching cmd %d, file size %zu for '%s'\n",
ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
}
return len;
}
/**
* hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
* @ftp: pointer to FTP command specification
* @func: FTP transfer function to be used
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure exclusive execution.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
hmcdrv_cache_ftpfunc func)
{
ssize_t len;
if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
(ftp->id == HMCDRV_FTP_NLIST) ||
(ftp->id == HMCDRV_FTP_GET)) {
len = hmcdrv_cache_get(ftp);
if (len >= 0) /* got it from cache ? */
return len; /* yes */
len = hmcdrv_cache_do(ftp, func);
if (len >= 0)
return len;
} else {
len = func(ftp, NULL); /* simply do original command */
}
/* invalidate the (read) cache in case there was a write operation
* or an error on read/dir
*/
hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
hmcdrv_cache_file.fsize = LLONG_MAX;
hmcdrv_cache_file.ofs = -1;
return len;
}
/**
* hmcdrv_cache_startup() - startup of HMC drive cache
* @cachesize: cache size
*
* Return: 0 on success, else a (negative) error code
*/
int hmcdrv_cache_startup(size_t cachesize)
{
if (cachesize > 0) { /* perform caching ? */
hmcdrv_cache_order = get_order(cachesize);
hmcdrv_cache_file.content =
(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
hmcdrv_cache_order);
if (!hmcdrv_cache_file.content) {
pr_err("Allocating the requested cache size of %zu bytes failed\n",
cachesize);
return -ENOMEM;
}
pr_debug("content cache enabled, size is %zu bytes\n",
cachesize);
}
hmcdrv_cache_file.len = cachesize;
return 0;
}
/**
* hmcdrv_cache_shutdown() - shutdown of HMC drive cache
*/
void hmcdrv_cache_shutdown(void)
{
if (hmcdrv_cache_file.content) {
free_pages((unsigned long) hmcdrv_cache_file.content,
hmcdrv_cache_order);
hmcdrv_cache_file.content = NULL;
}
hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
hmcdrv_cache_file.fsize = LLONG_MAX;
hmcdrv_cache_file.ofs = -1;
hmcdrv_cache_file.len = 0; /* no cache */
}
| linux-master | drivers/s390/char/hmcdrv_cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2015
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/facility.h>
#include "sclp.h"
#include "sclp_rw.h"
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
char *__bootdata_preserved(sclp_early_sccb);
int sclp_init_state = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
 * only supports an event mask size of 4 bytes and needs a workaround.
*/
bool sclp_mask_compat_mode;
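/*
 * Wait for the SCLP service-signal external interrupt: enable only the
 * service-signal subclass in control register 0, install a temporary external
 * new PSW, load an enabled wait PSW and loop until the service signal
 * arrives. Control register 0 and the external new PSW are restored before
 * returning.
 */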
void sclp_early_wait_irq(void)
{
unsigned long psw_mask, addr;
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
__ctl_store(cr0.val, 0, 0);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
__ctl_load(cr0_new.val, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
do {
asm volatile(
" larl %[addr],0f\n"
" stg %[addr],%[psw_wait_addr]\n"
" stg %[addr],%[psw_ext_addr]\n"
" lpswe %[psw_wait]\n"
"0:\n"
: [addr] "=&d" (addr),
[psw_wait_addr] "=Q" (psw_wait.addr),
[psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
: [psw_wait] "Q" (psw_wait)
: "cc", "memory");
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
__ctl_load(cr0.val, 0, 0);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
{
unsigned long flags;
int rc;
flags = arch_local_irq_save();
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
sclp_early_wait_irq();
out:
arch_local_irq_restore(flags);
return rc;
}
struct write_sccb {
struct sccb_header header;
struct msg_buf msg;
} __packed;
/* Output multi-line text using SCLP Message interface. */
static void sclp_early_print_lm(const char *str, unsigned int len)
{
unsigned char *ptr, *end, ch;
unsigned int count, offset;
struct write_sccb *sccb;
struct msg_buf *msg;
struct mdb *mdb;
struct mto *mto;
struct go *go;
sccb = (struct write_sccb *) sclp_early_sccb;
end = (unsigned char *) sccb + EARLY_SCCB_SIZE - 1;
memset(sccb, 0, sizeof(*sccb));
ptr = (unsigned char *) &sccb->msg.mdb.mto;
offset = 0;
do {
for (count = sizeof(*mto); offset < len; count++) {
ch = str[offset++];
if ((ch == 0x0a) || (ptr + count > end))
break;
ptr[count] = _ascebc[ch];
}
mto = (struct mto *) ptr;
memset(mto, 0, sizeof(*mto));
mto->length = count;
mto->type = 4;
mto->line_type_flags = LNTPFLGS_ENDTEXT;
ptr += count;
} while ((offset < len) && (ptr + sizeof(*mto) <= end));
len = ptr - (unsigned char *) sccb;
sccb->header.length = len - offsetof(struct write_sccb, header);
msg = &sccb->msg;
msg->header.type = EVTYP_MSG;
msg->header.length = len - offsetof(struct write_sccb, msg.header);
mdb = &msg->mdb;
mdb->header.type = 1;
mdb->header.tag = 0xD4C4C240;
mdb->header.revision_code = 1;
mdb->header.length = len - offsetof(struct write_sccb, msg.mdb.header);
go = &mdb->go;
go->length = sizeof(*go);
go->type = 1;
sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
}
struct vt220_sccb {
struct sccb_header header;
struct {
struct evbuf_header header;
char data[];
} msg;
} __packed;
/* Output multi-line text using SCLP VT220 interface. */
static void sclp_early_print_vt220(const char *str, unsigned int len)
{
struct vt220_sccb *sccb;
sccb = (struct vt220_sccb *) sclp_early_sccb;
if (sizeof(*sccb) + len >= EARLY_SCCB_SIZE)
len = EARLY_SCCB_SIZE - sizeof(*sccb);
memset(sccb, 0, sizeof(*sccb));
memcpy(&sccb->msg.data, str, len);
sccb->header.length = sizeof(*sccb) + len;
sccb->msg.header.length = sizeof(sccb->msg) + len;
sccb->msg.header.type = EVTYP_VT220MSG;
sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
}
int sclp_early_set_event_mask(struct init_sccb *sccb,
sccb_mask_t receive_mask,
sccb_mask_t send_mask)
{
retry:
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
if (sclp_mask_compat_mode)
sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
else
sccb->mask_length = sizeof(sccb_mask_t);
sccb_set_recv_mask(sccb, receive_mask);
sccb_set_send_mask(sccb, send_mask);
if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
return -EIO;
if ((sccb->header.response_code == 0x74f0) && !sclp_mask_compat_mode) {
sclp_mask_compat_mode = true;
goto retry;
}
if (sccb->header.response_code != 0x20)
return -EIO;
return 0;
}
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
{
if (!(sccb_get_sclp_send_mask(sccb) & EVTYP_OPCMD_MASK))
return 0;
if (!(sccb_get_sclp_recv_mask(sccb) & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb)
{
if (sccb_get_sclp_send_mask(sccb) & EVTYP_VT220MSG_MASK)
return 1;
return 0;
}
static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
{
unsigned long receive_mask, send_mask;
struct init_sccb *sccb;
int rc;
BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
*have_linemode = *have_vt220 = 0;
sccb = (struct init_sccb *) sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
if (rc)
return rc;
*have_linemode = sclp_early_con_check_linemode(sccb);
*have_vt220 = !!(sccb_get_send_mask(sccb) & EVTYP_VT220MSG_MASK);
return rc;
}
void sclp_early_set_buffer(void *sccb)
{
sclp_early_sccb = sccb;
}
/*
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
void __sclp_early_printk(const char *str, unsigned int len)
{
int have_linemode, have_vt220;
if (sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
if (have_linemode)
sclp_early_print_lm(str, len);
if (have_vt220)
sclp_early_print_vt220(str, len);
sclp_early_setup(1, &have_linemode, &have_vt220);
}
void sclp_early_printk(const char *str)
{
__sclp_early_printk(str, strlen(str));
}
/*
* Use sclp_emergency_printk() to print a string when the system is in a
* state where regular console drivers cannot be assumed to work anymore.
*
* Callers must make sure that no concurrent SCLP requests are outstanding
* and all other CPUs are stopped, or at least disabled for external
* interrupts.
*/
void sclp_emergency_printk(const char *str)
{
int have_linemode, have_vt220;
unsigned int len;
len = strlen(str);
/*
* Don't care about return values; if requests fail, just ignore and
* continue to have a rather high chance that anything is printed.
*/
sclp_early_setup(0, &have_linemode, &have_vt220);
sclp_early_print_lm(str, len);
sclp_early_print_vt220(str, len);
sclp_early_setup(1, &have_linemode, &have_vt220);
}
/*
* We can't pass sclp_info_sccb to sclp_early_cmd() here directly,
 * because it might not fulfil the requirements for a SCLP communication buffer:
* - lie below 2G in memory
* - be page-aligned
* Therefore, we use the buffer sclp_early_sccb (which fulfils all those
* requirements) temporarily for communication and copy a received response
* back into the buffer sclp_info_sccb upon successful completion.
*/
int __init sclp_early_read_info(void)
{
int i;
int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
struct read_info_sccb *sccb = (struct read_info_sccb *)sclp_early_sccb;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
memset(sccb, 0, length);
sccb->header.length = length;
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10) {
memcpy(&sclp_info_sccb, sccb, length);
sclp_info_sccb_valid = 1;
return 0;
}
if (sccb->header.response_code != 0x1f0)
break;
}
return -EIO;
}
struct read_info_sccb * __init sclp_early_get_info(void)
{
if (!sclp_info_sccb_valid)
return NULL;
return &sclp_info_sccb;
}
int __init sclp_early_get_memsize(unsigned long *mem)
{
unsigned long rnmax;
unsigned long rnsize;
struct read_info_sccb *sccb = &sclp_info_sccb;
if (!sclp_info_sccb_valid)
return -EIO;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rnsize <<= 20;
*mem = rnsize * rnmax;
return 0;
}
int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
{
if (!sclp_info_sccb_valid)
return -EIO;
*hsa_size = 0;
if (sclp_info_sccb.hsa_size)
*hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE;
return 0;
}
#define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
void __weak __init add_physmem_online_range(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
int rc, id, max_id = 0;
unsigned long rn, rzm;
sclp_cmdw_t command;
u16 sn;
if (!sclp_info_sccb_valid)
return -EIO;
if (!(sclp_info_sccb.facilities & SCLP_STORAGE_INFO_FACILITY))
return -EOPNOTSUPP;
rzm = sclp_info_sccb.rnsize ?: sclp_info_sccb.rnsize2;
rzm <<= 20;
for (id = 0; id <= max_id; id++) {
memset(sclp_early_sccb, 0, EARLY_SCCB_SIZE);
sccb->header.length = EARLY_SCCB_SIZE;
command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
rc = sclp_early_cmd(command, sccb);
if (rc)
goto fail;
max_id = sccb->max_id;
switch (sccb->header.response_code) {
case 0x0010:
for (sn = 0; sn < sccb->assigned; sn++) {
if (!sccb->entries[sn])
continue;
rn = sccb->entries[sn] >> 16;
add_physmem_online_range((rn - 1) * rzm, rn * rzm);
}
break;
case 0x0310:
case 0x0410:
break;
default:
goto fail;
}
}
return 0;
fail:
physmem_info.range_count = 0;
return -EIO;
}
| linux-master | drivers/s390/char/sclp_early_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3480/3490 tapes.
*
* Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <[email protected]>
* Tuan Ngo-Anh <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#define KMSG_COMPONENT "tape_34xx"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#define TAPE_DBF_AREA tape_34xx_dbf
#include "tape.h"
#include "tape_std.h"
/*
* Pointer to debug area.
*/
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);
#define TAPE34XX_FMT_3480 0
#define TAPE34XX_FMT_3480_2_XF 1
#define TAPE34XX_FMT_3480_XF 2
struct tape_34xx_block_id {
unsigned int wrap : 1;
unsigned int segment : 7;
unsigned int format : 2;
unsigned int block : 22;
};
/*
 * A list of block IDs is used to seek blocks faster.
*/
struct tape_34xx_sbid {
struct list_head list;
struct tape_34xx_block_id bid;
};
static void tape_34xx_delete_sbid_from(struct tape_device *, int);
/*
* Medium sense for 34xx tapes. There is no 'real' medium sense call.
* So we just do a normal sense.
*/
static void __tape_34xx_medium_sense(struct tape_request *request)
{
struct tape_device *device = request->device;
unsigned char *sense;
if (request->rc == 0) {
sense = request->cpdata;
/*
* This isn't quite correct. But since INTERVENTION_REQUIRED
* means that the drive is 'neither ready nor on-line' it is
* only slightly inaccurate to say there is no tape loaded if
* the drive isn't online...
*/
if (sense[0] & SENSE_INTERVENTION_REQUIRED)
tape_med_state_set(device, MS_UNLOADED);
else
tape_med_state_set(device, MS_LOADED);
if (sense[1] & SENSE_WRITE_PROTECT)
device->tape_generic_status |= GMT_WR_PROT(~0);
else
device->tape_generic_status &= ~GMT_WR_PROT(~0);
} else
DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
request->rc);
tape_free_request(request);
}
static int tape_34xx_medium_sense(struct tape_device *device)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(1, 32);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "MSEN fail\n");
return PTR_ERR(request);
}
request->op = TO_MSEN;
tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
rc = tape_do_io_interruptible(device, request);
__tape_34xx_medium_sense(request);
return rc;
}
static void tape_34xx_medium_sense_async(struct tape_device *device)
{
struct tape_request *request;
request = tape_alloc_request(1, 32);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "MSEN fail\n");
return;
}
request->op = TO_MSEN;
tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
request->callback = (void *) __tape_34xx_medium_sense;
request->callback_data = NULL;
tape_do_io_async(device, request);
}
struct tape_34xx_work {
struct tape_device *device;
enum tape_op op;
struct work_struct work;
};
/*
* These functions are currently used only to schedule a medium_sense for
* later execution. This is because we get an interrupt whenever a medium
* is inserted but cannot call tape_do_io* from an interrupt context.
* Maybe that's useful for other actions we want to start from the
* interrupt handler.
* Note: the work handler is called by the system work queue. The tape
 * commands started by the handler need to be asynchronous, otherwise
* a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
static void
tape_34xx_work_handler(struct work_struct *work)
{
struct tape_34xx_work *p =
container_of(work, struct tape_34xx_work, work);
struct tape_device *device = p->device;
switch(p->op) {
case TO_MSEN:
tape_34xx_medium_sense_async(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
}
tape_put_device(device);
kfree(p);
}
static int
tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
struct tape_34xx_work *p;
if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
INIT_WORK(&p->work, tape_34xx_work_handler);
p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
return 0;
}
/*
* Done Handler is called when dev stat = DEVICE-END (successful operation)
*/
static inline int
tape_34xx_done(struct tape_request *request)
{
DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
switch (request->op) {
case TO_DSE:
case TO_RUN:
case TO_WRI:
case TO_WTM:
case TO_ASSIGN:
case TO_UNASSIGN:
tape_34xx_delete_sbid_from(request->device, 0);
break;
default:
;
}
return TAPE_IO_SUCCESS;
}
static inline int
tape_34xx_erp_failed(struct tape_request *request, int rc)
{
DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
tape_op_verbose[request->op], rc);
return rc;
}
static inline int
tape_34xx_erp_succeeded(struct tape_request *request)
{
DBF_EVENT(3, "Error Recovery successful for %s\n",
tape_op_verbose[request->op]);
return tape_34xx_done(request);
}
static inline int
tape_34xx_erp_retry(struct tape_request *request)
{
DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
return TAPE_IO_RETRY;
}
/*
 * This function is called when no request is outstanding and we get an
* interrupt
*/
static int
tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
{
if (irb->scsw.cmd.dstat == 0x85) { /* READY */
/* A medium was inserted in the drive. */
DBF_EVENT(6, "xuud med\n");
tape_34xx_delete_sbid_from(device, 0);
tape_34xx_schedule_work(device, TO_MSEN);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
tape_dump_sense_dbf(device, NULL, irb);
}
return TAPE_IO_SUCCESS;
}
/*
* Read Opposite Error Recovery Function:
* Used, when Read Forward does not work
*/
static int
tape_34xx_erp_read_opposite(struct tape_device *device,
struct tape_request *request)
{
if (request->op == TO_RFO) {
/*
* We did read forward, but the data could not be read
* *correctly*. We transform the request to a read backward
* and try again.
*/
tape_std_read_backward(device, request);
return tape_34xx_erp_retry(request);
}
/*
* We tried to read forward and backward, but had no
* success -> failed.
*/
return tape_34xx_erp_failed(request, -EIO);
}
static int
tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
struct irb *irb, int no)
{
if (request->op != TO_ASSIGN) {
dev_err(&device->cdev->dev, "An unexpected condition %d "
"occurred in tape error recovery\n", no);
tape_dump_sense_dbf(device, request, irb);
}
return tape_34xx_erp_failed(request, -EIO);
}
/*
* Handle data overrun between cu and drive. The channel speed might
* be too slow.
*/
static int
tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (irb->ecw[3] == 0x40) {
dev_warn (&device->cdev->dev, "A data overrun occurred between"
" the control unit and tape unit\n");
return tape_34xx_erp_failed(request, -EIO);
}
return tape_34xx_erp_bug(device, request, irb, -1);
}
/*
* Handle record sequence error.
*/
static int
tape_34xx_erp_sequence(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
if (irb->ecw[3] == 0x41) {
/*
* cu detected incorrect block-id sequence on tape.
*/
dev_warn (&device->cdev->dev, "The block ID sequence on the "
"tape is incorrect\n");
return tape_34xx_erp_failed(request, -EIO);
}
/*
* Record sequence error bit is set, but erpa does not
* show record sequence error.
*/
return tape_34xx_erp_bug(device, request, irb, -2);
}
/*
* This function analyses the tape's sense-data in case of a unit-check.
* If possible, it tries to recover from the error. Otherwise the user
* is informed about the problem.
*/
static int
tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
int inhibit_cu_recovery;
__u8* sense;
inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
sense = irb->ecw;
if (
sense[0] & SENSE_COMMAND_REJECT &&
sense[1] & SENSE_WRITE_PROTECT
) {
if (
request->op == TO_DSE ||
request->op == TO_WRI ||
request->op == TO_WTM
) {
/* medium is write protected */
return tape_34xx_erp_failed(request, -EACCES);
} else {
return tape_34xx_erp_bug(device, request, irb, -3);
}
}
/*
* Special cases for various tape-states when reaching
* end of recorded area
*
* FIXME: Maybe a special case of the special case:
* sense[0] == SENSE_EQUIPMENT_CHECK &&
* sense[1] == SENSE_DRIVE_ONLINE &&
* sense[3] == 0x47 (Volume Fenced)
*
* This was caused by continued FSF or FSR after an
* 'End Of Data'.
*/
if ((
sense[0] == SENSE_DATA_CHECK ||
sense[0] == SENSE_EQUIPMENT_CHECK ||
sense[0] == (SENSE_EQUIPMENT_CHECK | SENSE_DEFERRED_UNIT_CHECK)
) && (
sense[1] == SENSE_DRIVE_ONLINE ||
sense[1] == (SENSE_BEGINNING_OF_TAPE | SENSE_WRITE_MODE)
)) {
switch (request->op) {
/*
* sense[0] == SENSE_DATA_CHECK &&
* sense[1] == SENSE_DRIVE_ONLINE
* sense[3] == 0x36 (End Of Data)
*
* Further seeks might return a 'Volume Fenced'.
*/
case TO_FSF:
case TO_FSB:
/* Trying to seek beyond end of recorded area */
return tape_34xx_erp_failed(request, -ENOSPC);
case TO_BSB:
return tape_34xx_erp_retry(request);
/*
* sense[0] == SENSE_DATA_CHECK &&
* sense[1] == SENSE_DRIVE_ONLINE &&
* sense[3] == 0x36 (End Of Data)
*/
case TO_LBL:
/* Block could not be located. */
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case TO_RFO:
/* Read beyond end of recorded area -> 0 bytes read */
return tape_34xx_erp_failed(request, 0);
/*
* sense[0] == SENSE_EQUIPMENT_CHECK &&
* sense[1] == SENSE_DRIVE_ONLINE &&
* sense[3] == 0x38 (Physical End Of Volume)
*/
case TO_WRI:
/* Writing at physical end of volume */
return tape_34xx_erp_failed(request, -ENOSPC);
default:
return tape_34xx_erp_failed(request, 0);
}
}
/* Sensing special bits */
if (sense[0] & SENSE_BUS_OUT_CHECK)
return tape_34xx_erp_retry(request);
if (sense[0] & SENSE_DATA_CHECK) {
/*
* hardware failure, damaged tape or improper
* operating conditions
*/
switch (sense[3]) {
case 0x23:
/* a read data check occurred */
if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
inhibit_cu_recovery)
/*
* data check is not permanent, may be
* recovered. We always use async-mode with
* cu-recovery, so this should *never* happen.
*/
return tape_34xx_erp_bug(device, request,
irb, -4);
/* data check is permanent, CU recovery has failed */
dev_warn (&device->cdev->dev, "A read error occurred "
"that cannot be recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x25:
/* a write data check occurred */
if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
inhibit_cu_recovery)
/*
* data check is not permanent, may be
* recovered. We always use async-mode with
* cu-recovery, so this should *never* happen.
*/
return tape_34xx_erp_bug(device, request,
irb, -5);
/* data check is permanent, cu-recovery has failed */
dev_warn (&device->cdev->dev, "A write error on the "
"tape cannot be recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x26:
/* Data Check (read opposite) occurred. */
return tape_34xx_erp_read_opposite(device, request);
case 0x28:
/* ID-Mark at tape start couldn't be written */
dev_warn (&device->cdev->dev, "Writing the ID-mark "
"failed\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x31:
/* Tape void. Tried to read beyond end of device. */
dev_warn (&device->cdev->dev, "Reading the tape beyond"
" the end of the recorded area failed\n");
return tape_34xx_erp_failed(request, -ENOSPC);
case 0x41:
/* Record sequence error. */
dev_warn (&device->cdev->dev, "The tape contains an "
"incorrect block ID sequence\n");
return tape_34xx_erp_failed(request, -EIO);
default:
/* all data checks for 3480 should result in one of
* the above erpa-codes. For 3490, other data-check
* conditions do exist. */
if (device->cdev->id.driver_info == tape_3480)
return tape_34xx_erp_bug(device, request,
irb, -6);
}
}
if (sense[0] & SENSE_OVERRUN)
return tape_34xx_erp_overrun(device, request, irb);
if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
return tape_34xx_erp_sequence(device, request, irb);
/* Sensing erpa codes */
switch (sense[3]) {
case 0x00:
/* Unit check with erpa code 0. Report and ignore. */
return TAPE_IO_SUCCESS;
case 0x21:
/*
* Data streaming not operational. CU will switch to
* interlock mode. Reissue the command.
*/
return tape_34xx_erp_retry(request);
case 0x22:
/*
* Path equipment check. Might be drive adapter error, buffer
* error on the lower interface, internal path not usable,
* or error during cartridge load.
*/
dev_warn (&device->cdev->dev, "A path equipment check occurred"
" for the tape device\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x24:
/*
* Load display check. A load display command was issued,
* but the drive is displaying a drive check message. Can
* be treated as "device end".
*/
return tape_34xx_erp_succeeded(request);
case 0x27:
/*
* Command reject. May indicate illegal channel program or
* buffer over/underrun. Since all channel programs are
* issued by this driver and ought to be correct, we assume an
* over/underrun situation and retry the channel program.
*/
return tape_34xx_erp_retry(request);
case 0x29:
/*
* Function incompatible. Either the tape is IDRC compressed
* but the hardware is not capable of IDRC, or a perform
* subsystem function was issued and the CU is not online.
*/
return tape_34xx_erp_failed(request, -EIO);
case 0x2a:
/*
* Unsolicited environmental data. An internal counter
* overflowed; we can ignore this and reissue the command.
*/
return tape_34xx_erp_retry(request);
case 0x2b:
/*
* Environmental data present. Indicates either unload
* completed ok or read buffered log command completed ok.
*/
if (request->op == TO_RUN) {
/* Rewind unload completed ok. */
tape_med_state_set(device, MS_UNLOADED);
return tape_34xx_erp_succeeded(request);
}
/* tape_34xx doesn't use read buffered log commands. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x2c:
/*
* Permanent equipment check. CU has tried recovery, but
* did not succeed.
*/
return tape_34xx_erp_failed(request, -EIO);
case 0x2d:
/* Data security erase failure. */
if (request->op == TO_DSE)
return tape_34xx_erp_failed(request, -EIO);
/* Data security erase failure, but no such command issued. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x2e:
/*
* Not capable. This indicates either that the drive fails
* reading the format id mark or that format specified
* is not supported by the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit cannot process "
"the tape format\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
case 0x30:
/* The medium is write protected. */
dev_warn (&device->cdev->dev, "The tape medium is write-"
"protected\n");
return tape_34xx_erp_failed(request, -EACCES);
case 0x32:
/* Tension loss. We cannot recover this, it's an I/O error. */
dev_warn (&device->cdev->dev, "The tape does not have the "
"required tape tension\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x33:
/*
* Load Failure. The cartridge was not inserted correctly or
* the tape is not threaded correctly.
*/
dev_warn (&device->cdev->dev, "The tape unit failed to load"
" the cartridge\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x34:
/*
* Unload failure. The drive cannot maintain tape tension
* and control tape movement during an unload operation.
*/
dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
" cartridge failed\n");
if (request->op == TO_RUN)
return tape_34xx_erp_failed(request, -EIO);
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x35:
/*
* Drive equipment check. One of the following:
* - cu cannot recover from a drive detected error
* - a check code message is shown on drive display
* - the cartridge loader does not respond correctly
* - a failure occurs during an index, load, or unload cycle
*/
dev_warn (&device->cdev->dev, "An equipment check has occurred"
" on the tape unit\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x36:
if (device->cdev->id.driver_info == tape_3490)
/* End of data. */
return tape_34xx_erp_failed(request, -EIO);
/* This erpa is reserved for 3480 */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x37:
/*
* Tape length error. The tape is shorter than reported in
* the beginning-of-tape data.
*/
dev_warn (&device->cdev->dev, "The tape information states an"
" incorrect length\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x38:
/*
* Physical end of tape. A read/write operation reached
* the physical end of tape.
*/
if (request->op == TO_WRI ||
request->op == TO_DSE ||
request->op == TO_WTM)
return tape_34xx_erp_failed(request, -ENOSPC);
return tape_34xx_erp_failed(request, -EIO);
case 0x39:
/* Backward at Beginning of tape. */
return tape_34xx_erp_failed(request, -EIO);
case 0x3a:
/* Drive switched to not ready. */
dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x3b:
/* Manual rewind or unload. This causes an I/O error. */
dev_warn (&device->cdev->dev, "The tape medium has been "
"rewound or unloaded manually\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x42:
/*
* Degraded mode. A condition that can cause degraded
* performance is detected.
*/
dev_warn (&device->cdev->dev, "The tape subsystem is running "
"in degraded mode\n");
return tape_34xx_erp_retry(request);
case 0x43:
/* Drive not ready. */
tape_34xx_delete_sbid_from(device, 0);
tape_med_state_set(device, MS_UNLOADED);
/* Some commands are successful even in this case */
if (sense[1] & SENSE_DRIVE_ONLINE) {
switch(request->op) {
case TO_ASSIGN:
case TO_UNASSIGN:
case TO_DIS:
case TO_NOP:
return tape_34xx_done(request);
default:
break;
}
}
return tape_34xx_erp_failed(request, -ENOMEDIUM);
case 0x44:
/* Locate Block unsuccessful. */
if (request->op != TO_BLOCK && request->op != TO_LBL)
/* No locate block was issued. */
return tape_34xx_erp_bug(device, request,
irb, sense[3]);
return tape_34xx_erp_failed(request, -EIO);
case 0x45:
/* The drive is assigned to a different channel path. */
dev_warn (&device->cdev->dev, "The tape unit is already "
"assigned\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x46:
/*
* Drive not on-line. Drive may be switched offline,
* the power supply may be switched off or
* the drive address may not be set correctly.
*/
dev_warn (&device->cdev->dev, "The tape unit is not online\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x47:
/* Volume fenced. CU reports volume integrity is lost. */
dev_warn (&device->cdev->dev, "The control unit has fenced "
"access to the tape volume\n");
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_failed(request, -EIO);
case 0x48:
/* Log sense data and retry request. */
return tape_34xx_erp_retry(request);
case 0x49:
/* Bus out check. A parity check error on the bus was found. */
dev_warn (&device->cdev->dev, "A parity error occurred on the "
"tape bus\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4a:
/* Control unit erp failed. */
dev_warn (&device->cdev->dev, "I/O error recovery failed on "
"the tape control unit\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4b:
/*
* CU and drive incompatible. The drive requests micro-program
* patches, which are not available on the CU.
*/
dev_warn (&device->cdev->dev, "The tape unit requires a "
"firmware update\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x4c:
/*
* Recovered Check-One failure. Cu develops a hardware error,
* but is able to recover.
*/
return tape_34xx_erp_retry(request);
case 0x4d:
if (device->cdev->id.driver_info == tape_3490)
/*
* Resetting event received. Since the driver does
* not support resetting event recovery (which has to
* be handled by the I/O Layer), retry our command.
*/
return tape_34xx_erp_retry(request);
/* This erpa is reserved for 3480. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x4e:
if (device->cdev->id.driver_info == tape_3490) {
/*
* Maximum block size exceeded. This indicates that
* the block to be written is larger than allowed for
* buffered mode.
*/
dev_warn (&device->cdev->dev, "The maximum block size"
" for buffered mode is exceeded\n");
return tape_34xx_erp_failed(request, -ENOBUFS);
}
/* This erpa is reserved for 3480. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x50:
/*
* Read buffered log (Overflow). CU is running in extended
* buffered log mode, and a counter overflows. This should
* never happen, since we're never running in extended
* buffered log mode.
*/
return tape_34xx_erp_retry(request);
case 0x51:
/*
* Read buffered log (EOV). EOF processing occurs while the
* CU is in extended buffered log mode. This should never
* happen, since we're never running in extended buffered
* log mode.
*/
return tape_34xx_erp_retry(request);
case 0x52:
/* End of Volume complete. Rewind unload completed ok. */
if (request->op == TO_RUN) {
tape_med_state_set(device, MS_UNLOADED);
tape_34xx_delete_sbid_from(device, 0);
return tape_34xx_erp_succeeded(request);
}
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x53:
/* Global command intercept. */
return tape_34xx_erp_retry(request);
case 0x54:
/* Channel interface recovery (temporary). */
return tape_34xx_erp_retry(request);
case 0x55:
/* Channel interface recovery (permanent). */
dev_warn (&device->cdev->dev, "A channel interface error cannot be"
" recovered\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x56:
/* Channel protocol error. */
dev_warn (&device->cdev->dev, "A channel protocol error "
"occurred\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x57:
/*
* 3480: Attention intercept.
* 3490: Global status intercept.
*/
return tape_34xx_erp_retry(request);
case 0x5a:
/*
* Tape length incompatible. The tape inserted is too long,
* which could cause damage to the tape or the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit does not support "
"the tape length\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5b:
/* Format 3480 XF incompatible */
if (sense[1] & SENSE_BEGINNING_OF_TAPE)
/* The tape will get overwritten. */
return tape_34xx_erp_retry(request);
dev_warn (&device->cdev->dev, "The tape unit does not support"
" format 3480 XF\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5c:
/* Format 3480-2 XF incompatible */
dev_warn (&device->cdev->dev, "The tape unit does not support tape "
"format 3480-2 XF\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x5d:
/* Tape length violation. */
dev_warn (&device->cdev->dev, "The tape unit does not support"
" the current tape length\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
case 0x5e:
/* Compaction algorithm incompatible. */
dev_warn (&device->cdev->dev, "The tape unit does not support"
" the compaction algorithm\n");
return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
/* The following erpas should have been covered earlier. */
case 0x23: /* Read data check. */
case 0x25: /* Write data check. */
case 0x26: /* Data check (read opposite). */
case 0x28: /* Write id mark check. */
case 0x31: /* Tape void. */
case 0x40: /* Overrun error. */
case 0x41: /* Record sequence error. */
/* All other erpas are reserved for future use. */
default:
return tape_34xx_erp_bug(device, request, irb, sense[3]);
}
}
/*
* 3480/3490 interrupt handler
*/
static int
tape_34xx_irq(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (request == NULL)
return tape_34xx_unsolicited_irq(device, irb);
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
return tape_34xx_erp_failed(request, -ENOSPC);
}
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
return tape_34xx_unit_check(device, request, irb);
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/*
* A unit exception occurs on skipping over a tapemark block.
*/
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
if (request->op == TO_BSB || request->op == TO_FSB)
request->rescnt++;
else
DBF_EVENT(5, "Unit Exception!\n");
}
return tape_34xx_done(request);
}
DBF_EVENT(6, "xunknownirq\n");
tape_dump_sense_dbf(device, request, irb);
return TAPE_IO_STOP;
}
/*
* ioctl_overload
*/
static int
tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
{
if (cmd == TAPE390_DISPLAY) {
struct display_struct disp;
if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
return -EFAULT;
return tape_std_display(device, &disp);
} else
return -EINVAL;
}
static inline void
tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
{
struct tape_34xx_sbid * new_sbid;
new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
if (!new_sbid)
return;
new_sbid->bid = bid;
list_add(&new_sbid->list, l);
}
/*
* Build up the search block ID list. The block ID consists of a logical
* block number and a hardware specific part. The hardware specific part
* helps the tape drive to speed up searching for a specific block.
*/
static void
tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
{
struct list_head * sbid_list;
struct tape_34xx_sbid * sbid;
struct list_head * l;
/*
* immediately return if there is no list at all or the block to add
* is located in segment 1 of wrap 0 because this position is used
* if no hardware position data is supplied.
*/
sbid_list = (struct list_head *) device->discdata;
if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
return;
/*
* Search the position where to insert the new entry. Hardware
* acceleration uses only the segment and wrap number. So we
* need only one entry for a specific wrap/segment combination.
* If there is a block with a lower number but the same hard-
* ware position data we just update the block number in the
* existing entry.
*/
list_for_each(l, sbid_list) {
sbid = list_entry(l, struct tape_34xx_sbid, list);
if (
(sbid->bid.segment == bid.segment) &&
(sbid->bid.wrap == bid.wrap)
) {
if (bid.block < sbid->bid.block)
sbid->bid = bid;
else return;
break;
}
/* Sort in according to logical block number. */
if (bid.block < sbid->bid.block) {
tape_34xx_append_new_sbid(bid, l->prev);
break;
}
}
/* List empty or new block bigger than last entry. */
if (l == sbid_list)
tape_34xx_append_new_sbid(bid, l->prev);
DBF_LH(4, "Current list is:\n");
list_for_each(l, sbid_list) {
sbid = list_entry(l, struct tape_34xx_sbid, list);
DBF_LH(4, "%d:%03d@%05d\n",
sbid->bid.wrap,
sbid->bid.segment,
sbid->bid.block
);
}
}
/*
* Delete all entries from the search block ID list that belong to tape blocks
* equal or higher than the given number.
*/
static void
tape_34xx_delete_sbid_from(struct tape_device *device, int from)
{
struct list_head * sbid_list;
struct tape_34xx_sbid * sbid;
struct list_head * l;
struct list_head * n;
sbid_list = (struct list_head *) device->discdata;
if (!sbid_list)
return;
list_for_each_safe(l, n, sbid_list) {
sbid = list_entry(l, struct tape_34xx_sbid, list);
if (sbid->bid.block >= from) {
DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
sbid->bid.wrap,
sbid->bid.segment,
sbid->bid.block
);
list_del(l);
kfree(sbid);
}
}
}
/*
* Merge hardware position data into a block id.
*/
static void
tape_34xx_merge_sbid(
struct tape_device * device,
struct tape_34xx_block_id * bid
) {
struct tape_34xx_sbid * sbid;
struct tape_34xx_sbid * sbid_to_use;
struct list_head * sbid_list;
struct list_head * l;
sbid_list = (struct list_head *) device->discdata;
bid->wrap = 0;
bid->segment = 1;
if (!sbid_list || list_empty(sbid_list))
return;
sbid_to_use = NULL;
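/* Pick the entry with the highest block number that is still below the requested block. */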
list_for_each(l, sbid_list) {
sbid = list_entry(l, struct tape_34xx_sbid, list);
if (sbid->bid.block >= bid->block)
break;
sbid_to_use = sbid;
}
if (sbid_to_use) {
bid->wrap = sbid_to_use->bid.wrap;
bid->segment = sbid_to_use->bid.segment;
DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
sbid_to_use->bid.wrap,
sbid_to_use->bid.segment,
sbid_to_use->bid.block,
bid->block
);
}
}
static int
tape_34xx_setup_device(struct tape_device * device)
{
int rc;
struct list_head * discdata;
DBF_EVENT(6, "34xx device setup\n");
rc = tape_std_assign(device);
if (rc == 0) {
rc = tape_34xx_medium_sense(device);
if (rc != 0)
DBF_LH(3, "34xx medium sense returned %d\n", rc);
}
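/* device->discdata holds the head of the search block ID (sbid) list. */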
discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
if (discdata) {
INIT_LIST_HEAD(discdata);
device->discdata = discdata;
}
return rc;
}
static void
tape_34xx_cleanup_device(struct tape_device *device)
{
tape_std_unassign(device);
if (device->discdata) {
tape_34xx_delete_sbid_from(device, 0);
kfree(device->discdata);
device->discdata = NULL;
}
}
/*
* MTTELL: Tell block. Return the block number relative to the current file.
*/
static int
tape_34xx_mttell(struct tape_device *device, int mt_count)
{
struct {
struct tape_34xx_block_id cbid;
struct tape_34xx_block_id dbid;
} __attribute__ ((packed)) block_id;
int rc;
rc = tape_std_read_block_id(device, (__u64 *) &block_id);
if (rc)
return rc;
tape_34xx_add_sbid(device, block_id.cbid);
return block_id.cbid.block;
}
/*
* MTSEEK: seek to the specified block.
*/
static int
tape_34xx_mtseek(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct tape_34xx_block_id * bid;
if (mt_count > 0x3fffff) {
DBF_EXCEPTION(6, "xsee parm\n");
return -EINVAL;
}
request = tape_alloc_request(3, 4);
if (IS_ERR(request))
return PTR_ERR(request);
/* setup ccws */
request->op = TO_LBL;
bid = (struct tape_34xx_block_id *) request->cpdata;
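/* Modeset byte bit 0x08 (presumably the compaction setting, cf. tape_std_mtcompression()) selects the 3480 XF block ID format. */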
bid->format = (*device->modeset_byte & 0x08) ?
TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
bid->block = mt_count;
tape_34xx_merge_sbid(device, bid);
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* List of 3480/3490 magnetic tape commands.
*/
static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
[MTRESET] = tape_std_mtreset,
[MTFSF] = tape_std_mtfsf,
[MTBSF] = tape_std_mtbsf,
[MTFSR] = tape_std_mtfsr,
[MTBSR] = tape_std_mtbsr,
[MTWEOF] = tape_std_mtweof,
[MTREW] = tape_std_mtrew,
[MTOFFL] = tape_std_mtoffl,
[MTNOP] = tape_std_mtnop,
[MTRETEN] = tape_std_mtreten,
[MTBSFM] = tape_std_mtbsfm,
[MTFSFM] = tape_std_mtfsfm,
[MTEOM] = tape_std_mteom,
[MTERASE] = tape_std_mterase,
[MTRAS1] = NULL,
[MTRAS2] = NULL,
[MTRAS3] = NULL,
[MTSETBLK] = tape_std_mtsetblk,
[MTSETDENSITY] = NULL,
[MTSEEK] = tape_34xx_mtseek,
[MTTELL] = tape_34xx_mttell,
[MTSETDRVBUFFER] = NULL,
[MTFSS] = NULL,
[MTBSS] = NULL,
[MTWSM] = NULL,
[MTLOCK] = NULL,
[MTUNLOCK] = NULL,
[MTLOAD] = tape_std_mtload,
[MTUNLOAD] = tape_std_mtunload,
[MTCOMPRESSION] = tape_std_mtcompression,
[MTSETPART] = NULL,
[MTMKPART] = NULL
};
/*
* Tape discipline structure for 3480 and 3490.
*/
static struct tape_discipline tape_discipline_34xx = {
.owner = THIS_MODULE,
.setup_device = tape_34xx_setup_device,
.cleanup_device = tape_34xx_cleanup_device,
.process_eov = tape_std_process_eov,
.irq = tape_34xx_irq,
.read_block = tape_std_read_block,
.write_block = tape_std_write_block,
.ioctl_fn = tape_34xx_ioctl,
.mtop_array = tape_34xx_mtop
};
static struct ccw_device_id tape_34xx_ids[] = {
{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
{ /* end of list */ },
};
static int
tape_34xx_online(struct ccw_device *cdev)
{
return tape_generic_online(
dev_get_drvdata(&cdev->dev),
&tape_discipline_34xx
);
}
static struct ccw_driver tape_34xx_driver = {
.driver = {
.name = "tape_34xx",
.owner = THIS_MODULE,
},
.ids = tape_34xx_ids,
.probe = tape_generic_probe,
.remove = tape_generic_remove,
.set_online = tape_34xx_online,
.set_offline = tape_generic_offline,
.int_class = IRQIO_TAP,
};
static int
tape_34xx_init (void)
{
int rc;
TAPE_DBF_AREA = debug_register("tape_34xx", 2, 2, 4 * sizeof(long));
debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
debug_set_level(TAPE_DBF_AREA, 6);
#endif
DBF_EVENT(3, "34xx init\n");
/* Register driver for 3480/3490 tapes. */
rc = ccw_driver_register(&tape_34xx_driver);
if (rc)
DBF_EVENT(3, "34xx init failed\n");
else
DBF_EVENT(3, "34xx registered\n");
return rc;
}
static void
tape_34xx_exit(void)
{
ccw_driver_unregister(&tape_34xx_driver);
debug_unregister(TAPE_DBF_AREA);
}
MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
MODULE_LICENSE("GPL");
module_init(tape_34xx_init);
module_exit(tape_34xx_exit);
| linux-master | drivers/s390/char/tape_34xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001
* Author(s): Carsten Otte <[email protected]>
* Michael Holzheu <[email protected]>
* Tuan Ngo-Anh <[email protected]>
*
* PROCFS Functions
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
static const char *tape_med_st_verbose[MS_SIZE] =
{
[MS_UNKNOWN] = "UNKNOWN ",
[MS_LOADED] = "LOADED ",
[MS_UNLOADED] = "UNLOADED"
};
/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
* Show function for /proc/tapedevices
*/
static int tape_proc_show(struct seq_file *m, void *v)
{
struct tape_device *device;
struct tape_request *request;
const char *str;
unsigned long n;
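/* tape_proc_start() hands out the position incremented by one (so a valid slot is never NULL); undo that offset here. */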
n = (unsigned long) v - 1;
if (!n) {
seq_printf(m, "TapeNo\tBusID CuType/Model\t"
"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
}
device = tape_find_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
seq_printf(m, "%d\t", (int) n);
seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
seq_printf(m, "%04X/", device->cdev->id.cu_type);
seq_printf(m, "%02X\t", device->cdev->id.cu_model);
seq_printf(m, "%04X/", device->cdev->id.dev_type);
seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
if (device->char_data.block_size == 0)
seq_printf(m, "auto\t");
else
seq_printf(m, "%i\t", device->char_data.block_size);
if (device->tape_state >= 0 &&
device->tape_state < TS_SIZE)
str = tape_state_verbose[device->tape_state];
else
str = "UNKNOWN";
seq_printf(m, "%s\t", str);
if (!list_empty(&device->req_queue)) {
request = list_entry(device->req_queue.next,
struct tape_request, list);
str = tape_op_verbose[request->op];
} else
str = "---";
seq_printf(m, "%s\t", str);
seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_put_device(device);
return 0;
}
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= 256 / TAPE_MINORS_PER_DEV)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return tape_proc_start(m, pos);
}
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations tape_proc_seq = {
.start = tape_proc_start,
.next = tape_proc_next,
.stop = tape_proc_stop,
.show = tape_proc_show,
};
/*
* Initialize the procfs entry on startup
*/
void
tape_proc_init(void)
{
tape_proc_devices = proc_create_seq("tapedevices", 0444, NULL,
&tape_proc_seq);
}
/*
* Clean up everything registered with procfs
*/
void
tape_proc_cleanup(void)
{
if (tape_proc_devices != NULL)
remove_proc_entry ("tapedevices", NULL);
}
| linux-master | drivers/s390/char/tape_proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive FTP Services
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/crc16.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#include "sclp_ftp.h"
#include "diag_ftp.h"
/**
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
* @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
void (*shutdown)(void);
ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
size_t *fsize);
};
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
static const struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
/**
* hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
* @cmd: FTP command string (NOT zero-terminated)
* @len: length of FTP command string in @cmd
*/
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
{
/* HMC FTP command descriptor */
struct hmcdrv_ftp_cmd_desc {
const char *str; /* command string */
enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
};
/* Description of all HMC drive FTP commands
*
* Notes:
* 1. Array size should be a prime number.
* 2. Do not change the order of commands in table (because the
* index is determined by CRC % ARRAY_SIZE).
* 3. Original command 'nlist' was renamed, else the CRC would
* collide with 'append' (see point 2).
*/
static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
{.str = "get", /* [0] get (CRC = 0x68eb) */
.cmd = HMCDRV_FTP_GET},
{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
.cmd = HMCDRV_FTP_DIR},
{.str = "delete", /* [2] delete (CRC = 0x53ae) */
.cmd = HMCDRV_FTP_DELETE},
{.str = "nls", /* [3] nls (CRC = 0xf87c) */
.cmd = HMCDRV_FTP_NLIST},
{.str = "put", /* [4] put (CRC = 0xac56) */
.cmd = HMCDRV_FTP_PUT},
{.str = "append", /* [5] append (CRC = 0xf56e) */
.cmd = HMCDRV_FTP_APPEND},
{.str = NULL} /* [6] unused */
};
const struct hmcdrv_ftp_cmd_desc *pdesc;
u16 crc = 0xffffU;
if (len == 0)
return HMCDRV_FTP_NOOP; /* error indicator */
crc = crc16(crc, cmd, len);
pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
if (!pdesc->str || strncmp(pdesc->str, cmd, len))
return HMCDRV_FTP_NOOP;
pr_debug("FTP command '%s' found, with ID %d\n",
pdesc->str, pdesc->cmd);
return pdesc->cmd;
}
/**
* hmcdrv_ftp_parse() - HMC drive FTP command parser
* @cmd: FTP command string "<cmd> <filename>"
* @ftp: Pointer to FTP command specification buffer (output)
*
* Return: 0 on success, else a (negative) error code
*/
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
{
char *start;
int argc = 0;
ftp->id = HMCDRV_FTP_NOOP;
ftp->fname = NULL;
while (*cmd != '\0') {
while (isspace(*cmd))
++cmd;
if (*cmd == '\0')
break;
start = cmd;
switch (argc) {
case 0: /* 1st argument (FTP command) */
while ((*cmd != '\0') && !isspace(*cmd))
++cmd;
ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
break;
case 1: /* 2nd / last argument (rest of line) */
while ((*cmd != '\0') && !iscntrl(*cmd))
++cmd;
ftp->fname = start;
fallthrough;
default:
*cmd = '\0';
break;
} /* switch */
++argc;
} /* while */
if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
return -EINVAL;
return 0;
}
/**
* hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
* @ftp: pointer to FTP command specification
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
{
ssize_t len;
mutex_lock(&hmcdrv_ftp_mutex);
if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
} else {
len = -ENXIO;
}
mutex_unlock(&hmcdrv_ftp_mutex);
return len;
}
EXPORT_SYMBOL(hmcdrv_ftp_do);
/**
* hmcdrv_ftp_probe() - probe for the HMC drive FTP service
*
* Return: 0 if the service is available, else a (negative) error code
*/
int hmcdrv_ftp_probe(void)
{
int rc;
struct hmcdrv_ftp_cmdspec ftp = {
.id = HMCDRV_FTP_NOOP,
.ofs = 0,
.fname = "",
.len = PAGE_SIZE
};
ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ftp.buf)
return -ENOMEM;
rc = hmcdrv_ftp_startup();
if (rc)
goto out;
rc = hmcdrv_ftp_do(&ftp);
hmcdrv_ftp_shutdown();
switch (rc) {
case -ENOENT: /* no such file/media or currently busy, */
case -EBUSY: /* but service seems to be available */
rc = 0;
break;
default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
if (rc > 0)
rc = 0; /* clear length (success) */
break;
} /* switch */
out:
free_page((unsigned long) ftp.buf);
return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_probe);
/**
* hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
*
* @cmd: FTP command string "<cmd> <filename>"
* @offset: file position to read/write
* @buf: user-space buffer for read/written directory/file
* @len: size of @buf (read/dir) or number of bytes to write
*
* This function must not be called before hmcdrv_ftp_startup() was called.
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len)
{
int order;
struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
if (retlen)
return retlen;
order = get_order(ftp.len);
ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
if (!ftp.buf)
return -ENOMEM;
switch (ftp.id) {
case HMCDRV_FTP_DIR:
case HMCDRV_FTP_NLIST:
case HMCDRV_FTP_GET:
retlen = hmcdrv_ftp_do(&ftp);
if ((retlen >= 0) &&
copy_to_user(buf, ftp.buf, retlen))
retlen = -EFAULT;
break;
case HMCDRV_FTP_PUT:
case HMCDRV_FTP_APPEND:
if (!copy_from_user(ftp.buf, buf, ftp.len))
retlen = hmcdrv_ftp_do(&ftp);
else
retlen = -EFAULT;
break;
case HMCDRV_FTP_DELETE:
retlen = hmcdrv_ftp_do(&ftp);
break;
default:
retlen = -EOPNOTSUPP;
break;
}
free_pages((unsigned long) ftp.buf, order);
return retlen;
}
/**
* hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
* dedicated (owner) instance
*
* Return: 0 on success, else a (negative) error code
*/
int hmcdrv_ftp_startup(void)
{
static const struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
.startup = diag_ftp_startup,
.shutdown = diag_ftp_shutdown,
.transfer = diag_ftp_cmd
};
static const struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
.startup = sclp_ftp_startup,
.shutdown = sclp_ftp_shutdown,
.transfer = sclp_ftp_cmd
};
int rc = 0;
mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
if (hmcdrv_ftp_refcnt == 0) {
if (MACHINE_IS_VM)
hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
else
rc = -EOPNOTSUPP;
if (hmcdrv_ftp_funcs)
rc = hmcdrv_ftp_funcs->startup();
}
if (!rc)
++hmcdrv_ftp_refcnt;
mutex_unlock(&hmcdrv_ftp_mutex);
return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_startup);
/**
* hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
* dedicated (owner) instance
*/
void hmcdrv_ftp_shutdown(void)
{
mutex_lock(&hmcdrv_ftp_mutex);
--hmcdrv_ftp_refcnt;
if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
hmcdrv_ftp_funcs->shutdown();
mutex_unlock(&hmcdrv_ftp_mutex);
}
EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
| linux-master | drivers/s390/char/hmcdrv_ftp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
*
* Copyright IBM Corp. 2001, 2009
* Authors: Malcolm Beattie <[email protected]>
* Michael Holzheu <[email protected]>
* Frank Munzert <[email protected]>
*/
#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/scsw.h>
#include "vmur.h"
/*
* Driver overview
*
* Unit record device support is implemented as a character device driver.
* We can fit at least 16 bits into a device minor number and use the
* simple method of mapping a character device number with minor abcd
* to the unit record device with devno abcd.
* I/O to virtual unit record devices is handled as follows:
* Reads: Diagnose code 0x14 (input spool file manipulation)
* is used to read spool data page-wise.
* Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
* is available by reading sysfs attr reclen. Each write() to the device
* must specify an integral multiple (maximal 511) of reclen.
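* For example (sketch only, device numbers are arbitrary): a user-space
* program could open the node created in ur_set_online(), e.g.
* /dev/vmpun-0.0.000d for a virtual punch, and write() buffers whose
* length is a multiple of reclen (80 for reader/punch, 132 for printer,
* see ur_ids[]).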
*/
static char ur_banner[] = "z/VM virtual unit record device driver";
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");
static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;
/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
{ /* end of list */ }
};
MODULE_DEVICE_TABLE(ccw, ur_ids);
static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static struct ccw_driver ur_driver = {
.driver = {
.name = "vmur",
.owner = THIS_MODULE,
},
.ids = ur_ids,
.probe = ur_probe,
.remove = ur_remove,
.set_online = ur_set_online,
.set_offline = ur_set_offline,
.int_class = IRQIO_VMR,
};
static DEFINE_MUTEX(vmur_mutex);
static void ur_uevent(struct work_struct *ws);
/*
* Allocation, freeing, getting and putting of urdev structures
*
* Each ur device (urd) contains a reference to its corresponding ccw device
* (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
* ur device using dev_get_drvdata(&cdev->dev) pointer.
*
* urd references:
* - ur_probe gets a urd reference, ur_remove drops the reference
* dev_get_drvdata(&cdev->dev)
* - ur_open gets a urd reference, ur_release drops the reference
* (urf->urd)
*
* cdev references:
* - urdev_alloc get a cdev reference (urd->cdev)
* - urdev_free drops the cdev reference (urd->cdev)
*
* Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
*/
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
struct urdev *urd;
urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
if (!urd)
return NULL;
urd->reclen = cdev->id.driver_info;
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
INIT_WORK(&urd->uevent_work, ur_uevent);
spin_lock_init(&urd->open_lock);
refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
get_device(&cdev->dev);
return urd;
}
static void urdev_free(struct urdev *urd)
{
TRACE("urdev_free: %p\n", urd);
if (urd->cdev)
put_device(&urd->cdev->dev);
kfree(urd);
}
static void urdev_get(struct urdev *urd)
{
refcount_inc(&urd->ref_count);
}
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
struct urdev *urd;
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
urd = dev_get_drvdata(&cdev->dev);
if (urd)
urdev_get(urd);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return urd;
}
static struct urdev *urdev_get_from_devno(u16 devno)
{
char bus_id[16];
struct ccw_device *cdev;
struct urdev *urd;
sprintf(bus_id, "0.0.%04x", devno);
cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
if (!cdev)
return NULL;
urd = urdev_get_from_cdev(cdev);
put_device(&cdev->dev);
return urd;
}
static void urdev_put(struct urdev *urd)
{
if (refcount_dec_and_test(&urd->ref_count))
urdev_free(urd);
}
/*
* Low-level functions to do I/O to a ur device.
* alloc_chan_prog
* free_chan_prog
* do_ur_io
* ur_int_handler
*
* alloc_chan_prog allocates and builds the channel program
* free_chan_prog frees memory of the channel program
*
* do_ur_io issues the channel program to the device and blocks waiting
* on a completion event it publishes at urd->io_done. The function
* serialises itself on the device's mutex so that only one I/O
* is issued at a time (and that I/O is synchronous).
*
* ur_int_handler catches the "I/O done" interrupt, writes the
* subchannel status word into the scsw member of the urdev structure
* and complete()s the io_done to wake the waiting do_ur_io.
*
* The caller of do_ur_io is responsible for kfree()ing the channel program
* address pointer that alloc_chan_prog returned.
*/
static void free_chan_prog(struct ccw1 *cpa)
{
struct ccw1 *ptr = cpa;
while (ptr->cda) {
kfree((void *)(addr_t) ptr->cda);
ptr++;
}
kfree(cpa);
}
/*
* alloc_chan_prog
* The channel program we use is write commands chained together
* with a final NOP CCW command-chained on (which ensures that CE and DE
* are presented together in a single interrupt instead of as separate
* interrupts unless an incorrect length indication kicks in first). The
* data length in each CCW is reclen.
*/
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
int reclen)
{
struct ccw1 *cpa;
void *kbuf;
int i;
TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
/*
* We chain a NOP onto the writes to force CE+DE together.
* That means we allocate room for CCWs to cover count/reclen
* records plus a NOP.
*/
cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
if (!cpa)
return ERR_PTR(-ENOMEM);
for (i = 0; i < rec_count; i++) {
cpa[i].cmd_code = WRITE_CCW_CMD;
cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
cpa[i].count = reclen;
kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
if (!kbuf) {
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
cpa[i].cda = (u32)(addr_t) kbuf;
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
}
ubuf += reclen;
}
/* The following NOP CCW forces CE+DE to be presented together */
cpa[i].cmd_code = CCW_CMD_NOOP;
return cpa;
}
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
int rc;
struct ccw_device *cdev = urd->cdev;
DECLARE_COMPLETION_ONSTACK(event);
TRACE("do_ur_io: cpa=%p\n", cpa);
rc = mutex_lock_interruptible(&urd->io_mutex);
if (rc)
return rc;
urd->io_done = &event;
spin_lock_irq(get_ccwdev_lock(cdev));
rc = ccw_device_start(cdev, cpa, 1, 0, 0);
spin_unlock_irq(get_ccwdev_lock(cdev));
TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
if (rc)
goto out;
wait_for_completion(&event);
TRACE("do_ur_io: I/O complete\n");
rc = 0;
out:
mutex_unlock(&urd->io_mutex);
return rc;
}
static void ur_uevent(struct work_struct *ws)
{
struct urdev *urd = container_of(ws, struct urdev, uevent_work);
char *envp[] = {
"EVENT=unsol_de", /* Unsolicited device-end interrupt */
NULL
};
kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
urdev_put(urd);
}
/*
* ur interrupt handler, called from the ccw_device layer
*/
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct urdev *urd;
if (!IS_ERR(irb)) {
TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
}
urd = dev_get_drvdata(&cdev->dev);
if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n");
if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) {
/*
* Userspace might be interested in a transition to
* device-ready state.
*/
urdev_get(urd);
schedule_work(&urd->uevent_work);
}
return;
}
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
urd->io_request_rc = 0;
else
urd->io_request_rc = -EIO;
complete(urd->io_done);
}
/*
* reclen sysfs attribute - The record length to be used for write CCWs
*/
static ssize_t ur_attr_reclen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct urdev *urd;
int rc;
urd = urdev_get_from_cdev(to_ccwdev(dev));
if (!urd)
return -ENODEV;
rc = sprintf(buf, "%zu\n", urd->reclen);
urdev_put(urd);
return rc;
}
static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
static int ur_create_attributes(struct device *dev)
{
return device_create_file(dev, &dev_attr_reclen);
}
static void ur_remove_attributes(struct device *dev)
{
device_remove_file(dev, &dev_attr_reclen);
}
/*
* diagnose code 0x210 - retrieve device information
* cc=0 normal completion, we have a real device
* cc=1 CP paging error
* cc=2 The virtual device exists, but is not associated with a real device
* cc=3 Invalid device address, or the virtual device does not exist
*/
static int get_urd_class(struct urdev *urd)
{
static struct diag210 ur_diag210;
int cc;
ur_diag210.vrdcdvno = urd->dev_id.devno;
ur_diag210.vrdclen = sizeof(struct diag210);
cc = diag210(&ur_diag210);
switch (cc) {
case 0:
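/* a real device - this driver supports only virtual unit record devices */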
return -EOPNOTSUPP;
case 2:
return ur_diag210.vrdcvcla; /* virtual device class */
case 3:
return -ENODEV;
default:
return -EIO;
}
}
/*
* Allocation and freeing of urfile structures
*/
static struct urfile *urfile_alloc(struct urdev *urd)
{
struct urfile *urf;
urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
if (!urf)
return NULL;
urf->urd = urd;
TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
urf->dev_reclen);
return urf;
}
static void urfile_free(struct urfile *urf)
{
TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
kfree(urf);
}
/*
* The fops implementation of the character device driver
*/
static ssize_t do_write(struct urdev *urd, const char __user *udata,
size_t count, size_t reclen, loff_t *ppos)
{
struct ccw1 *cpa;
int rc;
cpa = alloc_chan_prog(udata, count / reclen, reclen);
if (IS_ERR(cpa))
return PTR_ERR(cpa);
rc = do_ur_io(urd, cpa);
if (rc)
goto fail_kfree_cpa;
if (urd->io_request_rc) {
rc = urd->io_request_rc;
goto fail_kfree_cpa;
}
*ppos += count;
rc = count;
fail_kfree_cpa:
free_chan_prog(cpa);
return rc;
}
static ssize_t ur_write(struct file *file, const char __user *udata,
size_t count, loff_t *ppos)
{
struct urfile *urf = file->private_data;
TRACE("ur_write: count=%zu\n", count);
if (count == 0)
return 0;
if (count % urf->dev_reclen)
return -EINVAL; /* count must be a multiple of reclen */
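/* Truncate to at most MAX_RECS_PER_IO records; the caller sees a short write and may resubmit the rest. */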
if (count > urf->dev_reclen * MAX_RECS_PER_IO)
count = urf->dev_reclen * MAX_RECS_PER_IO;
return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
/*
* diagnose code 0x14 subcode 0x0028 - position spool file to designated
* record
* cc=0 normal completion
* cc=2 no file active on the virtual reader or device not ready
* cc=3 record specified is beyond EOF
*/
static int diag_position_to_record(int devno, int record)
{
int cc;
cc = diag14(record, devno, 0x28);
switch (cc) {
case 0:
return 0;
case 2:
return -ENOMEDIUM;
case 3:
return -ENODATA; /* position beyond end of file */
default:
return -EIO;
}
}
/*
* diagnose code 0x14 subcode 0x0000 - read next spool file buffer
* cc=0 normal completion
* cc=1 EOF reached
* cc=2 no file active on the virtual reader, and no file eligible
* cc=3 file already active on the virtual reader or specified virtual
* reader does not exist or is not a reader
*/
static int diag_read_file(int devno, char *buf)
{
int cc;
cc = diag14((unsigned long) buf, devno, 0x00);
switch (cc) {
case 0:
return 0;
case 1:
return -ENODATA;
case 2:
return -ENOMEDIUM;
default:
return -EIO;
}
}
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
loff_t *offs)
{
size_t len, copied, res;
char *buf;
int rc;
u16 reclen;
struct urdev *urd;
urd = ((struct urfile *) file->private_data)->urd;
reclen = ((struct urfile *) file->private_data)->file_reclen;
rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
if (rc == -ENODATA)
return 0;
if (rc)
return rc;
len = min((size_t) PAGE_SIZE, count);
buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!buf)
return -ENOMEM;
copied = 0;
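/* res is the offset within the first page at which copying to user space starts. */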
res = (size_t) (*offs % PAGE_SIZE);
do {
rc = diag_read_file(urd->dev_id.devno, buf);
if (rc == -ENODATA) {
break;
}
if (rc)
goto fail;
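/* On the first page of the file, patch the record length into the buffer at FILE_RECLEN_OFFSET. */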
if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
len = min(count - copied, PAGE_SIZE - res);
if (copy_to_user(ubuf + copied, buf + res, len)) {
rc = -EFAULT;
goto fail;
}
res = 0;
copied += len;
} while (copied != count);
*offs += copied;
rc = copied;
fail:
free_page((unsigned long) buf);
return rc;
}
static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
loff_t *offs)
{
struct urdev *urd;
int rc;
TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
if (count == 0)
return 0;
urd = ((struct urfile *) file->private_data)->urd;
rc = mutex_lock_interruptible(&urd->io_mutex);
if (rc)
return rc;
rc = diag14_read(file, ubuf, count, offs);
mutex_unlock(&urd->io_mutex);
return rc;
}
/*
* diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
* cc=0 normal completion
* cc=1 no files on reader queue or no subsequent file
* cc=2 spid specified is invalid
*/
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
int cc;
cc = diag14((unsigned long) buf, spid, 0xfff);
switch (cc) {
case 0:
return 0;
default:
return -ENODATA;
}
}
static int verify_uri_device(struct urdev *urd)
{
struct file_control_block *fcb;
char *buf;
int rc;
fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
if (!fcb)
return -ENOMEM;
/* check for empty reader device (beginning of chain) */
rc = diag_read_next_file_info(fcb, 0);
if (rc)
goto fail_free_fcb;
/* if file is in hold status, we do not read it */
if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
rc = -EPERM;
goto fail_free_fcb;
}
/* open file on virtual reader */
buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!buf) {
rc = -ENOMEM;
goto fail_free_fcb;
}
rc = diag_read_file(urd->dev_id.devno, buf);
if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
goto fail_free_buf;
/* check if the file on top of the queue is open now */
rc = diag_read_next_file_info(fcb, 0);
if (rc)
goto fail_free_buf;
if (!(fcb->file_stat & FLG_IN_USE)) {
rc = -EMFILE;
goto fail_free_buf;
}
rc = 0;
fail_free_buf:
free_page((unsigned long) buf);
fail_free_fcb:
kfree(fcb);
return rc;
}
static int verify_device(struct urdev *urd)
{
switch (urd->class) {
case DEV_CLASS_UR_O:
return 0; /* no check needed here */
case DEV_CLASS_UR_I:
return verify_uri_device(urd);
default:
return -EOPNOTSUPP;
}
}
static int get_uri_file_reclen(struct urdev *urd)
{
struct file_control_block *fcb;
int rc;
fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
if (!fcb)
return -ENOMEM;
rc = diag_read_next_file_info(fcb, 0);
if (rc)
goto fail_free;
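/* CP dump files are reported with record length 0. */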
if (fcb->file_stat & FLG_CP_DUMP)
rc = 0;
else
rc = fcb->rec_len;
fail_free:
kfree(fcb);
return rc;
}
static int get_file_reclen(struct urdev *urd)
{
switch (urd->class) {
case DEV_CLASS_UR_O:
return 0;
case DEV_CLASS_UR_I:
return get_uri_file_reclen(urd);
default:
return -EOPNOTSUPP;
}
}
static int ur_open(struct inode *inode, struct file *file)
{
u16 devno;
struct urdev *urd;
struct urfile *urf;
unsigned short accmode;
int rc;
accmode = file->f_flags & O_ACCMODE;
if (accmode == O_RDWR)
return -EACCES;
/*
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
*/
devno = iminor(file_inode(file));
urd = urdev_get_from_devno(devno);
if (!urd) {
rc = -ENXIO;
goto out;
}
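/* Only one opener at a time; without O_NONBLOCK, wait until the device has been released. */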
spin_lock(&urd->open_lock);
while (urd->open_flag) {
spin_unlock(&urd->open_lock);
if (file->f_flags & O_NONBLOCK) {
rc = -EBUSY;
goto fail_put;
}
if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
rc = -ERESTARTSYS;
goto fail_put;
}
spin_lock(&urd->open_lock);
}
urd->open_flag++;
spin_unlock(&urd->open_lock);
TRACE("ur_open\n");
if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
rc = -EACCES;
goto fail_unlock;
}
rc = verify_device(urd);
if (rc)
goto fail_unlock;
urf = urfile_alloc(urd);
if (!urf) {
rc = -ENOMEM;
goto fail_unlock;
}
urf->dev_reclen = urd->reclen;
rc = get_file_reclen(urd);
if (rc < 0)
goto fail_urfile_free;
urf->file_reclen = rc;
file->private_data = urf;
return 0;
fail_urfile_free:
urfile_free(urf);
fail_unlock:
spin_lock(&urd->open_lock);
urd->open_flag--;
spin_unlock(&urd->open_lock);
fail_put:
urdev_put(urd);
out:
return rc;
}
static int ur_release(struct inode *inode, struct file *file)
{
struct urfile *urf = file->private_data;
TRACE("ur_release\n");
spin_lock(&urf->urd->open_lock);
urf->urd->open_flag--;
spin_unlock(&urf->urd->open_lock);
wake_up_interruptible(&urf->urd->wait);
urdev_put(urf->urd);
urfile_free(urf);
return 0;
}
static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
if ((file->f_flags & O_ACCMODE) != O_RDONLY)
return -ESPIPE; /* seek allowed only for reader */
if (offset % PAGE_SIZE)
return -ESPIPE; /* only multiples of 4K allowed */
return no_seek_end_llseek(file, offset, whence);
}
static const struct file_operations ur_fops = {
.owner = THIS_MODULE,
.open = ur_open,
.release = ur_release,
.read = ur_read,
.write = ur_write,
.llseek = ur_llseek,
};
/*
* ccw_device infrastructure:
* ur_probe creates the struct urdev (with refcount = 1), the device
* attributes, sets up the interrupt handler and validates the virtual
* unit record device.
* ur_remove removes the device attributes and drops the reference to
* struct urdev.
*
* ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
* by the vmur_mutex lock.
*
* urd->char_device is used as indication that the online function has
* been completed successfully.
*/
static int ur_probe(struct ccw_device *cdev)
{
struct urdev *urd;
int rc;
TRACE("ur_probe: cdev=%p\n", cdev);
mutex_lock(&vmur_mutex);
urd = urdev_alloc(cdev);
if (!urd) {
rc = -ENOMEM;
goto fail_unlock;
}
rc = ur_create_attributes(&cdev->dev);
if (rc) {
rc = -ENOMEM;
goto fail_urdev_put;
}
/* validate virtual unit record device */
urd->class = get_urd_class(urd);
if (urd->class < 0) {
rc = urd->class;
goto fail_remove_attr;
}
if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
rc = -EOPNOTSUPP;
goto fail_remove_attr;
}
spin_lock_irq(get_ccwdev_lock(cdev));
dev_set_drvdata(&cdev->dev, urd);
cdev->handler = ur_int_handler;
spin_unlock_irq(get_ccwdev_lock(cdev));
mutex_unlock(&vmur_mutex);
return 0;
fail_remove_attr:
ur_remove_attributes(&cdev->dev);
fail_urdev_put:
urdev_put(urd);
fail_unlock:
mutex_unlock(&vmur_mutex);
return rc;
}
static int ur_set_online(struct ccw_device *cdev)
{
struct urdev *urd;
int minor, major, rc;
char node_id[16];
TRACE("ur_set_online: cdev=%p\n", cdev);
mutex_lock(&vmur_mutex);
urd = urdev_get_from_cdev(cdev);
if (!urd) {
/* ur_remove already deleted our urd */
rc = -ENODEV;
goto fail_unlock;
}
if (urd->char_device) {
/* Another ur_set_online was faster */
rc = -EBUSY;
goto fail_urdev_put;
}
minor = urd->dev_id.devno;
major = MAJOR(ur_first_dev_maj_min);
urd->char_device = cdev_alloc();
if (!urd->char_device) {
rc = -ENOMEM;
goto fail_urdev_put;
}
urd->char_device->ops = &ur_fops;
urd->char_device->owner = ur_fops.owner;
rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
if (rc)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
if (urd->class == DEV_CLASS_UR_I)
sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
if (urd->class == DEV_CLASS_UR_O)
sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
} else {
rc = -EOPNOTSUPP;
goto fail_free_cdev;
}
urd->device = device_create(vmur_class, &cdev->dev,
urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
TRACE("ur_set_online: device_create rc=%d\n", rc);
goto fail_free_cdev;
}
urdev_put(urd);
mutex_unlock(&vmur_mutex);
return 0;
fail_free_cdev:
cdev_del(urd->char_device);
urd->char_device = NULL;
fail_urdev_put:
urdev_put(urd);
fail_unlock:
mutex_unlock(&vmur_mutex);
return rc;
}
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
struct urdev *urd;
int rc;
TRACE("ur_set_offline: cdev=%p\n", cdev);
urd = urdev_get_from_cdev(cdev);
if (!urd)
/* ur_remove already deleted our urd */
return -ENODEV;
if (!urd->char_device) {
/* Another ur_set_offline was faster */
rc = -EBUSY;
goto fail_urdev_put;
}
if (!force && (refcount_read(&urd->ref_count) > 2)) {
/* There is still a user of urd (e.g. ur_open) */
TRACE("ur_set_offline: BUSY\n");
rc = -EBUSY;
goto fail_urdev_put;
}
if (cancel_work_sync(&urd->uevent_work)) {
/* Work not run yet - need to release reference here */
urdev_put(urd);
}
device_destroy(vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
rc = 0;
fail_urdev_put:
urdev_put(urd);
return rc;
}
static int ur_set_offline(struct ccw_device *cdev)
{
int rc;
mutex_lock(&vmur_mutex);
rc = ur_set_offline_force(cdev, 0);
mutex_unlock(&vmur_mutex);
return rc;
}
static void ur_remove(struct ccw_device *cdev)
{
unsigned long flags;
TRACE("ur_remove\n");
mutex_lock(&vmur_mutex);
if (cdev->online)
ur_set_offline_force(cdev, 1);
ur_remove_attributes(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
urdev_put(dev_get_drvdata(&cdev->dev));
dev_set_drvdata(&cdev->dev, NULL);
cdev->handler = NULL;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_unlock(&vmur_mutex);
}
/*
* Module initialisation and cleanup
*/
static int __init ur_init(void)
{
int rc;
dev_t dev;
if (!MACHINE_IS_VM) {
pr_err("The %s cannot be loaded without z/VM\n",
ur_banner);
return -ENODEV;
}
vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
if (!vmur_dbf)
return -ENOMEM;
rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
if (rc)
goto fail_free_dbf;
debug_set_level(vmur_dbf, 6);
vmur_class = class_create("vmur");
if (IS_ERR(vmur_class)) {
rc = PTR_ERR(vmur_class);
goto fail_free_dbf;
}
rc = ccw_driver_register(&ur_driver);
if (rc)
goto fail_class_destroy;
rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
if (rc) {
pr_err("Kernel function alloc_chrdev_region failed with "
"error code %d\n", rc);
goto fail_unregister_driver;
}
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
pr_info("%s loaded.\n", ur_banner);
return 0;
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
fail_class_destroy:
class_destroy(vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
}
static void __exit ur_exit(void)
{
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
class_destroy(vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
module_init(ur_init);
module_exit(ur_exit);
| linux-master | drivers/s390/char/vmur.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for writing z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <[email protected]>
*/
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/ebcdic.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
#define MONWRITE_MAX_DATALEN 4010
static int mon_max_bufs = 255;
static int mon_buf_count;
struct mon_buf {
struct list_head list;
struct monwrite_hdr hdr;
int diag_done;
char *data;
};
struct mon_private {
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
struct mutex thread_mutex;
};
/*
* helper functions
*/
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
struct appldata_parameter_list *parm_list;
struct appldata_product_id *id;
int rc;
id = kmalloc(sizeof(*id), GFP_KERNEL);
parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
rc = -ENOMEM;
if (!id || !parm_list)
goto out;
memcpy(id->prod_nr, "LNXAPPL", 7);
id->prod_fn = myhdr->applid;
id->record_nr = myhdr->record_num;
id->version_nr = myhdr->version;
id->release_nr = myhdr->release;
id->mod_lvl = myhdr->mod_level;
rc = appldata_asm(parm_list, id, fcn,
(void *) buffer, myhdr->datalen);
if (rc <= 0)
goto out;
pr_err("Writing monitor data failed with rc=%i\n", rc);
rc = (rc == 5) ? -EPERM : -EINVAL;
out:
kfree(id);
kfree(parm_list);
return rc;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct monwrite_hdr *monhdr)
{
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
if ((entry->hdr.mon_function == monhdr->mon_function ||
monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
return NULL;
}
static int monwrite_new_hdr(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf;
int rc = 0;
if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
monhdr->mon_function > MONWRITE_START_CONFIG ||
monhdr->hdrlen != sizeof(struct monwrite_hdr))
return -EINVAL;
monbuf = NULL;
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
monbuf = monwrite_find_hdr(monpriv, monhdr);
if (monbuf) {
if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
monhdr->datalen = monbuf->hdr.datalen;
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_STOP_REC);
list_del(&monbuf->list);
mon_buf_count--;
kfree(monbuf->data);
kfree(monbuf);
monbuf = NULL;
}
} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
if (mon_buf_count >= mon_max_bufs)
return -ENOSPC;
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
return -ENOMEM;
}
monbuf->hdr = *monhdr;
list_add_tail(&monbuf->list, &monpriv->list);
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
mon_buf_count++;
}
monpriv->current_buf = monbuf;
return rc;
}
static int monwrite_new_data(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf = monpriv->current_buf;
int rc = 0;
switch (monhdr->mon_function) {
case MONWRITE_START_INTERVAL:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_START_CONFIG:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_GEN_EVENT:
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_GEN_EVENT_REC);
list_del(&monpriv->current_buf->list);
kfree(monpriv->current_buf->data);
kfree(monpriv->current_buf);
monpriv->current_buf = NULL;
break;
default:
/* monhdr->mon_function is checked in monwrite_new_hdr */
BUG();
}
return rc;
}
/*
* file operations
*/
static int monwrite_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
return nonseekable_open(inode, filp);
}
static int monwrite_close(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv = filp->private_data;
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list) {
if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&entry->hdr, entry->data,
APPLDATA_STOP_REC);
mon_buf_count--;
list_del(&entry->list);
kfree(entry->data);
kfree(entry);
}
kfree(monpriv);
return 0;
}
static ssize_t monwrite_write(struct file *filp, const char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
size_t len, written;
void *to;
int rc;
mutex_lock(&monpriv->thread_mutex);
for (written = 0; written < count; ) {
if (monpriv->hdr_to_read) {
len = min(count - written, monpriv->hdr_to_read);
to = (char *) &monpriv->hdr +
sizeof(monpriv->hdr) - monpriv->hdr_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->hdr_to_read -= len;
written += len;
if (monpriv->hdr_to_read > 0)
continue;
rc = monwrite_new_hdr(monpriv);
if (rc)
goto out_error;
monpriv->data_to_read = monpriv->current_buf ?
monpriv->current_buf->hdr.datalen : 0;
}
if (monpriv->data_to_read) {
len = min(count - written, monpriv->data_to_read);
to = monpriv->current_buf->data +
monpriv->hdr.datalen - monpriv->data_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->data_to_read -= len;
written += len;
if (monpriv->data_to_read > 0)
continue;
rc = monwrite_new_data(monpriv);
if (rc)
goto out_error;
}
monpriv->hdr_to_read = sizeof(monpriv->hdr);
}
mutex_unlock(&monpriv->thread_mutex);
return written;
out_error:
monpriv->data_to_read = 0;
monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
mutex_unlock(&monpriv->thread_mutex);
return rc;
}
static const struct file_operations monwrite_fops = {
.owner = THIS_MODULE,
.open = &monwrite_open,
.release = &monwrite_close,
.write = &monwrite_write,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monwriter",
.fops = &monwrite_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/*
* module init/exit
*/
static int __init mon_init(void)
{
if (!MACHINE_IS_VM)
return -ENODEV;
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
return misc_register(&mon_dev);
}
static void __exit mon_exit(void)
{
misc_deregister(&mon_dev);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
"that can be active at one time");
MODULE_AUTHOR("Melissa Howland <[email protected]>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
"APPLDATA monitor records.");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/char/monwriter.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2022
* Author(s): Steffen Eiden <[email protected]>
*
* This file provides a Linux misc device to give userspace access to some
* Ultravisor (UV) functions. The device only accepts IOCTLs and will only
* be present if the Ultravisor facility (158) is present.
*
 * When userspace sends a valid IOCTL, uvdevice copies the input data to
 * kernel space and performs some basic validity checks to avoid kernel/system
 * corruption. Checks that the Ultravisor performs itself are not duplicated
 * by the uvdevice, to keep the changes needed when adding new functionality
 * to existing UV-calls minimal.
* After the checks uvdevice builds a corresponding
* Ultravisor Call Control Block, and sends the request to the Ultravisor.
* Then, it copies the response, including the return codes, back to userspace.
* It is the responsibility of the userspace to check for any error issued
* by UV and to interpret the UV response. The uvdevice acts as a communication
* channel for userspace to the Ultravisor.
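 *
 * A minimal userspace sketch of this flow (illustrative only, not part of
 * the driver): it assumes the misc device node shows up as /dev/uv
 * (UVIO_DEVICE_NAME) and uses the UVIO_IOCTL_UVDEV_INFO request and the
 * uvio_ioctl_cb/uvio_uvdev_info layouts from <asm/uvdevice.h>.
 *
 *   struct uvio_uvdev_info info = {};
 *   struct uvio_ioctl_cb cb = {
 *           .argument_addr = (__u64)(unsigned long)&info,
 *           .argument_len  = sizeof(info),
 *   };
 *   int fd = open("/dev/uv", O_RDWR);
 *
 *   if (fd >= 0 && ioctl(fd, UVIO_IOCTL_UVDEV_INFO, &cb) == 0)
 *           printf("uvio cmds 0x%llx, uv cmds 0x%llx\n",
 *                  (unsigned long long)info.supp_uvio_cmds,
 *                  (unsigned long long)info.supp_uv_cmds);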
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/cpufeature.h>
#include <asm/uvdevice.h>
#include <asm/uv.h>
#define BIT_UVIO_INTERNAL U32_MAX
/* Mapping from IOCTL-nr to UVC-bit */
static const u32 ioctl_nr_to_uvc_bit[] __initconst = {
[UVIO_IOCTL_UVDEV_INFO_NR] = BIT_UVIO_INTERNAL,
[UVIO_IOCTL_ATT_NR] = BIT_UVC_CMD_RETR_ATTEST,
[UVIO_IOCTL_ADD_SECRET_NR] = BIT_UVC_CMD_ADD_SECRET,
[UVIO_IOCTL_LIST_SECRETS_NR] = BIT_UVC_CMD_LIST_SECRETS,
[UVIO_IOCTL_LOCK_SECRETS_NR] = BIT_UVC_CMD_LOCK_SECRETS,
};
static_assert(ARRAY_SIZE(ioctl_nr_to_uvc_bit) == UVIO_IOCTL_NUM_IOCTLS);
static struct uvio_uvdev_info uvdev_info = {
.supp_uvio_cmds = GENMASK_ULL(UVIO_IOCTL_NUM_IOCTLS - 1, 0),
};
static void __init set_supp_uv_cmds(unsigned long *supp_uv_cmds)
{
int i;
for (i = 0; i < UVIO_IOCTL_NUM_IOCTLS; i++) {
if (ioctl_nr_to_uvc_bit[i] == BIT_UVIO_INTERNAL)
continue;
if (!test_bit_inv(ioctl_nr_to_uvc_bit[i], uv_info.inst_calls_list))
continue;
__set_bit(i, supp_uv_cmds);
}
}
/**
* uvio_uvdev_info() - get information about the uvdevice
*
* @uv_ioctl: ioctl control block
*
* Lists all IOCTLs that are supported by this uvdevice
*/
static int uvio_uvdev_info(struct uvio_ioctl_cb *uv_ioctl)
{
void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
if (uv_ioctl->argument_len < sizeof(uvdev_info))
return -EINVAL;
if (copy_to_user(user_buf_arg, &uvdev_info, sizeof(uvdev_info)))
return -EFAULT;
uv_ioctl->uv_rc = UVC_RC_EXECUTED;
return 0;
}
static int uvio_build_uvcb_attest(struct uv_cb_attest *uvcb_attest, u8 *arcb,
u8 *meas, u8 *add_data, struct uvio_attest *uvio_attest)
{
void __user *user_buf_arcb = (void __user *)uvio_attest->arcb_addr;
if (copy_from_user(arcb, user_buf_arcb, uvio_attest->arcb_len))
return -EFAULT;
uvcb_attest->header.len = sizeof(*uvcb_attest);
uvcb_attest->header.cmd = UVC_CMD_RETR_ATTEST;
uvcb_attest->arcb_addr = (u64)arcb;
uvcb_attest->cont_token = 0;
uvcb_attest->user_data_len = uvio_attest->user_data_len;
memcpy(uvcb_attest->user_data, uvio_attest->user_data, sizeof(uvcb_attest->user_data));
uvcb_attest->meas_len = uvio_attest->meas_len;
uvcb_attest->meas_addr = (u64)meas;
uvcb_attest->add_data_len = uvio_attest->add_data_len;
uvcb_attest->add_data_addr = (u64)add_data;
return 0;
}
static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest,
struct uvio_ioctl_cb *uv_ioctl,
u8 *measurement, u8 *add_data,
struct uvio_attest *uvio_attest)
{
struct uvio_attest __user *user_uvio_attest = (void __user *)uv_ioctl->argument_addr;
void __user *user_buf_add = (void __user *)uvio_attest->add_data_addr;
void __user *user_buf_meas = (void __user *)uvio_attest->meas_addr;
void __user *user_buf_uid = &user_uvio_attest->config_uid;
if (copy_to_user(user_buf_meas, measurement, uvio_attest->meas_len))
return -EFAULT;
if (add_data && copy_to_user(user_buf_add, add_data, uvio_attest->add_data_len))
return -EFAULT;
if (copy_to_user(user_buf_uid, uvcb_attest->config_uid, sizeof(uvcb_attest->config_uid)))
return -EFAULT;
return 0;
}
static int get_uvio_attest(struct uvio_ioctl_cb *uv_ioctl, struct uvio_attest *uvio_attest)
{
u8 __user *user_arg_buf = (u8 __user *)uv_ioctl->argument_addr;
if (copy_from_user(uvio_attest, user_arg_buf, sizeof(*uvio_attest)))
return -EFAULT;
if (uvio_attest->arcb_len > UVIO_ATT_ARCB_MAX_LEN)
return -EINVAL;
if (uvio_attest->arcb_len == 0)
return -EINVAL;
if (uvio_attest->meas_len > UVIO_ATT_MEASUREMENT_MAX_LEN)
return -EINVAL;
if (uvio_attest->meas_len == 0)
return -EINVAL;
if (uvio_attest->add_data_len > UVIO_ATT_ADDITIONAL_MAX_LEN)
return -EINVAL;
if (uvio_attest->reserved136)
return -EINVAL;
return 0;
}
/**
* uvio_attestation() - Perform a Retrieve Attestation Measurement UVC.
*
* @uv_ioctl: ioctl control block
*
* uvio_attestation() does a Retrieve Attestation Measurement Ultravisor Call.
* It verifies that the given userspace addresses are valid and request sizes
* are sane. Every other check is made by the Ultravisor (UV) and won't result
* in a negative return value. It copies the input to kernelspace, builds the
* request, sends the UV-call, and copies the result to userspace.
*
 * The Attestation Request has two inputs and two outputs.
* ARCB and User Data are inputs for the UV generated by userspace.
* Measurement and Additional Data are outputs for userspace generated by UV.
*
* The Attestation Request Control Block (ARCB) is a cryptographically verified
* and secured request to UV and User Data is some plaintext data which is
* going to be included in the Attestation Measurement calculation.
*
 * Measurement is a cryptographic measurement of the caller's properties,
* optional data configured by the ARCB and the user data. If specified by the
* ARCB, UV will add some Additional Data to the measurement calculation.
* This Additional Data is then returned as well.
*
* If the Retrieve Attestation Measurement UV facility is not present,
* UV will return invalid command rc. This won't be fenced in the driver
* and does not result in a negative return value.
*
* Context: might sleep
*
* Return: 0 on success or a negative error code on error.
*/
static int uvio_attestation(struct uvio_ioctl_cb *uv_ioctl)
{
struct uv_cb_attest *uvcb_attest = NULL;
struct uvio_attest *uvio_attest = NULL;
u8 *measurement = NULL;
u8 *add_data = NULL;
u8 *arcb = NULL;
int ret;
ret = -EINVAL;
if (uv_ioctl->argument_len != sizeof(*uvio_attest))
goto out;
ret = -ENOMEM;
uvio_attest = kzalloc(sizeof(*uvio_attest), GFP_KERNEL);
if (!uvio_attest)
goto out;
ret = get_uvio_attest(uv_ioctl, uvio_attest);
if (ret)
goto out;
ret = -ENOMEM;
arcb = kvzalloc(uvio_attest->arcb_len, GFP_KERNEL);
measurement = kvzalloc(uvio_attest->meas_len, GFP_KERNEL);
if (!arcb || !measurement)
goto out;
if (uvio_attest->add_data_len) {
add_data = kvzalloc(uvio_attest->add_data_len, GFP_KERNEL);
if (!add_data)
goto out;
}
uvcb_attest = kzalloc(sizeof(*uvcb_attest), GFP_KERNEL);
if (!uvcb_attest)
goto out;
ret = uvio_build_uvcb_attest(uvcb_attest, arcb, measurement, add_data, uvio_attest);
if (ret)
goto out;
uv_call_sched(0, (u64)uvcb_attest);
uv_ioctl->uv_rc = uvcb_attest->header.rc;
uv_ioctl->uv_rrc = uvcb_attest->header.rrc;
ret = uvio_copy_attest_result_to_user(uvcb_attest, uv_ioctl, measurement, add_data,
uvio_attest);
out:
kvfree(arcb);
kvfree(measurement);
kvfree(add_data);
kfree(uvio_attest);
kfree(uvcb_attest);
return ret;
}
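/*
 * Illustrative userspace sketch for the attestation IOCTL handled above
 * (not part of the driver; arcb_buf/arcb_size/meas_buf are placeholders,
 * and the uvio_attest/uvio_ioctl_cb layouts and the UVIO_IOCTL_ATT request
 * are taken from <asm/uvdevice.h>):
 *
 *   struct uvio_attest att = {
 *           .arcb_addr = (__u64)(unsigned long)arcb_buf,
 *           .arcb_len  = arcb_size,
 *           .meas_addr = (__u64)(unsigned long)meas_buf,
 *           .meas_len  = sizeof(meas_buf),
 *   };
 *   struct uvio_ioctl_cb cb = {
 *           .argument_addr = (__u64)(unsigned long)&att,
 *           .argument_len  = sizeof(att),
 *   };
 *
 *   if (ioctl(fd, UVIO_IOCTL_ATT, &cb) == 0)
 *           check cb.uv_rc (UVC_RC_EXECUTED) and then read the first
 *           att.meas_len bytes of meas_buf as the measurement
 */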
/**
 * uvio_add_secret() - perform an Add Secret UVC
*
* @uv_ioctl: ioctl control block
*
* uvio_add_secret() performs the Add Secret Ultravisor Call.
*
* The given userspace argument address and size are verified to be
* valid but every other check is made by the Ultravisor
* (UV). Therefore UV errors won't result in a negative return
* value. The request is then copied to kernelspace, the UV-call is
* performed and the results are copied back to userspace.
*
* The argument has to point to an Add Secret Request Control Block
* which is an encrypted and cryptographically verified request that
* inserts a protected guest's secrets into the Ultravisor for later
* use.
*
* If the Add Secret UV facility is not present, UV will return
* invalid command rc. This won't be fenced in the driver and does not
* result in a negative return value.
*
* Context: might sleep
*
* Return: 0 on success or a negative error code on error.
*/
static int uvio_add_secret(struct uvio_ioctl_cb *uv_ioctl)
{
void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
struct uv_cb_guest_addr uvcb = {
.header.len = sizeof(uvcb),
.header.cmd = UVC_CMD_ADD_SECRET,
};
void *asrcb = NULL;
int ret;
if (uv_ioctl->argument_len > UVIO_ADD_SECRET_MAX_LEN)
return -EINVAL;
if (uv_ioctl->argument_len == 0)
return -EINVAL;
asrcb = kvzalloc(uv_ioctl->argument_len, GFP_KERNEL);
if (!asrcb)
return -ENOMEM;
ret = -EFAULT;
if (copy_from_user(asrcb, user_buf_arg, uv_ioctl->argument_len))
goto out;
ret = 0;
uvcb.addr = (u64)asrcb;
uv_call_sched(0, (u64)&uvcb);
uv_ioctl->uv_rc = uvcb.header.rc;
uv_ioctl->uv_rrc = uvcb.header.rrc;
out:
kvfree(asrcb);
return ret;
}
/**
 * uvio_list_secrets() - perform a List Secret UVC
 *
* @uv_ioctl: ioctl control block
*
* uvio_list_secrets() performs the List Secret Ultravisor Call. It verifies
* that the given userspace argument address is valid and its size is sane.
* Every other check is made by the Ultravisor (UV) and won't result in a
* negative return value. It builds the request, performs the UV-call, and
* copies the result to userspace.
*
* The argument specifies the location for the result of the UV-Call.
*
* If the List Secrets UV facility is not present, UV will return invalid
* command rc. This won't be fenced in the driver and does not result in a
* negative return value.
*
* Context: might sleep
*
* Return: 0 on success or a negative error code on error.
*/
static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
{
void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
struct uv_cb_guest_addr uvcb = {
.header.len = sizeof(uvcb),
.header.cmd = UVC_CMD_LIST_SECRETS,
};
void *secrets = NULL;
int ret = 0;
if (uv_ioctl->argument_len != UVIO_LIST_SECRETS_LEN)
return -EINVAL;
secrets = kvzalloc(UVIO_LIST_SECRETS_LEN, GFP_KERNEL);
if (!secrets)
return -ENOMEM;
uvcb.addr = (u64)secrets;
uv_call_sched(0, (u64)&uvcb);
uv_ioctl->uv_rc = uvcb.header.rc;
uv_ioctl->uv_rrc = uvcb.header.rrc;
if (copy_to_user(user_buf_arg, secrets, UVIO_LIST_SECRETS_LEN))
ret = -EFAULT;
kvfree(secrets);
return ret;
}
/**
 * uvio_lock_secrets() - perform a Lock Secret Store UVC
 *
* @uv_ioctl: ioctl control block
*
* uvio_lock_secrets() performs the Lock Secret Store Ultravisor Call. It
* performs the UV-call and copies the return codes to the ioctl control block.
* After this call was dispatched successfully every following Add Secret UVC
* and Lock Secrets UVC will fail with return code 0x102.
*
* The argument address and size must be 0.
*
* If the Lock Secrets UV facility is not present, UV will return invalid
* command rc. This won't be fenced in the driver and does not result in a
* negative return value.
*
* Context: might sleep
*
* Return: 0 on success or a negative error code on error.
*/
static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
{
struct uv_cb_nodata uvcb = {
.header.len = sizeof(uvcb),
.header.cmd = UVC_CMD_LOCK_SECRETS,
};
if (ioctl->argument_addr || ioctl->argument_len)
return -EINVAL;
uv_call(0, (u64)&uvcb);
ioctl->uv_rc = uvcb.header.rc;
ioctl->uv_rrc = uvcb.header.rrc;
return 0;
}
static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp,
unsigned long cmd)
{
u8 nr = _IOC_NR(cmd);
if (_IOC_DIR(cmd) != (_IOC_READ | _IOC_WRITE))
return -ENOIOCTLCMD;
if (_IOC_TYPE(cmd) != UVIO_TYPE_UVC)
return -ENOIOCTLCMD;
if (nr >= UVIO_IOCTL_NUM_IOCTLS)
return -ENOIOCTLCMD;
if (_IOC_SIZE(cmd) != sizeof(*ioctl))
return -ENOIOCTLCMD;
if (copy_from_user(ioctl, argp, sizeof(*ioctl)))
return -EFAULT;
if (ioctl->flags != 0)
return -EINVAL;
if (memchr_inv(ioctl->reserved14, 0, sizeof(ioctl->reserved14)))
return -EINVAL;
return nr;
}
/*
* IOCTL entry point for the Ultravisor device.
*/
static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct uvio_ioctl_cb uv_ioctl = { };
long ret;
int nr;
nr = uvio_copy_and_check_ioctl(&uv_ioctl, argp, cmd);
if (nr < 0)
return nr;
switch (nr) {
case UVIO_IOCTL_UVDEV_INFO_NR:
ret = uvio_uvdev_info(&uv_ioctl);
break;
case UVIO_IOCTL_ATT_NR:
ret = uvio_attestation(&uv_ioctl);
break;
case UVIO_IOCTL_ADD_SECRET_NR:
ret = uvio_add_secret(&uv_ioctl);
break;
case UVIO_IOCTL_LIST_SECRETS_NR:
ret = uvio_list_secrets(&uv_ioctl);
break;
case UVIO_IOCTL_LOCK_SECRETS_NR:
ret = uvio_lock_secrets(&uv_ioctl);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
if (ret)
return ret;
if (copy_to_user(argp, &uv_ioctl, sizeof(uv_ioctl)))
ret = -EFAULT;
return ret;
}
static const struct file_operations uvio_dev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = uvio_ioctl,
.llseek = no_llseek,
};
static struct miscdevice uvio_dev_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = UVIO_DEVICE_NAME,
.fops = &uvio_dev_fops,
};
static void __exit uvio_dev_exit(void)
{
misc_deregister(&uvio_dev_miscdev);
}
static int __init uvio_dev_init(void)
{
set_supp_uv_cmds((unsigned long *)&uvdev_info.supp_uv_cmds);
return misc_register(&uvio_dev_miscdev);
}
module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init);
module_exit(uvio_dev_exit);
MODULE_AUTHOR("IBM Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ultravisor UAPI driver");
| linux-master | drivers/s390/char/uvdevice.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP line mode terminal driver.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Peschke <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include "ctrlchar.h"
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
/*
* size of a buffer that collects single characters coming in
* via sclp_tty_put_char()
*/
#define SCLP_TTY_BUF_SIZE 512
/*
* There is exactly one SCLP terminal, so we can keep things simple
* and allocate all variables statically.
*/
/* Lock to guard over changes to global variables. */
static DEFINE_SPINLOCK(sclp_tty_lock);
/* List of free pages that can be used for console output buffering. */
static LIST_HEAD(sclp_tty_pages);
/* List of full struct sclp_buffer structures ready for output. */
static LIST_HEAD(sclp_tty_outqueue);
/* Number of buffers currently queued for output. */
static int sclp_tty_buffer_count;
/* Pointer to current console buffer. */
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
static struct tty_port sclp_port;
static u8 sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
static int sclp_tty_tolower;
#define SCLP_TTY_COLUMNS 320
#define SPACES_PER_TAB 8
#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
/* This routine is called whenever we try to open a SCLP terminal. */
static int
sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&sclp_port, tty);
tty->driver_data = NULL;
return 0;
}
/* This routine is called when the SCLP terminal is closed. */
static void
sclp_tty_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count > 1)
return;
tty_port_tty_set(&sclp_port, NULL);
}
/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is active. This is not an exact number because not every
* character needs the same space in the sccb. The worst case is
* a string of newlines. Every newline creates a new message which
* needs 82 bytes.
*/
static unsigned int
sclp_tty_write_room (struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
unsigned int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct msg_buf);
list_for_each(l, &sclp_tty_pages)
count += NR_EMPTY_MSG_PER_SCCB;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
static void
sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_tty_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
sclp_tty_buffer_count--;
list_add_tail((struct list_head *) page, &sclp_tty_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_tty_outqueue))
buffer = list_entry(sclp_tty_outqueue.next,
struct sclp_buffer, list);
spin_unlock_irqrestore(&sclp_tty_lock, flags);
} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
tty_port_tty_wakeup(&sclp_port);
}
static inline void
__sclp_ttybuf_emit(struct sclp_buffer *buffer)
{
unsigned long flags;
int count;
int rc;
spin_lock_irqsave(&sclp_tty_lock, flags);
list_add_tail(&buffer->list, &sclp_tty_outqueue);
count = sclp_tty_buffer_count++;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (count)
return;
rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
if (rc)
sclp_ttybuf_callback(buffer, rc);
}
/*
* When this routine is called from the timer then we flush the
* temporary write buffer.
*/
static void
sclp_tty_timeout(struct timer_list *unused)
{
unsigned long flags;
struct sclp_buffer *buf;
spin_lock_irqsave(&sclp_tty_lock, flags);
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (buf != NULL) {
__sclp_ttybuf_emit(buf);
}
}
/*
* Write a string to the sclp tty.
*/
static int sclp_tty_write_string(const u8 *str, int count, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
struct sclp_buffer *buf;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
do {
/* Create a sclp output buffer if none exists yet */
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (may_fail)
goto out;
else
sclp_sync_wait();
spin_lock_irqsave(&sclp_tty_lock, flags);
}
page = sclp_tty_pages.next;
list_del((struct list_head *) page);
sclp_ttybuf = sclp_make_buffer(page, SCLP_TTY_COLUMNS,
SPACES_PER_TAB);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_ttybuf, str, count);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
__sclp_ttybuf_emit(buf);
spin_lock_irqsave(&sclp_tty_lock, flags);
str += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
return overall_written;
}
/*
* This routine is called by the kernel to write a series of characters to the
* tty device. The characters may come from user space or kernel space. This
* routine will return the number of characters actually accepted for writing.
*/
static ssize_t
sclp_tty_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return sclp_tty_write_string(buf, count, 1);
}
/*
* This routine is called by the kernel to write a single character to the tty
* device. If the kernel uses this routine, it must call the flush_chars()
* routine (if defined) when it is done stuffing characters into the driver.
*
* Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
* If the given character is a '\n' the contents of the SCLP write buffer
* - including previous characters from sclp_tty_put_char() and strings from
* sclp_write() without final '\n' - will be written.
*/
static int
sclp_tty_put_char(struct tty_struct *tty, u8 ch)
{
sclp_tty_chars[sclp_tty_chars_count++] = ch;
if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return 1;
}
/*
* This routine is called by the kernel after it has written a series of
* characters to the tty device using put_char().
*/
static void
sclp_tty_flush_chars(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* This routine returns the number of characters in the write buffer of the
* SCLP driver. The provided number includes all characters that are stored
* in the SCCB (will be written next time the SCLP is not busy) as well as
* characters in the write buffer (will not be written as long as there is a
* final line feed missing).
*/
static unsigned int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct sclp_buffer *t;
unsigned int count = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
if (sclp_ttybuf != NULL)
count = sclp_chars_in_buffer(sclp_ttybuf);
list_for_each_entry(t, &sclp_tty_outqueue, list) {
count += sclp_chars_in_buffer(t);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
/*
* removes all content from buffers of low level driver
*/
static void
sclp_tty_flush_buffer(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* push input to tty
*/
static void
sclp_tty_input(unsigned char* buf, unsigned int count)
{
struct tty_struct *tty = tty_port_tty_get(&sclp_port);
unsigned int cchar;
/*
* If this tty driver is currently closed
* then throw the received input away.
*/
if (tty == NULL)
return;
cchar = ctrlchar_handle(buf, count, tty);
switch (cchar & CTRLCHAR_MASK) {
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
tty_flip_buffer_push(&sclp_port);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
if (count < 2 ||
(strncmp((const char *) buf + count - 2, "^n", 2) &&
strncmp((const char *) buf + count - 2, "\252n", 2))) {
/* add the auto \n */
tty_insert_flip_string(&sclp_port, buf, count);
tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
} else
tty_insert_flip_string(&sclp_port, buf, count - 2);
tty_flip_buffer_push(&sclp_port);
break;
}
tty_kref_put(tty);
}
/*
 * get an EBCDIC string in upper/lower case,
 * find characters in lower/upper case separated by a special character,
 * modify the original string,
 * return the length of the resulting string
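 *
 * e.g. with sclp_tty_tolower set, the (already lowercased) input
 * "abc%def%ghi" becomes "abcDEFghi" and "100%%" becomes "100%"
 * (shown here in ASCII for readability; the buffer actually holds EBCDIC)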
*/
static int sclp_switch_cases(unsigned char *buf, int count)
{
unsigned char *ip, *op;
int toggle;
/* initially changing case is off */
toggle = 0;
ip = op = buf;
while (count-- > 0) {
/* compare with special character */
if (*ip == CASE_DELIMITER) {
/* followed by another special character? */
if (count && ip[1] == CASE_DELIMITER) {
/*
* ... then put a single copy of the special
* character to the output string
*/
*op++ = *ip++;
count--;
} else
/*
 * ... special character followed by a normal
* character toggles the case change behaviour
*/
toggle = ~toggle;
/* skip special character */
ip++;
} else
/* not the special character */
if (toggle)
/* but case switching is on */
if (sclp_tty_tolower)
/* switch to uppercase */
*op++ = _ebc_toupper[(int) *ip++];
else
/* switch to lowercase */
*op++ = _ebc_tolower[(int) *ip++];
else
/* no case switching, copy the character */
*op++ = *ip++;
}
/* return length of reformatted string. */
return op - buf;
}
static void sclp_get_input(struct gds_subvector *sv)
{
unsigned char *str;
int count;
str = (unsigned char *) (sv + 1);
count = sv->length - sizeof(*sv);
if (sclp_tty_tolower)
EBC_TOLOWER(str, count);
count = sclp_switch_cases(str, count);
/* convert EBCDIC to ASCII (modify original input in SCCB) */
sclp_ebcasc_str(str, count);
/* transfer input to high level driver */
sclp_tty_input(str, count);
}
static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
{
void *end;
end = (void *) sv + sv->length;
for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
if (sv->key == 0x30)
sclp_get_input(sv);
}
static inline void sclp_eval_textcmd(struct gds_vector *v)
{
struct gds_subvector *sv;
void *end;
end = (void *) v + v->length;
for (sv = (struct gds_subvector *) (v + 1);
(void *) sv < end; sv = (void *) sv + sv->length)
if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
sclp_eval_selfdeftextmsg(sv);
}
static inline void sclp_eval_cpmsu(struct gds_vector *v)
{
void *end;
end = (void *) v + v->length;
for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
if (v->gds_id == GDS_ID_TEXTCMD)
sclp_eval_textcmd(v);
}
static inline void sclp_eval_mdsmu(struct gds_vector *v)
{
v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
if (v)
sclp_eval_cpmsu(v);
}
static void sclp_tty_receiver(struct evbuf_header *evbuf)
{
struct gds_vector *v;
v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
GDS_ID_MDSMU);
if (v)
sclp_eval_mdsmu(v);
}
static void
sclp_tty_state_change(struct sclp_register *reg)
{
}
static struct sclp_register sclp_input_event =
{
.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
.state_change_fn = sclp_tty_state_change,
.receiver_fn = sclp_tty_receiver
};
static const struct tty_operations sclp_ops = {
.open = sclp_tty_open,
.close = sclp_tty_close,
.write = sclp_tty_write,
.put_char = sclp_tty_put_char,
.flush_chars = sclp_tty_flush_chars,
.write_room = sclp_tty_write_room,
.chars_in_buffer = sclp_tty_chars_in_buffer,
.flush_buffer = sclp_tty_flush_buffer,
};
static int __init
sclp_tty_init(void)
{
struct tty_driver *driver;
void *page;
int i;
int rc;
/* z/VM multiplexes the line mode output on the 32xx screen */
if (MACHINE_IS_VM && !CONSOLE_IS_SCLP)
return 0;
if (!sclp.has_linemode)
return 0;
driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
rc = sclp_rw_init();
if (rc) {
tty_driver_kref_put(driver);
return rc;
}
/* Allocate pages for output buffering */
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
tty_driver_kref_put(driver);
return -ENOMEM;
}
list_add_tail((struct list_head *) page, &sclp_tty_pages);
}
timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
if (MACHINE_IS_VM) {
/* convert input lines to lowercase */
sclp_tty_tolower = 1;
}
sclp_tty_chars_count = 0;
rc = sclp_register(&sclp_input_event);
if (rc) {
tty_driver_kref_put(driver);
return rc;
}
tty_port_init(&sclp_port);
driver->driver_name = "sclp_line";
driver->name = "sclp_line";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->init_termios.c_iflag = IGNBRK | IGNPAR;
driver->init_termios.c_oflag = ONLCR;
driver->init_termios.c_lflag = ISIG | ECHO;
tty_set_operations(driver, &sclp_ops);
tty_port_link_device(&sclp_port, driver, 0);
rc = tty_register_driver(driver);
if (rc) {
tty_driver_kref_put(driver);
tty_port_destroy(&sclp_port);
return rc;
}
sclp_tty_driver = driver;
return 0;
}
device_initcall(sclp_tty_init);
| linux-master | drivers/s390/char/sclp_tty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - fullscreen driver.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5/2.6 by Martin Schwidefsky <[email protected]>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/memblock.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/asm/fs3270.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include "raw3270.h"
#include "ctrlchar.h"
static struct raw3270_fn fs3270_fn;
struct fs3270 {
struct raw3270_view view;
struct pid *fs_pid; /* Pid of controlling program. */
int read_command; /* ccw command to use for reads. */
int write_command; /* ccw command to use for writes. */
int attention; /* Got attention. */
int active; /* Fullscreen view is active. */
struct raw3270_request *init; /* single init request. */
wait_queue_head_t wait; /* Init & attention wait queue. */
struct idal_buffer *rdbuf; /* full-screen-deactivate buffer */
size_t rdbuf_size; /* size of data returned by RDBUF */
};
static DEFINE_MUTEX(fs3270_mutex);
static void fs3270_wake_up(struct raw3270_request *rq, void *data)
{
wake_up((wait_queue_head_t *)data);
}
static inline int fs3270_working(struct fs3270 *fp)
{
/*
* The fullscreen view is in working order if the view
* has been activated AND the initial request is finished.
*/
return fp->active && raw3270_request_final(fp->init);
}
static int fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
struct fs3270 *fp;
int rc;
fp = (struct fs3270 *)view;
rq->callback = fs3270_wake_up;
rq->callback_data = &fp->wait;
do {
if (!fs3270_working(fp)) {
/* Fullscreen view isn't ready yet. */
rc = wait_event_interruptible(fp->wait,
fs3270_working(fp));
if (rc != 0)
break;
}
rc = raw3270_start(view, rq);
if (rc == 0) {
/* Started successfully. Now wait for completion. */
wait_event(fp->wait, raw3270_request_final(rq));
}
} while (rc == -EACCES);
return rc;
}
/*
* Switch to the fullscreen view.
*/
static void fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *)rq->view;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *)rq->view;
if (rq->rc != 0 || rq->rescnt != 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
fp->rdbuf_size = 0;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static int fs3270_activate(struct raw3270_view *view)
{
struct fs3270 *fp;
char *cp;
int rc;
fp = (struct fs3270 *)view;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return 0;
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
cp = fp->rdbuf->data[0];
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
fp->init->ccw.count = 1;
fp->init->callback = fs3270_reset_callback;
cp[0] = 0;
} else {
/* Restore fullscreen buffer saved by fs3270_deactivate. */
fp->init->ccw.count = fp->rdbuf_size;
fp->init->callback = fs3270_restore_callback;
cp[0] = TW_KR;
cp[1] = TO_SBA;
cp[2] = cp[6];
cp[3] = cp[7];
cp[4] = TO_IC;
cp[5] = TO_SBA;
cp[6] = 0x40;
cp[7] = 0x40;
}
rc = raw3270_start_locked(view, fp->init);
fp->init->rc = rc;
if (rc)
fp->init->callback(fp->init, NULL);
else
fp->active = 1;
return rc;
}
/*
* Shutdown fullscreen view.
*/
static void fs3270_save_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *)rq->view;
/* Correct idal buffer element 0 address. */
fp->rdbuf->data[0] -= 5;
fp->rdbuf->size += 5;
/*
* If the rdbuf command failed or the idal buffer is
 * too small for the amount of data returned by the
* rdbuf command, then we have no choice but to send
* a SIGHUP to the application.
*/
if (rq->rc != 0 || rq->rescnt == 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
fp->rdbuf_size = 0;
} else {
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
}
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void fs3270_deactivate(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *)view;
fp->active = 0;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return;
/* Prepare read-buffer request. */
raw3270_request_set_cmd(fp->init, TC_RDBUF);
/*
* Hackish: skip first 5 bytes of the idal buffer to make
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
fp->rdbuf->data[0] += 5;
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
fp->init->callback = fs3270_save_callback;
/* Start I/O to read in the 3270 buffer. */
fp->init->rc = raw3270_start_locked(view, fp->init);
if (fp->init->rc)
fp->init->callback(fp->init, NULL);
}
static void fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq,
struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fp->attention = 1;
wake_up(&fp->wait);
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
}
/*
* Process reads from fullscreen 3270.
*/
static ssize_t fs3270_read(struct file *filp, char __user *data,
size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
ssize_t rc;
if (count == 0 || count > 65535)
return -EINVAL;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
if (fp->read_command == 0 && fp->write_command != 0)
fp->read_command = 6;
raw3270_request_set_cmd(rq, fp->read_command ? : 2);
raw3270_request_set_idal(rq, ib);
rc = wait_event_interruptible(fp->wait, fp->attention);
fp->attention = 0;
if (rc == 0) {
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0) {
count -= rq->rescnt;
if (idal_buffer_to_user(ib, data, count) != 0)
rc = -EFAULT;
else
rc = count;
}
}
raw3270_request_free(rq);
} else {
rc = PTR_ERR(rq);
}
idal_buffer_free(ib);
return rc;
}
/*
* Process writes to fullscreen 3270.
*/
static ssize_t fs3270_write(struct file *filp, const char __user *data,
size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
int write_command;
ssize_t rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
if (idal_buffer_from_user(ib, data, count) == 0) {
write_command = fp->write_command ? : 1;
if (write_command == 5)
write_command = 13;
raw3270_request_set_cmd(rq, write_command);
raw3270_request_set_idal(rq, ib);
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0)
rc = count - rq->rescnt;
} else {
rc = -EFAULT;
}
raw3270_request_free(rq);
} else {
rc = PTR_ERR(rq);
}
idal_buffer_free(ib);
return rc;
}
/*
* process ioctl commands for the tube driver
*/
static long fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
char __user *argp;
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (char __user *)arg;
rc = 0;
mutex_lock(&fs3270_mutex);
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
break;
case TUBOCMD:
fp->write_command = arg;
break;
case TUBGETI:
rc = put_user(fp->read_command, argp);
break;
case TUBGETO:
rc = put_user(fp->write_command, argp);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
iocb.line_cnt = fp->view.rows;
iocb.col_cnt = fp->view.cols;
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
mutex_unlock(&fs3270_mutex);
return rc;
}
/*
* Allocate fs3270 structure.
*/
static struct fs3270 *fs3270_alloc_view(void)
{
struct fs3270 *fp;
fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (!fp)
return ERR_PTR(-ENOMEM);
fp->init = raw3270_request_alloc(0);
if (IS_ERR(fp->init)) {
kfree(fp);
return ERR_PTR(-ENOMEM);
}
return fp;
}
/*
* Free fs3270 structure.
*/
static void fs3270_free_view(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *)view;
if (fp->rdbuf)
idal_buffer_free(fp->rdbuf);
raw3270_request_free(((struct fs3270 *)view)->init);
kfree(view);
}
/*
* Unlink fs3270 data structure from filp.
*/
static void fs3270_release(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *)view;
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
/* View to a 3270 device. Can be console, tty or fullscreen. */
static struct raw3270_fn fs3270_fn = {
.activate = fs3270_activate,
.deactivate = fs3270_deactivate,
.intv = (void *)fs3270_irq,
.release = fs3270_release,
.free = fs3270_free_view
};
/*
* This routine is called whenever a 3270 fullscreen device is opened.
*/
static int fs3270_open(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
struct idal_buffer *ib;
int minor, rc = 0;
if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
return -ENODEV;
minor = iminor(file_inode(filp));
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
tty_kref_put(tty);
return -ENODEV;
}
minor = tty->index;
tty_kref_put(tty);
}
mutex_lock(&fs3270_mutex);
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *)raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
raw3270_put_view(&fp->view);
rc = -EBUSY;
goto out;
}
/* Allocate fullscreen view structure. */
fp = fs3270_alloc_view();
if (IS_ERR(fp)) {
rc = PTR_ERR(fp);
goto out;
}
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
RAW3270_VIEW_LOCK_BH);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
}
/* Allocate idal-buffer. */
ib = idal_buffer_alloc(2 * fp->view.rows * fp->view.cols + 5, 0);
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
rc = PTR_ERR(ib);
goto out;
}
fp->rdbuf = ib;
rc = raw3270_activate_view(&fp->view);
if (rc) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
goto out;
}
stream_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
return rc;
}
/*
 * This routine is called when the 3270 fullscreen device is closed.
 * We wait for the remaining request to complete, then clean up.
*/
static int fs3270_close(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
fp = filp->private_data;
filp->private_data = NULL;
if (fp) {
put_pid(fp->fs_pid);
fp->fs_pid = NULL;
raw3270_reset(&fp->view);
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
}
return 0;
}
static const struct file_operations fs3270_fops = {
.owner = THIS_MODULE, /* owner */
.read = fs3270_read, /* read */
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
.open = fs3270_open, /* open */
.release = fs3270_close, /* release */
.llseek = no_llseek,
};
static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
static void fs3270_destroy_cb(int minor)
{
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
static struct raw3270_notifier fs3270_notifier = {
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
};
/*
* 3270 fullscreen driver initialization.
*/
static int __init fs3270_init(void)
{
int rc;
rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
NULL, "3270/tub");
raw3270_register_notifier(&fs3270_notifier);
return 0;
}
static void __exit fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
module_init(fs3270_init);
module_exit(fs3270_exit);
| linux-master | drivers/s390/char/fs3270.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <asm/smp.h>
#include "sclp.h"
struct conf_mgm_data {
u8 reserved;
u8 ev_qualifier;
} __attribute__((packed));
#define OFB_DATA_MAX 64
struct sclp_ofb_evbuf {
struct evbuf_header header;
struct conf_mgm_data cm_data;
char ev_data[OFB_DATA_MAX];
} __packed;
struct sclp_ofb_sccb {
struct sccb_header header;
struct sclp_ofb_evbuf ofb_evbuf;
} __packed;
#define EV_QUAL_CPU_CHANGE 1
#define EV_QUAL_CAP_CHANGE 3
#define EV_QUAL_OPEN4BUSINESS 5
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
struct device *dev;
s390_update_cpu_mhz();
pr_info("CPU capability may have changed\n");
cpus_read_lock();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
cpus_read_unlock();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
lock_device_hotplug();
smp_rescan_cpus();
unlock_device_hotplug();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
struct conf_mgm_data *cdata;
cdata = (struct conf_mgm_data *)(evbuf + 1);
switch (cdata->ev_qualifier) {
case EV_QUAL_CPU_CHANGE:
schedule_work(&sclp_cpu_change_work);
break;
case EV_QUAL_CAP_CHANGE:
schedule_work(&sclp_cpu_capability_work);
break;
}
}
static struct sclp_register sclp_conf_register =
{
#ifdef CONFIG_SCLP_OFB
.send_mask = EVTYP_CONFMGMDATA_MASK,
#endif
.receive_mask = EVTYP_CONFMGMDATA_MASK,
.receiver_fn = sclp_conf_receiver_fn,
};
#ifdef CONFIG_SCLP_OFB
static int sclp_ofb_send_req(char *ev_data, size_t len)
{
static DEFINE_MUTEX(send_mutex);
struct sclp_ofb_sccb *sccb;
int rc, response;
if (len > OFB_DATA_MAX)
return -EINVAL;
sccb = (struct sclp_ofb_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
/* Setup SCCB for the Open for Business event */
sccb->header.length = sizeof(struct sclp_ofb_sccb);
sccb->ofb_evbuf.header.length = sizeof(struct sclp_ofb_evbuf);
sccb->ofb_evbuf.header.type = EVTYP_CONFMGMDATA;
sccb->ofb_evbuf.cm_data.ev_qualifier = EV_QUAL_OPEN4BUSINESS;
memcpy(sccb->ofb_evbuf.ev_data, ev_data, len);
if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK))
pr_warn("SCLP receiver did not register to receive "
"Configuration Management Data Events.\n");
mutex_lock(&send_mutex);
rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
mutex_unlock(&send_mutex);
if (rc)
goto out;
response = sccb->header.response_code;
if (response != 0x0020) {
pr_err("Open for Business request failed with response code "
"0x%04x\n", response);
rc = -EIO;
}
out:
free_page((unsigned long)sccb);
return rc;
}
static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
int rc;
rc = sclp_ofb_send_req(buf, count);
return rc ?: count;
}
static const struct bin_attribute ofb_bin_attr = {
.attr = {
.name = "event_data",
.mode = S_IWUSR,
},
.write = sysfs_ofb_data_write,
};
#endif
static int __init sclp_ofb_setup(void)
{
#ifdef CONFIG_SCLP_OFB
struct kset *ofb_kset;
int rc;
ofb_kset = kset_create_and_add("ofb", NULL, firmware_kobj);
if (!ofb_kset)
return -ENOMEM;
rc = sysfs_create_bin_file(&ofb_kset->kobj, &ofb_bin_attr);
if (rc) {
kset_unregister(ofb_kset);
return rc;
}
#endif
return 0;
}
static int __init sclp_conf_init(void)
{
int rc;
INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
rc = sclp_register(&sclp_conf_register);
if (rc)
return rc;
return sclp_ofb_setup();
}
__initcall(sclp_conf_init);
| linux-master | drivers/s390/char/sclp_config.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive CD/DVD Device
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*
* This file provides a Linux "misc" character device for access to an
* assigned HMC drive CD/DVD-ROM. It works as follows: First create the
* device by calling hmcdrv_dev_init(). After open() a lseek(fd, 0,
* SEEK_END) indicates that a new FTP command follows (not needed on the
* first command after open). Then write() the FTP command ASCII string
* to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
* end read() the response.
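 *
 * A rough userspace sketch of that sequence (illustrative only; it assumes
 * the misc device node appears as /dev/hmcdrv, omits all error handling,
 * and the file name passed to "get" is just a placeholder):
 *
 *   char buf[4096];
 *   int fd = open("/dev/hmcdrv", O_RDWR);
 *
 *   write(fd, "dir /", 5);              first command after open()
 *   read(fd, buf, sizeof(buf));         read back the "dir" response
 *
 *   lseek(fd, 0, SEEK_END);             announce the next command
 *   write(fd, "get /dir.txt", 12);
 *   read(fd, buf, sizeof(buf));         read back the response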
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include "hmcdrv_dev.h"
#include "hmcdrv_ftp.h"
/* If the following macro is defined, then the HMC device creates its own
 * separate device class (and dynamically assigns a major number). If it is
 * not defined, the HMC device is registered as a "misc" class device.
*
#define HMCDRV_DEV_CLASS "hmcftp"
*/
#define HMCDRV_DEV_NAME "hmcdrv"
#define HMCDRV_DEV_BUSY_DELAY 500 /* delay between -EBUSY trials in ms */
#define HMCDRV_DEV_BUSY_RETRIES 3 /* number of retries on -EBUSY */
struct hmcdrv_dev_node {
#ifdef HMCDRV_DEV_CLASS
struct cdev dev; /* character device structure */
umode_t mode; /* mode of device node (unused, zero) */
#else
struct miscdevice dev; /* "misc" device structure */
#endif
};
static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len);
/*
* device operations
*/
static const struct file_operations hmcdrv_dev_fops = {
.open = hmcdrv_dev_open,
.llseek = hmcdrv_dev_seek,
.release = hmcdrv_dev_release,
.read = hmcdrv_dev_read,
.write = hmcdrv_dev_write,
};
static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
#ifdef HMCDRV_DEV_CLASS
static struct class *hmcdrv_dev_class; /* device class pointer */
static dev_t hmcdrv_dev_no; /* device number (major/minor) */
/**
* hmcdrv_dev_name() - provides a naming hint for a device node in /dev
 * @dev: device for which the naming/mode hint is requested
* @mode: file mode for device node created in /dev
*
* See: devtmpfs.c, function devtmpfs_create_node()
*
* Return: recommended device file name in /dev
*/
static char *hmcdrv_dev_name(const struct device *dev, umode_t *mode)
{
char *nodename = NULL;
const char *devname = dev_name(dev); /* kernel device name */
if (devname)
nodename = kasprintf(GFP_KERNEL, "%s", devname);
/* on device destroy (rmmod) the mode pointer may be NULL
*/
if (mode)
*mode = hmcdrv_dev.mode;
return nodename;
}
#endif /* HMCDRV_DEV_CLASS */
/*
* open()
*/
static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
{
int rc;
/* check for non-blocking access, which is really unsupported
*/
if (fp->f_flags & O_NONBLOCK)
return -EINVAL;
/* Because it makes no sense to open this device read-only (then an
 * FTP command cannot be emitted), we respond with an error.
*/
if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
return -EINVAL;
/* prevent unloading this module as long as anyone holds the
* device file open - so increment the reference count here
*/
if (!try_module_get(THIS_MODULE))
return -ENODEV;
fp->private_data = NULL; /* no command yet */
rc = hmcdrv_ftp_startup();
if (rc)
module_put(THIS_MODULE);
pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
return rc;
}
/*
* release()
*/
static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
{
pr_debug("closing file '/dev/%pD'\n", fp);
kfree(fp->private_data);
fp->private_data = NULL;
hmcdrv_ftp_shutdown();
module_put(THIS_MODULE);
return 0;
}
/*
* lseek()
*/
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
{
switch (whence) {
case SEEK_CUR: /* relative to current file position */
pos += fp->f_pos; /* new position stored in 'pos' */
break;
case SEEK_SET: /* absolute (relative to beginning of file) */
break; /* SEEK_SET */
/* We use SEEK_END as a special indicator for a SEEK_SET
 * (set absolute position), combined with clearing the
 * stored FTP command.
*/
case SEEK_END:
if (fp->private_data) {
kfree(fp->private_data);
fp->private_data = NULL;
}
break; /* SEEK_END */
default: /* SEEK_DATA, SEEK_HOLE: unsupported */
return -EINVAL;
}
if (pos < 0)
return -EINVAL;
if (fp->f_pos != pos)
++fp->f_version;
fp->f_pos = pos;
return pos;
}
/*
* transfer (helper function)
*/
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len)
{
ssize_t retlen;
unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
do {
retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
if (retlen != -EBUSY)
break;
msleep(HMCDRV_DEV_BUSY_DELAY);
} while (--trials > 0);
return retlen;
}
/*
* read()
*/
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
(fp->private_data == NULL)) { /* no FTP cmd defined ? */
return -EBADF;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
*pos, ubuf, len);
pr_debug("read from file '/dev/%pD' at %lld returns %zd/%zu\n",
fp, (long long) *pos, retlen, len);
if (retlen > 0)
*pos += retlen;
return retlen;
}
/*
* write()
*/
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
fp, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
fp->private_data = kmalloc(len + 1, GFP_KERNEL);
if (!fp->private_data)
return -ENOMEM;
if (!copy_from_user(fp->private_data, ubuf, len)) {
((char *)fp->private_data)[len] = '\0';
return len;
}
kfree(fp->private_data);
fp->private_data = NULL;
return -EFAULT;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
*pos, (char __user *) ubuf, len);
if (retlen > 0)
*pos += retlen;
pr_debug("write to file '/dev/%pD' returned %zd\n", fp, retlen);
return retlen;
}
/**
* hmcdrv_dev_init() - creates a HMC drive CD/DVD device
*
* This function creates a HMC drive CD/DVD kernel device and an associated
* device under /dev, using a dynamically allocated major number.
*
* Return: 0 on success, else an error code.
*/
int hmcdrv_dev_init(void)
{
int rc;
#ifdef HMCDRV_DEV_CLASS
struct device *dev;
rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
if (rc)
goto out_err;
cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
hmcdrv_dev.dev.owner = THIS_MODULE;
rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
if (rc)
goto out_unreg;
/* At this point the character device exists in the kernel (see
* /proc/devices), but not under /dev nor /sys/devices/virtual. So
* we have to create an associated class (see /sys/class).
*/
hmcdrv_dev_class = class_create(HMCDRV_DEV_CLASS);
if (IS_ERR(hmcdrv_dev_class)) {
rc = PTR_ERR(hmcdrv_dev_class);
goto out_devdel;
}
/* Finally a device node in /dev has to be established (as 'mknod'
* does from the command line). Notice that assignment of a device
* node name/mode function is optional (only for mode != 0600).
*/
hmcdrv_dev.mode = 0; /* "unset" */
hmcdrv_dev_class->devnode = hmcdrv_dev_name;
dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
"%s", HMCDRV_DEV_NAME);
if (!IS_ERR(dev))
return 0;
rc = PTR_ERR(dev);
class_destroy(hmcdrv_dev_class);
hmcdrv_dev_class = NULL;
out_devdel:
cdev_del(&hmcdrv_dev.dev);
out_unreg:
unregister_chrdev_region(hmcdrv_dev_no, 1);
out_err:
#else /* !HMCDRV_DEV_CLASS */
hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
rc = misc_register(&hmcdrv_dev.dev);
#endif /* HMCDRV_DEV_CLASS */
return rc;
}
/**
* hmcdrv_dev_exit() - destroys a HMC drive CD/DVD device
*/
void hmcdrv_dev_exit(void)
{
#ifdef HMCDRV_DEV_CLASS
if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
class_destroy(hmcdrv_dev_class);
}
cdev_del(&hmcdrv_dev.dev);
unregister_chrdev_region(hmcdrv_dev_no, 1);
#else /* !HMCDRV_DEV_CLASS */
misc_deregister(&hmcdrv_dev.dev);
#endif /* HMCDRV_DEV_CLASS */
}
| linux-master | drivers/s390/char/hmcdrv_dev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* basic function of the tape device driver
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <[email protected]>
* Michael Holzheu <[email protected]>
* Tuan Ngo-Anh <[email protected]>
* Martin Schwidefsky <[email protected]>
* Stefan Bader <[email protected]>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
#include <linux/spinlock.h> // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/types.h> // for variable types
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
#define LONG_BUSY_TIMEOUT 180 /* seconds */
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(struct timer_list *t);
/*
* One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
*/
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);
/*
* Pointer to debug area.
*/
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);
/*
* Printable strings for tape enumerations.
*/
const char *tape_state_verbose[TS_SIZE] =
{
[TS_UNUSED] = "UNUSED",
[TS_IN_USE] = "IN_USE",
[TS_BLKUSE] = "BLKUSE",
[TS_INIT] = "INIT ",
[TS_NOT_OPER] = "NOT_OP"
};
const char *tape_op_verbose[TO_SIZE] =
{
[TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
[TO_BSF] = "BSF", [TO_DSE] = "DSE",
[TO_FSB] = "FSB", [TO_FSF] = "FSF",
[TO_LBL] = "LBL", [TO_NOP] = "NOP",
[TO_RBA] = "RBA", [TO_RBI] = "RBI",
[TO_RFO] = "RFO", [TO_REW] = "REW",
[TO_RUN] = "RUN", [TO_WRI] = "WRI",
[TO_WTM] = "WTM", [TO_MSEN] = "MSN",
[TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
[TO_READ_ATTMSG] = "RAT",
[TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
[TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
[TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
};
static int devid_to_int(struct ccw_dev_id *dev_id)
{
return dev_id->devno + (dev_id->ssid << 16);
}
/*
* Some channel attached tape specific attributes.
*
 * FIXME: In the future the first_minor and blocksize attributes should be
* replaced by a link to the cdev tree.
*/
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}
static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}
static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}
static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);
static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
ssize_t rc;
tdev = dev_get_drvdata(dev);
if (tdev->first_minor < 0)
return scnprintf(buf, PAGE_SIZE, "N/A\n");
spin_lock_irq(get_ccwdev_lock(tdev->cdev));
if (list_empty(&tdev->req_queue))
rc = scnprintf(buf, PAGE_SIZE, "---\n");
else {
struct tape_request *req;
req = list_entry(tdev->req_queue.next, struct tape_request,
list);
rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
}
spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
return rc;
}
static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}
static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
static struct attribute *tape_attrs[] = {
&dev_attr_medium_state.attr,
&dev_attr_first_minor.attr,
&dev_attr_state.attr,
&dev_attr_operation.attr,
&dev_attr_blocksize.attr,
NULL
};
static const struct attribute_group tape_attr_group = {
.attrs = tape_attrs,
};
/*
* Tape state functions
*/
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
const char *str;
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "ts_set err: not oper\n");
return;
}
DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
DBF_EVENT(4, "old ts:\t\n");
if (device->tape_state < TS_SIZE && device->tape_state >= 0)
str = tape_state_verbose[device->tape_state];
else
str = "UNKNOWN TS";
DBF_EVENT(4, "%s\n", str);
DBF_EVENT(4, "new ts:\t\n");
if (newstate < TS_SIZE && newstate >= 0)
str = tape_state_verbose[newstate];
else
str = "UNKNOWN TS";
DBF_EVENT(4, "%s\n", str);
device->tape_state = newstate;
wake_up(&device->state_change_wq);
}
struct tape_med_state_work_data {
struct tape_device *device;
enum tape_medium_state state;
struct work_struct work;
};
static void
tape_med_state_work_handler(struct work_struct *work)
{
static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
struct tape_med_state_work_data *p =
container_of(work, struct tape_med_state_work_data, work);
struct tape_device *device = p->device;
char *envp[] = { NULL, NULL };
switch (p->state) {
case MS_UNLOADED:
pr_info("%s: The tape cartridge has been successfully "
"unloaded\n", dev_name(&device->cdev->dev));
envp[0] = env_state_unloaded;
kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
break;
case MS_LOADED:
pr_info("%s: A tape cartridge has been mounted\n",
dev_name(&device->cdev->dev));
envp[0] = env_state_loaded;
kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
break;
default:
break;
}
tape_put_device(device);
kfree(p);
}
static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
struct tape_med_state_work_data *p;
p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (p) {
INIT_WORK(&p->work, tape_med_state_work_handler);
p->device = tape_get_device(device);
p->state = state;
schedule_work(&p->work);
}
}
void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
enum tape_medium_state oldstate;
oldstate = device->medium_state;
if (oldstate == newstate)
return;
device->medium_state = newstate;
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
if (oldstate == MS_LOADED)
tape_med_state_work(device, MS_UNLOADED);
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
if (oldstate == MS_UNLOADED)
tape_med_state_work(device, MS_LOADED);
break;
default:
break;
}
wake_up(&device->state_change_wq);
}
/*
* Stop running ccw. Has to be called with the device lock held.
*/
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
int retries;
int rc;
/* Check if interrupt has already been processed */
if (request->callback == NULL)
return 0;
rc = 0;
for (retries = 0; retries < 5; retries++) {
rc = ccw_device_clear(device->cdev, (long) request);
switch (rc) {
case 0:
request->status = TAPE_REQUEST_DONE;
return 0;
case -EBUSY:
request->status = TAPE_REQUEST_CANCEL;
schedule_delayed_work(&device->tape_dnr, 0);
return 0;
case -ENODEV:
DBF_EXCEPTION(2, "device gone, retry\n");
break;
case -EIO:
DBF_EXCEPTION(2, "I/O error, retry\n");
break;
default:
BUG();
}
}
return rc;
}
/*
* Add device into the sorted list, giving it the first
* available minor number.
*/
static int
tape_assign_minor(struct tape_device *device)
{
struct tape_device *tmp;
int minor;
minor = 0;
write_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
if (minor < tmp->first_minor)
break;
minor += TAPE_MINORS_PER_DEV;
}
if (minor >= 256) {
write_unlock(&tape_device_lock);
return -ENODEV;
}
device->first_minor = minor;
list_add_tail(&device->node, &tmp->node);
write_unlock(&tape_device_lock);
return 0;
}
/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
write_lock(&tape_device_lock);
list_del_init(&device->node);
device->first_minor = -1;
write_unlock(&tape_device_lock);
}
/*
* Set a device online.
*
* This function is called by the common I/O layer to move a device from the
 * detected-but-offline state into the online state.
* If we return an error (RC < 0) the device remains in the offline state. This
* can happen if the device is assigned somewhere else, for example.
*/
int
tape_generic_online(struct tape_device *device,
struct tape_discipline *discipline)
{
int rc;
DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
if (device->tape_state != TS_INIT) {
DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
return -EINVAL;
}
timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);
/* Let the discipline have a go at the device. */
device->discipline = discipline;
if (!try_module_get(discipline->owner)) {
return -EINVAL;
}
rc = discipline->setup_device(device);
if (rc)
goto out;
rc = tape_assign_minor(device);
if (rc)
goto out_discipline;
rc = tapechar_setup_device(device);
if (rc)
goto out_minor;
tape_state_set(device, TS_UNUSED);
DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
return 0;
out_minor:
tape_remove_minor(device);
out_discipline:
device->discipline->cleanup_device(device);
device->discipline = NULL;
out:
module_put(discipline->owner);
return rc;
}
static void
tape_cleanup_device(struct tape_device *device)
{
tapechar_cleanup_device(device);
device->discipline->cleanup_device(device);
module_put(device->discipline->owner);
tape_remove_minor(device);
tape_med_state_set(device, MS_UNKNOWN);
}
/*
* Set device offline.
*
 * Called by the common I/O layer if the drive should be set offline on user
* request. We may prevent this by returning an error.
* Manual offline is only allowed while the drive is not in use.
*/
int
tape_generic_offline(struct ccw_device *cdev)
{
struct tape_device *device;
device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
device->cdev_id, device);
spin_lock_irq(get_ccwdev_lock(device->cdev));
switch (device->tape_state) {
case TS_INIT:
case TS_NOT_OPER:
spin_unlock_irq(get_ccwdev_lock(device->cdev));
break;
case TS_UNUSED:
tape_state_set(device, TS_INIT);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_cleanup_device(device);
break;
default:
DBF_EVENT(3, "(%08x): Set offline failed "
"- drive in use.\n",
device->cdev_id);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return -EBUSY;
}
DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
return 0;
}
/*
* Allocate memory for a new device structure.
*/
static struct tape_device *
tape_alloc_device(void)
{
struct tape_device *device;
device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
if (device == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
return ERR_PTR(-ENOMEM);
}
device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
if (device->modeset_byte == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
kfree(device);
return ERR_PTR(-ENOMEM);
}
mutex_init(&device->mutex);
INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq);
init_waitqueue_head(&device->wait_queue);
device->tape_state = TS_INIT;
device->medium_state = MS_UNKNOWN;
*device->modeset_byte = 0;
device->first_minor = -1;
atomic_set(&device->ref_count, 1);
INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
return device;
}
/*
* Get a reference to an existing device structure. This will automatically
* increment the reference count.
*/
struct tape_device *
tape_get_device(struct tape_device *device)
{
int count;
count = atomic_inc_return(&device->ref_count);
DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
return device;
}
/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 * The caller must not use the device pointer after dropping its
 * reference.
*/
void
tape_put_device(struct tape_device *device)
{
int count;
count = atomic_dec_return(&device->ref_count);
DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
BUG_ON(count < 0);
if (count == 0) {
kfree(device->modeset_byte);
kfree(device);
}
}
/*
* Find tape device by a device index.
*/
struct tape_device *
tape_find_device(int devindex)
{
struct tape_device *device, *tmp;
device = ERR_PTR(-ENODEV);
read_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
device = tape_get_device(tmp);
break;
}
}
read_unlock(&tape_device_lock);
return device;
}
/*
* Driverfs tape probe function.
*/
int
tape_generic_probe(struct ccw_device *cdev)
{
struct tape_device *device;
int ret;
struct ccw_dev_id dev_id;
device = tape_alloc_device();
if (IS_ERR(device))
return -ENODEV;
ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
CCWDEV_DO_MULTIPATH);
ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
if (ret) {
tape_put_device(device);
return ret;
}
dev_set_drvdata(&cdev->dev, device);
cdev->handler = __tape_do_irq;
device->cdev = cdev;
ccw_device_get_id(cdev, &dev_id);
device->cdev_id = devid_to_int(&dev_id);
return ret;
}
static void
__tape_discard_requests(struct tape_device *device)
{
struct tape_request * request;
struct list_head * l, *n;
list_for_each_safe(l, n, &device->req_queue) {
request = list_entry(l, struct tape_request, list);
if (request->status == TAPE_REQUEST_IN_IO)
request->status = TAPE_REQUEST_DONE;
list_del(&request->list);
/* Decrease ref_count for removed request. */
request->device = NULL;
tape_put_device(device);
request->rc = -EIO;
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
}
/*
* Driverfs tape remove function.
*
 * This function is called whenever the common I/O layer detects that the
 * device is gone. This can happen at any time and we cannot refuse.
*/
void
tape_generic_remove(struct ccw_device *cdev)
{
struct tape_device * device;
device = dev_get_drvdata(&cdev->dev);
if (!device) {
return;
}
DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
spin_lock_irq(get_ccwdev_lock(device->cdev));
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
fallthrough;
case TS_NOT_OPER:
/*
* Nothing to do.
*/
spin_unlock_irq(get_ccwdev_lock(device->cdev));
break;
case TS_UNUSED:
/*
* Need only to release the device.
*/
tape_state_set(device, TS_NOT_OPER);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_cleanup_device(device);
break;
default:
/*
* There may be requests on the queue. We will not get
* an interrupt for a request that was running. So we
* just post them all as I/O errors.
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
pr_warn("%s: A tape unit was detached while in use\n",
dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_cleanup_device(device);
}
device = dev_get_drvdata(&cdev->dev);
if (device) {
sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
dev_set_drvdata(&cdev->dev, NULL);
tape_put_device(device);
}
}
/*
* Allocate a new tape ccw request
*/
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
struct tape_request *request;
BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
if (request == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
return ERR_PTR(-ENOMEM);
}
/* allocate channel program */
if (cplength > 0) {
request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
GFP_ATOMIC | GFP_DMA);
if (request->cpaddr == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
kfree(request);
return ERR_PTR(-ENOMEM);
}
}
/* alloc small kernel buffer */
if (datasize > 0) {
request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
if (request->cpdata == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
kfree(request->cpaddr);
kfree(request);
return ERR_PTR(-ENOMEM);
}
}
DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
request->cpdata);
return request;
}
/*
* Free tape ccw request
*/
void
tape_free_request (struct tape_request * request)
{
DBF_LH(6, "Free request %p\n", request);
if (request->device)
tape_put_device(request->device);
kfree(request->cpdata);
kfree(request->cpaddr);
kfree(request);
}
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
int rc;
rc = ccw_device_start(
device->cdev,
request->cpaddr,
(unsigned long) request,
0x00,
request->options
);
if (rc == 0) {
request->status = TAPE_REQUEST_IN_IO;
} else if (rc == -EBUSY) {
/* The common I/O subsystem is currently busy. Retry later. */
request->status = TAPE_REQUEST_QUEUED;
schedule_delayed_work(&device->tape_dnr, 0);
rc = 0;
} else {
/* Start failed. Remove request and indicate failure. */
DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
}
return rc;
}
static void
__tape_start_next_request(struct tape_device *device)
{
struct list_head *l, *n;
struct tape_request *request;
int rc;
DBF_LH(6, "__tape_start_next_request(%p)\n", device);
/*
 * Try to start each request on the request queue until one is
 * started successfully.
*/
list_for_each_safe(l, n, &device->req_queue) {
request = list_entry(l, struct tape_request, list);
/*
* Avoid race condition if bottom-half was triggered more than
* once.
*/
if (request->status == TAPE_REQUEST_IN_IO)
return;
/*
* Request has already been stopped. We have to wait until
* the request is removed from the queue in the interrupt
* handling.
*/
if (request->status == TAPE_REQUEST_DONE)
return;
/*
* We wanted to cancel the request but the common I/O layer
* was busy at that time. This can only happen if this
* function is called by delayed_next_request.
* Otherwise we start the next request on the queue.
*/
if (request->status == TAPE_REQUEST_CANCEL) {
rc = __tape_cancel_io(device, request);
} else {
rc = __tape_start_io(device, request);
}
if (rc == 0)
return;
/* Set ending status. */
request->rc = rc;
request->status = TAPE_REQUEST_DONE;
/* Remove from request queue. */
list_del(&request->list);
/* Do callback. */
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
}
static void
tape_delayed_next_request(struct work_struct *work)
{
struct tape_device *device =
container_of(work, struct tape_device, tape_dnr.work);
DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
spin_lock_irq(get_ccwdev_lock(device->cdev));
__tape_start_next_request(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
static void tape_long_busy_timeout(struct timer_list *t)
{
struct tape_device *device = from_timer(device, t, lb_timeout);
struct tape_request *request;
spin_lock_irq(get_ccwdev_lock(device->cdev));
request = list_entry(device->req_queue.next, struct tape_request, list);
BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
tape_put_device(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
static void
__tape_end_request(
struct tape_device * device,
struct tape_request * request,
int rc)
{
DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
if (request) {
request->rc = rc;
request->status = TAPE_REQUEST_DONE;
/* Remove from request queue. */
list_del(&request->list);
/* Do callback. */
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
/* Start next request. */
if (!list_empty(&device->req_queue))
__tape_start_next_request(device);
}
/*
* Write sense data to dbf
*/
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
unsigned int *sptr;
const char* op;
if (request != NULL)
op = tape_op_verbose[request->op];
else
op = "---";
DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
sptr = (unsigned int *) irb->ecw;
DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}
/*
* I/O helper function. Adds the request to the request queue
* and starts it if the tape is idle. Has to be called with
* the device lock held.
*/
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
int rc;
switch (request->op) {
case TO_MSEN:
case TO_ASSIGN:
case TO_UNASSIGN:
case TO_READ_ATTMSG:
case TO_RDC:
if (device->tape_state == TS_INIT)
break;
if (device->tape_state == TS_UNUSED)
break;
fallthrough;
default:
if (device->tape_state == TS_BLKUSE)
break;
if (device->tape_state != TS_IN_USE)
return -ENODEV;
}
/* Increase use count of device for the added request. */
request->device = tape_get_device(device);
if (list_empty(&device->req_queue)) {
/* No other requests are on the queue. Start this one. */
rc = __tape_start_io(device, request);
if (rc)
return rc;
DBF_LH(5, "Request %p added for execution.\n", request);
list_add(&request->list, &device->req_queue);
} else {
DBF_LH(5, "Request %p add to queue.\n", request);
request->status = TAPE_REQUEST_QUEUED;
list_add_tail(&request->list, &device->req_queue);
}
return 0;
}
/*
* Add the request to the request queue, try to start it if the
* tape is idle. Return without waiting for end of i/o.
*/
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
int rc;
DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Add request to request queue and try to start it. */
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* tape_do_io/__tape_wake_up
* Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
*/
static void
__tape_wake_up(struct tape_request *request, void *data)
{
request->callback = NULL;
wake_up((wait_queue_head_t *) data);
}
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up;
request->callback_data = &device->wait_queue;
/* Add request to request queue and try to start it. */
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
wait_event(device->wait_queue, (request->callback == NULL));
/* Get rc from request */
return request->rc;
}
/*
* tape_do_io_interruptible/__tape_wake_up_interruptible
* Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
*/
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
request->callback = NULL;
wake_up_interruptible((wait_queue_head_t *) data);
}
int
tape_do_io_interruptible(struct tape_device *device,
struct tape_request *request)
{
int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up_interruptible;
request->callback_data = &device->wait_queue;
rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
rc = wait_event_interruptible(device->wait_queue,
(request->callback == NULL));
if (rc != -ERESTARTSYS)
/* Request finished normally. */
return request->rc;
/* Interrupted by a signal. We have to stop the current request. */
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = __tape_cancel_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc == 0) {
/* Wait for the interrupt that acknowledges the halt. */
do {
rc = wait_event_interruptible(
device->wait_queue,
(request->callback == NULL)
);
} while (rc == -ERESTARTSYS);
DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
rc = -ERESTARTSYS;
}
return rc;
}
/*
* Stop running ccw.
*/
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = __tape_cancel_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* Tape interrupt routine, called from the ccw_device layer
*/
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct tape_device *device;
struct tape_request *request;
int rc;
device = dev_get_drvdata(&cdev->dev);
if (device == NULL) {
return;
}
request = (struct tape_request *) intparm;
DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb)) {
/* FIXME: What to do with the request? */
switch (PTR_ERR(irb)) {
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
fallthrough;
case -EIO:
__tape_end_request(device, request, -EIO);
break;
default:
DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
device->cdev_id, PTR_ERR(irb));
}
return;
}
/*
* If the condition code is not zero and the start function bit is
 * still set, this is a deferred error and the last start I/O did
* not succeed. At this point the condition that caused the deferred
* error might still apply. So we just schedule the request to be
* started later.
*/
if (irb->scsw.cmd.cc != 0 &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(request->status == TAPE_REQUEST_IN_IO)) {
DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
request->status = TAPE_REQUEST_QUEUED;
schedule_delayed_work(&device->tape_dnr, HZ);
return;
}
/* May be an unsolicited irq */
if(request != NULL)
request->rescnt = irb->scsw.cmd.count;
else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
!list_empty(&device->req_queue)) {
/* Not Ready to Ready after long busy ? */
struct tape_request *req;
req = list_entry(device->req_queue.next,
struct tape_request, list);
if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
if (del_timer(&device->lb_timeout)) {
tape_put_device(device);
__tape_start_next_request(device);
}
return;
}
}
if (irb->scsw.cmd.dstat != 0x0c) {
/* Set the 'ONLINE' flag depending on sense byte 1 */
if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
device->tape_generic_status |= GMT_ONLINE(~0);
else
device->tape_generic_status &= ~GMT_ONLINE(~0);
/*
* Any request that does not come back with channel end
* and device end is unusual. Log the sense data.
*/
DBF_EVENT(3,"-- Tape Interrupthandler --\n");
tape_dump_sense_dbf(device, request, irb);
} else {
/* Upon normal completion the device _is_ online */
device->tape_generic_status |= GMT_ONLINE(~0);
}
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "tape:device is not operational\n");
return;
}
/*
 * Requests that were canceled still come back with an interrupt.
 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
*/
if(request != NULL && request->status == TAPE_REQUEST_DONE) {
__tape_end_request(device, request, -EIO);
return;
}
rc = device->discipline->irq(device, request, irb);
/*
* rc < 0 : request finished unsuccessfully.
* rc == TAPE_IO_SUCCESS: request finished successfully.
* rc == TAPE_IO_PENDING: request is still running. Ignore rc.
* rc == TAPE_IO_RETRY: request finished but needs another go.
* rc == TAPE_IO_STOP: request needs to get terminated.
*/
switch (rc) {
case TAPE_IO_SUCCESS:
/* Upon normal completion the device _is_ online */
device->tape_generic_status |= GMT_ONLINE(~0);
__tape_end_request(device, request, rc);
break;
case TAPE_IO_PENDING:
break;
case TAPE_IO_LONG_BUSY:
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
add_timer(&device->lb_timeout);
request->status = TAPE_REQUEST_LONG_BUSY;
break;
case TAPE_IO_RETRY:
rc = __tape_start_io(device, request);
if (rc)
__tape_end_request(device, request, rc);
break;
case TAPE_IO_STOP:
rc = __tape_cancel_io(device, request);
if (rc)
__tape_end_request(device, request, rc);
break;
default:
if (rc > 0) {
DBF_EVENT(6, "xunknownrc\n");
__tape_end_request(device, request, -EIO);
} else {
__tape_end_request(device, request, rc);
}
break;
}
}
/*
* Tape device open function used by tape_char frontend.
*/
int
tape_open(struct tape_device *device)
{
int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "TAPE:nodev\n");
rc = -ENODEV;
} else if (device->tape_state == TS_IN_USE) {
DBF_EVENT(6, "TAPE:dbusy\n");
rc = -EBUSY;
} else if (device->tape_state == TS_BLKUSE) {
DBF_EVENT(6, "TAPE:dbusy\n");
rc = -EBUSY;
} else if (device->discipline != NULL &&
!try_module_get(device->discipline->owner)) {
DBF_EVENT(6, "TAPE:nodisc\n");
rc = -ENODEV;
} else {
tape_state_set(device, TS_IN_USE);
rc = 0;
}
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* Tape device release function used by tape_char frontend.
*/
int
tape_release(struct tape_device *device)
{
spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_IN_USE)
tape_state_set(device, TS_UNUSED);
module_put(device->discipline->owner);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return 0;
}
/*
* Execute a magnetic tape command a number of times.
*/
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
tape_mtop_fn fn;
int rc;
DBF_EVENT(6, "TAPE:mtio\n");
DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
return -EINVAL;
fn = device->discipline->mtop_array[mt_op];
if (fn == NULL)
return -EINVAL;
/* We assume that the backends can handle count up to 500. */
if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
rc = 0;
for (; mt_count > 500; mt_count -= 500)
if ((rc = fn(device, 500)) != 0)
break;
if (rc == 0)
rc = fn(device, mt_count);
} else
rc = fn(device, mt_count);
return rc;
}
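/*
 * Hedged usage note (editor's illustration, not part of the driver):
 * tape_mtop() is normally reached from user space via the character
 * frontend's MTIOCTOP ioctl. The device node name is an assumption and
 * error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mtio.h>
 *
 *	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
 *	int fd = open("/dev/ntibm0", O_RDWR);
 *	ioctl(fd, MTIOCTOP, &op);
 *	close(fd);
 */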
/*
* Tape init function.
*/
static int
tape_init (void)
{
TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
debug_set_level(TAPE_DBF_AREA, 6);
#endif
DBF_EVENT(3, "tape init\n");
tape_proc_init();
tapechar_init ();
return 0;
}
/*
* Tape exit function.
*/
static void
tape_exit(void)
{
DBF_EVENT(6, "tape exit\n");
/* Get rid of the frontends */
tapechar_exit();
tape_proc_cleanup();
debug_unregister (TAPE_DBF_AREA);
}
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
"Michael Holzheu ([email protected],[email protected])");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");
module_init(tape_init);
module_exit(tape_exit);
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);
| linux-master | drivers/s390/char/tape_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP line mode console driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Peschke <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
#define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64
#define sclp_console_name "ttyS"
/* Lock to guard over changes to global variables */
static DEFINE_SPINLOCK(sclp_con_lock);
/* List of free pages that can be used for console output buffering */
static LIST_HEAD(sclp_con_pages);
/* List of full struct sclp_buffer structures ready for output */
static LIST_HEAD(sclp_con_outqueue);
/* Pointer to current console buffer */
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
/* Output format for console messages */
#define SCLP_CON_COLUMNS 320
#define SPACES_PER_TAB 8
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_con_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
list_add_tail((struct list_head *) page, &sclp_con_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_con_outqueue))
buffer = list_first_entry(&sclp_con_outqueue,
struct sclp_buffer, list);
if (!buffer) {
sclp_con_queue_running = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
/*
* Finalize and emit first pending buffer.
*/
static void sclp_conbuf_emit(void)
{
struct sclp_buffer* buffer;
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_con_lock, flags);
if (sclp_conbuf)
list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
if (sclp_con_queue_running)
goto out_unlock;
if (list_empty(&sclp_con_outqueue))
goto out_unlock;
buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
list);
sclp_con_queue_running = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
if (rc)
sclp_conbuf_callback(buffer, rc);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* Wait until out queue is empty
*/
static void sclp_console_sync_queue(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
 * When this routine is called from the timer, we flush the
 * temporary write buffer without waiting any further for a final new line.
*/
static void
sclp_console_timeout(struct timer_list *unused)
{
sclp_conbuf_emit();
}
/*
 * Drop the oldest console buffer if sclp_console_drop is set
*/
static int
sclp_console_drop_buffer(void)
{
struct list_head *list;
struct sclp_buffer *buffer;
void *page;
if (!sclp_console_drop)
return 0;
list = sclp_con_outqueue.next;
if (sclp_con_queue_running)
/* The first element is in I/O */
list = list->next;
if (list == &sclp_con_outqueue)
return 0;
list_del(list);
buffer = list_entry(list, struct sclp_buffer, list);
page = sclp_unmake_buffer(buffer);
list_add_tail((struct list_head *) page, &sclp_con_pages);
return 1;
}
/*
 * Write the given message to the S390 system console
*/
static void
sclp_console_write(struct console *console, const char *message,
unsigned int count)
{
unsigned long flags;
void *page;
int written;
if (count == 0)
return;
spin_lock_irqsave(&sclp_con_lock, flags);
/*
* process escape characters, write message into buffer,
* send buffer to SCLP
*/
do {
/* make sure we have a console output buffer */
if (sclp_conbuf == NULL) {
if (list_empty(&sclp_con_pages))
sclp_console_full++;
while (list_empty(&sclp_con_pages)) {
if (sclp_console_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
page = sclp_con_pages.next;
list_del((struct list_head *) page);
sclp_conbuf = sclp_make_buffer(page, SCLP_CON_COLUMNS,
SPACES_PER_TAB);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_conbuf, (const unsigned char *)
message, count);
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
spin_lock_irqsave(&sclp_con_lock, flags);
message += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
static struct tty_driver *
sclp_console_device(struct console *c, int *index)
{
*index = c->index;
return sclp_tty_driver;
}
/*
* This panic/reboot notifier makes sure that all buffers
* will be flushed to the SCLP.
*/
static int sclp_console_notify(struct notifier_block *self,
unsigned long event, void *data)
{
/*
* Perform the lock check before effectively getting the
* lock on sclp_conbuf_emit() / sclp_console_sync_queue()
* to prevent potential lockups in atomic context.
*/
if (spin_is_locked(&sclp_con_lock))
return NOTIFY_DONE;
sclp_conbuf_emit();
sclp_console_sync_queue();
return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_console_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
/*
 * used to register the SCLP console with the kernel and to
 * give printk the necessary information
*/
static struct console sclp_console =
{
.name = sclp_console_name,
.write = sclp_console_write,
.device = sclp_console_device,
.flags = CON_PRINTBUFFER,
.index = 0 /* ttyS0 */
};
/*
 * called by console_init() in drivers/tty/tty_io.c at boot time.
*/
static int __init
sclp_console_init(void)
{
void *page;
int i;
int rc;
/* SCLP consoles are handled together */
if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220))
return 0;
rc = sclp_rw_init();
if (rc)
return rc;
/* Allocate pages for output buffering */
for (i = 0; i < sclp_console_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
list_add_tail(page, &sclp_con_pages);
}
sclp_conbuf = NULL;
timer_setup(&sclp_con_timer, sclp_console_timeout, 0);
/* enable printk-access to this driver */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_console);
return 0;
}
console_initcall(sclp_console_init);
| linux-master | drivers/s390/char/sclp_con.c |
// SPDX-License-Identifier: GPL-2.0
/*
* signal quiesce handler
*
* Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <[email protected]>
* Peter Oberparleiter <[email protected]>
*/
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include "sclp.h"
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
psw_t quiesce_psw;
smp_send_stop();
quiesce_psw.mask =
PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
ctrl_alt_del();
}
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
};
/* Initialize quiesce driver. */
static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
device_initcall(sclp_quiesce_init);
| linux-master | drivers/s390/char/sclp_quiesce.c |
// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001, 2009
* Author(s): Stefan Bader <[email protected]>
* Michael Holzheu <[email protected]>
* Martin Schwidefsky <[email protected]>
*/
#define KMSG_COMPONENT "tape_3590"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <asm/ebcdic.h>
#define TAPE_DBF_AREA tape_3590_dbf
#define BUFSIZE 512	/* size of buffers for dynamically generated messages */
#include "tape.h"
#include "tape_std.h"
#include "tape_3590.h"
static struct workqueue_struct *tape_3590_wq;
/*
* Pointer to debug area.
*/
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);
/*******************************************************************
* Error Recovery functions:
* - Read Opposite: implemented
* - Read Device (buffered) log: BRA
* - Read Library log: BRA
* - Swap Devices: BRA
* - Long Busy: implemented
* - Special Intercept: BRA
* - Read Alternate: implemented
*******************************************************************/
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
[0x00] = "",
[0x10] = "Lost Sense",
[0x11] = "Assigned Elsewhere",
[0x12] = "Allegiance Reset",
[0x13] = "Shared Access Violation",
[0x20] = "Command Reject",
[0x21] = "Configuration Error",
[0x22] = "Protection Exception",
[0x23] = "Write Protect",
[0x24] = "Write Length",
[0x25] = "Read-Only Format",
[0x31] = "Beginning of Partition",
[0x33] = "End of Partition",
[0x34] = "End of Data",
[0x35] = "Block not found",
[0x40] = "Device Intervention",
[0x41] = "Loader Intervention",
[0x42] = "Library Intervention",
[0x50] = "Write Error",
[0x51] = "Erase Error",
[0x52] = "Formatting Error",
[0x53] = "Read Error",
[0x54] = "Unsupported Format",
[0x55] = "No Formatting",
[0x56] = "Positioning lost",
[0x57] = "Read Length",
[0x60] = "Unsupported Medium",
[0x61] = "Medium Length Error",
[0x62] = "Medium removed",
[0x64] = "Load Check",
[0x65] = "Unload Check",
[0x70] = "Equipment Check",
[0x71] = "Bus out Check",
[0x72] = "Protocol Error",
[0x73] = "Interface Error",
[0x74] = "Overrun",
[0x75] = "Halt Signal",
[0x90] = "Device fenced",
[0x91] = "Device Path fenced",
[0xa0] = "Volume misplaced",
[0xa1] = "Volume inaccessible",
[0xa2] = "Volume in input",
[0xa3] = "Volume ejected",
[0xa4] = "All categories reserved",
[0xa5] = "Duplicate Volume",
[0xa6] = "Library Manager Offline",
[0xa7] = "Library Output Station full",
[0xa8] = "Vision System non-operational",
[0xa9] = "Library Manager Equipment Check",
[0xaa] = "Library Equipment Check",
[0xab] = "All Library Cells full",
[0xac] = "No Cleaner Volumes in Library",
[0xad] = "I/O Station door open",
[0xae] = "Subsystem environmental alert",
};
static int crypt_supported(struct tape_device *device)
{
return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
}
static int crypt_enabled(struct tape_device *device)
{
return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
}
static void ext_to_int_kekl(struct tape390_kekl *in,
struct tape3592_kekl *out)
{
int len;
memset(out, 0, sizeof(*out));
if (in->type == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x40;
if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x80;
len = min(sizeof(out->label), strlen(in->label));
memcpy(out->label, in->label, len);
memset(out->label + len, ' ', sizeof(out->label) - len);
ASCEBC(out->label, sizeof(out->label));
}
static void int_to_ext_kekl(struct tape3592_kekl *in,
struct tape390_kekl *out)
{
memset(out, 0, sizeof(*out));
if(in->flags & 0x40)
out->type = TAPE390_KEKL_TYPE_HASH;
else
out->type = TAPE390_KEKL_TYPE_LABEL;
if(in->flags & 0x80)
out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
else
out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
memcpy(out->label, in->label, sizeof(in->label));
EBCASC(out->label, sizeof(in->label));
strim(out->label);
}
static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
struct tape390_kekl_pair *out)
{
if (in->count == 0) {
out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
} else if (in->count == 1) {
int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
} else if (in->count == 2) {
int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
} else {
printk("Invalid KEKL number: %d\n", in->count);
BUG();
}
}
static int check_ext_kekl(struct tape390_kekl *kekl)
{
if (kekl->type == TAPE390_KEKL_TYPE_NONE)
goto invalid;
if (kekl->type > TAPE390_KEKL_TYPE_HASH)
goto invalid;
if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
goto invalid;
if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
goto invalid;
if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
(kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
goto invalid;
return 0;
invalid:
return -EINVAL;
}
static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
{
if (check_ext_kekl(&kekls->kekl[0]))
goto invalid;
if (check_ext_kekl(&kekls->kekl[1]))
goto invalid;
return 0;
invalid:
return -EINVAL;
}
/*
* Query KEKLs
*/
static int tape_3592_kekl_query(struct tape_device *device,
struct tape390_kekl_pair *ext_kekls)
{
struct tape_request *request;
struct tape3592_kekl_query_order *order;
struct tape3592_kekl_query_data *int_kekls;
int rc;
DBF_EVENT(6, "tape3592_kekl_query\n");
int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
if (!int_kekls)
return -ENOMEM;
request = tape_alloc_request(2, sizeof(*order));
if (IS_ERR(request)) {
rc = PTR_ERR(request);
goto fail_malloc;
}
order = request->cpdata;
memset(order,0,sizeof(*order));
order->code = 0xe2;
order->max_count = 2;
request->op = TO_KEKL_QUERY;
tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
int_kekls);
rc = tape_do_io(device, request);
if (rc)
goto fail_request;
int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
rc = 0;
fail_request:
tape_free_request(request);
fail_malloc:
kfree(int_kekls);
return rc;
}
/*
* IOCTL: Query KEKLs
*/
static int tape_3592_ioctl_kekl_query(struct tape_device *device,
unsigned long arg)
{
int rc;
struct tape390_kekl_pair *ext_kekls;
DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
if (!crypt_supported(device))
return -ENOSYS;
if (!crypt_enabled(device))
return -EUNATCH;
ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
if (!ext_kekls)
return -ENOMEM;
rc = tape_3592_kekl_query(device, ext_kekls);
if (rc != 0)
goto fail;
if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
rc = -EFAULT;
goto fail;
}
rc = 0;
fail:
kfree(ext_kekls);
return rc;
}
static int tape_3590_mttell(struct tape_device *device, int mt_count);
/*
* Set KEKLs
*/
static int tape_3592_kekl_set(struct tape_device *device,
struct tape390_kekl_pair *ext_kekls)
{
struct tape_request *request;
struct tape3592_kekl_set_order *order;
DBF_EVENT(6, "tape3592_kekl_set\n");
if (check_ext_kekl_pair(ext_kekls)) {
DBF_EVENT(6, "invalid kekls\n");
return -EINVAL;
}
if (tape_3590_mttell(device, 0) != 0)
return -EBADSLT;
request = tape_alloc_request(1, sizeof(*order));
if (IS_ERR(request))
return PTR_ERR(request);
order = request->cpdata;
memset(order, 0, sizeof(*order));
order->code = 0xe3;
order->kekls.count = 2;
ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
request->op = TO_KEKL_SET;
tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
return tape_do_io_free(device, request);
}
/*
* IOCTL: Set KEKLs
*/
static int tape_3592_ioctl_kekl_set(struct tape_device *device,
unsigned long arg)
{
int rc;
struct tape390_kekl_pair *ext_kekls;
DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
if (!crypt_supported(device))
return -ENOSYS;
if (!crypt_enabled(device))
return -EUNATCH;
ext_kekls = memdup_user((char __user *)arg, sizeof(*ext_kekls));
if (IS_ERR(ext_kekls))
return PTR_ERR(ext_kekls);
rc = tape_3592_kekl_set(device, ext_kekls);
kfree(ext_kekls);
return rc;
}
/*
* Enable encryption
*/
static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_enable_crypt\n");
if (!crypt_supported(device))
return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
return request;
data = request->cpdata;
	memset(data, 0, 72);
data[0] = 0x05;
data[36 + 0] = 0x03;
data[36 + 1] = 0x03;
data[36 + 4] = 0x40;
data[36 + 6] = 0x01;
data[36 + 14] = 0x2f;
data[36 + 18] = 0xc3;
data[36 + 35] = 0x72;
request->op = TO_CRYPT_ON;
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
return request;
}
static int tape_3592_enable_crypt(struct tape_device *device)
{
struct tape_request *request;
request = __tape_3592_enable_crypt(device);
if (IS_ERR(request))
return PTR_ERR(request);
return tape_do_io_free(device, request);
}
static void tape_3592_enable_crypt_async(struct tape_device *device)
{
struct tape_request *request;
request = __tape_3592_enable_crypt(device);
if (!IS_ERR(request))
tape_do_io_async_free(device, request);
}
/*
* Disable encryption
*/
static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_disable_crypt\n");
if (!crypt_supported(device))
return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
return request;
data = request->cpdata;
	memset(data, 0, 72);
data[0] = 0x05;
data[36 + 0] = 0x03;
data[36 + 1] = 0x03;
data[36 + 35] = 0x32;
request->op = TO_CRYPT_OFF;
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
return request;
}
static int tape_3592_disable_crypt(struct tape_device *device)
{
struct tape_request *request;
request = __tape_3592_disable_crypt(device);
if (IS_ERR(request))
return PTR_ERR(request);
return tape_do_io_free(device, request);
}
static void tape_3592_disable_crypt_async(struct tape_device *device)
{
struct tape_request *request;
request = __tape_3592_disable_crypt(device);
if (!IS_ERR(request))
tape_do_io_async_free(device, request);
}
/*
* IOCTL: Set encryption status
*/
static int tape_3592_ioctl_crypt_set(struct tape_device *device,
unsigned long arg)
{
struct tape390_crypt_info info;
DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
if (!crypt_supported(device))
return -ENOSYS;
if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
return -EFAULT;
if (info.status & ~TAPE390_CRYPT_ON_MASK)
return -EINVAL;
if (info.status & TAPE390_CRYPT_ON_MASK)
return tape_3592_enable_crypt(device);
else
return tape_3592_disable_crypt(device);
}
static int tape_3590_sense_medium(struct tape_device *device);
/*
 * IOCTL: Query encryption status
*/
static int tape_3592_ioctl_crypt_query(struct tape_device *device,
unsigned long arg)
{
DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
if (!crypt_supported(device))
return -ENOSYS;
tape_3590_sense_medium(device);
if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
sizeof(TAPE_3590_CRYPT_INFO(device))))
		return -EFAULT;
	return 0;
}
/*
* 3590 IOCTL Overload
*/
static int
tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case TAPE390_DISPLAY: {
struct display_struct disp;
if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)))
return -EFAULT;
return tape_std_display(device, &disp);
}
case TAPE390_KEKL_SET:
return tape_3592_ioctl_kekl_set(device, arg);
case TAPE390_KEKL_QUERY:
return tape_3592_ioctl_kekl_query(device, arg);
case TAPE390_CRYPT_SET:
return tape_3592_ioctl_crypt_set(device, arg);
case TAPE390_CRYPT_QUERY:
return tape_3592_ioctl_crypt_query(device, arg);
default:
return -EINVAL; /* no additional ioctls */
}
}
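/*
 * Minimal (hypothetical) userspace sketch for the crypt ioctls dispatched
 * above. The device node name and the header path are assumptions; the
 * ioctl numbers, struct tape390_crypt_info and TAPE390_CRYPT_ON_MASK are
 * taken from the interface used by this driver.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/tape390.h>	// assumed uapi header location
 *
 *	int fd = open("/dev/ntibm0", O_RDWR);	// assumed device node
 *	struct tape390_crypt_info info = { 0 };
 *
 *	info.status |= TAPE390_CRYPT_ON_MASK;	// request encryption on
 *	ioctl(fd, TAPE390_CRYPT_SET, &info);	// tape_3592_ioctl_crypt_set()
 *	ioctl(fd, TAPE390_CRYPT_QUERY, &info);	// read back status/medium state
 */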
/*
* SENSE Medium: Get Sense data about medium state
*/
static int tape_3590_sense_medium(struct tape_device *device)
{
struct tape_request *request;
request = tape_alloc_request(1, 128);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_MSEN;
tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
return tape_do_io_free(device, request);
}
static void tape_3590_sense_medium_async(struct tape_device *device)
{
struct tape_request *request;
request = tape_alloc_request(1, 128);
if (IS_ERR(request))
return;
request->op = TO_MSEN;
tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
tape_do_io_async_free(device, request);
}
/*
 * MTTELL: Tell block. Return the number of the block relative to the current file.
*/
static int
tape_3590_mttell(struct tape_device *device, int mt_count)
{
__u64 block_id;
int rc;
rc = tape_std_read_block_id(device, &block_id);
if (rc)
return rc;
return block_id >> 32;
}
/*
* MTSEEK: seek to the specified block.
*/
static int
tape_3590_mtseek(struct tape_device *device, int count)
{
struct tape_request *request;
DBF_EVENT(6, "xsee id: %x\n", count);
request = tape_alloc_request(3, 4);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_LBL;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
*(__u32 *) request->cpdata = count;
tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
return tape_do_io_free(device, request);
}
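/*
 * Illustrative (hypothetical) userspace sketch for the MTSEEK/MTTELL
 * operations wired up in tape_3590_mtop[] below. The device node name is an
 * assumption, and it is assumed that the tape character driver maps
 * MTIOCPOS to the MTTELL operation; struct mtop/mtpos and the MTIOC*
 * request codes come from <sys/mtio.h>.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mtio.h>
 *
 *	int fd = open("/dev/ntibm0", O_RDWR);	// assumed device node
 *	struct mtop op = { .mt_op = MTSEEK, .mt_count = 1234 };
 *	struct mtpos pos;
 *
 *	ioctl(fd, MTIOCTOP, &op);	// seek to logical block 1234
 *	ioctl(fd, MTIOCPOS, &pos);	// pos.mt_blkno should now be 1234
 */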
/*
* Read Opposite Error Recovery Function:
 * Used when Read Forward does not work
*/
static void
tape_3590_read_opposite(struct tape_device *device,
struct tape_request *request)
{
struct tape_3590_disc_data *data;
/*
* We have allocated 4 ccws in tape_std_read, so we can now
* transform the request to a read backward, followed by a
* forward space block.
*/
request->op = TO_RBA;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
data = device->discdata;
tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op,
device->char_data.idal_buf);
tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
DBF_EVENT(6, "xrop ccwg\n");
}
/*
* Read Attention Msg
* This should be done after an interrupt with attention bit (0x80)
* in device state.
*
* After a "read attention message" request there are two possible
* results:
*
* 1. A unit check is presented, when attention sense is present (e.g. when
* a medium has been unloaded). The attention sense comes then
* together with the unit check. The recovery action is either "retry"
* (in case there is an attention message pending) or "permanent error".
*
* 2. The attention msg is written to the "read subsystem data" buffer.
* In this case we probably should print it to the console.
*/
static void tape_3590_read_attmsg_async(struct tape_device *device)
{
struct tape_request *request;
char *buf;
request = tape_alloc_request(3, 4096);
if (IS_ERR(request))
return;
request->op = TO_READ_ATTMSG;
buf = request->cpdata;
buf[0] = PREP_RD_SS_DATA;
buf[6] = RD_ATTMSG; /* read att msg */
tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
tape_do_io_async_free(device, request);
}
/*
* These functions are used to schedule follow-up actions from within an
* interrupt context (like unsolicited interrupts).
 * Note: the work handler is called from the tape_3590_wq workqueue. The tape
 * commands started by the handler need to be asynchronous, otherwise
* a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
struct work_handler_data {
struct tape_device *device;
enum tape_op op;
struct work_struct work;
};
static void
tape_3590_work_handler(struct work_struct *work)
{
struct work_handler_data *p =
container_of(work, struct work_handler_data, work);
switch (p->op) {
case TO_MSEN:
tape_3590_sense_medium_async(p->device);
break;
case TO_READ_ATTMSG:
tape_3590_read_attmsg_async(p->device);
break;
case TO_CRYPT_ON:
tape_3592_enable_crypt_async(p->device);
break;
case TO_CRYPT_OFF:
tape_3592_disable_crypt_async(p->device);
break;
default:
DBF_EVENT(3, "T3590: work handler undefined for "
"operation 0x%02x\n", p->op);
}
tape_put_device(p->device);
kfree(p);
}
static int
tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
{
struct work_handler_data *p;
if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
INIT_WORK(&p->work, tape_3590_work_handler);
p->device = tape_get_device(device);
p->op = op;
queue_work(tape_3590_wq, &p->work);
return 0;
}
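/*
 * Typical use, as in the interrupt handlers below: from interrupt context,
 * tape_3590_schedule_work(device, TO_READ_ATTMSG) defers reading the
 * attention message to process context via tape_3590_wq.
 */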
static void tape_3590_med_state_set(struct tape_device *device,
struct tape_3590_med_sense *sense)
{
struct tape390_crypt_info *c_info;
c_info = &TAPE_3590_CRYPT_INFO(device);
DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
switch (sense->macst) {
case 0x04:
case 0x05:
case 0x06:
tape_med_state_set(device, MS_UNLOADED);
TAPE_3590_CRYPT_INFO(device).medium_status = 0;
return;
case 0x08:
case 0x09:
tape_med_state_set(device, MS_LOADED);
break;
default:
tape_med_state_set(device, MS_UNKNOWN);
return;
}
c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
if (sense->flags & MSENSE_CRYPT_MASK) {
DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
} else {
DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
}
}
/*
* The done handler is called at device/channel end and wakes up the sleeping
* process
*/
static int
tape_3590_done(struct tape_device *device, struct tape_request *request)
{
DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
switch (request->op) {
case TO_BSB:
case TO_BSF:
case TO_DSE:
case TO_FSB:
case TO_FSF:
case TO_LBL:
case TO_RFO:
case TO_RBA:
case TO_REW:
case TO_WRI:
case TO_WTM:
case TO_BLOCK:
case TO_LOAD:
tape_med_state_set(device, MS_LOADED);
break;
case TO_RUN:
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
break;
case TO_MSEN:
tape_3590_med_state_set(device, request->cpdata);
break;
case TO_CRYPT_ON:
TAPE_3590_CRYPT_INFO(device).status
|= TAPE390_CRYPT_ON_MASK;
*(device->modeset_byte) |= 0x03;
break;
case TO_CRYPT_OFF:
TAPE_3590_CRYPT_INFO(device).status
&= ~TAPE390_CRYPT_ON_MASK;
*(device->modeset_byte) &= ~0x03;
break;
case TO_RBI: /* RBI seems to succeed even without medium loaded. */
	case TO_NOP:	/* Same for NOP. */
case TO_READ_CONFIG:
case TO_READ_ATTMSG:
case TO_DIS:
case TO_ASSIGN:
case TO_UNASSIGN:
case TO_SIZE:
case TO_KEKL_SET:
case TO_KEKL_QUERY:
case TO_RDC:
break;
}
return TAPE_IO_SUCCESS;
}
/*
 * This function is called when error recovery was successful
*/
static inline int
tape_3590_erp_succeeded(struct tape_device *device, struct tape_request *request)
{
DBF_EVENT(3, "Error Recovery successful for %s\n",
tape_op_verbose[request->op]);
return tape_3590_done(device, request);
}
/*
 * This function is called when error recovery was not successful
*/
static inline int
tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
struct irb *irb, int rc)
{
DBF_EVENT(3, "Error Recovery failed for %s\n",
tape_op_verbose[request->op]);
tape_dump_sense_dbf(device, request, irb);
return rc;
}
/*
 * Error Recovery: retry the request
*/
static inline int
tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]);
tape_dump_sense_dbf(device, request, irb);
return TAPE_IO_RETRY;
}
/*
* Handle unsolicited interrupts
*/
static int
tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
{
if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
/* Probably result of halt ssch */
return TAPE_IO_PENDING;
else if (irb->scsw.cmd.dstat == 0x85)
/* Device Ready */
DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
tape_3590_schedule_work(device, TO_READ_ATTMSG);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
tape_dump_sense_dbf(device, NULL, irb);
}
/* check medium state */
tape_3590_schedule_work(device, TO_MSEN);
return TAPE_IO_SUCCESS;
}
/*
* Basic Recovery routine
*/
static int
tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
struct irb *irb, int rc)
{
struct tape_3590_sense *sense;
sense = (struct tape_3590_sense *) irb->ecw;
switch (sense->bra) {
case SENSE_BRA_PER:
return tape_3590_erp_failed(device, request, irb, rc);
case SENSE_BRA_CONT:
return tape_3590_erp_succeeded(device, request);
case SENSE_BRA_RE:
return tape_3590_erp_retry(device, request, irb);
case SENSE_BRA_DRE:
return tape_3590_erp_failed(device, request, irb, rc);
default:
BUG();
return TAPE_IO_STOP;
}
}
/*
* RDL: Read Device (buffered) log
*/
static int
tape_3590_erp_read_buf_log(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
/*
* We just do the basic error recovery at the moment (retry).
* Perhaps in the future, we read the log and dump it somewhere...
*/
return tape_3590_erp_basic(device, request, irb, -EIO);
}
/*
* SWAP: Swap Devices
*/
static int
tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
/*
* This error recovery should swap the tapes
* if the original has a problem. The operation
* should proceed with the new tape... this
* should probably be done in user space!
*/
dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
"different tape unit\n");
return tape_3590_erp_basic(device, request, irb, -EIO);
}
/*
* LBY: Long Busy
*/
static int
tape_3590_erp_long_busy(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
DBF_EVENT(6, "Device is busy\n");
return TAPE_IO_LONG_BUSY;
}
/*
* SPI: Special Intercept
*/
static int
tape_3590_erp_special_interrupt(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
return tape_3590_erp_basic(device, request, irb, -EIO);
}
/*
* RDA: Read Alternate
*/
static int
tape_3590_erp_read_alternate(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
struct tape_3590_disc_data *data;
/*
* The issued Read Backward or Read Previous command is not
	 * supported by the device.
	 * The recovery action should be to issue the other command:
	 * Read Previous: if Read Backward is not supported
* Read Backward: if Read Previous is not supported
*/
data = device->discdata;
if (data->read_back_op == READ_PREVIOUS) {
DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n",
device->cdev_id);
data->read_back_op = READ_BACKWARD;
} else {
DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n",
device->cdev_id);
data->read_back_op = READ_PREVIOUS;
}
tape_3590_read_opposite(device, request);
return tape_3590_erp_retry(device, request, irb);
}
/*
* Error Recovery read opposite
*/
static int
tape_3590_erp_read_opposite(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
switch (request->op) {
case TO_RFO:
/*
* We did read forward, but the data could not be read.
* We will read backward and then skip forward again.
*/
tape_3590_read_opposite(device, request);
return tape_3590_erp_retry(device, request, irb);
case TO_RBA:
		/* We tried to read forward and backward, but had no success */
		return tape_3590_erp_failed(device, request, irb, -EIO);
default:
return tape_3590_erp_failed(device, request, irb, -EIO);
}
}
/*
* Print an MIM (Media Information Message) (message code f0)
*/
static void
tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
char *exception, *service;
exception = kmalloc(BUFSIZE, GFP_ATOMIC);
service = kmalloc(BUFSIZE, GFP_ATOMIC);
if (!exception || !service)
goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f70.emc) {
case 0x02:
snprintf(exception, BUFSIZE, "Data degraded");
break;
case 0x03:
snprintf(exception, BUFSIZE, "Data degraded in partition %i",
sense->fmt.f70.mp);
break;
case 0x04:
snprintf(exception, BUFSIZE, "Medium degraded");
break;
case 0x05:
snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
sense->fmt.f70.mp);
break;
case 0x06:
snprintf(exception, BUFSIZE, "Block 0 Error");
break;
case 0x07:
snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
sense->fmt.f70.md);
break;
default:
snprintf(exception, BUFSIZE, "0x%02x",
sense->fmt.f70.emc);
break;
}
/* Service Message */
switch (sense->fmt.f70.smc) {
case 0x02:
snprintf(service, BUFSIZE, "Reference Media maintenance "
"procedure %i", sense->fmt.f70.md);
break;
default:
snprintf(service, BUFSIZE, "0x%02x",
sense->fmt.f70.smc);
break;
}
dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
"service %s\n", exception, service);
out_nomem:
kfree(exception);
kfree(service);
}
/*
* Print an I/O Subsystem Service Information Message (message code f1)
*/
static void
tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
char *exception, *service;
exception = kmalloc(BUFSIZE, GFP_ATOMIC);
service = kmalloc(BUFSIZE, GFP_ATOMIC);
if (!exception || !service)
goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f71.emc) {
case 0x01:
snprintf(exception, BUFSIZE, "Effect of failure is unknown");
break;
case 0x02:
snprintf(exception, BUFSIZE, "CU Exception - no performance "
"impact");
break;
case 0x03:
snprintf(exception, BUFSIZE, "CU Exception on channel "
"interface 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x04:
snprintf(exception, BUFSIZE, "CU Exception on device path "
"0x%02x", sense->fmt.f71.md[0]);
break;
case 0x05:
snprintf(exception, BUFSIZE, "CU Exception on library path "
"0x%02x", sense->fmt.f71.md[0]);
break;
case 0x06:
snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
sense->fmt.f71.md[0]);
break;
case 0x07:
snprintf(exception, BUFSIZE, "CU Exception on partition "
"0x%02x", sense->fmt.f71.md[0]);
break;
default:
snprintf(exception, BUFSIZE, "0x%02x",
sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
snprintf(service, BUFSIZE, "Repair impact is unknown");
break;
case 0x02:
snprintf(service, BUFSIZE, "Repair will not impact cu "
"performance");
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable node "
"0x%x on CU", sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable "
"nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable "
"channel path 0x%x on CU",
sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable channel"
" paths (0x%x-0x%x) on CU",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable device"
" path 0x%x on CU", sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable device"
" paths (0x%x-0x%x) on CU",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x06:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable "
"library path 0x%x on CU",
sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable "
"library paths (0x%x-0x%x) on CU",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
snprintf(service, BUFSIZE, "Repair will disable access to CU");
break;
default:
snprintf(service, BUFSIZE, "0x%02x",
sense->fmt.f71.smc);
}
dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
" %s, service %s\n", exception, service);
out_nomem:
kfree(exception);
kfree(service);
}
/*
 * Print a Device Subsystem Service Information Message (message code f2)
*/
static void
tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
char *exception, *service;
exception = kmalloc(BUFSIZE, GFP_ATOMIC);
service = kmalloc(BUFSIZE, GFP_ATOMIC);
if (!exception || !service)
goto out_nomem;
sense = (struct tape_3590_sense *) irb->ecw;
/* Exception Message */
switch (sense->fmt.f71.emc) {
case 0x01:
snprintf(exception, BUFSIZE, "Effect of failure is unknown");
break;
case 0x02:
snprintf(exception, BUFSIZE, "DV Exception - no performance"
" impact");
break;
case 0x03:
snprintf(exception, BUFSIZE, "DV Exception on channel "
"interface 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x04:
snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
sense->fmt.f71.md[0]);
break;
case 0x05:
snprintf(exception, BUFSIZE, "DV Exception on message display"
" 0x%02x", sense->fmt.f71.md[0]);
break;
case 0x06:
snprintf(exception, BUFSIZE, "DV Exception in tape path");
break;
case 0x07:
snprintf(exception, BUFSIZE, "DV Exception in drive");
break;
default:
snprintf(exception, BUFSIZE, "0x%02x",
sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
snprintf(service, BUFSIZE, "Repair impact is unknown");
break;
case 0x02:
snprintf(service, BUFSIZE, "Repair will not impact device "
"performance");
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable "
"channel path 0x%x on DV",
sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable "
"channel path (0x%x-0x%x) on DV",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable "
"interface 0x%x on DV", sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable "
"interfaces (0x%x-0x%x) on DV",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable loader"
" 0x%x on DV", sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable loader"
" (0x%x-0x%x) on DV",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
snprintf(service, BUFSIZE, "Repair will disable access to DV");
break;
case 0x08:
if (sense->fmt.f71.mdf == 0)
snprintf(service, BUFSIZE, "Repair will disable "
"message display 0x%x on DV",
sense->fmt.f71.md[1]);
else
snprintf(service, BUFSIZE, "Repair will disable "
"message displays (0x%x-0x%x) on DV",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x09:
snprintf(service, BUFSIZE, "Clean DV");
break;
default:
snprintf(service, BUFSIZE, "0x%02x",
sense->fmt.f71.smc);
}
dev_warn (&device->cdev->dev, "Device subsystem information: exception"
" %s, service %s\n", exception, service);
out_nomem:
kfree(exception);
kfree(service);
}
/*
* Print standard ERA Message
*/
static void
tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
{
struct tape_3590_sense *sense;
sense = (struct tape_3590_sense *) irb->ecw;
if (sense->mc == 0)
return;
if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
if (tape_3590_msg[sense->mc] != NULL)
dev_warn (&device->cdev->dev, "The tape unit has "
"issued sense message %s\n",
tape_3590_msg[sense->mc]);
else
dev_warn (&device->cdev->dev, "The tape unit has "
"issued an unknown sense message code 0x%x\n",
sense->mc);
return;
}
if (sense->mc == 0xf0) {
/* Standard Media Information Message */
dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
"RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
sense->fmt.f70.emc, sense->fmt.f70.smc,
sense->fmt.f70.refcode, sense->fmt.f70.mid,
sense->fmt.f70.fid);
tape_3590_print_mim_msg_f0(device, irb);
return;
}
if (sense->mc == 0xf1) {
/* Standard I/O Subsystem Service Information Message */
dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
" MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
sense->fmt.f71.sev, device->cdev->id.dev_model,
sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
sense->fmt.f71.refcode3);
tape_3590_print_io_sim_msg_f1(device, irb);
return;
}
if (sense->mc == 0xf2) {
/* Standard Device Service Information Message */
dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
sense->fmt.f71.sev, device->cdev->id.dev_model,
sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
sense->fmt.f71.refcode3);
tape_3590_print_dev_sim_msg_f2(device, irb);
return;
}
if (sense->mc == 0xf3) {
/* Standard Library Service Information Message */
return;
}
dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
"sense message code %x\n", sense->mc);
}
static int tape_3590_crypt_error(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
u8 cu_rc;
u16 ekm_rc2;
char *sense;
sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
cu_rc = sense[0];
ekm_rc2 = *((u16*) &sense[10]);
if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
/* key not defined on EKM */
return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
if ((cu_rc == 1) || (cu_rc == 2))
/* No connection to EKM */
return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
"encryption key from EKM\n");
return tape_3590_erp_basic(device, request, irb, -ENOKEY);
}
/*
* 3590 error Recovery routine:
* If possible, it tries to recover from the error. If this is not possible,
* inform the user about the problem.
*/
static int
tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
struct tape_3590_sense *sense;
sense = (struct tape_3590_sense *) irb->ecw;
DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
/*
	 * First check all RC-RQCs where we want to do something special;
	 * everything not handled explicitly ends up in the default case,
	 * which performs basic error recovery.
*/
switch (sense->rc_rqc) {
case 0x1110:
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_read_buf_log(device, request, irb);
case 0x2011:
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_read_alternate(device, request, irb);
case 0x2230:
case 0x2231:
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_special_interrupt(device, request, irb);
case 0x2240:
return tape_3590_crypt_error(device, request, irb);
case 0x3010:
DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
device->cdev_id);
return tape_3590_erp_basic(device, request, irb, -ENOSPC);
case 0x3012:
DBF_EVENT(2, "(%08x): Forward at End of Partition\n",
device->cdev_id);
return tape_3590_erp_basic(device, request, irb, -ENOSPC);
case 0x3020:
DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id);
return tape_3590_erp_basic(device, request, irb, -ENOSPC);
case 0x3122:
DBF_EVENT(2, "(%08x): Rewind Unload initiated\n",
device->cdev_id);
return tape_3590_erp_basic(device, request, irb, -EIO);
case 0x3123:
DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
device->cdev_id);
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
return tape_3590_erp_basic(device, request, irb, 0);
case 0x4010:
/*
		 * print an additional message since the default message
		 * "device intervention" is not very meaningful
*/
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
case 0x4012: /* Device Long Busy */
/* XXX: Also use long busy handling here? */
DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_basic(device, request, irb, -EBUSY);
case 0x4014:
DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
return tape_3590_erp_long_busy(device, request, irb);
case 0x5010:
if (sense->rac == 0xd0) {
/* Swap */
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_swap(device, request, irb);
}
if (sense->rac == 0x26) {
/* Read Opposite */
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_read_opposite(device, request,
irb);
}
return tape_3590_erp_basic(device, request, irb, -EIO);
case 0x5020:
case 0x5021:
case 0x5022:
case 0x5040:
case 0x5041:
case 0x5042:
tape_3590_print_era_msg(device, irb);
return tape_3590_erp_swap(device, request, irb);
case 0x5110:
case 0x5111:
return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
case 0x5120:
case 0x1120:
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
case 0x6020:
return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
case 0x8011:
return tape_3590_erp_basic(device, request, irb, -EPERM);
case 0x8013:
dev_warn (&device->cdev->dev, "A different host has privileged"
" access to the tape unit\n");
return tape_3590_erp_basic(device, request, irb, -EPERM);
default:
return tape_3590_erp_basic(device, request, irb, -EIO);
}
}
/*
* 3590 interrupt handler:
*/
static int
tape_3590_irq(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (request == NULL)
return tape_3590_unsolicited_irq(device, irb);
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
DBF_EVENT(2, "End of volume\n");
return tape_3590_erp_failed(device, request, irb, -ENOSPC);
}
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
return tape_3590_unit_check(device, request, irb);
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
if (request->op == TO_FSB || request->op == TO_BSB)
request->rescnt++;
else
DBF_EVENT(5, "Unit Exception!\n");
}
return tape_3590_done(device, request);
}
if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
DBF_EVENT(2, "channel end\n");
return TAPE_IO_PENDING;
}
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
DBF_EVENT(2, "Unit Attention when busy..\n");
return TAPE_IO_PENDING;
}
DBF_EVENT(6, "xunknownirq\n");
tape_dump_sense_dbf(device, request, irb);
return TAPE_IO_STOP;
}
static int tape_3590_read_dev_chars(struct tape_device *device,
struct tape_3590_rdc_data *rdc_data)
{
int rc;
struct tape_request *request;
request = tape_alloc_request(1, sizeof(*rdc_data));
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RDC;
tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
request->cpdata);
rc = tape_do_io(device, request);
if (rc == 0)
memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
tape_free_request(request);
return rc;
}
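/*
 * Byte 31 of the returned RDC data is evaluated in tape_3590_setup_device()
 * below: the value 0x13 marks the drive as encryption capable (3592 crypto).
 */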
/*
* Setup device function
*/
static int
tape_3590_setup_device(struct tape_device *device)
{
int rc;
struct tape_3590_disc_data *data;
struct tape_3590_rdc_data *rdc_data;
DBF_EVENT(6, "3590 device setup\n");
data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
if (data == NULL)
return -ENOMEM;
data->read_back_op = READ_PREVIOUS;
device->discdata = data;
rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
if (!rdc_data) {
rc = -ENOMEM;
goto fail_kmalloc;
}
rc = tape_3590_read_dev_chars(device, rdc_data);
if (rc) {
DBF_LH(3, "Read device characteristics failed!\n");
goto fail_rdc_data;
}
rc = tape_std_assign(device);
if (rc)
goto fail_rdc_data;
if (rdc_data->data[31] == 0x13) {
data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
tape_3592_disable_crypt(device);
} else {
DBF_EVENT(6, "Device has NO crypto support\n");
}
/* Try to find out if medium is loaded */
rc = tape_3590_sense_medium(device);
if (rc) {
DBF_LH(3, "3590 medium sense returned %d\n", rc);
goto fail_rdc_data;
}
return 0;
fail_rdc_data:
kfree(rdc_data);
fail_kmalloc:
kfree(data);
return rc;
}
/*
* Cleanup device function
*/
static void
tape_3590_cleanup_device(struct tape_device *device)
{
flush_workqueue(tape_3590_wq);
tape_std_unassign(device);
kfree(device->discdata);
device->discdata = NULL;
}
/*
* List of 3590 magnetic tape commands.
*/
static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = {
[MTRESET] = tape_std_mtreset,
[MTFSF] = tape_std_mtfsf,
[MTBSF] = tape_std_mtbsf,
[MTFSR] = tape_std_mtfsr,
[MTBSR] = tape_std_mtbsr,
[MTWEOF] = tape_std_mtweof,
[MTREW] = tape_std_mtrew,
[MTOFFL] = tape_std_mtoffl,
[MTNOP] = tape_std_mtnop,
[MTRETEN] = tape_std_mtreten,
[MTBSFM] = tape_std_mtbsfm,
[MTFSFM] = tape_std_mtfsfm,
[MTEOM] = tape_std_mteom,
[MTERASE] = tape_std_mterase,
[MTRAS1] = NULL,
[MTRAS2] = NULL,
[MTRAS3] = NULL,
[MTSETBLK] = tape_std_mtsetblk,
[MTSETDENSITY] = NULL,
[MTSEEK] = tape_3590_mtseek,
[MTTELL] = tape_3590_mttell,
[MTSETDRVBUFFER] = NULL,
[MTFSS] = NULL,
[MTBSS] = NULL,
[MTWSM] = NULL,
[MTLOCK] = NULL,
[MTUNLOCK] = NULL,
[MTLOAD] = tape_std_mtload,
[MTUNLOAD] = tape_std_mtunload,
[MTCOMPRESSION] = tape_std_mtcompression,
[MTSETPART] = NULL,
[MTMKPART] = NULL
};
/*
* Tape discipline structure for 3590.
*/
static struct tape_discipline tape_discipline_3590 = {
.owner = THIS_MODULE,
.setup_device = tape_3590_setup_device,
.cleanup_device = tape_3590_cleanup_device,
.process_eov = tape_std_process_eov,
.irq = tape_3590_irq,
.read_block = tape_std_read_block,
.write_block = tape_std_write_block,
.ioctl_fn = tape_3590_ioctl,
.mtop_array = tape_3590_mtop
};
static struct ccw_device_id tape_3590_ids[] = {
{CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
{CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
{ /* end of list */ }
};
static int
tape_3590_online(struct ccw_device *cdev)
{
return tape_generic_online(dev_get_drvdata(&cdev->dev),
&tape_discipline_3590);
}
static struct ccw_driver tape_3590_driver = {
.driver = {
.name = "tape_3590",
.owner = THIS_MODULE,
},
.ids = tape_3590_ids,
.probe = tape_generic_probe,
.remove = tape_generic_remove,
.set_offline = tape_generic_offline,
.set_online = tape_3590_online,
.int_class = IRQIO_TAP,
};
/*
* Setup discipline structure.
*/
static int
tape_3590_init(void)
{
int rc;
TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long));
debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
debug_set_level(TAPE_DBF_AREA, 6);
#endif
DBF_EVENT(3, "3590 init\n");
tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
if (!tape_3590_wq)
return -ENOMEM;
/* Register driver for 3590 tapes. */
rc = ccw_driver_register(&tape_3590_driver);
if (rc) {
destroy_workqueue(tape_3590_wq);
DBF_EVENT(3, "3590 init failed\n");
} else
DBF_EVENT(3, "3590 registered\n");
return rc;
}
static void
tape_3590_exit(void)
{
ccw_driver_unregister(&tape_3590_driver);
destroy_workqueue(tape_3590_wq);
debug_unregister(TAPE_DBF_AREA);
}
MODULE_DEVICE_TABLE(ccw, tape_3590_ids);
MODULE_AUTHOR("(C) 2001,2006 IBM Corporation");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
MODULE_LICENSE("GPL");
module_init(tape_3590_init);
module_exit(tape_3590_exit);
| linux-master | drivers/s390/char/tape_3590.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004
*
* Tape class device support
*
* Author: Stefan Bader <[email protected]>
* Based on simple class device code by Greg K-H
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <[email protected]>");
MODULE_DESCRIPTION(
"Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
 * Register a tape device and return a pointer to the cdev structure.
 *
 * device
 *	The pointer to the struct device of the physical (base) device.
 * dev
 *	The intended major/minor number. The major number may be 0 to
 *	get a dynamic major number.
 * fops
 *	The pointer to the driver's file operations for the tape device.
 * device_name
 *	The name of the character device.
 * mode_name
 *	The name of the sysfs link that is created below the base device
 *	and points to the class device.
 */
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name)
{
struct tape_class_device * tcd;
int rc;
char * s;
tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
if (!tcd)
return ERR_PTR(-ENOMEM);
strscpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
strscpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
tcd->char_device = cdev_alloc();
if (!tcd->char_device) {
rc = -ENOMEM;
goto fail_with_tcd;
}
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
rc = cdev_add(tcd->char_device, dev, 1);
if (rc)
goto fail_with_cdev;
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = PTR_ERR_OR_ZERO(tcd->class_device);
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
&device->kobj,
&tcd->class_device->kobj,
tcd->mode_name
);
if (rc)
goto fail_with_class_device;
return tcd;
fail_with_class_device:
device_destroy(tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
fail_with_tcd:
kfree(tcd);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
}
EXPORT_SYMBOL(unregister_tape_dev);
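/*
 * Illustrative (hypothetical) caller sketch. Only the two signatures above
 * are taken as given; the major number, fops structure and device names are
 * assumptions made for the example.
 *
 *	struct tape_class_device *tcd;
 *
 *	tcd = register_tape_dev(&cdev->dev, MKDEV(tape_major, minor),
 *				&tape_fops, "ntibm0", "non-rewinding");
 *	if (IS_ERR(tcd))
 *		return PTR_ERR(tcd);
 *	...
 *	unregister_tape_dev(&cdev->dev, tcd);
 */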
static int __init tape_init(void)
{
	tape_class = class_create("tape390");
	return PTR_ERR_OR_ZERO(tape_class);
}
static void __exit tape_exit(void)
{
class_destroy(tape_class);
tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);
| linux-master | drivers/s390/char/tape_class.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP VT220 terminal driver.
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Peter Oberparleiter <[email protected]>
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/panic_notifier.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "sclp.h"
#include "ctrlchar.h"
#define SCLP_VT220_MAJOR TTY_MAJOR
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
#define SCLP_VT220_DEVICE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_INDEX 0 /* console=ttysclp0 */
/* Representation of a single write request */
struct sclp_vt220_request {
struct list_head list;
struct sclp_req sclp_req;
int retry_count;
};
/* VT220 SCCB */
struct sclp_vt220_sccb {
struct sccb_header header;
struct evbuf_header evbuf;
};
#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
sizeof(struct sclp_vt220_request) - \
sizeof(struct sclp_vt220_sccb))
/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;
static struct tty_port sclp_vt220_port;
/* Lock to protect internal data from concurrent access */
static DEFINE_SPINLOCK(sclp_vt220_lock);
/* List of empty pages to be used as write request buffers */
static LIST_HEAD(sclp_vt220_empty);
/* List of pending requests */
static LIST_HEAD(sclp_vt220_outqueue);
/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;
/* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */
static struct timer_list sclp_vt220_timer;
/* Pointer to current request buffer which has been partially filled but not
* yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;
/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;
/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;
/* Flag indicating that sclp_vt220_current_request should really
* have been already queued but wasn't because the SCLP was processing
* another buffer */
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
};
/* Registration structure for SCLP input event buffers */
static struct sclp_register sclp_vt220_register_input = {
.receive_mask = EVTYP_VT220MSG_MASK,
.receiver_fn = sclp_vt220_receiver_fn,
};
/*
* Put provided request buffer back into queue and check emit pending
* buffers if necessary.
*/
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
unsigned long flags;
void *page;
do {
/* Put buffer back to list of empty buffers */
page = request->sclp_req.sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
/* Move request from outqueue to empty queue */
list_del(&request->list);
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
/* Check if there is a pending buffer on the out queue. */
request = NULL;
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
if (!request) {
sclp_vt220_queue_running = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
} while (__sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current();
tty_port_tty_wakeup(&sclp_vt220_port);
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* Callback through which the result of a write request is reported by the
* SCLP.
*/
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
struct sclp_vt220_request *vt220_request;
struct sclp_vt220_sccb *sccb;
vt220_request = (struct sclp_vt220_request *) data;
if (request->status == SCLP_REQ_FAILED) {
sclp_vt220_process_queue(vt220_request);
return;
}
sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
/* Check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
break;
case 0x05f0: /* Target resource in improper state */
break;
case 0x0340: /* Contained SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
/* Remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* Not all buffers were processed */
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
}
break;
case 0x0040: /* SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
break;
default:
break;
}
sclp_vt220_process_queue(vt220_request);
}
/*
* Emit vt220 request buffer to SCLP. Return zero on success, non-zero
* otherwise.
*/
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sclp_req.status = SCLP_REQ_FILLED;
request->sclp_req.callback = sclp_vt220_callback;
request->sclp_req.callback_data = (void *) request;
return sclp_add_request(&request->sclp_req);
}
/*
* Queue and emit current request.
*/
static void
sclp_vt220_emit_current(void)
{
unsigned long flags;
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (sclp_vt220_current_request) {
sccb = (struct sclp_vt220_sccb *)
sclp_vt220_current_request->sclp_req.sccb;
/* Only emit buffers with content */
if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
list_add_tail(&sclp_vt220_current_request->list,
&sclp_vt220_outqueue);
sclp_vt220_current_request = NULL;
del_timer(&sclp_vt220_timer);
}
sclp_vt220_flush_later = 0;
}
if (sclp_vt220_queue_running)
goto out_unlock;
if (list_empty(&sclp_vt220_outqueue))
goto out_unlock;
request = list_first_entry(&sclp_vt220_outqueue,
struct sclp_vt220_request, list);
sclp_vt220_queue_running = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (__sclp_vt220_emit(request))
sclp_vt220_process_queue(request);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
#define SCLP_NORMAL_WRITE 0x00
/*
* Helper function to initialize a page with the sclp request structure.
*/
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
/* Place request structure at end of page */
request = ((struct sclp_vt220_request *)
((addr_t) page + PAGE_SIZE)) - 1;
request->retry_count = 0;
request->sclp_req.sccb = page;
/* SCCB goes at start of page */
sccb = (struct sclp_vt220_sccb *) page;
memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
sccb->header.length = sizeof(struct sclp_vt220_sccb);
sccb->header.function_code = SCLP_NORMAL_WRITE;
sccb->header.response_code = 0x0000;
sccb->evbuf.type = EVTYP_VT220MSG;
sccb->evbuf.length = sizeof(struct evbuf_header);
return request;
}
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
sccb->header.length;
}
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return sccb->evbuf.length - sizeof(struct evbuf_header);
}
/*
* Add msg to buffer associated with request. Return the number of characters
* added.
*/
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
const unsigned char *msg, int count, int convertlf)
{
struct sclp_vt220_sccb *sccb;
void *buffer;
unsigned char c;
int from;
int to;
if (count > sclp_vt220_space_left(request))
count = sclp_vt220_space_left(request);
if (count <= 0)
return 0;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
buffer = (void *) ((addr_t) sccb + sccb->header.length);
if (convertlf) {
/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
for (from=0, to=0;
(from < count) && (to < sclp_vt220_space_left(request));
from++) {
/* Retrieve character */
c = msg[from];
/* Perform conversion */
if (c == 0x0a) {
if (to + 1 < sclp_vt220_space_left(request)) {
((unsigned char *) buffer)[to++] = c;
((unsigned char *) buffer)[to++] = 0x0d;
} else
break;
} else
((unsigned char *) buffer)[to++] = c;
}
sccb->header.length += to;
sccb->evbuf.length += to;
return from;
} else {
memcpy(buffer, (const void *) msg, count);
sccb->header.length += count;
sccb->evbuf.length += count;
return count;
}
}
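/*
 * Example: with a non-zero convertlf, the 5-byte message "ab\ncd" is stored
 * as the 6 bytes "ab\n\rcd", i.e. each 0x0a is followed by an inserted 0x0d
 * so the terminal performs both line feed and carriage return.
 */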
/*
* Emit buffer after having waited long enough for more data to arrive.
*/
static void
sclp_vt220_timeout(struct timer_list *unused)
{
sclp_vt220_emit_current();
}
#define BUFFER_MAX_DELAY HZ/20
/*
 * Drop oldest console buffer if sclp_console_drop is set
*/
static int
sclp_vt220_drop_buffer(void)
{
struct list_head *list;
struct sclp_vt220_request *request;
void *page;
if (!sclp_console_drop)
return 0;
list = sclp_vt220_outqueue.next;
if (sclp_vt220_queue_running)
/* The first element is in I/O */
list = list->next;
if (list == &sclp_vt220_outqueue)
return 0;
list_del(list);
request = list_entry(list, struct sclp_vt220_request, list);
page = request->sclp_req.sccb;
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
return 1;
}
/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. If the data does not fit into
 * the current write buffer, emit the current one and allocate a new one.
 * If there are no more empty buffers available, wait until one gets
 * emptied, unless MAY_FAIL is non-zero - in that case return immediately
 * with the number of bytes that could be stored so far. If DO_SCHEDULE is
 * non-zero, the buffer will be scheduled for emitting after a timeout -
 * otherwise the caller has to explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the
 * message buffer should be converted to 0x0a 0x0d. After completion, return
 * the number of bytes written.
 */
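/*
 * Parameter combinations used by the callers in this file:
 *	tty write:	do_schedule=1, convertlf=0, may_fail=1
 *	tty put_char:	do_schedule=0, convertlf=0, may_fail=1
 *	console write:	do_schedule=1, convertlf=1, may_fail=0
 * Only the console path converts line feeds and may wait (or drop old
 * buffers) when no empty output page is available.
 */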
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
do {
/* Create an sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) {
if (list_empty(&sclp_vt220_empty))
sclp_console_full++;
while (list_empty(&sclp_vt220_empty)) {
if (may_fail)
goto out;
if (sclp_vt220_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
page = (void *) sclp_vt220_empty.next;
list_del((struct list_head *) page);
sclp_vt220_current_request =
sclp_vt220_initialize_page(page);
}
/* Try to write the string to the current request buffer */
written = sclp_vt220_add_msg(sclp_vt220_current_request,
buf, count, convertlf);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
buf += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after some time */
if (sclp_vt220_current_request != NULL &&
!timer_pending(&sclp_vt220_timer) && do_schedule) {
sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
add_timer(&sclp_vt220_timer);
}
out:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return overall_written;
}
/*
* This routine is called by the kernel to write a series of
* characters to the tty device. The characters may come from
* user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*/
static ssize_t
sclp_vt220_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
return __sclp_vt220_write(buf, count, 1, 0, 1);
}
#define SCLP_VT220_SESSION_ENDED 0x01
#define SCLP_VT220_SESSION_STARTED 0x80
#define SCLP_VT220_SESSION_DATA 0x00
#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_pressed;
static struct sysrq_work sysrq;
static void sclp_vt220_reset_session(void)
{
sysrq_pressed = 0;
}
static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
int i;
for (i = 0; i < count; i++) {
/* Handle magic sys request */
if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
/*
* If pressed again, reset sysrq_pressed
* and flip CTRL-O character
*/
sysrq_pressed = !sysrq_pressed;
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
sysrq.key = buffer[i];
schedule_sysrq_work(&sysrq);
sysrq_pressed = 0;
continue;
}
tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
}
}
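/*
 * Example: typing CTRL-O followed by 't' on the SCLP VT220 terminal
 * dispatches sysrq 't' (dump task states) via schedule_sysrq_work(), while
 * typing CTRL-O twice inserts a literal CTRL-O character instead.
 */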
#else
static void sclp_vt220_reset_session(void)
{
}
static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
tty_insert_flip_string(&sclp_vt220_port, buffer, count);
}
#endif
/*
* Called by the SCLP to report incoming event buffers.
*/
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
char *buffer;
unsigned int count;
buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
count = evbuf->length - sizeof(struct evbuf_header);
switch (*buffer) {
case SCLP_VT220_SESSION_ENDED:
case SCLP_VT220_SESSION_STARTED:
sclp_vt220_reset_session();
break;
case SCLP_VT220_SESSION_DATA:
/* Send input to line discipline */
buffer++;
count--;
sclp_vt220_handle_input(buffer, count);
tty_flip_buffer_push(&sclp_vt220_port);
break;
}
}
/*
* This routine is called when a particular tty device is opened.
*/
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
tty_port_tty_set(&sclp_vt220_port, tty);
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = 24;
tty->winsize.ws_col = 80;
}
}
return 0;
}
/*
* This routine is called when a particular tty device is closed.
*/
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1)
tty_port_tty_set(&sclp_vt220_port, NULL);
}
/*
* This routine is called by the kernel to write a single
* character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver.
*/
static int
sclp_vt220_put_char(struct tty_struct *tty, u8 ch)
{
return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
* This routine is called by the kernel after it has written a
* series of characters to the tty device using put_char().
*/
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
if (!sclp_vt220_queue_running)
sclp_vt220_emit_current();
else
sclp_vt220_flush_later = 1;
}
/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is asserted.
*/
static unsigned int
sclp_vt220_write_room(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
unsigned int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_space_left(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_empty)
count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
* Return number of buffered chars.
*/
static unsigned int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_vt220_request *r;
unsigned int count = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_chars_stored(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_outqueue) {
r = list_entry(l, struct sclp_vt220_request, list);
count += sclp_vt220_chars_stored(r);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
* Pass on all buffers to the hardware. Return only when there are no more
* buffers pending.
*/
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
sclp_vt220_emit_current();
}
/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
struct list_head *page, *p;
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
free_page((unsigned long) page);
}
}
/* Release memory and unregister from sclp core. Controlled by init counting -
* only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
sclp_vt220_init_count--;
if (sclp_vt220_init_count != 0)
return;
sclp_unregister(&sclp_vt220_register);
__sclp_vt220_free_pages();
tty_port_destroy(&sclp_vt220_port);
}
/* Allocate buffer pages and register with sclp core. Controlled by init
* counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
void *page;
int i;
int rc;
sclp_vt220_init_count++;
if (sclp_vt220_init_count != 1)
return 0;
timer_setup(&sclp_vt220_timer, sclp_vt220_timeout, 0);
tty_port_init(&sclp_vt220_port);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
goto out;
list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
if (rc) {
__sclp_vt220_free_pages();
sclp_vt220_init_count--;
tty_port_destroy(&sclp_vt220_port);
}
return rc;
}
static const struct tty_operations sclp_vt220_ops = {
.open = sclp_vt220_open,
.close = sclp_vt220_close,
.write = sclp_vt220_write,
.put_char = sclp_vt220_put_char,
.flush_chars = sclp_vt220_flush_chars,
.write_room = sclp_vt220_write_room,
.chars_in_buffer = sclp_vt220_chars_in_buffer,
.flush_buffer = sclp_vt220_flush_buffer,
};
/*
* Register driver with SCLP and Linux and initialize internal tty structures.
*/
static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
* symmetry between VM and LPAR systems regarding ttyS1. */
driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
rc = __sclp_vt220_init(MAX_KMEM_PAGES);
if (rc)
goto out_driver;
driver->driver_name = SCLP_VT220_DRIVER_NAME;
driver->name = SCLP_VT220_DEVICE_NAME;
driver->major = SCLP_VT220_MAJOR;
driver->minor_start = SCLP_VT220_MINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &sclp_vt220_ops);
tty_port_link_device(&sclp_vt220_port, driver, 0);
rc = tty_register_driver(driver);
if (rc)
goto out_init;
rc = sclp_register(&sclp_vt220_register_input);
if (rc)
goto out_reg;
sclp_vt220_driver = driver;
return 0;
out_reg:
tty_unregister_driver(driver);
out_init:
__sclp_vt220_cleanup();
out_driver:
tty_driver_kref_put(driver);
return rc;
}
__initcall(sclp_vt220_tty_init);
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
*index = 0;
return sclp_vt220_driver;
}
/*
* This panic/reboot notifier runs in atomic context, so
* locking restrictions apply to prevent potential lockups.
*/
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
{
unsigned long flags;
if (spin_is_locked(&sclp_vt220_lock))
return NOTIFY_DONE;
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
del_timer(&sclp_vt220_timer);
while (sclp_vt220_queue_running) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_vt220_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_vt220_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
.name = SCLP_VT220_CONSOLE_NAME,
.write = sclp_vt220_con_write,
.device = sclp_vt220_con_device,
.flags = CON_PRINTBUFFER,
.index = SCLP_VT220_CONSOLE_INDEX
};
static int __init
sclp_vt220_con_init(void)
{
int rc;
rc = __sclp_vt220_init(sclp_console_pages);
if (rc)
return rc;
/* Attach linux console */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_vt220_console);
return 0;
}
console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */
| linux-master | drivers/s390/char/sclp_vt220.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP Store Data support and sysfs interface
*
* Copyright IBM Corp. 2017
*/
#define KMSG_COMPONENT "sclp_sd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/async.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <asm/pgalloc.h>
#include "sclp.h"
#define SD_EQ_STORE_DATA 0
#define SD_EQ_HALT 1
#define SD_EQ_SIZE 2
#define SD_DI_CONFIG 3
struct sclp_sd_evbuf {
struct evbuf_header hdr;
u8 eq;
u8 di;
u8 rflags;
u64 :56;
u32 id;
u16 :16;
u8 fmt;
u8 status;
u64 sat;
u64 sa;
u32 esize;
u32 dsize;
} __packed;
struct sclp_sd_sccb {
struct sccb_header hdr;
struct sclp_sd_evbuf evbuf;
} __packed __aligned(PAGE_SIZE);
/**
* struct sclp_sd_data - Result of a Store Data request
* @esize_bytes: Resulting esize in bytes
* @dsize_bytes: Resulting dsize in bytes
* @data: Pointer to data - must be released using vfree()
*/
struct sclp_sd_data {
size_t esize_bytes;
size_t dsize_bytes;
void *data;
};
/**
* struct sclp_sd_listener - Listener for asynchronous Store Data response
* @list: For enqueueing this struct
* @id: Event ID of response to listen for
* @completion: Can be used to wait for response
* @evbuf: Contains the resulting Store Data response after completion
*/
struct sclp_sd_listener {
struct list_head list;
u32 id;
struct completion completion;
struct sclp_sd_evbuf evbuf;
};
/**
* struct sclp_sd_file - Sysfs representation of a Store Data entity
* @kobj: Kobject
* @data_attr: Attribute for accessing data contents
* @data_mutex: Mutex to serialize access and updates to @data
* @data: Data associated with this entity
* @di: DI value associated with this entity
*/
struct sclp_sd_file {
struct kobject kobj;
struct bin_attribute data_attr;
struct mutex data_mutex;
struct sclp_sd_data data;
u8 di;
};
#define to_sd_file(x) container_of(x, struct sclp_sd_file, kobj)
static struct kset *sclp_sd_kset;
static struct sclp_sd_file *config_file;
static LIST_HEAD(sclp_sd_queue);
static DEFINE_SPINLOCK(sclp_sd_queue_lock);
/**
* sclp_sd_listener_add() - Add listener for Store Data responses
* @listener: Listener to add
*/
static void sclp_sd_listener_add(struct sclp_sd_listener *listener)
{
spin_lock_irq(&sclp_sd_queue_lock);
list_add_tail(&listener->list, &sclp_sd_queue);
spin_unlock_irq(&sclp_sd_queue_lock);
}
/**
* sclp_sd_listener_remove() - Remove listener for Store Data responses
* @listener: Listener to remove
*/
static void sclp_sd_listener_remove(struct sclp_sd_listener *listener)
{
spin_lock_irq(&sclp_sd_queue_lock);
list_del(&listener->list);
spin_unlock_irq(&sclp_sd_queue_lock);
}
/**
* sclp_sd_listener_init() - Initialize a Store Data response listener
* @listener: Response listener to initialize
* @id: Event ID to listen for
*
* Initialize a listener for asynchronous Store Data responses. This listener
* can afterwards be used to wait for a specific response and to retrieve
* the associated response data.
*/
static void sclp_sd_listener_init(struct sclp_sd_listener *listener, u32 id)
{
memset(listener, 0, sizeof(*listener));
listener->id = id;
init_completion(&listener->completion);
}
/**
* sclp_sd_receiver() - Receiver for Store Data events
* @evbuf_hdr: Header of received events
*
* Process Store Data events and complete listeners with matching event IDs.
*/
static void sclp_sd_receiver(struct evbuf_header *evbuf_hdr)
{
struct sclp_sd_evbuf *evbuf = (struct sclp_sd_evbuf *) evbuf_hdr;
struct sclp_sd_listener *listener;
int found = 0;
pr_debug("received event (id=0x%08x)\n", evbuf->id);
spin_lock(&sclp_sd_queue_lock);
list_for_each_entry(listener, &sclp_sd_queue, list) {
if (listener->id != evbuf->id)
continue;
listener->evbuf = *evbuf;
complete(&listener->completion);
found = 1;
break;
}
spin_unlock(&sclp_sd_queue_lock);
if (!found)
pr_debug("unsolicited event (id=0x%08x)\n", evbuf->id);
}
static struct sclp_register sclp_sd_register = {
.send_mask = EVTYP_STORE_DATA_MASK,
.receive_mask = EVTYP_STORE_DATA_MASK,
.receiver_fn = sclp_sd_receiver,
};
/**
* sclp_sd_sync() - Perform Store Data request synchronously
* @page: Address of work page - must be below 2GB
* @eq: Input EQ value
* @di: Input DI value
* @sat: Input SAT value
* @sa: Input SA value used to specify the address of the target buffer
* @dsize_ptr: Optional pointer to input and output DSIZE value
* @esize_ptr: Optional pointer to output ESIZE value
*
* Perform Store Data request with specified parameters and wait for completion.
*
* Return %0 on success and store resulting DSIZE and ESIZE values in
* @dsize_ptr and @esize_ptr (if provided). Return non-zero on error.
*/
static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
u32 *dsize_ptr, u32 *esize_ptr)
{
struct sclp_sd_sccb *sccb = (void *) page;
struct sclp_sd_listener listener;
struct sclp_sd_evbuf *evbuf;
int rc;
sclp_sd_listener_init(&listener, __pa(sccb));
sclp_sd_listener_add(&listener);
/* Prepare SCCB */
memset(sccb, 0, PAGE_SIZE);
sccb->hdr.length = sizeof(sccb->hdr) + sizeof(sccb->evbuf);
evbuf = &sccb->evbuf;
evbuf->hdr.length = sizeof(*evbuf);
evbuf->hdr.type = EVTYP_STORE_DATA;
evbuf->eq = eq;
evbuf->di = di;
evbuf->id = listener.id;
evbuf->fmt = 1;
evbuf->sat = sat;
evbuf->sa = sa;
if (dsize_ptr)
evbuf->dsize = *dsize_ptr;
/* Perform command */
pr_debug("request (eq=%d, di=%d, id=0x%08x)\n", eq, di, listener.id);
rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
pr_debug("request done (rc=%d)\n", rc);
if (rc)
goto out;
/* Evaluate response */
if (sccb->hdr.response_code == 0x73f0) {
pr_debug("event not supported\n");
rc = -EIO;
goto out_remove;
}
if (sccb->hdr.response_code != 0x0020 || !(evbuf->hdr.flags & 0x80)) {
rc = -EIO;
goto out;
}
if (!(evbuf->rflags & 0x80)) {
rc = wait_for_completion_interruptible(&listener.completion);
if (rc)
goto out;
evbuf = &listener.evbuf;
}
switch (evbuf->status) {
case 0:
if (dsize_ptr)
*dsize_ptr = evbuf->dsize;
if (esize_ptr)
*esize_ptr = evbuf->esize;
pr_debug("success (dsize=%u, esize=%u)\n", evbuf->dsize,
evbuf->esize);
break;
case 3:
rc = -ENOENT;
break;
default:
rc = -EIO;
break;
}
out:
if (rc && rc != -ENOENT) {
/* Provide some information about what went wrong */
pr_warn("Store Data request failed (eq=%d, di=%d, "
"response=0x%04x, flags=0x%02x, status=%d, rc=%d)\n",
eq, di, sccb->hdr.response_code, evbuf->hdr.flags,
evbuf->status, rc);
}
out_remove:
sclp_sd_listener_remove(&listener);
return rc;
}
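/*
 * Illustrative call sequence (this mirrors sclp_sd_store_data() below): first
 * query the entity size, then fetch the contents into a prepared buffer:
 *
 *	rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
 *	...
 *	rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data,
 *			  &dsize, &esize);
 *
 * DSIZE and ESIZE are exchanged in units of 4K pages and converted to bytes
 * by the caller.
 */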
/**
* sclp_sd_store_data() - Obtain data for specified Store Data entity
* @result: Resulting data
* @di: DI value associated with this entity
*
* Perform a series of Store Data requests to obtain the size and contents of
* the specified Store Data entity.
*
* Return:
* %0: Success - result is stored in @result. @result->data must be
* released using vfree() after use.
* %-ENOENT: No data available for this entity
* %<0: Other error
*/
static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
{
u32 dsize = 0, esize = 0;
unsigned long page, asce = 0;
void *data = NULL;
int rc;
page = __get_free_page(GFP_KERNEL | GFP_DMA);
if (!page)
return -ENOMEM;
/* Get size */
rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
if (rc)
goto out;
if (dsize == 0)
goto out_result;
/* Allocate memory */
data = vzalloc(array_size((size_t)dsize, PAGE_SIZE));
if (!data) {
rc = -ENOMEM;
goto out;
}
/* Get translation table for buffer */
asce = base_asce_alloc((unsigned long) data, dsize);
if (!asce) {
vfree(data);
rc = -ENOMEM;
goto out;
}
/* Get data */
rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize,
&esize);
if (rc) {
/* Cancel running request if interrupted */
if (rc == -ERESTARTSYS)
sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
vfree(data);
goto out;
}
out_result:
result->esize_bytes = (size_t) esize * PAGE_SIZE;
result->dsize_bytes = (size_t) dsize * PAGE_SIZE;
result->data = data;
out:
base_asce_free(asce);
free_page(page);
return rc;
}
/**
* sclp_sd_data_reset() - Reset Store Data result buffer
* @data: Data buffer to reset
*
* Reset @data to initial state and release associated memory.
*/
static void sclp_sd_data_reset(struct sclp_sd_data *data)
{
vfree(data->data);
data->data = NULL;
data->dsize_bytes = 0;
data->esize_bytes = 0;
}
/**
* sclp_sd_file_release() - Release function for sclp_sd_file object
* @kobj: Kobject embedded in sclp_sd_file object
*/
static void sclp_sd_file_release(struct kobject *kobj)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
sclp_sd_data_reset(&sd_file->data);
kfree(sd_file);
}
/**
* sclp_sd_file_update() - Update contents of sclp_sd_file object
* @sd_file: Object to update
*
* Obtain the current version of data associated with the Store Data entity
* @sd_file.
*
* On success, return %0 and generate a KOBJ_CHANGE event to indicate that the
* data may have changed. Return non-zero otherwise.
*/
static int sclp_sd_file_update(struct sclp_sd_file *sd_file)
{
const char *name = kobject_name(&sd_file->kobj);
struct sclp_sd_data data;
int rc;
rc = sclp_sd_store_data(&data, sd_file->di);
if (rc) {
if (rc == -ENOENT) {
pr_info("No data is available for the %s data entity\n",
name);
}
return rc;
}
mutex_lock(&sd_file->data_mutex);
sclp_sd_data_reset(&sd_file->data);
sd_file->data = data;
mutex_unlock(&sd_file->data_mutex);
pr_info("A %zu-byte %s data entity was retrieved\n", data.dsize_bytes,
name);
kobject_uevent(&sd_file->kobj, KOBJ_CHANGE);
return 0;
}
/**
* sclp_sd_file_update_async() - Wrapper for asynchronous update call
* @data: Object to update
* @cookie: Unused
*/
static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
{
struct sclp_sd_file *sd_file = data;
sclp_sd_file_update(sd_file);
}
/**
* reload_store() - Store function for "reload" sysfs attribute
* @kobj: Kobject of sclp_sd_file object
* @attr: Reload attribute
* @buf: Data written to sysfs attribute
* @count: Count of bytes written
*
* Initiate a reload of the data associated with an sclp_sd_file object.
*/
static ssize_t reload_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
sclp_sd_file_update(sd_file);
return count;
}
static struct kobj_attribute reload_attr = __ATTR_WO(reload);
static struct attribute *sclp_sd_file_default_attrs[] = {
&reload_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(sclp_sd_file_default);
static struct kobj_type sclp_sd_file_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = sclp_sd_file_release,
.default_groups = sclp_sd_file_default_groups,
};
/**
* data_read() - Read function for "data" sysfs attribute
* @file: Open file pointer
* @kobj: Kobject of sclp_sd_file object
* @attr: Data attribute
* @buffer: Target buffer
* @off: Requested file offset
* @size: Requested number of bytes
*
* Store the requested portion of the Store Data entity contents into the
* specified buffer. Return the number of bytes stored on success, or %0
* on EOF.
*/
static ssize_t data_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buffer,
loff_t off, size_t size)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
size_t data_size;
char *data;
mutex_lock(&sd_file->data_mutex);
data = sd_file->data.data;
data_size = sd_file->data.dsize_bytes;
if (!data || off >= data_size) {
size = 0;
} else {
if (off + size > data_size)
size = data_size - off;
memcpy(buffer, data + off, size);
}
mutex_unlock(&sd_file->data_mutex);
return size;
}
/**
* sclp_sd_file_create() - Add a sysfs file representing a Store Data entity
* @name: Name of file
* @di: DI value associated with this entity
*
* Create a sysfs directory with the given @name located under
*
* /sys/firmware/sclp_sd/
*
* The files in this directory can be used to access the contents of the Store
* Data entity associated with @DI.
*
* Return pointer to resulting sclp_sd_file object on success, %NULL otherwise.
* The object must be freed by calling kobject_put() on the embedded kobject
* pointer after use.
*/
static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
{
struct sclp_sd_file *sd_file;
int rc;
sd_file = kzalloc(sizeof(*sd_file), GFP_KERNEL);
if (!sd_file)
return NULL;
sd_file->di = di;
mutex_init(&sd_file->data_mutex);
/* Create kobject located under /sys/firmware/sclp_sd/ */
sd_file->kobj.kset = sclp_sd_kset;
rc = kobject_init_and_add(&sd_file->kobj, &sclp_sd_file_ktype, NULL,
"%s", name);
if (rc) {
kobject_put(&sd_file->kobj);
return NULL;
}
sysfs_bin_attr_init(&sd_file->data_attr);
sd_file->data_attr.attr.name = "data";
sd_file->data_attr.attr.mode = 0444;
sd_file->data_attr.read = data_read;
rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
if (rc) {
kobject_put(&sd_file->kobj);
return NULL;
}
/*
* For completeness only - users interested in entity data should listen
* for KOBJ_CHANGE instead.
*/
kobject_uevent(&sd_file->kobj, KOBJ_ADD);
/* Don't let a slow Store Data request delay further initialization */
async_schedule(sclp_sd_file_update_async, sd_file);
return sd_file;
}
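/*
 * For the "config" entity registered in sclp_sd_init() below this results in
 * the following sysfs layout (illustrative):
 *
 *	/sys/firmware/sclp_sd/config/data	- entity contents (read-only)
 *	/sys/firmware/sclp_sd/config/reload	- write to re-fetch the data
 */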
/**
* sclp_sd_init() - Initialize sclp_sd support and register sysfs files
*/
static __init int sclp_sd_init(void)
{
int rc;
rc = sclp_register(&sclp_sd_register);
if (rc)
return rc;
/* Create kset named "sclp_sd" located under /sys/firmware/ */
rc = -ENOMEM;
sclp_sd_kset = kset_create_and_add("sclp_sd", NULL, firmware_kobj);
if (!sclp_sd_kset)
goto err_kset;
rc = -EINVAL;
config_file = sclp_sd_file_create("config", SD_DI_CONFIG);
if (!config_file)
goto err_config;
return 0;
err_config:
kset_unregister(sclp_sd_kset);
err_kset:
sclp_unregister(&sclp_sd_register);
return rc;
}
device_initcall(sclp_sd_init);
| linux-master | drivers/s390/char/sclp_sd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, useable on LPAR
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe ([email protected])
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include "sclp.h"
#include "sclp_diag.h"
#include "sclp_ftp.h"
static DECLARE_COMPLETION(sclp_ftp_rx_complete);
static u8 sclp_ftp_ldflg;
static u64 sclp_ftp_fsize;
static u64 sclp_ftp_length;
/**
* sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
* @req: sclp request
* @data: pointer to struct completion
*/
static void sclp_ftp_txcb(struct sclp_req *req, void *data)
{
struct completion *completion = data;
#ifdef DEBUG
pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
req->sccb, 24, req->sccb);
#endif
complete(completion);
}
/**
* sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
* @evbuf: pointer to Diagnostic Test (ET7) event buffer
*/
static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
{
struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
/*
* Check for Diagnostic Test FTP Service
*/
if (evbuf->type != EVTYP_DIAG_TEST ||
diag->route != SCLP_DIAG_FTP_ROUTE ||
diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
return;
#ifdef DEBUG
pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
evbuf, 24, evbuf);
#endif
/*
* Because the event buffer is located in a page which is owned
* by the SCLP core, all data of interest must be copied. The
* error indication is in 'sclp_ftp_ldflg'
*/
sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
sclp_ftp_fsize = diag->mdd.ftp.fsize;
sclp_ftp_length = diag->mdd.ftp.length;
complete(&sclp_ftp_rx_complete);
}
/**
* sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
* @ftp: pointer to FTP descriptor
*
* Return: 0 on success, else a (negative) error code
*/
static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
{
struct completion completion;
struct sclp_diag_sccb *sccb;
struct sclp_req *req;
ssize_t len;
int rc;
req = kzalloc(sizeof(*req), GFP_KERNEL);
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!req || !sccb) {
rc = -ENOMEM;
goto out_free;
}
sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
sizeof(struct sccb_header);
sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
sccb->evbuf.mdd.ftp.srcflg = 0;
sccb->evbuf.mdd.ftp.pgsize = 0;
sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
sccb->evbuf.mdd.ftp.fsize = 0;
sccb->evbuf.mdd.ftp.cmd = ftp->id;
sccb->evbuf.mdd.ftp.offset = ftp->ofs;
sccb->evbuf.mdd.ftp.length = ftp->len;
sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
len = strscpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
if (len < 0) {
rc = -EINVAL;
goto out_free;
}
req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req->sccb = sccb;
req->status = SCLP_REQ_FILLED;
req->callback = sclp_ftp_txcb;
req->callback_data = &completion;
init_completion(&completion);
rc = sclp_add_request(req);
if (rc)
goto out_free;
/* Wait for end of ftp sclp command. */
wait_for_completion(&completion);
#ifdef DEBUG
pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
sccb->hdr.response_code, sccb->evbuf.hdr.flags);
#endif
/*
* Check if sclp accepted the request. The data transfer runs
* asynchronously and the completion is indicated with an
* sclp ET7 event.
*/
if (req->status != SCLP_REQ_DONE ||
(sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
(sccb->hdr.response_code & 0xffU) != 0x20U) {
rc = -EIO;
}
out_free:
free_page((unsigned long) sccb);
kfree(req);
return rc;
}
/**
* sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
* @ftp: pointer to FTP command specification
* @fsize: return of file size (or NULL if undesirable)
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure locking.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
ssize_t len;
#ifdef DEBUG
unsigned long start_jiffies;
pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
start_jiffies = jiffies;
#endif
init_completion(&sclp_ftp_rx_complete);
/* Start ftp sclp command. */
len = sclp_ftp_et7(ftp);
if (len)
goto out_unlock;
/*
* There is no way to cancel the sclp ET7 request, the code
* needs to wait unconditionally until the transfer is complete.
*/
wait_for_completion(&sclp_ftp_rx_complete);
#ifdef DEBUG
pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
(jiffies - start_jiffies) * 1000 / HZ);
pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
#endif
switch (sclp_ftp_ldflg) {
case SCLP_DIAG_FTP_OK:
len = sclp_ftp_length;
if (fsize)
*fsize = sclp_ftp_fsize;
break;
case SCLP_DIAG_FTP_LDNPERM:
len = -EPERM;
break;
case SCLP_DIAG_FTP_LDRUNS:
len = -EBUSY;
break;
case SCLP_DIAG_FTP_LDFAIL:
len = -ENOENT;
break;
default:
len = -EIO;
break;
}
out_unlock:
return len;
}
/*
* ET7 event listener
*/
static struct sclp_register sclp_ftp_event = {
.send_mask = EVTYP_DIAG_TEST_MASK, /* want tx events */
.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
.receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
.state_change_fn = NULL,
};
/**
* sclp_ftp_startup() - startup of FTP services, when running on LPAR
*/
int sclp_ftp_startup(void)
{
#ifdef DEBUG
unsigned long info;
#endif
int rc;
rc = sclp_register(&sclp_ftp_event);
if (rc)
return rc;
#ifdef DEBUG
info = get_zeroed_page(GFP_KERNEL);
if (info != 0) {
struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
info222->name[sizeof(info222->name) - 1] = '\0';
EBCASC_500(info222->name, sizeof(info222->name) - 1);
pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
info222->lpar_number, info222->name);
}
free_page(info);
}
#endif /* DEBUG */
return 0;
}
/**
* sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
*/
void sclp_ftp_shutdown(void)
{
sclp_unregister(&sclp_ftp_event);
}
| linux-master | drivers/s390/char/sclp_ftp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IOCTL interface for SCLP
*
* Copyright IBM Corp. 2012
*
* Author: Michael Holzheu <[email protected]>
*/
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <asm/sclp_ctl.h>
#include <asm/sclp.h>
#include "sclp.h"
/*
* Supported command words
*/
static unsigned int sclp_ctl_sccb_wlist[] = {
0x00400002,
0x00410002,
};
/*
* Check if command word is supported
*/
static int sclp_ctl_cmdw_supported(unsigned int cmdw)
{
int i;
for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
if (cmdw == sclp_ctl_sccb_wlist[i])
return 1;
}
return 0;
}
static void __user *u64_to_uptr(u64 value)
{
if (is_compat_task())
return compat_ptr(value);
else
return (void __user *)(unsigned long)value;
}
/*
* Start SCLP request
*/
static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
return -EFAULT;
if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
return -EOPNOTSUPP;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
copied = PAGE_SIZE -
copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
if (offsetof(struct sccb_header, length) +
sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
if (sccb->length < 8) {
rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
if (rc)
goto out_free;
if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
rc = -EFAULT;
out_free:
free_page((unsigned long) sccb);
return rc;
}
/*
* SCLP SCCB ioctl function
*/
static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
void __user *argp;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *) arg;
switch (cmd) {
case SCLP_CTL_SCCB:
return sclp_ctl_ioctl_sccb(argp);
default: /* unknown ioctl number */
return -ENOTTY;
}
}
/*
* File operations
*/
static const struct file_operations sclp_ctl_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.unlocked_ioctl = sclp_ctl_ioctl,
.compat_ioctl = sclp_ctl_ioctl,
.llseek = no_llseek,
};
/*
* Misc device definition
*/
static struct miscdevice sclp_ctl_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sclp",
.fops = &sclp_ctl_fops,
};
builtin_misc_device(sclp_ctl_device);
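/*
 * Minimal user-space sketch (illustrative only, error handling omitted; field
 * names match the uapi structure used above):
 *
 *	struct sclp_ctl_sccb ctl = {
 *		.cmdw = 0x00400002,
 *		.sccb = (__u64)(unsigned long) sccb_buf,
 *	};
 *	int fd = open("/dev/sclp", O_RDWR);
 *	ioctl(fd, SCLP_CTL_SCCB, &ctl);
 */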
| linux-master | drivers/s390/char/sclp_ctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* 3215 line mode terminal driver.
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <[email protected]>
*
* Updated:
* Aug-2000: Added tab support
* Dan Morrison, IBM Corporation <[email protected]>
*/
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/vt_kern.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/serial.h> /* ASYNC_* flags */
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <linux/io.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
#include <asm/delay.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>
#include "ctrlchar.h"
#define NR_3215 1
#define NR_3215_REQ (4*NR_3215)
#define RAW3215_BUFFER_SIZE 65536 /* output buffer size */
#define RAW3215_INBUF_SIZE 256 /* input buffer size */
#define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */
#define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */
#define RAW3215_MAX_BYTES 3968 /* max. bytes to write with one ssch */
#define RAW3215_MAX_NEWLINE 50 /* max. lines to write with one ssch */
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
#define RAW3215_FIXED 1 /* 3215 console device is not to be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
#define TAB_STOP_SIZE 8 /* tab stop size */
/*
* Request types for a 3215 device
*/
enum raw3215_type {
RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
};
/*
* Request structure for a 3215 device
*/
struct raw3215_req {
enum raw3215_type type; /* type of the request */
int start, len; /* start index & len in output buffer */
int delayable; /* indication to wait for more data */
int residual; /* residual count for read request */
struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
struct raw3215_info *info; /* pointer to main structure */
struct raw3215_req *next; /* pointer to next request */
} __attribute__ ((aligned(8)));
struct raw3215_info {
struct tty_port port;
struct ccw_device *cdev; /* device for tty driver */
spinlock_t *lock; /* pointer to irq lock */
int flags; /* state flags */
char *buffer; /* pointer to output buffer */
char *inbuf; /* pointer to input buffer */
int head; /* first free byte in output buffer */
int count; /* number of bytes in output buffer */
int written; /* number of bytes in write requests */
struct raw3215_req *queued_read; /* pointer to queued read requests */
struct raw3215_req *queued_write;/* pointer to queued write requests */
wait_queue_head_t empty_wait; /* wait queue for flushing */
struct timer_list timer; /* timer for delayed output */
int line_pos; /* position on the line (for tabs) */
char ubuffer[80]; /* copy_from_user buffer */
};
/* array of 3215 devices structures */
static struct raw3215_info *raw3215[NR_3215];
/* spinlock to protect the raw3215 array */
static DEFINE_SPINLOCK(raw3215_device_lock);
/* list of free request structures */
static struct raw3215_req *raw3215_freelist;
/* spinlock to protect free list */
static DEFINE_SPINLOCK(raw3215_freelist_lock);
static struct tty_driver *tty3215_driver;
static bool con3215_drop = true;
/*
* Get a request structure from the free list
*/
static inline struct raw3215_req *raw3215_alloc_req(void)
{
struct raw3215_req *req;
unsigned long flags;
spin_lock_irqsave(&raw3215_freelist_lock, flags);
req = raw3215_freelist;
raw3215_freelist = req->next;
spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
return req;
}
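/*
 * The freelist is populated with NR_3215_REQ entries in con3215_init(); since
 * a device holds at most one queued read, one queued write and one request
 * being worked on, the allocator appears to rely on the list never running
 * empty and does not check for NULL.
 */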
/*
* Put a request structure back to the free list
*/
static inline void raw3215_free_req(struct raw3215_req *req)
{
unsigned long flags;
if (req->type == RAW3215_FREE)
return; /* don't free a free request */
req->type = RAW3215_FREE;
spin_lock_irqsave(&raw3215_freelist_lock, flags);
req->next = raw3215_freelist;
raw3215_freelist = req;
spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
}
/*
 * Set up a read request that reads up to 160 bytes from the 3215 device.
* If there is a queued read request it is used, but that shouldn't happen
* because a 3215 terminal won't accept a new read before the old one is
* completed.
*/
static void raw3215_mk_read_req(struct raw3215_info *raw)
{
struct raw3215_req *req;
struct ccw1 *ccw;
/* there can only be ONE read request at a time */
req = raw->queued_read;
if (req == NULL) {
/* no queued read request, use new req structure */
req = raw3215_alloc_req();
req->type = RAW3215_READ;
req->info = raw;
raw->queued_read = req;
}
ccw = req->ccws;
ccw->cmd_code = 0x0A; /* read inquiry */
ccw->flags = 0x20; /* ignore incorrect length */
ccw->count = 160;
ccw->cda = (__u32)__pa(raw->inbuf);
}
/*
* Set up a write request with the information from the main structure.
* A ccw chain is created that writes as much as possible from the output
* buffer to the 3215 device. If a queued write exists it is replaced by
* the new, probably lengthened request.
*/
static void raw3215_mk_write_req(struct raw3215_info *raw)
{
struct raw3215_req *req;
struct ccw1 *ccw;
int len, count, ix, lines;
if (raw->count <= raw->written)
return;
/* check if there is a queued write request */
req = raw->queued_write;
if (req == NULL) {
/* no queued write request, use new req structure */
req = raw3215_alloc_req();
req->type = RAW3215_WRITE;
req->info = raw;
raw->queued_write = req;
} else {
raw->written -= req->len;
}
ccw = req->ccws;
req->start = (raw->head - raw->count + raw->written) &
(RAW3215_BUFFER_SIZE - 1);
/*
* now we have to count newlines. We can at max accept
* RAW3215_MAX_NEWLINE newlines in a single ssch due to
* a restriction in VM
*/
lines = 0;
ix = req->start;
while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
if (raw->buffer[ix] == 0x15)
lines++;
ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
}
len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
if (len > RAW3215_MAX_BYTES)
len = RAW3215_MAX_BYTES;
req->len = len;
raw->written += len;
/* set the indication if we should try to enlarge this request */
req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
ix = req->start;
while (len > 0) {
if (ccw > req->ccws)
ccw[-1].flags |= 0x40; /* use command chaining */
ccw->cmd_code = 0x01; /* write, auto carrier return */
ccw->flags = 0x20; /* ignore incorrect length ind. */
ccw->cda = (__u32)__pa(raw->buffer + ix);
count = len;
if (ix + count > RAW3215_BUFFER_SIZE)
count = RAW3215_BUFFER_SIZE - ix;
ccw->count = count;
len -= count;
ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
ccw++;
}
/*
* Add a NOP to the channel program. 3215 devices are purely
 * emulated and it's much better to avoid the channel end
* interrupt in this case.
*/
if (ccw > req->ccws)
ccw[-1].flags |= 0x40; /* use command chaining */
ccw->cmd_code = 0x03; /* NOP */
ccw->flags = 0;
ccw->cda = 0;
ccw->count = 1;
}
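/*
 * Index arithmetic example: RAW3215_BUFFER_SIZE is a power of two, so the
 * ring buffer wraps by masking. With head = 10, count = 30 and written = 20,
 * req->start = (10 - 30 + 20) & 0xffff = 0, i.e. the request begins at the
 * oldest byte that has been buffered but not yet handed to a write request.
 */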
/*
* Start a read or a write request
*/
static void raw3215_start_io(struct raw3215_info *raw)
{
struct raw3215_req *req;
int res;
req = raw->queued_read;
if (req != NULL &&
!(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
/* dequeue request */
raw->queued_read = NULL;
res = ccw_device_start(raw->cdev, req->ccws,
(unsigned long) req, 0, 0);
if (res != 0) {
/* do_IO failed, put request back to queue */
raw->queued_read = req;
} else {
raw->flags |= RAW3215_WORKING;
}
}
req = raw->queued_write;
if (req != NULL &&
!(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
/* dequeue request */
raw->queued_write = NULL;
res = ccw_device_start(raw->cdev, req->ccws,
(unsigned long) req, 0, 0);
if (res != 0) {
/* do_IO failed, put request back to queue */
raw->queued_write = req;
} else {
raw->flags |= RAW3215_WORKING;
}
}
}
/*
 * Function to start delayed output after RAW3215_TIMEOUT jiffies
*/
static void raw3215_timeout(struct timer_list *t)
{
struct raw3215_info *raw = from_timer(raw, t, timer);
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_TIMER_RUNS;
raw3215_mk_write_req(raw);
raw3215_start_io(raw);
if ((raw->queued_read || raw->queued_write) &&
!(raw->flags & RAW3215_WORKING) &&
!(raw->flags & RAW3215_TIMER_RUNS)) {
raw->timer.expires = RAW3215_TIMEOUT + jiffies;
add_timer(&raw->timer);
raw->flags |= RAW3215_TIMER_RUNS;
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
* Function to conditionally start an IO. A read is started immediately,
* a write is only started immediately if the flush flag is on or the
* amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
* done immediately a timer is started with a delay of RAW3215_TIMEOUT.
*/
static inline void raw3215_try_io(struct raw3215_info *raw)
{
if (!tty_port_initialized(&raw->port))
return;
if (raw->queued_read != NULL)
raw3215_start_io(raw);
else if (raw->queued_write != NULL) {
if ((raw->queued_write->delayable == 0) ||
(raw->flags & RAW3215_FLUSHING)) {
/* execute write requests bigger than minimum size */
raw3215_start_io(raw);
}
}
if ((raw->queued_read || raw->queued_write) &&
!(raw->flags & RAW3215_WORKING) &&
!(raw->flags & RAW3215_TIMER_RUNS)) {
raw->timer.expires = RAW3215_TIMEOUT + jiffies;
add_timer(&raw->timer);
raw->flags |= RAW3215_TIMER_RUNS;
}
}
/*
* Try to start the next IO and wake up processes waiting on the tty.
*/
static void raw3215_next_io(struct raw3215_info *raw, struct tty_struct *tty)
{
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
if (tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE)
tty_wakeup(tty);
}
/*
* Interrupt routine, called from common io layer
*/
static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct raw3215_info *raw;
struct raw3215_req *req;
struct tty_struct *tty;
int cstat, dstat;
int count;
raw = dev_get_drvdata(&cdev->dev);
req = (struct raw3215_req *) intparm;
tty = tty_port_tty_get(&raw->port);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat != 0)
raw3215_next_io(raw, tty);
if (dstat & 0x01) { /* we got a unit exception */
dstat &= ~0x01; /* we can ignore it */
}
switch (dstat) {
case 0x80:
if (cstat != 0)
break;
/* Attention interrupt, someone hit the enter key */
raw3215_mk_read_req(raw);
raw3215_next_io(raw, tty);
break;
case 0x08:
case 0x0C:
/* Channel end interrupt. */
if ((raw = req->info) == NULL)
goto put_tty; /* That shouldn't happen ... */
if (req->type == RAW3215_READ) {
/* store residual count, then wait for device end */
req->residual = irb->scsw.cmd.count;
}
if (dstat == 0x08)
break;
fallthrough;
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
goto put_tty; /* That shouldn't happen ... */
if (req->type == RAW3215_READ && tty != NULL) {
unsigned int cchar;
count = 160 - req->residual;
EBCASC(raw->inbuf, count);
cchar = ctrlchar_handle(raw->inbuf, count, tty);
switch (cchar & CTRLCHAR_MASK) {
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
tty_insert_flip_char(&raw->port, cchar,
TTY_NORMAL);
tty_flip_buffer_push(&raw->port);
break;
case CTRLCHAR_NONE:
if (count < 2 ||
(strncmp(raw->inbuf+count-2, "\252n", 2) &&
strncmp(raw->inbuf+count-2, "^n", 2)) ) {
/* add the auto \n */
raw->inbuf[count] = '\n';
count++;
} else
count -= 2;
tty_insert_flip_string(&raw->port, raw->inbuf,
count);
tty_flip_buffer_push(&raw->port);
break;
}
} else if (req->type == RAW3215_WRITE) {
raw->count -= req->len;
raw->written -= req->len;
}
raw->flags &= ~RAW3215_WORKING;
raw3215_free_req(req);
/* check for empty wait */
if (waitqueue_active(&raw->empty_wait) &&
raw->queued_write == NULL &&
raw->queued_read == NULL) {
wake_up_interruptible(&raw->empty_wait);
}
raw3215_next_io(raw, tty);
break;
default:
/* Strange interrupt, I'll do my best to clean up */
if (req != NULL && req->type != RAW3215_FREE) {
if (req->type == RAW3215_WRITE) {
raw->count -= req->len;
raw->written -= req->len;
}
raw->flags &= ~RAW3215_WORKING;
raw3215_free_req(req);
}
raw3215_next_io(raw, tty);
}
put_tty:
tty_kref_put(tty);
}
/*
* Need to drop data to avoid blocking. Drop as much data as possible.
 * This covers both the unqueued part in the buffer and the queued part in
 * the request.
* Also adjust the head position to append new data and set count
* accordingly.
*
* Return number of bytes available in buffer.
*/
static unsigned int raw3215_drop(struct raw3215_info *raw)
{
struct raw3215_req *req;
req = raw->queued_write;
if (req) {
/* Drop queued data and delete request */
raw->written -= req->len;
raw3215_free_req(req);
raw->queued_write = NULL;
}
raw->head = (raw->head - raw->count + raw->written) &
(RAW3215_BUFFER_SIZE - 1);
raw->count = raw->written;
return RAW3215_BUFFER_SIZE - raw->count;
}
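/*
 * Example: with count = 100 buffered bytes, of which 30 belong to a request
 * that is already being worked on and 30 more to a queued (not yet started)
 * request, the queued request is deleted, head is wound back by 70 and count
 * drops to 30; everything not already handed to the channel is discarded.
 */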
/*
 * Wait until length bytes are available in the output buffer.
 * If drop mode is active and the wait condition holds true, start dropping
 * data.
 * Has to be called with the ccw device lock held. Can be called with
 * interrupts disabled.
*/
static unsigned int raw3215_make_room(struct raw3215_info *raw,
unsigned int length, bool drop)
{
while (RAW3215_BUFFER_SIZE - raw->count < length) {
if (drop)
return raw3215_drop(raw);
/* there might be a request pending */
raw->flags |= RAW3215_FLUSHING;
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
#ifdef CONFIG_TN3215_CONSOLE
ccw_device_wait_idle(raw->cdev);
#endif
/* Enough room freed up ? */
if (RAW3215_BUFFER_SIZE - raw->count >= length)
break;
/* there might be another cpu waiting for the lock */
spin_unlock(get_ccwdev_lock(raw->cdev));
udelay(100);
spin_lock(get_ccwdev_lock(raw->cdev));
}
return length;
}
#define RAW3215_COUNT 1
#define RAW3215_STORE 2
/*
* Add text to console buffer. Find tabs in input and calculate size
* including tab replacement.
* This function operates in 2 different modes, depending on parameter
* opmode:
* RAW3215_COUNT: Get the size needed for the input string with
* proper tab replacement calculation.
* Return value is the number of bytes required to store the
* input. However no data is actually stored.
* The parameter todrop is not used.
* RAW3215_STORE: Add data to the console buffer. The parameter todrop is
 * valid and contains the number of bytes to be dropped from the head of
 * the string without blocking.
* Return value is the number of bytes copied.
*/
static unsigned int raw3215_addtext(const char *str, unsigned int length,
struct raw3215_info *raw, int opmode,
unsigned int todrop)
{
unsigned int c, ch, i, blanks, expanded_size = 0;
unsigned int column = raw->line_pos;
if (opmode == RAW3215_COUNT)
todrop = 0;
for (c = 0; c < length; ++c) {
blanks = 1;
ch = str[c];
switch (ch) {
case '\n':
expanded_size++;
column = 0;
break;
case '\t':
blanks = TAB_STOP_SIZE - (column % TAB_STOP_SIZE);
column += blanks;
expanded_size += blanks;
ch = ' ';
break;
default:
expanded_size++;
column++;
break;
}
if (opmode == RAW3215_COUNT)
continue;
if (todrop && expanded_size < todrop) /* Drop head data */
continue;
for (i = 0; i < blanks; i++) {
raw->buffer[raw->head] = (char)_ascebc[(int)ch];
raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
raw->count++;
}
raw->line_pos = column;
}
return expanded_size - todrop;
}
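/*
 * Tab expansion example: at column 5 a '\t' becomes
 * TAB_STOP_SIZE - (5 % TAB_STOP_SIZE) = 3 blanks, advancing the column to
 * the next multiple of 8; '\n' resets the column to 0.
 */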
/*
* String write routine for 3215 devices
*/
static void raw3215_write(struct raw3215_info *raw, const char *str,
unsigned int length)
{
unsigned int count, avail;
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
count = raw3215_addtext(str, length, raw, RAW3215_COUNT, 0);
avail = raw3215_make_room(raw, count, con3215_drop);
if (avail) {
raw3215_addtext(str, length, raw, RAW3215_STORE,
count - avail);
}
if (!(raw->flags & RAW3215_WORKING)) {
raw3215_mk_write_req(raw);
/* start or queue request */
raw3215_try_io(raw);
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
* Put character routine for 3215 devices
*/
static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
{
raw3215_write(raw, &ch, 1);
}
/*
* Flush routine, it simply sets the flush flag and tries to start
* pending IO.
*/
static void raw3215_flush_buffer(struct raw3215_info *raw)
{
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if (raw->count > 0) {
raw->flags |= RAW3215_FLUSHING;
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
/*
* Fire up a 3215 device.
*/
static int raw3215_startup(struct raw3215_info *raw)
{
unsigned long flags;
if (tty_port_initialized(&raw->port))
return 0;
raw->line_pos = 0;
tty_port_set_initialized(&raw->port, true);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
return 0;
}
/*
* Shutdown a 3215 device.
*/
static void raw3215_shutdown(struct raw3215_info *raw)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
if (!tty_port_initialized(&raw->port) || (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
if ((raw->flags & RAW3215_WORKING) ||
raw->queued_write != NULL ||
raw->queued_read != NULL) {
add_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
schedule();
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
tty_port_set_initialized(&raw->port, true);
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
static struct raw3215_info *raw3215_alloc_info(void)
{
struct raw3215_info *info;
info = kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
if (!info)
return NULL;
info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
if (!info->buffer || !info->inbuf) {
kfree(info->inbuf);
kfree(info->buffer);
kfree(info);
return NULL;
}
timer_setup(&info->timer, raw3215_timeout, 0);
init_waitqueue_head(&info->empty_wait);
tty_port_init(&info->port);
return info;
}
static void raw3215_free_info(struct raw3215_info *raw)
{
kfree(raw->inbuf);
kfree(raw->buffer);
tty_port_destroy(&raw->port);
kfree(raw);
}
static int raw3215_probe(struct ccw_device *cdev)
{
struct raw3215_info *raw;
int line;
/* Console is special. */
if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
return 0;
raw = raw3215_alloc_info();
if (raw == NULL)
return -ENOMEM;
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
spin_lock(&raw3215_device_lock);
for (line = 0; line < NR_3215; line++) {
if (!raw3215[line]) {
raw3215[line] = raw;
break;
}
}
spin_unlock(&raw3215_device_lock);
if (line == NR_3215) {
raw3215_free_info(raw);
return -ENODEV;
}
return 0;
}
static void raw3215_remove(struct ccw_device *cdev)
{
struct raw3215_info *raw;
unsigned int line;
ccw_device_set_offline(cdev);
raw = dev_get_drvdata(&cdev->dev);
if (raw) {
spin_lock(&raw3215_device_lock);
for (line = 0; line < NR_3215; line++)
if (raw3215[line] == raw)
break;
raw3215[line] = NULL;
spin_unlock(&raw3215_device_lock);
dev_set_drvdata(&cdev->dev, NULL);
raw3215_free_info(raw);
}
}
static int raw3215_set_online(struct ccw_device *cdev)
{
struct raw3215_info *raw;
raw = dev_get_drvdata(&cdev->dev);
if (!raw)
return -ENODEV;
return raw3215_startup(raw);
}
static int raw3215_set_offline(struct ccw_device *cdev)
{
struct raw3215_info *raw;
raw = dev_get_drvdata(&cdev->dev);
if (!raw)
return -ENODEV;
raw3215_shutdown(raw);
return 0;
}
static struct ccw_device_id raw3215_id[] = {
{ CCW_DEVICE(0x3215, 0) },
{ /* end of list */ },
};
static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
{
bool drop;
int rc;
rc = kstrtobool(buf, &drop);
if (!rc)
con3215_drop = drop;
return rc ?: count;
}
static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
return sysfs_emit(buf, "%d\n", con3215_drop ? 1 : 0);
}
static DRIVER_ATTR_RW(con_drop);
static struct attribute *con3215_drv_attrs[] = {
&driver_attr_con_drop.attr,
NULL,
};
static struct attribute_group con3215_drv_attr_group = {
.attrs = con3215_drv_attrs,
NULL,
};
static const struct attribute_group *con3215_drv_attr_groups[] = {
&con3215_drv_attr_group,
NULL,
};
static struct ccw_driver raw3215_ccw_driver = {
.driver = {
.name = "3215",
.groups = con3215_drv_attr_groups,
.owner = THIS_MODULE,
},
.ids = raw3215_id,
.probe = &raw3215_probe,
.remove = &raw3215_remove,
.set_online = &raw3215_set_online,
.set_offline = &raw3215_set_offline,
.int_class = IRQIO_C15,
};
static void handle_write(struct raw3215_info *raw, const char *str, int count)
{
int i;
while (count > 0) {
i = min_t(int, count, RAW3215_BUFFER_SIZE - 1);
raw3215_write(raw, str, i);
count -= i;
str += i;
}
}
#ifdef CONFIG_TN3215_CONSOLE
/*
* Write a string to the 3215 console
*/
static void con3215_write(struct console *co, const char *str, unsigned int count)
{
handle_write(raw3215[0], str, count);
}
static struct tty_driver *con3215_device(struct console *c, int *index)
{
*index = c->index;
return tty3215_driver;
}
/*
* The below function is called as a panic/reboot notifier before the
* system enters a disabled, endless loop.
*
* Notice we must use the spin_trylock() alternative, to prevent lockups
* in atomic context (panic routine runs with secondary CPUs, local IRQs
* and preemption disabled).
*/
static int con3215_notify(struct notifier_block *self,
unsigned long event, void *data)
{
struct raw3215_info *raw;
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
if (!spin_trylock_irqsave(get_ccwdev_lock(raw->cdev), flags))
return NOTIFY_DONE;
raw3215_make_room(raw, RAW3215_BUFFER_SIZE, false);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3215_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3215_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
/*
* The console structure for the 3215 console
*/
static struct console con3215 = {
.name = "ttyS",
.write = con3215_write,
.device = con3215_device,
.flags = CON_PRINTBUFFER,
};
/*
* 3215 console initialization code called from console_init().
*/
static int __init con3215_init(void)
{
struct ccw_device *cdev;
struct raw3215_info *raw;
struct raw3215_req *req;
int i;
/* Check if 3215 is to be the console */
if (!CONSOLE_IS_3215)
return -ENODEV;
/* Set the console mode for VM */
if (MACHINE_IS_VM) {
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
/* allocate 3215 request structures */
raw3215_freelist = NULL;
for (i = 0; i < NR_3215_REQ; i++) {
req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
if (!req)
return -ENOMEM;
req->next = raw3215_freelist;
raw3215_freelist = req;
}
cdev = ccw_device_create_console(&raw3215_ccw_driver);
if (IS_ERR(cdev))
return -ENODEV;
raw3215[0] = raw = raw3215_alloc_info();
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
if (ccw_device_enable_console(cdev)) {
ccw_device_destroy_console(cdev);
raw3215_free_info(raw);
raw3215[0] = NULL;
return -ENODEV;
}
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);
raw3215[0] = NULL;
return -ENODEV;
}
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3215);
return 0;
}
console_initcall(con3215_init);
#endif
static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct raw3215_info *raw;
raw = raw3215[tty->index];
if (raw == NULL)
return -ENODEV;
tty->driver_data = raw;
return tty_port_install(&raw->port, driver, tty);
}
/*
* tty3215_open
*
* This routine is called whenever a 3215 tty is opened.
*/
static int tty3215_open(struct tty_struct *tty, struct file * filp)
{
struct raw3215_info *raw = tty->driver_data;
tty_port_tty_set(&raw->port, tty);
/*
* Start up 3215 device
*/
return raw3215_startup(raw);
}
/*
* tty3215_close()
*
* This routine is called when the 3215 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static void tty3215_close(struct tty_struct *tty, struct file * filp)
{
struct raw3215_info *raw = tty->driver_data;
if (raw == NULL || tty->count > 1)
return;
tty->closing = 1;
/* Shutdown the terminal */
raw3215_shutdown(raw);
tty->closing = 0;
tty_port_tty_set(&raw->port, NULL);
}
/*
* Returns the amount of free space in the output buffer.
*/
static unsigned int tty3215_write_room(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
	/* Subtract TAB_STOP_SIZE to allow for a tab (8 bytes vs. the 64K buffer) */
if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
else
return 0;
}
/*
* String write routine for 3215 ttys
*/
static ssize_t tty3215_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
handle_write(tty->driver_data, buf, count);
return count;
}
/*
* Put character routine for 3215 ttys
*/
static int tty3215_put_char(struct tty_struct *tty, u8 ch)
{
struct raw3215_info *raw = tty->driver_data;
raw3215_putchar(raw, ch);
return 1;
}
static void tty3215_flush_chars(struct tty_struct *tty)
{
}
/*
* Returns the number of characters in the output buffer
*/
static unsigned int tty3215_chars_in_buffer(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
return raw->count;
}
static void tty3215_flush_buffer(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
raw3215_flush_buffer(raw);
tty_wakeup(tty);
}
/*
* Disable reading from a 3215 tty
*/
static void tty3215_throttle(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
raw->flags |= RAW3215_THROTTLED;
}
/*
* Enable reading from a 3215 tty
*/
static void tty3215_unthrottle(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
unsigned long flags;
if (raw->flags & RAW3215_THROTTLED) {
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_THROTTLED;
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
/*
* Disable writing to a 3215 tty
*/
static void tty3215_stop(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
raw->flags |= RAW3215_STOPPED;
}
/*
* Enable writing to a 3215 tty
*/
static void tty3215_start(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
unsigned long flags;
if (raw->flags & RAW3215_STOPPED) {
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_STOPPED;
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
}
static const struct tty_operations tty3215_ops = {
.install = tty3215_install,
.open = tty3215_open,
.close = tty3215_close,
.write = tty3215_write,
.put_char = tty3215_put_char,
.flush_chars = tty3215_flush_chars,
.write_room = tty3215_write_room,
.chars_in_buffer = tty3215_chars_in_buffer,
.flush_buffer = tty3215_flush_buffer,
.throttle = tty3215_throttle,
.unthrottle = tty3215_unthrottle,
.stop = tty3215_stop,
.start = tty3215_start,
};
static int __init con3215_setup_drop(char *str)
{
bool drop;
int rc;
rc = kstrtobool(str, &drop);
if (!rc)
con3215_drop = drop;
return rc;
}
early_param("con3215_drop", con3215_setup_drop);
/*
* 3215 tty registration code called from tty_init().
 * Most kernel services (incl. kmalloc) are available at this point.
*/
static int __init tty3215_init(void)
{
struct tty_driver *driver;
int ret;
if (!CONSOLE_IS_3215)
return 0;
driver = tty_alloc_driver(NR_3215, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
return PTR_ERR(driver);
ret = ccw_driver_register(&raw3215_ccw_driver);
if (ret) {
tty_driver_kref_put(driver);
return ret;
}
/*
* Initialize the tty_driver structure
* Entries in tty3215_driver that are NOT initialized:
* proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
*/
driver->driver_name = "tty3215";
driver->name = "ttyS";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->init_termios.c_iflag = IGNBRK | IGNPAR;
driver->init_termios.c_oflag = ONLCR;
driver->init_termios.c_lflag = ISIG;
tty_set_operations(driver, &tty3215_ops);
ret = tty_register_driver(driver);
if (ret) {
tty_driver_kref_put(driver);
return ret;
}
tty3215_driver = driver;
return 0;
}
device_initcall(tty3215_init);
| linux-master | drivers/s390/char/con3215.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Unified handling of special chars.
*
* Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <[email protected]> <[email protected]>
*
*/
#include <linux/stddef.h>
#include <asm/errno.h>
#include <linux/sysrq.h>
#include <linux/ctype.h>
#include "ctrlchar.h"
#ifdef CONFIG_MAGIC_SYSRQ
static struct sysrq_work ctrlchar_sysrq;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
handle_sysrq(sysrq->key);
}
void schedule_sysrq_work(struct sysrq_work *sw)
{
INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
schedule_work(&sw->work);
}
#endif
/**
* ctrlchar_handle - check for special chars at start of input
*
* @buf: console input buffer
* @len: length of valid data in buffer
* @tty: the tty struct for this console
*
* Return: CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL
*/
unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
{
if ((len < 2) || (len > 3))
return CTRLCHAR_NONE;
/* hat is 0xb1 in codepage 037 (US etc.) and thus */
/* converted to 0x5e in ascii ('^') */
if ((buf[0] != '^') && (buf[0] != '\252'))
return CTRLCHAR_NONE;
#ifdef CONFIG_MAGIC_SYSRQ
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq.key = buf[2];
schedule_sysrq_work(&ctrlchar_sysrq);
return CTRLCHAR_SYSRQ;
}
#endif
if (len != 2)
return CTRLCHAR_NONE;
switch (tolower(buf[1])) {
case 'c':
return INTR_CHAR(tty) | CTRLCHAR_CTRL;
case 'd':
return EOF_CHAR(tty) | CTRLCHAR_CTRL;
case 'z':
return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
}
return CTRLCHAR_NONE;
}
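/*
 * Examples: the two-byte sequence "^c" yields INTR_CHAR(tty) | CTRLCHAR_CTRL,
 * "^d" the EOF character and "^z" the suspend character; with
 * CONFIG_MAGIC_SYSRQ the three-byte sequence "^-b" schedules sysrq key 'b'.
 * Anything else returns CTRLCHAR_NONE.
 */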
| linux-master | drivers/s390/char/ctrlchar.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - tty functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <[email protected]>
* -- Copyright IBM Corp. 2003
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/uaccess.h>
#include "raw3270.h"
#include "keyboard.h"
#define TTY3270_CHAR_BUF_SIZE 256
#define TTY3270_OUTPUT_BUFFER_SIZE 4096
#define TTY3270_SCREEN_PAGES 8 /* has to be power-of-two */
#define TTY3270_RECALL_SIZE 16 /* has to be power-of-two */
#define TTY3270_STATUS_AREA_SIZE 40
static struct tty_driver *tty3270_driver;
static int tty3270_max_index;
static struct raw3270_fn tty3270_fn;
#define TTY3270_HIGHLIGHT_BLINK 1
#define TTY3270_HIGHLIGHT_REVERSE 2
#define TTY3270_HIGHLIGHT_UNDERSCORE 4
struct tty3270_attribute {
unsigned char alternate_charset:1; /* Graphics charset */
unsigned char highlight:3; /* Blink/reverse/underscore */
unsigned char f_color:4; /* Foreground color */
unsigned char b_color:4; /* Background color */
};
struct tty3270_cell {
unsigned char character;
struct tty3270_attribute attributes;
};
struct tty3270_line {
struct tty3270_cell *cells;
int len;
int dirty;
};
static const unsigned char sfq_read_partition[] = {
0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81
};
#define ESCAPE_NPAR 8
/*
* The main tty view data structure.
* FIXME:
* 1) describe line orientation & lines list concept against screen
* 2) describe conversion of screen to lines
* 3) describe line format.
*/
struct tty3270 {
struct raw3270_view view;
struct tty_port port;
/* Output stuff. */
unsigned char wcc; /* Write control character. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct raw3270_request *write; /* Single write request. */
struct timer_list timer; /* Output delay timer. */
char *converted_line; /* RAW 3270 data stream */
unsigned int line_view_start; /* Start of visible area */
unsigned int line_write_start; /* current write position */
unsigned int oops_line; /* line counter used when print oops */
/* Current tty screen. */
unsigned int cx, cy; /* Current output position. */
struct tty3270_attribute attributes;
struct tty3270_attribute saved_attributes;
int allocated_lines;
struct tty3270_line *screen;
/* Input stuff. */
char *prompt; /* Output string for input area. */
char *input; /* Input string for read request. */
struct raw3270_request *read; /* Single read request. */
struct raw3270_request *kreset; /* Single keyboard reset request. */
struct raw3270_request *readpartreq;
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
int esc_state, esc_ques, esc_npar;
int esc_par[ESCAPE_NPAR];
unsigned int saved_cx, saved_cy;
/* Command recalling. */
char **rcl_lines; /* Array of recallable lines */
int rcl_write_index; /* Write index of recallable items */
int rcl_read_index; /* Read index of recallable items */
/* Character array for put_char/flush_chars. */
unsigned int char_count;
char char_buf[TTY3270_CHAR_BUF_SIZE];
};
/* tty3270->update_flags. See tty3270_update for details. */
#define TTY_UPDATE_INPUT 0x1 /* Update input line. */
#define TTY_UPDATE_STATUS 0x2 /* Update status line. */
#define TTY_UPDATE_LINES 0x4 /* Update visible screen lines */
#define TTY_UPDATE_ALL 0x7 /* Recreate screen. */
#define TTY3270_INPUT_AREA_ROWS 2
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
mod_timer(&tp->timer, jiffies + expires);
}
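/* Number of screen rows available for tty output (input and status area excluded). */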
static int tty3270_tty_rows(struct tty3270 *tp)
{
return tp->view.rows - TTY3270_INPUT_AREA_ROWS;
}
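/*
 * Helpers that append 3270 data stream orders (set/repeat buffer address,
 * set attribute, graphic escape, start field) to a conversion buffer.
 */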
static char *tty3270_add_ba(struct tty3270 *tp, char *cp, char order, int x, int y)
{
*cp++ = order;
raw3270_buffer_address(tp->view.dev, cp, x, y);
return cp + 2;
}
static char *tty3270_add_ra(struct tty3270 *tp, char *cp, int x, int y, char c)
{
cp = tty3270_add_ba(tp, cp, TO_RA, x, y);
*cp++ = c;
return cp;
}
static char *tty3270_add_sa(struct tty3270 *tp, char *cp, char attr, char value)
{
*cp++ = TO_SA;
*cp++ = attr;
*cp++ = value;
return cp;
}
static char *tty3270_add_ge(struct tty3270 *tp, char *cp, char c)
{
*cp++ = TO_GE;
*cp++ = c;
return cp;
}
static char *tty3270_add_sf(struct tty3270 *tp, char *cp, char type)
{
*cp++ = TO_SF;
*cp++ = type;
return cp;
}
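/*
 * The screen is a ring buffer of allocated_lines entries (a power of two);
 * lines are addressed relative to the write or the view start position.
 */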
static int tty3270_line_increment(struct tty3270 *tp, unsigned int line, unsigned int incr)
{
return (line + incr) & (tp->allocated_lines - 1);
}
static struct tty3270_line *tty3270_get_write_line(struct tty3270 *tp, unsigned int num)
{
return tp->screen + tty3270_line_increment(tp, tp->line_write_start, num);
}
static struct tty3270_line *tty3270_get_view_line(struct tty3270 *tp, unsigned int num)
{
return tp->screen + tty3270_line_increment(tp, tp->line_view_start, num - tp->nr_up);
}
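/* Usable size of the input field: two screen rows minus 11 bytes of overhead. */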
static int tty3270_input_size(int cols)
{
return cols * 2 - 11;
}
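/* Replace the prompt string and schedule an update of the input line. */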
static void tty3270_update_prompt(struct tty3270 *tp, char *input)
{
strcpy(tp->prompt, input);
tp->update_flags |= TTY_UPDATE_INPUT;
tty3270_set_timer(tp, 1);
}
/*
 * The input area occupies the last two lines of the screen.
*/
static int tty3270_add_prompt(struct tty3270 *tp)
{
int count = 0;
char *cp;
cp = tp->converted_line;
cp = tty3270_add_ba(tp, cp, TO_SBA, 0, -2);
*cp++ = tp->view.ascebc['>'];
if (*tp->prompt) {
cp = tty3270_add_sf(tp, cp, TF_INMDT);
count = min_t(int, strlen(tp->prompt),
tp->view.cols * 2 - TTY3270_STATUS_AREA_SIZE - 2);
memcpy(cp, tp->prompt, count);
cp += count;
} else {
cp = tty3270_add_sf(tp, cp, tp->inattr);
}
*cp++ = TO_IC;
/* Clear to end of input line. */
if (count < tp->view.cols * 2 - 11)
cp = tty3270_add_ra(tp, cp, -TTY3270_STATUS_AREA_SIZE, -1, 0);
return cp - tp->converted_line;
}
static char *tty3270_ebcdic_convert(struct tty3270 *tp, char *d, char *s)
{
while (*s)
*d++ = tp->view.ascebc[(int)*s++];
return d;
}
/*
* The status line is the last line of the screen. It shows the string
* "Running"/"History X" in the lower right corner of the screen.
*/
static int tty3270_add_status(struct tty3270 *tp)
{
char *cp = tp->converted_line;
int len;
cp = tty3270_add_ba(tp, cp, TO_SBA, -TTY3270_STATUS_AREA_SIZE, -1);
cp = tty3270_add_sf(tp, cp, TF_LOG);
cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAC_GREEN);
cp = tty3270_ebcdic_convert(tp, cp, " 7");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
cp = tty3270_ebcdic_convert(tp, cp, "PrevPg");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
cp = tty3270_ebcdic_convert(tp, cp, " 8");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
cp = tty3270_ebcdic_convert(tp, cp, "NextPg");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
cp = tty3270_ebcdic_convert(tp, cp, " 12");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
cp = tty3270_ebcdic_convert(tp, cp, "Recall");
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
cp = tty3270_ebcdic_convert(tp, cp, " ");
if (tp->nr_up) {
len = sprintf(cp, "History %d", -tp->nr_up);
codepage_convert(tp->view.ascebc, cp, len);
cp += len;
} else {
cp = tty3270_ebcdic_convert(tp, cp, oops_in_progress ? "Crashed" : "Running");
}
cp = tty3270_add_sf(tp, cp, TF_LOG);
cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAC_RESET);
return cp - (char *)tp->converted_line;
}
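/* Empty and dirty all lines of the current output window and reset scrolling. */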
static void tty3270_blank_screen(struct tty3270 *tp)
{
struct tty3270_line *line;
int i;
for (i = 0; i < tty3270_tty_rows(tp); i++) {
line = tty3270_get_write_line(tp, i);
line->len = 0;
line->dirty = 1;
}
tp->nr_up = 0;
}
/*
* Write request completion callback.
*/
static void tty3270_write_callback(struct raw3270_request *rq, void *data)
{
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
if (rq->rc != 0) {
/* Write wasn't successful. Refresh all. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
raw3270_request_reset(rq);
xchg(&tp->write, rq);
}
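/*
 * Worst-case length of the 3270 data fragment for one line: SBA prefix,
 * the characters, a TO_SA/TO_GE order per attribute or charset change,
 * and an optional TO_RA postfix to clear the rest of the line.
 */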
static int tty3270_required_length(struct tty3270 *tp, struct tty3270_line *line)
{
unsigned char f_color, b_color, highlight;
struct tty3270_cell *cell;
int i, flen = 3; /* Prefix (TO_SBA). */
flen += line->len;
highlight = 0;
f_color = TAC_RESET;
b_color = TAC_RESET;
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->attributes.highlight != highlight) {
flen += 3; /* TO_SA to switch highlight. */
highlight = cell->attributes.highlight;
}
if (cell->attributes.f_color != f_color) {
flen += 3; /* TO_SA to switch color. */
f_color = cell->attributes.f_color;
}
if (cell->attributes.b_color != b_color) {
flen += 3; /* TO_SA to switch color. */
b_color = cell->attributes.b_color;
}
if (cell->attributes.alternate_charset)
flen += 1; /* TO_GE to switch to graphics extensions */
}
if (highlight)
flen += 3; /* TO_SA to reset highlight. */
if (f_color != TAC_RESET)
flen += 3; /* TO_SA to reset color. */
if (b_color != TAC_RESET)
flen += 3; /* TO_SA to reset color. */
if (line->len < tp->view.cols)
flen += 4; /* Postfix (TO_RA). */
return flen;
}
static char *tty3270_add_reset_attributes(struct tty3270 *tp, struct tty3270_line *line,
char *cp, struct tty3270_attribute *attr, int lineno)
{
if (attr->highlight)
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
if (attr->f_color != TAC_RESET)
cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAX_RESET);
if (attr->b_color != TAC_RESET)
cp = tty3270_add_sa(tp, cp, TAT_BGCOLOR, TAX_RESET);
if (line->len < tp->view.cols)
cp = tty3270_add_ra(tp, cp, 0, lineno + 1, 0);
return cp;
}
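/* Translate VT100 line-drawing characters to 3270 graphic escape code points. */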
static char tty3270_graphics_translate(struct tty3270 *tp, char ch)
{
switch (ch) {
case 'q': /* - */
return 0xa2;
case 'x': /* '|' */
return 0x85;
case 'l': /* |- */
return 0xc5;
case 't': /* |_ */
return 0xc6;
case 'u': /* _| */
return 0xd6;
case 'k': /* -| */
return 0xd5;
case 'j':
return 0xd4;
case 'm':
return 0xc4;
case 'n': /* + */
return 0xd3;
case 'v':
return 0xc7;
case 'w':
return 0xd7;
default:
return ch;
}
}
static char *tty3270_add_attributes(struct tty3270 *tp, struct tty3270_line *line,
struct tty3270_attribute *attr, char *cp, int lineno)
{
const unsigned char colors[16] = {
[0] = TAC_DEFAULT,
[1] = TAC_RED,
[2] = TAC_GREEN,
[3] = TAC_YELLOW,
[4] = TAC_BLUE,
[5] = TAC_PINK,
[6] = TAC_TURQ,
[7] = TAC_WHITE,
[9] = TAC_DEFAULT
};
const unsigned char highlights[8] = {
[TTY3270_HIGHLIGHT_BLINK] = TAX_BLINK,
[TTY3270_HIGHLIGHT_REVERSE] = TAX_REVER,
[TTY3270_HIGHLIGHT_UNDERSCORE] = TAX_UNDER,
};
struct tty3270_cell *cell;
int c, i;
cp = tty3270_add_ba(tp, cp, TO_SBA, 0, lineno);
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->attributes.highlight != attr->highlight) {
attr->highlight = cell->attributes.highlight;
cp = tty3270_add_sa(tp, cp, TAT_EXTHI, highlights[attr->highlight]);
}
if (cell->attributes.f_color != attr->f_color) {
attr->f_color = cell->attributes.f_color;
cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, colors[attr->f_color]);
}
if (cell->attributes.b_color != attr->b_color) {
attr->b_color = cell->attributes.b_color;
cp = tty3270_add_sa(tp, cp, TAT_BGCOLOR, colors[attr->b_color]);
}
c = cell->character;
if (cell->attributes.alternate_charset)
cp = tty3270_add_ge(tp, cp, tty3270_graphics_translate(tp, c));
else
*cp++ = tp->view.ascebc[c];
}
return cp;
}
static void tty3270_reset_attributes(struct tty3270_attribute *attr)
{
attr->highlight = TAX_RESET;
attr->f_color = TAC_RESET;
attr->b_color = TAC_RESET;
}
/*
* Convert a tty3270_line to a 3270 data fragment usable for output.
*/
static unsigned int tty3270_convert_line(struct tty3270 *tp, struct tty3270_line *line, int lineno)
{
struct tty3270_attribute attr;
int flen;
char *cp;
/* Determine how long the fragment will be. */
flen = tty3270_required_length(tp, line);
if (flen > PAGE_SIZE)
return 0;
/* Write 3270 data fragment. */
tty3270_reset_attributes(&attr);
cp = tty3270_add_attributes(tp, line, &attr, tp->converted_line, lineno);
cp = tty3270_add_reset_attributes(tp, line, cp, &attr, lineno);
return cp - (char *)tp->converted_line;
}
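/* Add all dirty lines of the visible screen area to the write request. */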
static void tty3270_update_lines_visible(struct tty3270 *tp, struct raw3270_request *rq)
{
struct tty3270_line *line;
int len, i;
for (i = 0; i < tty3270_tty_rows(tp); i++) {
line = tty3270_get_view_line(tp, i);
if (!line->dirty)
continue;
len = tty3270_convert_line(tp, line, i);
if (raw3270_request_add_data(rq, tp->converted_line, len))
break;
line->dirty = 0;
}
if (i == tty3270_tty_rows(tp)) {
for (i = 0; i < tp->allocated_lines; i++)
tp->screen[i].dirty = 0;
tp->update_flags &= ~TTY_UPDATE_LINES;
}
}
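/* Oops/panic path: write out every allocated line, starting after the current cursor line. */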
static void tty3270_update_lines_all(struct tty3270 *tp, struct raw3270_request *rq)
{
struct tty3270_line *line;
char buf[4];
int len, i;
for (i = 0; i < tp->allocated_lines; i++) {
line = tty3270_get_write_line(tp, i + tp->cy + 1);
if (!line->dirty)
continue;
len = tty3270_convert_line(tp, line, tp->oops_line);
if (raw3270_request_add_data(rq, tp->converted_line, len))
break;
line->dirty = 0;
if (++tp->oops_line >= tty3270_tty_rows(tp))
tp->oops_line = 0;
}
if (i == tp->allocated_lines) {
if (tp->oops_line < tty3270_tty_rows(tp)) {
tty3270_add_ra(tp, buf, 0, tty3270_tty_rows(tp), 0);
if (raw3270_request_add_data(rq, buf, sizeof(buf)))
return;
}
tp->update_flags &= ~TTY_UPDATE_LINES;
}
}
/*
* Update 3270 display.
*/
static void tty3270_update(struct timer_list *t)
{
struct tty3270 *tp = from_timer(tp, t, timer);
struct raw3270_request *wrq;
u8 cmd = TC_WRITE;
int rc, len;
wrq = xchg(&tp->write, 0);
if (!wrq) {
tty3270_set_timer(tp, 1);
return;
}
spin_lock_irq(&tp->view.lock);
if (tp->update_flags == TTY_UPDATE_ALL)
cmd = TC_EWRITEA;
raw3270_request_set_cmd(wrq, cmd);
raw3270_request_add_data(wrq, &tp->wcc, 1);
tp->wcc = TW_NONE;
/*
* Update status line.
*/
if (tp->update_flags & TTY_UPDATE_STATUS) {
len = tty3270_add_status(tp);
if (raw3270_request_add_data(wrq, tp->converted_line, len) == 0)
tp->update_flags &= ~TTY_UPDATE_STATUS;
}
/*
* Write input line.
*/
if (tp->update_flags & TTY_UPDATE_INPUT) {
len = tty3270_add_prompt(tp);
if (raw3270_request_add_data(wrq, tp->converted_line, len) == 0)
tp->update_flags &= ~TTY_UPDATE_INPUT;
}
if (tp->update_flags & TTY_UPDATE_LINES) {
if (oops_in_progress)
tty3270_update_lines_all(tp, wrq);
else
tty3270_update_lines_visible(tp, wrq);
}
wrq->callback = tty3270_write_callback;
rc = raw3270_start(&tp->view, wrq);
if (rc == 0) {
if (tp->update_flags)
tty3270_set_timer(tp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&tp->write, wrq);
}
spin_unlock_irq(&tp->view.lock);
}
/*
* Command recalling.
*/
static void tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
{
char *p;
if (len <= 0)
return;
p = tp->rcl_lines[tp->rcl_write_index++];
tp->rcl_write_index &= TTY3270_RECALL_SIZE - 1;
memcpy(p, input, len);
p[len] = '\0';
tp->rcl_read_index = tp->rcl_write_index;
}
static void tty3270_rcl_backward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
int i = 0;
spin_lock_irq(&tp->view.lock);
if (tp->inattr == TF_INPUT) {
do {
tp->rcl_read_index--;
tp->rcl_read_index &= TTY3270_RECALL_SIZE - 1;
} while (!*tp->rcl_lines[tp->rcl_read_index] &&
i++ < TTY3270_RECALL_SIZE - 1);
tty3270_update_prompt(tp, tp->rcl_lines[tp->rcl_read_index]);
}
spin_unlock_irq(&tp->view.lock);
}
/*
* Deactivate tty view.
*/
static void tty3270_exit_tty(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
raw3270_deactivate_view(&tp->view);
}
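/* Mark every visible line dirty and schedule a full screen rebuild. */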
static void tty3270_redraw(struct tty3270 *tp)
{
int i;
for (i = 0; i < tty3270_tty_rows(tp); i++)
tty3270_get_view_line(tp, i)->dirty = 1;
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
/*
* Scroll forward in history.
*/
static void tty3270_scroll_forward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
spin_lock_irq(&tp->view.lock);
if (tp->nr_up >= tty3270_tty_rows(tp))
tp->nr_up -= tty3270_tty_rows(tp) / 2;
else
tp->nr_up = 0;
tty3270_redraw(tp);
spin_unlock_irq(&tp->view.lock);
}
/*
* Scroll backward in history.
*/
static void tty3270_scroll_backward(struct kbd_data *kbd)
{
struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
spin_lock_irq(&tp->view.lock);
tp->nr_up += tty3270_tty_rows(tp) / 2;
if (tp->nr_up > tp->allocated_lines - tty3270_tty_rows(tp))
tp->nr_up = tp->allocated_lines - tty3270_tty_rows(tp);
tty3270_redraw(tp);
spin_unlock_irq(&tp->view.lock);
}
/*
* Pass input line to tty.
*/
static void tty3270_read_tasklet(unsigned long data)
{
struct raw3270_request *rrq = (struct raw3270_request *)data;
static char kreset_data = TW_KR;
struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
char *input;
int len;
spin_lock_irq(&tp->view.lock);
/*
* Two AID keys are special: For 0x7d (enter) the input line
* has to be emitted to the tty and for 0x6d the screen
* needs to be redrawn.
*/
input = NULL;
len = 0;
switch (tp->input[0]) {
case AID_ENTER:
/* Enter: write input to tty. */
input = tp->input + 6;
len = tty3270_input_size(tp->view.cols) - 6 - rrq->rescnt;
if (tp->inattr != TF_INPUTN)
tty3270_rcl_add(tp, input, len);
if (tp->nr_up > 0)
tp->nr_up = 0;
/* Clear input area. */
tty3270_update_prompt(tp, "");
tty3270_set_timer(tp, 1);
break;
case AID_CLEAR:
/* Display has been cleared. Redraw. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
if (!list_empty(&tp->readpartreq->list))
break;
raw3270_start_request(&tp->view, tp->readpartreq, TC_WRITESF,
(char *)sfq_read_partition, sizeof(sfq_read_partition));
break;
case AID_READ_PARTITION:
raw3270_read_modified_cb(tp->readpartreq, tp->input);
break;
default:
break;
}
spin_unlock_irq(&tp->view.lock);
/* Start keyboard reset command. */
raw3270_start_request(&tp->view, tp->kreset, TC_WRITE, &kreset_data, 1);
while (len-- > 0)
kbd_keycode(tp->kbd, *input++);
/* Emit keycode for AID byte. */
kbd_keycode(tp->kbd, 256 + tp->input[0]);
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
raw3270_put_view(&tp->view);
}
/*
* Read request completion callback.
*/
static void tty3270_read_callback(struct raw3270_request *rq, void *data)
{
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
tasklet_schedule(&tp->readlet);
}
/*
 * Issue a read request. Called with the device lock held.
*/
static void tty3270_issue_read(struct tty3270 *tp, int lock)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&tp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = tty3270_read_callback;
rrq->callback_data = tp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, tp->input, tty3270_input_size(tp->view.cols));
/* Issue the read modified request. */
if (lock)
rc = raw3270_start(&tp->view, rrq);
else
rc = raw3270_start_irq(&tp->view, rrq);
if (rc) {
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
}
}
/*
* Hang up the tty
*/
static void tty3270_hangup_tasklet(unsigned long data)
{
struct tty3270 *tp = (struct tty3270 *)data;
tty_port_tty_hangup(&tp->port, true);
raw3270_put_view(&tp->view);
}
/*
* Switch to the tty view.
*/
static int tty3270_activate(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
return 0;
}
static void tty3270_deactivate(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
del_timer(&tp->timer);
}
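/* Interrupt handler of the tty view: handle attention interrupts and request completion. */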
static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
if (!tp->throttle)
tty3270_issue_read(tp, 0);
else
tp->attn = 1;
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
rq->rc = -EIO;
raw3270_get_view(&tp->view);
tasklet_schedule(&tp->hanglet);
} else {
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/* Interrupt without an outstanding request -> update all */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
}
/*
* Allocate tty3270 structure.
*/
static struct tty3270 *tty3270_alloc_view(void)
{
struct tty3270 *tp;
tp = kzalloc(sizeof(*tp), GFP_KERNEL);
if (!tp)
goto out_err;
tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
if (IS_ERR(tp->write))
goto out_tp;
tp->read = raw3270_request_alloc(0);
if (IS_ERR(tp->read))
goto out_write;
tp->kreset = raw3270_request_alloc(1);
if (IS_ERR(tp->kreset))
goto out_read;
tp->readpartreq = raw3270_request_alloc(sizeof(sfq_read_partition));
if (IS_ERR(tp->readpartreq))
goto out_reset;
tp->kbd = kbd_alloc();
if (!tp->kbd)
goto out_readpartreq;
tty_port_init(&tp->port);
timer_setup(&tp->timer, tty3270_update, 0);
tasklet_init(&tp->readlet, tty3270_read_tasklet,
(unsigned long)tp->read);
tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
(unsigned long)tp);
return tp;
out_readpartreq:
raw3270_request_free(tp->readpartreq);
out_reset:
raw3270_request_free(tp->kreset);
out_read:
raw3270_request_free(tp->read);
out_write:
raw3270_request_free(tp->write);
out_tp:
kfree(tp);
out_err:
return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 structure.
*/
static void tty3270_free_view(struct tty3270 *tp)
{
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
raw3270_request_free(tp->write);
free_page((unsigned long)tp->converted_line);
tty_port_destroy(&tp->port);
kfree(tp);
}
/*
* Allocate tty3270 screen.
*/
static struct tty3270_line *tty3270_alloc_screen(struct tty3270 *tp, unsigned int rows,
unsigned int cols, int *allocated_out)
{
struct tty3270_line *screen;
int allocated, lines;
allocated = __roundup_pow_of_two(rows) * TTY3270_SCREEN_PAGES;
screen = kcalloc(allocated, sizeof(struct tty3270_line), GFP_KERNEL);
if (!screen)
goto out_err;
for (lines = 0; lines < allocated; lines++) {
screen[lines].cells = kcalloc(cols, sizeof(struct tty3270_cell), GFP_KERNEL);
if (!screen[lines].cells)
goto out_screen;
}
*allocated_out = allocated;
return screen;
out_screen:
while (lines--)
kfree(screen[lines].cells);
kfree(screen);
out_err:
return ERR_PTR(-ENOMEM);
}
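/* Allocate the ring of recallable input lines, one zero-terminated buffer per slot. */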
static char **tty3270_alloc_recall(int cols)
{
char **lines;
int i;
lines = kmalloc_array(TTY3270_RECALL_SIZE, sizeof(char *), GFP_KERNEL);
if (!lines)
return NULL;
for (i = 0; i < TTY3270_RECALL_SIZE; i++) {
lines[i] = kcalloc(1, tty3270_input_size(cols) + 1, GFP_KERNEL);
if (!lines[i])
break;
}
if (i == TTY3270_RECALL_SIZE)
return lines;
while (i--)
kfree(lines[i]);
kfree(lines);
return NULL;
}
static void tty3270_free_recall(char **lines)
{
int i;
for (i = 0; i < TTY3270_RECALL_SIZE; i++)
kfree(lines[i]);
kfree(lines);
}
/*
* Free tty3270 screen.
*/
static void tty3270_free_screen(struct tty3270_line *screen, int old_lines)
{
int lines;
for (lines = 0; lines < old_lines; lines++)
kfree(screen[lines].cells);
kfree(screen);
}
/*
* Resize tty3270 screen
*/
static void tty3270_resize(struct raw3270_view *view,
int new_model, int new_rows, int new_cols,
int old_model, int old_rows, int old_cols)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
struct tty3270_line *screen, *oscreen;
char **old_rcl_lines, **new_rcl_lines;
char *old_prompt, *new_prompt;
char *old_input, *new_input;
struct tty_struct *tty;
struct winsize ws;
int new_allocated, old_allocated = tp->allocated_lines;
if (old_model == new_model &&
old_cols == new_cols &&
old_rows == new_rows) {
spin_lock_irq(&tp->view.lock);
tty3270_redraw(tp);
spin_unlock_irq(&tp->view.lock);
return;
}
new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA);
if (!new_input)
return;
new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL);
if (!new_prompt)
goto out_input;
screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated);
if (IS_ERR(screen))
goto out_prompt;
new_rcl_lines = tty3270_alloc_recall(new_cols);
if (!new_rcl_lines)
goto out_screen;
/* Switch to new output size */
spin_lock_irq(&tp->view.lock);
tty3270_blank_screen(tp);
oscreen = tp->screen;
tp->screen = screen;
tp->allocated_lines = new_allocated;
tp->view.rows = new_rows;
tp->view.cols = new_cols;
tp->view.model = new_model;
tp->update_flags = TTY_UPDATE_ALL;
old_input = tp->input;
old_prompt = tp->prompt;
old_rcl_lines = tp->rcl_lines;
tp->input = new_input;
tp->prompt = new_prompt;
tp->rcl_lines = new_rcl_lines;
tp->rcl_read_index = 0;
tp->rcl_write_index = 0;
spin_unlock_irq(&tp->view.lock);
tty3270_free_screen(oscreen, old_allocated);
kfree(old_input);
kfree(old_prompt);
tty3270_free_recall(old_rcl_lines);
tty3270_set_timer(tp, 1);
/* Inform the tty layer about the new size */
tty = tty_port_tty_get(&tp->port);
if (!tty)
return;
ws.ws_row = tty3270_tty_rows(tp);
ws.ws_col = tp->view.cols;
tty_do_resize(tty, &ws);
tty_kref_put(tty);
return;
out_screen:
tty3270_free_screen(screen, new_rows);
out_prompt:
kfree(new_prompt);
out_input:
kfree(new_input);
}
/*
* Unlink tty3270 data structure from tty.
*/
static void tty3270_release(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
struct tty_struct *tty = tty_port_tty_get(&tp->port);
if (tty) {
tty->driver_data = NULL;
tty_port_tty_set(&tp->port, NULL);
tty_hangup(tty);
raw3270_put_view(&tp->view);
tty_kref_put(tty);
}
}
/*
* Free tty3270 data structure
*/
static void tty3270_free(struct raw3270_view *view)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
del_timer_sync(&tp->timer);
tty3270_free_screen(tp->screen, tp->allocated_lines);
free_page((unsigned long)tp->converted_line);
kfree(tp->input);
kfree(tp->prompt);
tty3270_free_view(tp);
}
/*
* Delayed freeing of tty3270 views.
*/
static void tty3270_del_views(void)
{
int i;
for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
if (!IS_ERR(view))
raw3270_del_view(view);
}
}
static struct raw3270_fn tty3270_fn = {
.activate = tty3270_activate,
.deactivate = tty3270_deactivate,
.intv = (void *)tty3270_irq,
.release = tty3270_release,
.free = tty3270_free,
.resize = tty3270_resize
};
static int
tty3270_create_view(int index, struct tty3270 **newtp)
{
struct tty3270 *tp;
int rc;
if (tty3270_max_index < index + 1)
tty3270_max_index = index + 1;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
index + RAW3270_FIRSTMINOR,
RAW3270_VIEW_LOCK_IRQ);
if (rc)
goto out_free_view;
tp->screen = tty3270_alloc_screen(tp, tp->view.rows, tp->view.cols,
&tp->allocated_lines);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
goto out_put_view;
}
tp->converted_line = (void *)__get_free_page(GFP_KERNEL);
if (!tp->converted_line) {
rc = -ENOMEM;
goto out_free_screen;
}
tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA);
if (!tp->input) {
rc = -ENOMEM;
goto out_free_converted_line;
}
tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL);
if (!tp->prompt) {
rc = -ENOMEM;
goto out_free_input;
}
tp->rcl_lines = tty3270_alloc_recall(tp->view.cols);
if (!tp->rcl_lines) {
rc = -ENOMEM;
goto out_free_prompt;
}
/* Create blank line for every line in the tty output area. */
tty3270_blank_screen(tp);
tp->kbd->port = &tp->port;
tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
kbd_ascebc(tp->kbd, tp->view.ascebc);
raw3270_activate_view(&tp->view);
raw3270_put_view(&tp->view);
*newtp = tp;
return 0;
out_free_prompt:
kfree(tp->prompt);
out_free_input:
kfree(tp->input);
out_free_converted_line:
free_page((unsigned long)tp->converted_line);
out_free_screen:
tty3270_free_screen(tp->screen, tp->view.rows);
out_put_view:
raw3270_put_view(&tp->view);
raw3270_del_view(&tp->view);
out_free_view:
tty3270_free_view(tp);
return rc;
}
/*
* This routine is called whenever a 3270 tty is opened first time.
*/
static int
tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct raw3270_view *view;
struct tty3270 *tp;
int rc;
/* Check if the tty3270 is already there. */
view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
if (IS_ERR(view)) {
rc = tty3270_create_view(tty->index, &tp);
if (rc)
return rc;
} else {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;
tp->inattr = TF_INPUT;
}
tty->winsize.ws_row = tty3270_tty_rows(tp);
tty->winsize.ws_col = tp->view.cols;
rc = tty_port_install(&tp->port, driver, tty);
if (rc) {
raw3270_put_view(&tp->view);
return rc;
}
tty->driver_data = tp;
return 0;
}
/*
* This routine is called whenever a 3270 tty is opened.
*/
static int tty3270_open(struct tty_struct *tty, struct file *filp)
{
struct tty3270 *tp = tty->driver_data;
struct tty_port *port = &tp->port;
port->count++;
tty_port_tty_set(port, tty);
return 0;
}
/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static void tty3270_close(struct tty_struct *tty, struct file *filp)
{
struct tty3270 *tp = tty->driver_data;
if (tty->count > 1)
return;
if (tp)
tty_port_tty_set(&tp->port, NULL);
}
static void tty3270_cleanup(struct tty_struct *tty)
{
struct tty3270 *tp = tty->driver_data;
if (tp) {
tty->driver_data = NULL;
raw3270_put_view(&tp->view);
}
}
/*
* We always have room.
*/
static unsigned int tty3270_write_room(struct tty_struct *tty)
{
return INT_MAX;
}
/*
* Insert character into the screen at the current position with the
* current color and highlight. This function does NOT do cursor movement.
*/
static void tty3270_put_character(struct tty3270 *tp, char ch)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tty3270_get_write_line(tp, tp->cy);
if (line->len <= tp->cx) {
while (line->len < tp->cx) {
cell = line->cells + line->len;
cell->character = ' ';
cell->attributes = tp->attributes;
line->len++;
}
line->len++;
}
cell = line->cells + tp->cx;
cell->character = ch;
cell->attributes = tp->attributes;
line->dirty = 1;
}
/*
* Do carriage return.
*/
static void tty3270_cr(struct tty3270 *tp)
{
tp->cx = 0;
}
/*
* Do line feed.
*/
static void tty3270_lf(struct tty3270 *tp)
{
struct tty3270_line *line;
int i;
if (tp->cy < tty3270_tty_rows(tp) - 1) {
tp->cy++;
} else {
tp->line_view_start = tty3270_line_increment(tp, tp->line_view_start, 1);
tp->line_write_start = tty3270_line_increment(tp, tp->line_write_start, 1);
for (i = 0; i < tty3270_tty_rows(tp); i++)
tty3270_get_view_line(tp, i)->dirty = 1;
}
line = tty3270_get_write_line(tp, tp->cy);
line->len = 0;
line->dirty = 1;
}
static void tty3270_ri(struct tty3270 *tp)
{
if (tp->cy > 0)
tp->cy--;
}
static void tty3270_reset_cell(struct tty3270 *tp, struct tty3270_cell *cell)
{
cell->character = ' ';
tty3270_reset_attributes(&cell->attributes);
}
/*
* Insert characters at current position.
*/
static void tty3270_insert_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int k;
line = tty3270_get_write_line(tp, tp->cy);
while (line->len < tp->cx)
tty3270_reset_cell(tp, &line->cells[line->len++]);
if (n > tp->view.cols - tp->cx)
n = tp->view.cols - tp->cx;
k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
while (k--)
line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
line->len += n;
if (line->len > tp->view.cols)
line->len = tp->view.cols;
while (n-- > 0) {
line->cells[tp->cx + n].character = ' ';
line->cells[tp->cx + n].attributes = tp->attributes;
}
}
/*
* Delete characters at current position.
*/
static void tty3270_delete_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int i;
line = tty3270_get_write_line(tp, tp->cy);
if (line->len <= tp->cx)
return;
if (line->len - tp->cx <= n) {
line->len = tp->cx;
return;
}
for (i = tp->cx; i + n < line->len; i++)
line->cells[i] = line->cells[i + n];
line->len -= n;
}
/*
* Erase characters at current position.
*/
static void tty3270_erase_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tty3270_get_write_line(tp, tp->cy);
while (line->len > tp->cx && n-- > 0) {
cell = line->cells + tp->cx++;
tty3270_reset_cell(tp, cell);
}
tp->cx += n;
tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
}
/*
* Erase line, 3 different cases:
* Esc [ 0 K Erase from current position to end of line inclusive
* Esc [ 1 K Erase from beginning of line to current position inclusive
* Esc [ 2 K Erase entire line (without moving cursor)
*/
static void tty3270_erase_line(struct tty3270 *tp, int mode)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
int i, start, end;
line = tty3270_get_write_line(tp, tp->cy);
switch (mode) {
case 0:
start = tp->cx;
end = tp->view.cols;
break;
case 1:
start = 0;
end = tp->cx;
break;
case 2:
start = 0;
end = tp->view.cols;
break;
default:
return;
}
for (i = start; i < end; i++) {
cell = line->cells + i;
tty3270_reset_cell(tp, cell);
cell->attributes.b_color = tp->attributes.b_color;
}
if (line->len <= end)
line->len = end;
}
/*
* Erase display, 3 different cases:
* Esc [ 0 J Erase from current position to bottom of screen inclusive
* Esc [ 1 J Erase from top of screen to current position inclusive
* Esc [ 2 J Erase entire screen (without moving the cursor)
*/
static void tty3270_erase_display(struct tty3270 *tp, int mode)
{
struct tty3270_line *line;
int i, start, end;
switch (mode) {
case 0:
tty3270_erase_line(tp, 0);
start = tp->cy + 1;
end = tty3270_tty_rows(tp);
break;
case 1:
start = 0;
end = tp->cy;
tty3270_erase_line(tp, 1);
break;
case 2:
start = 0;
end = tty3270_tty_rows(tp);
break;
default:
return;
}
for (i = start; i < end; i++) {
line = tty3270_get_write_line(tp, i);
line->len = 0;
line->dirty = 1;
}
}
/*
* Set attributes found in an escape sequence.
* Esc [ <attr> ; <attr> ; ... m
*/
static void tty3270_set_attributes(struct tty3270 *tp)
{
int i, attr;
for (i = 0; i <= tp->esc_npar; i++) {
attr = tp->esc_par[i];
switch (attr) {
case 0: /* Reset */
tty3270_reset_attributes(&tp->attributes);
break;
/* Highlight. */
case 4: /* Start underlining. */
tp->attributes.highlight = TTY3270_HIGHLIGHT_UNDERSCORE;
break;
case 5: /* Start blink. */
tp->attributes.highlight = TTY3270_HIGHLIGHT_BLINK;
break;
case 7: /* Start reverse. */
tp->attributes.highlight = TTY3270_HIGHLIGHT_REVERSE;
break;
case 24: /* End underlining */
tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_UNDERSCORE;
break;
case 25: /* End blink. */
tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_BLINK;
break;
case 27: /* End reverse. */
tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_REVERSE;
break;
/* Foreground color. */
case 30: /* Black */
case 31: /* Red */
case 32: /* Green */
case 33: /* Yellow */
case 34: /* Blue */
case 35: /* Magenta */
case 36: /* Cyan */
case 37: /* White */
case 39: /* Default */
tp->attributes.f_color = attr - 30;
break;
/* Background color. */
case 40: /* Black */
case 41: /* Red */
case 42: /* Green */
case 43: /* Yellow */
case 44: /* Blue */
case 45: /* Magenta */
case 46: /* Cyan */
case 47: /* White */
case 49: /* Default */
tp->attributes.b_color = attr - 40;
break;
}
}
}
static inline int tty3270_getpar(struct tty3270 *tp, int ix)
{
return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
}
static void tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
int max_cx = max(0, cx);
int max_cy = max(0, cy);
tp->cx = min_t(int, tp->view.cols - 1, max_cx);
line = tty3270_get_write_line(tp, tp->cy);
while (line->len < tp->cx) {
cell = line->cells + line->len;
cell->character = ' ';
cell->attributes = tp->attributes;
line->len++;
}
tp->cy = min_t(int, tty3270_tty_rows(tp) - 1, max_cy);
}
/*
* Process escape sequences. Known sequences:
* Esc 7 Save Cursor Position
* Esc 8 Restore Cursor Position
* Esc [ Pn ; Pn ; .. m Set attributes
* Esc [ Pn ; Pn H Cursor Position
* Esc [ Pn ; Pn f Cursor Position
* Esc [ Pn A Cursor Up
* Esc [ Pn B Cursor Down
* Esc [ Pn C Cursor Forward
* Esc [ Pn D Cursor Backward
* Esc [ Pn G Cursor Horizontal Absolute
* Esc [ Pn X Erase Characters
* Esc [ Ps J Erase in Display
* Esc [ Ps K Erase in Line
* // FIXME: add all the new ones.
*
* Pn is a numeric parameter, a string of zero or more decimal digits.
* Ps is a selective parameter.
*/
static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
{
enum { ES_NORMAL, ES_ESC, ES_SQUARE, ES_PAREN, ES_GETPARS };
if (tp->esc_state == ES_NORMAL) {
if (ch == 0x1b)
/* Starting new escape sequence. */
tp->esc_state = ES_ESC;
return;
}
if (tp->esc_state == ES_ESC) {
tp->esc_state = ES_NORMAL;
switch (ch) {
case '[':
tp->esc_state = ES_SQUARE;
break;
case '(':
tp->esc_state = ES_PAREN;
break;
case 'E':
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 'M':
tty3270_ri(tp);
break;
case 'D':
tty3270_lf(tp);
break;
case 'Z': /* Respond ID. */
kbd_puts_queue(&tp->port, "\033[?6c");
break;
case '7': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_attributes = tp->attributes;
break;
case '8': /* Restore cursor position. */
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->attributes = tp->saved_attributes;
break;
case 'c': /* Reset terminal. */
tp->cx = 0;
tp->cy = 0;
tp->saved_cx = 0;
tp->saved_cy = 0;
tty3270_reset_attributes(&tp->attributes);
tty3270_reset_attributes(&tp->saved_attributes);
tty3270_erase_display(tp, 2);
break;
}
return;
}
switch (tp->esc_state) {
case ES_PAREN:
tp->esc_state = ES_NORMAL;
switch (ch) {
case 'B':
tp->attributes.alternate_charset = 0;
break;
case '0':
tp->attributes.alternate_charset = 1;
break;
}
return;
case ES_SQUARE:
tp->esc_state = ES_GETPARS;
memset(tp->esc_par, 0, sizeof(tp->esc_par));
tp->esc_npar = 0;
tp->esc_ques = (ch == '?');
if (tp->esc_ques)
return;
fallthrough;
case ES_GETPARS:
if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
tp->esc_npar++;
return;
}
if (ch >= '0' && ch <= '9') {
tp->esc_par[tp->esc_npar] *= 10;
tp->esc_par[tp->esc_npar] += ch - '0';
return;
}
break;
default:
break;
}
tp->esc_state = ES_NORMAL;
if (ch == 'n' && !tp->esc_ques) {
if (tp->esc_par[0] == 5) /* Status report. */
kbd_puts_queue(&tp->port, "\033[0n");
else if (tp->esc_par[0] == 6) { /* Cursor report. */
char buf[40];
sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
kbd_puts_queue(&tp->port, buf);
}
return;
}
if (tp->esc_ques)
return;
switch (ch) {
case 'm':
tty3270_set_attributes(tp);
break;
case 'H': /* Set cursor position. */
case 'f':
tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
tty3270_getpar(tp, 0) - 1);
break;
case 'd': /* Set y position. */
tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
break;
case 'A': /* Cursor up. */
case 'F':
tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
break;
case 'B': /* Cursor down. */
case 'e':
case 'E':
tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
break;
case 'C': /* Cursor forward. */
case 'a':
tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
break;
case 'D': /* Cursor backward. */
tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
break;
case 'G': /* Set x position. */
case '`':
tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
break;
case 'X': /* Erase Characters. */
tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
break;
case 'J': /* Erase display. */
tty3270_erase_display(tp, tp->esc_par[0]);
break;
case 'K': /* Erase line. */
tty3270_erase_line(tp, tp->esc_par[0]);
break;
case 'P': /* Delete characters. */
tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
break;
case '@': /* Insert characters. */
tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
break;
case 's': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_attributes = tp->attributes;
break;
case 'u': /* Restore cursor position. */
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->attributes = tp->saved_attributes;
break;
}
}
/*
 * Core output routine for 3270 ttys: interprets control characters and
 * escape sequences and updates the screen buffer.
*/
static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
const unsigned char *buf, int count)
{
int i_msg, i;
spin_lock_irq(&tp->view.lock);
for (i_msg = 0; !tty->flow.stopped && i_msg < count; i_msg++) {
if (tp->esc_state != 0) {
/* Continue escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
continue;
}
switch (buf[i_msg]) {
case 0x00:
break;
case 0x07: /* '\a' -- Alarm */
tp->wcc |= TW_PLUSALARM;
break;
case 0x08: /* Backspace. */
if (tp->cx > 0) {
tp->cx--;
tty3270_put_character(tp, ' ');
}
break;
case 0x09: /* '\t' -- Tabulate */
for (i = tp->cx % 8; i < 8; i++) {
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
break;
}
tty3270_put_character(tp, ' ');
tp->cx++;
}
break;
case 0x0a: /* '\n' -- New Line */
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 0x0c: /* '\f' -- Form Feed */
tty3270_erase_display(tp, 2);
tp->cx = 0;
tp->cy = 0;
break;
case 0x0d: /* '\r' -- Carriage Return */
tp->cx = 0;
break;
case 0x0e:
tp->attributes.alternate_charset = 1;
break;
case 0x0f: /* SuSE "exit alternate mode" */
tp->attributes.alternate_charset = 0;
break;
case 0x1b: /* Start escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
break;
default: /* Insert normal character. */
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
}
tty3270_put_character(tp, buf[i_msg]);
tp->cx++;
break;
}
}
/* Setup timer to update display after 1/10 second */
tp->update_flags |= TTY_UPDATE_LINES;
if (!timer_pending(&tp->timer))
tty3270_set_timer(tp, msecs_to_jiffies(100));
spin_unlock_irq(&tp->view.lock);
}
/*
* String write routine for 3270 ttys
*/
static ssize_t tty3270_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return 0;
if (tp->char_count > 0) {
tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
tty3270_do_write(tp, tty, buf, count);
return count;
}
/*
 * Put a single character into the tty's character buffer
*/
static int tty3270_put_char(struct tty_struct *tty, u8 ch)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
return 0;
tp->char_buf[tp->char_count++] = ch;
return 1;
}
/*
 * Flush all characters from the tty's character buffer that were put there
* by tty3270_put_char.
*/
static void tty3270_flush_chars(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
if (tp->char_count > 0) {
tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
}
/*
* Check for visible/invisible input switches
*/
static void tty3270_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct tty3270 *tp;
int new;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_irq(&tp->view.lock);
if (L_ICANON(tty)) {
new = L_ECHO(tty) ? TF_INPUT : TF_INPUTN;
if (new != tp->inattr) {
tp->inattr = new;
tty3270_update_prompt(tp, "");
tty3270_set_timer(tp, 1);
}
}
spin_unlock_irq(&tp->view.lock);
}
/*
* Disable reading from a 3270 tty
*/
static void tty3270_throttle(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 1;
}
/*
* Enable reading from a 3270 tty
*/
static void tty3270_unthrottle(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 0;
if (tp->attn)
tty3270_issue_read(tp, 1);
}
/*
* Hang up the tty device.
*/
static void tty3270_hangup(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_irq(&tp->view.lock);
tp->cx = 0;
tp->cy = 0;
tp->saved_cx = 0;
tp->saved_cy = 0;
tty3270_reset_attributes(&tp->attributes);
tty3270_reset_attributes(&tp->saved_attributes);
tty3270_blank_screen(tp);
tp->update_flags = TTY_UPDATE_ALL;
spin_unlock_irq(&tp->view.lock);
tty3270_set_timer(tp, 1);
}
static void tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
{
}
static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty_io_error(tty))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, arg);
}
#ifdef CONFIG_COMPAT
static long tty3270_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty_io_error(tty))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct tty_operations tty3270_ops = {
.install = tty3270_install,
.cleanup = tty3270_cleanup,
.open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
.put_char = tty3270_put_char,
.flush_chars = tty3270_flush_chars,
.write_room = tty3270_write_room,
.throttle = tty3270_throttle,
.unthrottle = tty3270_unthrottle,
.hangup = tty3270_hangup,
.wait_until_sent = tty3270_wait_until_sent,
.ioctl = tty3270_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tty3270_compat_ioctl,
#endif
.set_termios = tty3270_set_termios
};
static void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}
static void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}
static struct raw3270_notifier tty3270_notifier = {
.create = tty3270_create_cb,
.destroy = tty3270_destroy_cb,
};
/*
* 3270 tty registration code called from tty_init().
 * Most kernel services (incl. kmalloc) are available at this point.
*/
static int __init tty3270_init(void)
{
struct tty_driver *driver;
int ret;
driver = tty_alloc_driver(RAW3270_MAXDEVS,
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_RESET_TERMIOS);
if (IS_ERR(driver))
return PTR_ERR(driver);
/*
* Initialize the tty_driver structure
* Entries in tty3270_driver that are NOT initialized:
* proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
*/
driver->driver_name = "tty3270";
driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
driver->minor_start = RAW3270_FIRSTMINOR;
driver->name_base = RAW3270_FIRSTMINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
tty_driver_kref_put(driver);
return ret;
}
tty3270_driver = driver;
raw3270_register_notifier(&tty3270_notifier);
return 0;
}
static void __exit tty3270_exit(void)
{
struct tty_driver *driver;
raw3270_unregister_notifier(&tty3270_notifier);
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
tty_driver_kref_put(driver);
tty3270_del_views();
}
#if IS_ENABLED(CONFIG_TN3270_CONSOLE)
static struct tty3270 *condev;
static void
con3270_write(struct console *co, const char *str, unsigned int count)
{
struct tty3270 *tp = co->data;
unsigned long flags;
char c;
spin_lock_irqsave(&tp->view.lock, flags);
while (count--) {
c = *str++;
if (c == 0x0a) {
tty3270_cr(tp);
tty3270_lf(tp);
} else {
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
}
tty3270_put_character(tp, c);
tp->cx++;
}
}
spin_unlock_irqrestore(&tp->view.lock, flags);
}
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
*index = c->index;
return tty3270_driver;
}
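/* Busy-wait until the single write request has been returned by its callback. */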
static void
con3270_wait_write(struct tty3270 *tp)
{
while (!tp->write) {
raw3270_wait_cons_dev(tp->view.dev);
barrier();
}
}
/*
* The below function is called as a panic/reboot notifier before the
* system enters a disabled, endless loop.
*
* Notice we must use the spin_trylock() alternative, to prevent lockups
* in atomic context (panic routine runs with secondary CPUs, local IRQs
* and preemption disabled).
*/
static int con3270_notify(struct notifier_block *self,
unsigned long event, void *data)
{
struct tty3270 *tp;
unsigned long flags;
int rc;
tp = condev;
if (!tp->view.dev)
return NOTIFY_DONE;
if (!raw3270_view_lock_unavailable(&tp->view)) {
rc = raw3270_activate_view(&tp->view);
if (rc)
return NOTIFY_DONE;
}
if (!spin_trylock_irqsave(&tp->view.lock, flags))
return NOTIFY_DONE;
con3270_wait_write(tp);
tp->nr_up = 0;
tp->update_flags = TTY_UPDATE_ALL;
while (tp->update_flags != 0) {
spin_unlock_irqrestore(&tp->view.lock, flags);
tty3270_update(&tp->timer);
spin_lock_irqsave(&tp->view.lock, flags);
con3270_wait_write(tp);
}
spin_unlock_irqrestore(&tp->view.lock, flags);
return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3270_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3270_notify,
.priority = INT_MIN + 1, /* run the callback late */
};
static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
.device = con3270_device,
.flags = CON_PRINTBUFFER,
};
static int __init
con3270_init(void)
{
struct raw3270_view *view;
struct raw3270 *rp;
struct tty3270 *tp;
int rc;
/* Check if 3270 is to be the console */
if (!CONSOLE_IS_3270)
return -ENODEV;
/* Set the console mode for VM */
if (MACHINE_IS_VM) {
cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
rp = raw3270_setup_console();
if (IS_ERR(rp))
return PTR_ERR(rp);
/* Check if the tty3270 is already there. */
view = raw3270_find_view(&tty3270_fn, RAW3270_FIRSTMINOR);
if (IS_ERR(view)) {
rc = tty3270_create_view(0, &tp);
if (rc)
return rc;
} else {
tp = container_of(view, struct tty3270, view);
tp->inattr = TF_INPUT;
}
con3270.data = tp;
condev = tp;
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
console_initcall(con3270_init);
#endif
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
module_init(tty3270_init);
module_exit(tty3270_exit);
| linux-master | drivers/s390/char/con3270.c |
// SPDX-License-Identifier: GPL-2.0
/*
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2002
* Author(s): Carsten Otte <[email protected]>
* Michael Holzheu <[email protected]>
* Tuan Ngo-Anh <[email protected]>
* Martin Schwidefsky <[email protected]>
* Stefan Bader <[email protected]>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/timer.h>
#include <asm/types.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
/*
* tape_std_assign
*/
static void
tape_std_assign_timeout(struct timer_list *t)
{
struct tape_request * request = from_timer(request, t, timer);
struct tape_device * device = request->device;
int rc;
BUG_ON(!device);
DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
device->cdev_id);
rc = tape_cancel_io(device, request);
if(rc)
DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
"%i\n", device->cdev_id, rc);
}
int
tape_std_assign(struct tape_device *device)
{
int rc;
struct tape_request *request;
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_ASSIGN;
tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/*
* The assign command sometimes blocks if the device is assigned
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
timer_setup(&request->timer, tape_std_assign_timeout, 0);
mod_timer(&request->timer, jiffies + msecs_to_jiffies(2000));
rc = tape_do_io_interruptible(device, request);
del_timer_sync(&request->timer);
if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* tape_std_unassign
*/
int
tape_std_unassign (struct tape_device *device)
{
int rc;
struct tape_request *request;
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "(%08x): Can't unassign device\n",
device->cdev_id);
return -EIO;
}
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_UNASSIGN;
tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
if ((rc = tape_do_io(device, request)) != 0) {
DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* TAPE390_DISPLAY: Show a string on the tape display.
*/
int
tape_std_display(struct tape_device *device, struct display_struct *disp)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(2, 17);
if (IS_ERR(request)) {
DBF_EVENT(3, "TAPE: load display failed\n");
return PTR_ERR(request);
}
request->op = TO_DIS;
*(unsigned char *) request->cpdata = disp->cntrl;
DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return rc;
}
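/*
 * Illustrative user-space sketch only, not part of the driver: filling a
 * display_struct and issuing the TAPE390_DISPLAY ioctl on an opened tape
 * device node. The device path and the cntrl value of 0 are placeholders;
 * error handling is reduced to the bare minimum.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>

static int show_message(const char *devnode)
{
	struct display_struct disp;
	int fd, rc;

	fd = open(devnode, O_RDONLY);
	if (fd < 0)
		return -1;
	memset(&disp, 0, sizeof(disp));
	memcpy(disp.message1, "BACKUP  ", 8);	/* messages are 8 bytes, blank padded */
	memcpy(disp.message2, "RUNNING ", 8);
	rc = ioctl(fd, TAPE390_DISPLAY, &disp);
	close(fd);
	return rc;
}
#endif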
/*
* Read block id.
*/
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(3, 8);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RBI;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0)
/* Get result from read buffer. */
*id = *(__u64 *) request->cpdata;
tape_free_request(request);
return rc;
}
int
tape_std_terminate_write(struct tape_device *device)
{
int rc;
if(device->required_tapemarks == 0)
return 0;
DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
device->required_tapemarks);
rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
if (rc)
return rc;
device->required_tapemarks = 0;
return tape_mtop(device, MTBSR, 1);
}
/*
* MTLOAD: Loads the tape.
 * The default implementation just waits until the tape medium state changes
* to MS_LOADED.
*/
int
tape_std_mtload(struct tape_device *device, int count)
{
return wait_event_interruptible(device->state_change_wq,
(device->medium_state == MS_LOADED));
}
/*
* MTSETBLK: Set block size.
*/
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
struct idal_buffer *new;
DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
if (count <= 0) {
/*
* Just set block_size to 0. tapechar_read/tapechar_write
* will realloc the idal buffer if a bigger one than the
* current is needed.
*/
device->char_data.block_size = 0;
return 0;
}
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == count)
/* We already have an idal buffer of that size. */
return 0;
if (count > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
count, MAX_BLOCKSIZE);
return -EINVAL;
}
/* Allocate a new idal buffer. */
new = idal_buffer_alloc(count, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
device->char_data.block_size = count;
DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
return 0;
}
/*
* MTRESET: Set block size to 0.
*/
int
tape_std_mtreset(struct tape_device *device, int count)
{
DBF_EVENT(6, "TCHAR:devreset:\n");
device->char_data.block_size = 0;
return 0;
}
/*
* MTFSF: Forward space over 'count' file marks. The tape is positioned
* at the EOT (End of Tape) side of the file mark.
*/
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTFSR: Forward space over 'count' tape blocks (blocksize is set
 * via MTSETBLK).
*/
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "FSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTBSR: Backward space over 'count' tape blocks.
 * (blocksize is set via MTSETBLK).
*/
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "BSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTWEOF: Write 'count' file marks at the current position.
*/
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_WTM;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSFM: Backward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side of the
* last skipped file mark.
*/
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSF: Backward space over 'count' file marks. The tape is positioned at
* the EOT (End of Tape) side of the last skipped file mark.
*/
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTFSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTFSFM: Forward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side
* of the last skipped file mark.
*/
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTBSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTREW: Rewind the tape.
*/
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_REW;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTOFFL: Rewind the tape and put the drive off-line.
* Implement 'rewind unload'
*/
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RUN;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTNOP: 'No operation'.
*/
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTEOM: positions at the end of the portion of the tape already used
 * for recording data. MTEOM positions after the last file mark, ready for
* appending another file.
*/
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
int rc;
/*
* Seek from the beginning of tape (rewind).
*/
if ((rc = tape_mtop(device, MTREW, 1)) < 0)
return rc;
/*
	 * The logical end of volume is given by two sequential tapemarks.
* Look for this by skipping to the next file (over one tapemark)
* and then test for another one (fsr returns 1 if a tapemark was
* encountered).
*/
do {
if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
return rc;
if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
return rc;
} while (rc == 0);
return tape_mtop(device, MTBSR, 1);
}
/*
* MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
*/
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(4, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
/* execute it, MTRETEN rc gets ignored */
tape_do_io_interruptible(device, request);
tape_free_request(request);
return tape_mtop(device, MTREW, 1);
}
/*
* MTERASE: erases the tape.
*/
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(6, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_DSE;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTUNLOAD: Rewind the tape and unload it.
*/
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
return tape_mtop(device, MTOFFL, mt_count);
}
/*
* MTCOMPRESSION: used to enable compression.
* Sets the IDRC on/off.
*/
int
tape_std_mtcompression(struct tape_device *device, int mt_count)
{
struct tape_request *request;
if (mt_count < 0 || mt_count > 1) {
DBF_EXCEPTION(6, "xcom parm\n");
return -EINVAL;
}
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
if (mt_count == 0)
*device->modeset_byte &= ~0x08;
else
*device->modeset_byte |= 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* Read Block
*/
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
/*
* We have to alloc 4 ccws in order to be able to transform request
* into a read backward request in error case.
*/
request = tape_alloc_request(4, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xrbl fail");
return request;
}
request->op = TO_RFO;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
device->char_data.idal_buf);
DBF_EVENT(6, "xrbl ccwg\n");
return request;
}
/*
* Read Block backward transformation function.
*/
void
tape_std_read_backward(struct tape_device *device, struct tape_request *request)
{
/*
* We have allocated 4 ccws in tape_std_read, so we can now
* transform the request to a read backward, followed by a
* forward space block.
*/
request->op = TO_RBA;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
device->char_data.idal_buf);
tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
DBF_EVENT(6, "xrop ccwg");}
/*
* Write Block
*/
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xwbl fail\n");
return request;
}
request->op = TO_WRI;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
device->char_data.idal_buf);
DBF_EVENT(6, "xwbl ccwg\n");
return request;
}
/*
 * This routine is called by the frontend after an ENOSPC on write.
*/
void
tape_std_process_eov(struct tape_device *device)
{
/*
* End of volume: We have to backspace the last written record, then
* we TRY to write a tapemark and then backspace over the written TM
*/
if (tape_mtop(device, MTBSR, 1) == 0 &&
tape_mtop(device, MTWEOF, 1) == 0) {
tape_mtop(device, MTBSR, 1);
}
}
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);
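/*
 * Editorial note -- a minimal user-space sketch (not part of the driver)
 * illustrating how the MT operations above are normally reached: the tape
 * character-device frontend translates MTIOCTOP ioctls into tape_mtop()
 * calls, which dispatch to the tape_std_mt*() helpers exported here. The
 * device node name is an assumption; adjust it to the local setup.
 */
#if 0	/* illustration only; builds as a stand-alone user-space program */
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct mtop op;
	int fd = open("/dev/ntibm0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	op.mt_op = MTSETBLK;	/* handled by tape_std_mtsetblk() */
	op.mt_count = 4096;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTSETBLK");
	op.mt_op = MTREW;	/* handled by tape_std_mtrew() */
	op.mt_count = 1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTREW");
	close(fd);
	return 0;
}
#endif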
| linux-master | drivers/s390/char/tape_std.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ccw based virtio transport
*
* Copyright IBM Corp. 2012, 2014
*
* Author(s): Cornelia Huck <[email protected]>
*/
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/tpi.h>
/*
* virtio related functions
*/
struct vq_config_block {
__u16 index;
__u16 num;
} __packed;
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
struct vcdev_dma_area {
unsigned long indicators;
unsigned long indicators2;
struct vq_config_block config_block;
__u8 status;
};
struct virtio_ccw_device {
struct virtio_device vdev;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
__u32 curr_io;
int err;
unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
rwlock_t irq_lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
bool is_thinint;
bool going_away;
bool device_lost;
unsigned int config_ready;
void *airq_info;
struct vcdev_dma_area *dma_area;
};
static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
{
return &vcdev->dma_area->indicators;
}
static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
{
return &vcdev->dma_area->indicators2;
}
struct vq_info_block_legacy {
__u64 queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
struct vq_info_block {
__u64 desc;
__u32 res0;
__u16 index;
__u16 num;
__u64 avail;
__u64 used;
} __packed;
struct virtio_feature_desc {
__le32 features;
__u8 index;
} __packed;
struct virtio_thinint_area {
unsigned long summary_indicator;
unsigned long indicator;
u64 bit_nr;
u8 isc;
} __packed;
struct virtio_rev_info {
__u16 revision;
__u16 length;
__u8 data[];
};
/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 2
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
union {
struct vq_info_block s;
struct vq_info_block_legacy l;
} *info_block;
int bit_nr;
struct list_head node;
long cookie;
};
#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20
static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
u8 summary_indicator_idx;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static DEFINE_MUTEX(airq_areas_lock);
static u8 *summary_indicators;
static inline u8 *get_summary_indicator(struct airq_info *info)
{
return summary_indicators + info->summary_indicator_idx;
}
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
return container_of(vdev, struct virtio_ccw_device, vdev);
}
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
unsigned long i, flags;
write_lock_irqsave(&info->lock, flags);
for (i = 0; i < airq_iv_end(info->aiv); i++) {
if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
airq_iv_free_bit(info->aiv, i);
airq_iv_set_ptr(info->aiv, i, 0);
break;
}
}
write_unlock_irqrestore(&info->lock, flags);
}
static void virtio_airq_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
inc_irq_stat(IRQIO_VAI);
read_lock(&info->lock);
/* Walk through indicators field, summary indicator active. */
for (ai = 0;;) {
ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
if (ai == -1UL)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
*(get_summary_indicator(info)) = 0;
smp_wmb();
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
if (ai == -1UL)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
read_unlock(&info->lock);
}
static struct airq_info *new_airq_info(int index)
{
struct airq_info *info;
int rc;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
rwlock_init(&info->lock);
info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
| AIRQ_IV_CACHELINE, NULL);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
info->summary_indicator_idx = index;
info->airq.lsi_ptr = get_summary_indicator(info);
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
if (rc) {
airq_iv_release(info->aiv);
kfree(info);
return NULL;
}
return info;
}
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
int i, j;
struct airq_info *info;
unsigned long indicator_addr = 0;
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
mutex_unlock(&airq_areas_lock);
if (!info)
return 0;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
continue;
}
*first = bit;
*airq_info = info;
indicator_addr = (unsigned long)info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);
}
return indicator_addr;
}
static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
if (!vcdev->airq_info)
return;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
unsigned long flags;
__u32 ret;
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
if (vcdev->err)
ret = 0;
else
ret = vcdev->curr_io & flag;
spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
return ret;
}
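/*
 * Editorial note: ccw_io_helper() below serializes channel programs via
 * io_lock, retries ccw_device_start() for as long as the subchannel is
 * busy, and then sleeps until virtio_ccw_int_handler() has cleared the
 * intparm flag from curr_io, so the caller observes either the start
 * error or the completion status stored in vcdev->err.
 */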
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw, __u32 intparm)
{
int ret;
unsigned long flags;
int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
mutex_lock(&vcdev->io_lock);
do {
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
if (!ret) {
if (!vcdev->curr_io)
vcdev->err = 0;
vcdev->curr_io |= flag;
}
spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
cpu_relax();
} while (ret == -EBUSY);
wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
ret = ret ? ret : vcdev->err;
mutex_unlock(&vcdev->io_lock);
return ret;
}
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw)
{
int ret;
unsigned long *indicatorp = NULL;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*thinint_area));
if (!thinint_area)
return;
thinint_area->summary_indicator =
(unsigned long) get_summary_indicator(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)virt_to_phys(thinint_area);
} else {
/* payload is the address of the indicators */
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(indicators(vcdev)));
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)virt_to_phys(indicatorp);
}
/* Deregister indicators from host. */
*indicators(vcdev) = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
VIRTIO_CCW_DOING_SET_IND_ADAPTER :
VIRTIO_CCW_DOING_SET_IND);
if (ret && (ret != -ENODEV))
dev_info(&vcdev->cdev->dev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
static inline bool virtio_ccw_do_kvm_notify(struct virtqueue *vq, u32 data)
{
struct virtio_ccw_vq_info *info = vq->priv;
struct virtio_ccw_device *vcdev;
struct subchannel_id schid;
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int));
info->cookie = kvm_hypercall3(KVM_S390_VIRTIO_CCW_NOTIFY,
*((unsigned int *)&schid),
data, info->cookie);
if (info->cookie < 0)
return false;
return true;
}
static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
return virtio_ccw_do_kvm_notify(vq, vq->index);
}
static bool virtio_ccw_kvm_notify_with_data(struct virtqueue *vq)
{
return virtio_ccw_do_kvm_notify(vq, vring_notification_data(vq));
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw, int index)
{
int ret;
vcdev->dma_area->config_block.index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
struct virtio_ccw_vq_info *info = vq->priv;
unsigned long flags;
int ret;
unsigned int index = vq->index;
/* Remove from our list. */
spin_lock_irqsave(&vcdev->lock, flags);
list_del(&info->node);
spin_unlock_irqrestore(&vcdev->lock, flags);
/* Release from host. */
if (vcdev->revision == 0) {
info->info_block->l.queue = 0;
info->info_block->l.align = 0;
info->info_block->l.index = index;
info->info_block->l.num = 0;
ccw->count = sizeof(info->info_block->l);
} else {
info->info_block->s.desc = 0;
info->info_block->s.index = index;
info->info_block->s.num = 0;
info->info_block->s.avail = 0;
info->info_block->s.used = 0;
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
ccw->cda = (__u32)virt_to_phys(info->info_block);
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
/*
* -ENODEV isn't considered an error: The device is gone anyway.
* This may happen on device detach.
*/
if (ret && (ret != -ENODEV))
dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
ret, index);
vring_del_virtqueue(vq);
ccw_device_dma_free(vcdev->cdev, info->info_block,
sizeof(*info->info_block));
kfree(info);
}
static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
virtio_ccw_drop_indicator(vcdev, ccw);
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
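/*
 * Editorial note: virtio_ccw_setup_vq() below first queries the queue
 * size with CCW_CMD_READ_VQ_CONF, creates the vring, and then registers
 * it with the host via CCW_CMD_SET_VQ, using the legacy info block
 * layout for revision 0 devices and the desc/avail/used layout otherwise.
 */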
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
int i, vq_callback_t *callback,
const char *name, bool ctx,
struct ccw1 *ccw)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
bool (*notify)(struct virtqueue *vq);
int err;
struct virtqueue *vq = NULL;
struct virtio_ccw_vq_info *info;
u64 queue;
unsigned long flags;
bool may_reduce;
if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
notify = virtio_ccw_kvm_notify_with_data;
else
notify = virtio_ccw_kvm_notify;
/* Allocate queue. */
info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
if (!info) {
dev_warn(&vcdev->cdev->dev, "no info\n");
err = -ENOMEM;
goto out_err;
}
info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*info->info_block));
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
goto out_err;
}
info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
if (info->num < 0) {
err = info->num;
goto out_err;
}
may_reduce = vcdev->revision > 0;
vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
vdev, true, may_reduce, ctx,
notify, callback, name);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
err = -ENOMEM;
goto out_err;
}
vq->num_max = info->num;
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
/* Register it with the host. */
queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
info->info_block->l.queue = queue;
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
info->info_block->s.desc = queue;
info->info_block->s.index = i;
info->info_block->s.num = info->num;
info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
ccw->cda = (__u32)virt_to_phys(info->info_block);
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
goto out_err;
}
info->vq = vq;
vq->priv = info;
/* Save it to our list. */
spin_lock_irqsave(&vcdev->lock, flags);
list_add(&info->node, &vcdev->virtqueues);
spin_unlock_irqrestore(&vcdev->lock, flags);
return vq;
out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
ccw_device_dma_free(vcdev->cdev, info->info_block,
sizeof(*info->info_block));
}
kfree(info);
return ERR_PTR(err);
}
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtqueue *vqs[], int nvqs,
struct ccw1 *ccw)
{
int ret;
struct virtio_thinint_area *thinint_area = NULL;
unsigned long indicator_addr;
struct airq_info *info;
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*thinint_area));
if (!thinint_area) {
ret = -ENOMEM;
goto out;
}
/* Try to get an indicator. */
indicator_addr = get_airq_indicator(vqs, nvqs,
&thinint_area->bit_nr,
&vcdev->airq_info);
if (!indicator_addr) {
ret = -ENOSPC;
goto out;
}
thinint_area->indicator = virt_to_phys((void *)indicator_addr);
info = vcdev->airq_info;
thinint_area->summary_indicator =
virt_to_phys(get_summary_indicator(info));
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)virt_to_phys(thinint_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
if (ret) {
if (ret == -EOPNOTSUPP) {
/*
* The host does not support adapter interrupts
* for virtio-ccw, stop trying.
*/
virtio_ccw_use_airq = 0;
pr_info("Adapter interrupts unsupported on host\n");
} else
dev_warn(&vcdev->cdev->dev,
"enabling adapter interrupts = %d\n", ret);
virtio_ccw_drop_indicators(vcdev);
}
out:
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
return ret;
}
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
const bool *ctx,
struct irq_affinity *desc)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
names[i], ctx ? ctx[i] : false,
ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
goto out;
}
}
ret = -ENOMEM;
/*
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(indicators(vcdev)));
if (!indicatorp)
goto out;
*indicatorp = (unsigned long) indicators(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
/* no error, just fall back to legacy interrupts */
vcdev->is_thinint = false;
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
*indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)virt_to_phys(indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
*indicatorp = (unsigned long) indicators2(vcdev);
*indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
ccw->count = sizeof(indicators2(vcdev));
ccw->cda = (__u32)virt_to_phys(indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
sizeof(indicators(vcdev)));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
sizeof(indicators(vcdev)));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
}
static void virtio_ccw_reset(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Zero status bits. */
vcdev->dma_area->status = 0;
/* Send a reset ccw on device. */
ccw->cmd_code = CCW_CMD_VDEV_RESET;
ccw->flags = 0;
ccw->count = 0;
ccw->cda = 0;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
int ret;
u64 rc;
struct ccw1 *ccw;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return 0;
features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
rc = 0;
goto out_free;
}
/* Read the feature bits from the host. */
features->index = 0;
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret) {
rc = 0;
goto out_free;
}
rc = le32_to_cpu(features->features);
if (vcdev->revision == 0)
goto out_free;
/* Read second half of the feature bits from the host. */
features->index = 1;
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret == 0)
rc |= (u64)le32_to_cpu(features->features) << 32;
out_free:
ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return rc;
}
static void ccw_transport_features(struct virtio_device *vdev)
{
/*
* Currently nothing to do here.
*/
}
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
struct ccw1 *ccw;
int ret;
if (vcdev->revision >= 1 &&
!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev, "virtio: device uses revision 1 "
"but does not have VIRTIO_F_VERSION_1\n");
return -EINVAL;
}
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
ret = -ENOMEM;
goto out_free;
}
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
/* Give virtio_ccw a chance to accept features. */
ccw_transport_features(vdev);
features->index = 0;
features->features = cpu_to_le32((u32)vdev->features);
/* Write the first half of the feature bits to the host. */
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
if (ret)
goto out_free;
if (vcdev->revision == 0)
goto out_free;
features->index = 1;
features->features = cpu_to_le32(vdev->features >> 32);
/* Write the second half of the feature bits to the host. */
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return ret;
}
static void virtio_ccw_get_config(struct virtio_device *vdev,
unsigned int offset, void *buf, unsigned len)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
int ret;
struct ccw1 *ccw;
void *config_area;
unsigned long flags;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
/* Read the config area from the host. */
ccw->cmd_code = CCW_CMD_READ_CONF;
ccw->flags = 0;
ccw->count = offset + len;
ccw->cda = (__u32)virt_to_phys(config_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
if (ret)
goto out_free;
spin_lock_irqsave(&vcdev->lock, flags);
memcpy(vcdev->config, config_area, offset + len);
if (vcdev->config_ready < offset + len)
vcdev->config_ready = offset + len;
spin_unlock_irqrestore(&vcdev->lock, flags);
if (buf)
memcpy(buf, config_area + offset, len);
out_free:
ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
unsigned int offset, const void *buf,
unsigned len)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
void *config_area;
unsigned long flags;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
/* Make sure we don't overwrite fields. */
if (vcdev->config_ready < offset)
virtio_ccw_get_config(vdev, 0, NULL, offset);
spin_lock_irqsave(&vcdev->lock, flags);
memcpy(&vcdev->config[offset], buf, len);
/* Write the config area to the host. */
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
spin_unlock_irqrestore(&vcdev->lock, flags);
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
ccw->cda = (__u32)virt_to_phys(config_area);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
if (vcdev->revision < 2)
return vcdev->dma_area->status;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
ccw->count = sizeof(vcdev->dma_area->status);
ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
* was hotunplugged, and then we clean up via the machine check
* handler anyway), vcdev->dma_area->status was not overwritten and we just
* return the old status, which is fine.
*/
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return vcdev->dma_area->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
int ret;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Write the status to the host. */
vcdev->dma_area->status = status;
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
	/* We use ssch for setting the status, which is a serializing
	 * instruction that guarantees the memory writes have
	 * completed before the ssch is issued.
*/
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
vcdev->dma_area->status = old_status;
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
return dev_name(&vcdev->cdev->dev);
}
static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct airq_info *info = vcdev->airq_info;
if (info) {
/*
* This device uses adapter interrupts: synchronize with
* vring_interrupt() called by virtio_airq_handler()
* via the indicator area lock.
*/
write_lock_irq(&info->lock);
write_unlock_irq(&info->lock);
} else {
/* This device uses classic interrupts: synchronize
* with vring_interrupt() called by
* virtio_ccw_int_handler() via the per-device
* irq_lock
*/
write_lock_irq(&vcdev->irq_lock);
write_unlock_irq(&vcdev->irq_lock);
}
}
static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
.get = virtio_ccw_get_config,
.set = virtio_ccw_set_config,
.get_status = virtio_ccw_get_status,
.set_status = virtio_ccw_set_status,
.reset = virtio_ccw_reset,
.find_vqs = virtio_ccw_find_vqs,
.del_vqs = virtio_ccw_del_vqs,
.bus_name = virtio_ccw_bus_name,
.synchronize_cbs = virtio_ccw_synchronize_cbs,
};
/*
* ccw bus driver related functions
*/
static void virtio_ccw_release_dev(struct device *_d)
{
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
sizeof(*vcdev->dma_area));
kfree(vcdev);
}
static int irb_is_error(struct irb *irb)
{
if (scsw_cstat(&irb->scsw) != 0)
return 1;
if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
return 1;
if (scsw_cc(&irb->scsw) != 0)
return 1;
return 0;
}
static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
int index)
{
struct virtio_ccw_vq_info *info;
unsigned long flags;
struct virtqueue *vq;
vq = NULL;
spin_lock_irqsave(&vcdev->lock, flags);
list_for_each_entry(info, &vcdev->virtqueues, node) {
if (info->vq->index == index) {
vq = info->vq;
break;
}
}
spin_unlock_irqrestore(&vcdev->lock, flags);
return vq;
}
static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
__u32 activity)
{
if (vcdev->curr_io & activity) {
switch (activity) {
case VIRTIO_CCW_DOING_READ_FEAT:
case VIRTIO_CCW_DOING_WRITE_FEAT:
case VIRTIO_CCW_DOING_READ_CONFIG:
case VIRTIO_CCW_DOING_WRITE_CONFIG:
case VIRTIO_CCW_DOING_WRITE_STATUS:
case VIRTIO_CCW_DOING_READ_STATUS:
case VIRTIO_CCW_DOING_SET_VQ:
case VIRTIO_CCW_DOING_SET_IND:
case VIRTIO_CCW_DOING_SET_CONF_IND:
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
default:
/* don't know what to do... */
dev_warn(&vcdev->cdev->dev,
"Suspicious activity '%08x'\n", activity);
WARN_ON(1);
break;
}
}
}
static void virtio_ccw_int_handler(struct ccw_device *cdev,
unsigned long intparm,
struct irb *irb)
{
__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
int i;
struct virtqueue *vq;
if (!vcdev)
return;
if (IS_ERR(irb)) {
vcdev->err = PTR_ERR(irb);
virtio_ccw_check_activity(vcdev, activity);
/* Don't poke around indicators, something's wrong. */
return;
}
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
/* OK */
}
if (irb_is_error(irb)) {
/* Command reject? */
if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_CMD_REJECT))
vcdev->err = -EOPNOTSUPP;
else
/* Map everything else to -EIO. */
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
/*
* Paired with virtio_ccw_synchronize_cbs() and interrupts are
* disabled here.
*/
read_lock(&vcdev->irq_lock);
#endif
for_each_set_bit(i, indicators(vcdev),
sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
clear_bit(i, indicators(vcdev));
barrier();
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
read_unlock(&vcdev->irq_lock);
#endif
if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
clear_bit(0, indicators2(vcdev));
}
}
/*
 * We usually want to auto-online all devices, but give the admin
* a way to exempt devices from this.
*/
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
static char *no_auto = "";
module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
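/*
 * Editorial note: judging by no_auto_parse()/parse_busid() below, the
 * list consists of comma-separated bus ids or bus-id ranges given in
 * cssid.ssid.devno notation, e.g. no_auto="0.0.1234,0.0.4711-0.0.4713"
 * (example values only).
 */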
static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
struct ccw_dev_id id;
ccw_device_get_id(cdev, &id);
if (test_bit(id.devno, devs_no_auto[id.ssid]))
return 0;
return 1;
}
static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
struct ccw_device *cdev = data;
int ret;
ret = ccw_device_set_online(cdev);
if (ret)
dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}
static int virtio_ccw_probe(struct ccw_device *cdev)
{
cdev->handler = virtio_ccw_int_handler;
if (virtio_ccw_check_autoonline(cdev))
async_schedule(virtio_ccw_auto_online, cdev);
return 0;
}
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
unsigned long flags;
struct virtio_ccw_device *vcdev;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
vcdev = dev_get_drvdata(&cdev->dev);
if (!vcdev || vcdev->going_away) {
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return NULL;
}
vcdev->going_away = true;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return vcdev;
}
static void virtio_ccw_remove(struct ccw_device *cdev)
{
unsigned long flags;
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
if (vcdev && cdev->online) {
if (vcdev->device_lost)
virtio_break_device(&vcdev->vdev);
unregister_virtio_device(&vcdev->vdev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
}
cdev->handler = NULL;
}
static int virtio_ccw_offline(struct ccw_device *cdev)
{
unsigned long flags;
struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
if (!vcdev)
return 0;
if (vcdev->device_lost)
virtio_break_device(&vcdev->vdev);
unregister_virtio_device(&vcdev->vdev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return 0;
}
static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
struct virtio_rev_info *rev;
struct ccw1 *ccw;
int ret;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
if (!rev) {
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
}
/* Set transport revision */
ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
ccw->flags = 0;
ccw->count = sizeof(*rev);
ccw->cda = (__u32)virt_to_phys(rev);
vcdev->revision = VIRTIO_CCW_REV_MAX;
do {
rev->revision = vcdev->revision;
/* none of our supported revisions carry payload */
rev->length = 0;
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VIRTIO_REV);
if (ret == -EOPNOTSUPP) {
if (vcdev->revision == 0)
/*
* The host device does not support setting
* the revision: let's operate it in legacy
* mode.
*/
ret = 0;
else
vcdev->revision--;
}
} while (ret == -EOPNOTSUPP);
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
return ret;
}
static int virtio_ccw_online(struct ccw_device *cdev)
{
int ret;
struct virtio_ccw_device *vcdev;
unsigned long flags;
vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
if (!vcdev) {
dev_warn(&cdev->dev, "Could not get memory for virtio\n");
ret = -ENOMEM;
goto out_free;
}
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*vcdev->dma_area));
if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
}
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
rwlock_init(&vcdev->irq_lock);
mutex_init(&vcdev->io_lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
ret = virtio_ccw_set_transport_rev(vcdev);
if (ret)
goto out_free;
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
ret);
goto out_put;
}
return 0;
out_put:
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
put_device(&vcdev->vdev.dev);
return ret;
out_free:
if (vcdev) {
ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
sizeof(*vcdev->dma_area));
}
kfree(vcdev);
return ret;
}
static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
int rc;
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
/*
* Make sure vcdev is set
* i.e. set_offline/remove callback not already running
*/
if (!vcdev)
return NOTIFY_DONE;
switch (event) {
case CIO_GONE:
vcdev->device_lost = true;
rc = NOTIFY_DONE;
break;
case CIO_OPER:
rc = NOTIFY_OK;
break;
default:
rc = NOTIFY_DONE;
break;
}
return rc;
}
static struct ccw_device_id virtio_ids[] = {
{ CCW_DEVICE(0x3832, 0) },
{},
};
static struct ccw_driver virtio_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "virtio_ccw",
},
.ids = virtio_ids,
.probe = virtio_ccw_probe,
.remove = virtio_ccw_remove,
.set_offline = virtio_ccw_offline,
.set_online = virtio_ccw_online,
.notify = virtio_ccw_cio_notify,
.int_class = IRQIO_VIR,
};
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
diff = 0;
*val = 0;
while (diff <= max_digit) {
int value = hex_to_bin(**cp);
if (value < 0)
break;
*val = *val * 16 + value;
(*cp)++;
diff++;
}
if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
return 1;
return 0;
}
static int __init parse_busid(char *str, unsigned int *cssid,
unsigned int *ssid, unsigned int *devno)
{
char *str_work;
int rc, ret;
rc = 1;
if (*str == '\0')
goto out;
str_work = str;
ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
if (ret || (str_work[0] != '\0'))
goto out;
rc = 0;
out:
return rc;
}
static void __init no_auto_parse(void)
{
unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
char *parm, *str;
int rc;
str = no_auto;
while ((parm = strsep(&str, ","))) {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from);
if (rc)
continue;
if (parm != NULL) {
rc = parse_busid(parm, &to_cssid,
&to_ssid, &to);
if ((from_ssid > to_ssid) ||
((from_ssid == to_ssid) && (from > to)))
rc = -EINVAL;
} else {
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
}
if (rc)
continue;
while ((from_ssid < to_ssid) ||
((from_ssid == to_ssid) && (from <= to))) {
set_bit(from, devs_no_auto[from_ssid]);
from++;
if (from > __MAX_SUBCHANNEL) {
from_ssid++;
from = 0;
}
}
}
}
static int __init virtio_ccw_init(void)
{
int rc;
/* parse no_auto string before we do anything further */
no_auto_parse();
summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
if (!summary_indicators)
return -ENOMEM;
rc = ccw_driver_register(&virtio_ccw_driver);
if (rc)
cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
return rc;
}
device_initcall(virtio_ccw_init);
| linux-master | drivers/s390/virtio/virtio_ccw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* sysfs attributes.
*
* Copyright IBM Corp. 2008, 2020
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "zfcp_diag.h"
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
_show, _store)
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct ccw_device *cdev = to_ccwdev(dev); \
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
int i; \
\
if (!adapter) \
return -ENODEV; \
\
i = sprintf(buf, _format, _value); \
zfcp_ccw_adapter_put(adapter); \
return i; \
} \
static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
zfcp_sysfs_adapter_##_name##_show, NULL);
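/*
 * Editorial note: as an example of the macros above, the
 * ZFCP_DEFINE_A_ATTR(status, ...) line below expands into a
 * zfcp_sysfs_adapter_status_show() helper plus a read-only
 * dev_attr_adapter_status device attribute that takes an adapter
 * reference around the sprintf() and drops it again afterwards.
 */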
ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return sprintf(buf, "1\n");
return sprintf(buf, "0\n");
}
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
unsigned long val;
if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
zfcp_erp_wait(port->adapter);
return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_port_failed_show,
zfcp_sysfs_port_failed_store);
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
struct scsi_device *sdev;
unsigned int status, failed = 1;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
status = atomic_read(&sdev_to_zfcp(sdev)->status);
failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
scsi_device_put(sdev);
}
return sprintf(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
unsigned long val;
struct scsi_device *sdev;
if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"syufai2");
zfcp_erp_wait(unit->port->adapter);
} else
zfcp_unit_scsi_scan(unit);
return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_unit_failed_show,
zfcp_sysfs_unit_failed_store);
static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
int i;
if (!adapter)
return -ENODEV;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
i = sprintf(buf, "1\n");
else
i = sprintf(buf, "0\n");
zfcp_ccw_adapter_put(adapter);
return i;
}
static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
unsigned long val;
int retval = 0;
if (!adapter)
return -ENODEV;
if (kstrtoul(buf, 0, &val) || val != 0) {
retval = -EINVAL;
goto out;
}
zfcp_erp_adapter_reset_sync(adapter, "syafai2");
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_adapter_failed_show,
zfcp_sysfs_adapter_failed_store);
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
int retval = 0;
if (!adapter)
return -ENODEV;
/*
* If `scsi_host` is missing, we can't schedule `scan_work`, as it
* makes use of the corresponding fc_host object. But this state is
* only possible if xconfig/xport data has never completed yet,
* and we couldn't successfully scan for ports anyway.
*/
if (adapter->scsi_host == NULL) {
retval = -ENODEV;
goto out;
}
/*
	 * The user's wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither
* a random delay nor a rate limit is applied here.
*/
queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
flush_delayed_work(&adapter->scan_work);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
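/*
 * Editorial note -- a minimal user-space sketch (not part of the driver)
 * showing how the port_rescan attribute above is typically triggered.
 * The sysfs path and bus id are assumptions for illustration.
 */
#if 0	/* illustration only; builds as a stand-alone user-space program */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path: zfcp attributes hang off the ccw device */
	int fd = open("/sys/bus/ccw/drivers/zfcp/0.0.1900/port_rescan",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* any value is accepted; it ends up in zfcp_sysfs_port_rescan_store() */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
#endif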
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
{
lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
atomic_set(&port->units, -1);
}
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
{
lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
return atomic_read(&port->units) == -1;
}
static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
{
struct zfcp_adapter *const adapter = port->adapter;
unsigned long flags;
struct scsi_device *sdev;
bool in_use = true;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (atomic_read(&port->units) > 0)
goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
if (sdev->sdev_state == SDEV_DEL ||
sdev->sdev_state == SDEV_CANCEL)
continue;
if (zsdev->port != port)
continue;
/* alive scsi_device under port of interest */
goto unlock_host_lock;
}
/* port is about to be removed, so no more unit_add or slave_alloc */
zfcp_sysfs_port_set_removing(port);
in_use = false;
unlock_host_lock:
spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
unlock_port_units_mutex:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
return in_use;
}
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
struct zfcp_port *port;
u64 wwpn;
int retval = -EINVAL;
if (!adapter)
return -ENODEV;
if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
goto out;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out;
else
retval = 0;
if (zfcp_sysfs_port_in_use(port)) {
retval = -EBUSY;
put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
goto out;
}
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
zfcp_erp_port_shutdown(port, 0, "syprs_1");
device_unregister(&port->dev);
put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
static ssize_t
zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
ssize_t rc;
if (!adapter)
return -ENODEV;
/* ceil(log(2^64 - 1) / log(10)) = 20 */
rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
zfcp_ccw_adapter_put(adapter);
return rc;
}
static ssize_t
zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
unsigned long max_age;
ssize_t rc;
if (!adapter)
return -ENODEV;
rc = kstrtoul(buf, 10, &max_age);
if (rc != 0)
goto out;
adapter->diagnostics->max_age = max_age;
rc = count;
out:
zfcp_ccw_adapter_put(adapter);
return rc;
}
static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
zfcp_sysfs_adapter_diag_max_age_show,
zfcp_sysfs_adapter_diag_max_age_store);
static ssize_t zfcp_sysfs_adapter_fc_security_show(
struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
unsigned int status;
int i;
if (!adapter)
return -ENODEV;
/*
* Adapter status COMMON_OPEN implies that exchange config data (xconf)
* and exchange port data (xport) have completed. The adapter's FC
* Endpoint Security capability remains unchanged in case of
* COMMON_ERP_FAILED (e.g. due to local link down).
*/
status = atomic_read(&adapter->status);
if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
i = sprintf(buf, "unknown\n");
else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
i = sprintf(buf, "unsupported\n");
else {
i = zfcp_fsf_scnprint_fc_security(
buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
ZFCP_FSF_PRINT_FMT_LIST);
i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
}
zfcp_ccw_adapter_put(adapter);
return i;
}
static ZFCP_DEV_ATTR(adapter, fc_security, S_IRUGO,
zfcp_sysfs_adapter_fc_security_show,
NULL);
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
&dev_attr_adapter_port_remove.attr,
&dev_attr_adapter_port_rescan.attr,
&dev_attr_adapter_peer_wwnn.attr,
&dev_attr_adapter_peer_wwpn.attr,
&dev_attr_adapter_peer_d_id.attr,
&dev_attr_adapter_card_version.attr,
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
&dev_attr_adapter_diag_max_age.attr,
&dev_attr_adapter_fc_security.attr,
NULL
};
static const struct attribute_group zfcp_sysfs_adapter_attr_group = {
.attrs = zfcp_adapter_attrs,
};
static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
int retval;
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_remove(port, fcp_lun))
return -EINVAL;
return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
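/*
* Example usage from user space (the FCP LUN value is hypothetical), run
* inside the port's sysfs directory:
*   echo 0x4010403200000000 > unit_add
*   echo 0x4010403200000000 > unit_remove
*/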
static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_adapter *adapter = port->adapter;
unsigned int status = atomic_read(&port->status);
int i;
if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
i = sprintf(buf, "unknown\n");
else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
i = sprintf(buf, "unsupported\n");
else {
i = zfcp_fsf_scnprint_fc_security(
buf, PAGE_SIZE - 1, port->connection_info,
ZFCP_FSF_PRINT_FMT_SINGLEITEM);
i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
}
return i;
}
static ZFCP_DEV_ATTR(port, fc_security, S_IRUGO,
zfcp_sysfs_port_fc_security_show,
NULL);
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
&dev_attr_port_fc_security.attr,
NULL
};
static struct attribute_group zfcp_port_attr_group = {
.attrs = zfcp_port_attrs,
};
const struct attribute_group *zfcp_port_attr_groups[] = {
&zfcp_port_attr_group,
NULL,
};
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_failed.attr,
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
&dev_attr_unit_access_shared.attr,
&dev_attr_unit_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
.attrs = zfcp_unit_attrs,
};
const struct attribute_group *zfcp_unit_attr_groups[] = {
&zfcp_unit_attr_group,
NULL,
};
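/*
* Generate per-scsi_device <name>_latency attributes: reading reports fabric
* and channel min/max/sum latencies (raw values scaled by the adapter's
* timer ticks and divided by 1000) plus the request counter; writing
* anything resets the statistics.
*/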
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_bh(&lat->lock); \
fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
fmin = lat->_name.fabric.min * adapter->timer_ticks; \
fmax = lat->_name.fabric.max * adapter->timer_ticks; \
csum = lat->_name.channel.sum * adapter->timer_ticks; \
cmin = lat->_name.channel.min * adapter->timer_ticks; \
cmax = lat->_name.channel.max * adapter->timer_ticks; \
cc = lat->_name.counter; \
spin_unlock_bh(&lat->lock); \
\
do_div(fsum, 1000); \
do_div(fmin, 1000); \
do_div(fmax, 1000); \
do_div(csum, 1000); \
do_div(cmin, 1000); \
do_div(cmax, 1000); \
\
return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
lat->_name.fabric.sum = 0; \
lat->_name.fabric.min = 0xFFFFFFFF; \
lat->_name.fabric.max = 0; \
lat->_name.channel.sum = 0; \
lat->_name.channel.min = 0xFFFFFFFF; \
lat->_name.channel.max = 0; \
lat->_name.counter = 0; \
spin_unlock_irqrestore(&lat->lock, flags); \
\
return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
zfcp_sysfs_unit_##_name##_latency_show, \
zfcp_sysfs_unit_##_name##_latency_store);
ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
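/*
* Generate a read-only per-scsi_device attribute; _value may dereference
* the zfcp_sdev looked up from the scsi_device in the generated show
* function.
*/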
#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
(unsigned long long) zfcp_sdev->port->wwpn);
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
return sprintf(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
unsigned long val;
if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"syufai3");
zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);
return count;
}
static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_scsi_zfcp_failed_show,
zfcp_sysfs_scsi_zfcp_failed_store);
ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
static struct attribute *zfcp_sdev_attrs[] = {
&dev_attr_fcp_lun.attr,
&dev_attr_wwpn.attr,
&dev_attr_hba_id.attr,
&dev_attr_read_latency.attr,
&dev_attr_write_latency.attr,
&dev_attr_cmd_latency.attr,
&dev_attr_zfcp_access_denied.attr,
&dev_attr_zfcp_failed.attr,
&dev_attr_zfcp_in_recovery.attr,
&dev_attr_zfcp_status.attr,
NULL
};
static const struct attribute_group zfcp_sysfs_sdev_attr_group = {
.attrs = zfcp_sdev_attrs
};
const struct attribute_group *zfcp_sysfs_sdev_attr_groups[] = {
&zfcp_sysfs_sdev_attr_group,
NULL
};
static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *scsi_host = dev_to_shost(dev);
struct fsf_qtcb_bottom_port *qtcb_port;
struct zfcp_adapter *adapter;
int retval;
adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
return -EOPNOTSUPP;
qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
if (!qtcb_port)
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
if (retval == 0 || retval == -EAGAIN)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
static int zfcp_sysfs_adapter_ex_config(struct device *dev,
struct fsf_statistics_info *stat_inf)
{
struct Scsi_Host *scsi_host = dev_to_shost(dev);
struct fsf_qtcb_bottom_config *qtcb_config;
struct zfcp_adapter *adapter;
int retval;
adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
return -EOPNOTSUPP;
qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
GFP_KERNEL);
if (!qtcb_config)
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
if (retval == 0 || retval == -EAGAIN)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
return retval;
}
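/*
* Generate read-only Scsi_Host attributes that report fields of the FSF
* statistics info fetched via exchange config data.
*/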
#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct fsf_statistics_info stat_info; \
int retval; \
\
retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
if (retval) \
return retval; \
\
return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
(unsigned long long) stat_info.input_req,
(unsigned long long) stat_info.output_req,
(unsigned long long) stat_info.control_req);
ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
(unsigned long long) stat_info.input_mb,
(unsigned long long) stat_info.output_mb);
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *scsi_host = class_to_shost(dev);
struct zfcp_qdio *qdio =
((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
u64 util;
spin_lock_bh(&qdio->stat_lock);
util = qdio->req_q_util;
spin_unlock_bh(&qdio->stat_lock);
return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
(unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
static struct attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization.attr,
&dev_attr_requests.attr,
&dev_attr_megabytes.attr,
&dev_attr_seconds_active.attr,
&dev_attr_queue_full.attr,
NULL
};
static const struct attribute_group zfcp_sysfs_shost_attr_group = {
.attrs = zfcp_sysfs_shost_attrs
};
const struct attribute_group *zfcp_sysfs_shost_attr_groups[] = {
&zfcp_sysfs_shost_attr_group,
NULL
};
static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
struct device *dev, struct device_attribute *attr, char *buf)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
struct zfcp_diag_header *diag_hdr;
struct fc_els_flogi *nsp;
ssize_t rc = -ENOLINK;
unsigned long flags;
unsigned int status;
if (!adapter)
return -ENODEV;
status = atomic_read(&adapter->status);
if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
goto out;
diag_hdr = &adapter->diagnostics->config_data.header;
rc = zfcp_diag_update_buffer_limited(
adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
if (rc != 0)
goto out;
spin_lock_irqsave(&diag_hdr->access_lock, flags);
/* nport_serv_param doesn't contain the ELS_Command code */
nsp = (struct fc_els_flogi *)((unsigned long)
adapter->diagnostics->config_data
.data.nport_serv_param -
sizeof(u32));
rc = scnprintf(buf, 5 + 2, "%hu\n",
be16_to_cpu(nsp->fl_csp.sp_bb_cred));
spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
out:
zfcp_ccw_adapter_put(adapter);
return rc;
}
static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
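/*
* Generate diagnostics/sfp_* attributes: each show refreshes the cached
* exchange port data buffer via zfcp_diag_update_buffer_limited() and
* prints a single QTCB member.
*/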
#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct zfcp_adapter *const adapter = \
zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \
struct zfcp_diag_header *diag_hdr; \
ssize_t rc = -ENOLINK; \
unsigned long flags; \
unsigned int status; \
\
if (!adapter) \
return -ENODEV; \
\
status = atomic_read(&adapter->status); \
if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \
0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \
0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \
goto out; \
\
if (!zfcp_diag_support_sfp(adapter)) { \
rc = -EOPNOTSUPP; \
goto out; \
} \
\
diag_hdr = &adapter->diagnostics->port_data.header; \
\
rc = zfcp_diag_update_buffer_limited( \
adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
if (rc != 0) \
goto out; \
\
spin_lock_irqsave(&diag_hdr->access_lock, flags); \
rc = scnprintf( \
buf, (_prtsize) + 2, _prtfmt "\n", \
adapter->diagnostics->port_data.data._qtcb_member); \
spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
\
out: \
zfcp_ccw_adapter_put(adapter); \
return rc; \
} \
static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
static struct attribute *zfcp_sysfs_diag_attrs[] = {
&dev_attr_adapter_diag_sfp_temperature.attr,
&dev_attr_adapter_diag_sfp_vcc.attr,
&dev_attr_adapter_diag_sfp_tx_bias.attr,
&dev_attr_adapter_diag_sfp_tx_power.attr,
&dev_attr_adapter_diag_sfp_rx_power.attr,
&dev_attr_adapter_diag_sfp_port_tx_type.attr,
&dev_attr_adapter_diag_sfp_optical_port.attr,
&dev_attr_adapter_diag_sfp_sfp_invalid.attr,
&dev_attr_adapter_diag_sfp_connector_type.attr,
&dev_attr_adapter_diag_sfp_fec_active.attr,
&dev_attr_adapter_diag_b2b_credit.attr,
NULL,
};
static const struct attribute_group zfcp_sysfs_diag_attr_group = {
.name = "diagnostics",
.attrs = zfcp_sysfs_diag_attrs,
};
const struct attribute_group *zfcp_sysfs_adapter_attr_groups[] = {
&zfcp_sysfs_adapter_attr_group,
&zfcp_sysfs_diag_attr_group,
NULL,
};
| linux-master | drivers/s390/scsi/zfcp_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Registration and callback for the s390 common I/O layer.
*
* Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
#define ZFCP_MODEL_PRIV 0x4
static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
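/*
* Return the adapter associated with the ccw_device while holding an extra
* reference, or NULL if none is set; release with zfcp_ccw_adapter_put().
*/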
struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
unsigned long flags;
spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
adapter = dev_get_drvdata(&cdev->dev);
if (adapter)
kref_get(&adapter->ref);
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
return adapter;
}
void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
{
unsigned long flags;
spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
kref_put(&adapter->ref, zfcp_adapter_release);
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}
/**
* zfcp_ccw_activate - activate adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
* @clear: Status flags to clear.
* @tag: s390dbf trace record tag
*/
static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_clear_adapter_status(adapter, clear);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
tag);
/*
* We want to scan ports here, with some random backoff and without
* rate limit. Recovery has already scheduled a port scan for us,
* but with both random delay and rate limit. Nevertheless we get
* what we want here by flushing the scheduled work after sleeping
* an equivalent random time.
* Let the port scan random delay elapse first. If recovery finishes
* up to that point in time, that would be perfect for both recovery
* and port scan. If not, i.e. recovery takes ages, there was no
* point in waiting a random delay on top of the time consumed by
* recovery.
*/
msleep(zfcp_fc_port_scan_backoff());
zfcp_erp_wait(adapter);
flush_delayed_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
return 0;
}
static struct ccw_device_id zfcp_ccw_device_id[] = {
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
{},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
/**
* zfcp_ccw_probe - probe function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer for each FCP
* device found on the current system. This is only a stub to make cio
* work: To only allocate adapter resources for devices actually used,
* the allocation is deferred to the first call to ccw_set_online.
*/
static int zfcp_ccw_probe(struct ccw_device *cdev)
{
return 0;
}
/**
* zfcp_ccw_remove - remove function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and removes an adapter
* from the system. It unregisters all units and ports that belong to this
* adapter and frees all of the adapter's resources.
*/
static void zfcp_ccw_remove(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
struct zfcp_unit *unit, *u;
LIST_HEAD(unit_remove_lh);
LIST_HEAD(port_remove_lh);
ccw_device_set_offline(cdev);
adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return;
write_lock_irq(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list) {
write_lock(&port->unit_list_lock);
list_splice_init(&port->unit_list, &unit_remove_lh);
write_unlock(&port->unit_list_lock);
}
list_splice_init(&adapter->port_list, &port_remove_lh);
write_unlock_irq(&adapter->port_list_lock);
zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
device_unregister(&unit->dev);
list_for_each_entry_safe(port, p, &port_remove_lh, list)
device_unregister(&port->dev);
zfcp_adapter_unregister(adapter);
}
/**
* zfcp_ccw_set_online - set_online function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an
* adapter into state online. The first call will allocate all
* adapter resources that will be retained until the device is removed
* via zfcp_ccw_remove.
*
* Setting an fcp device online means that it will be registered with
* the SCSI stack, that the QDIO queues will be set up and that the
* adapter will be opened.
*/
static int zfcp_ccw_set_online(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter) {
adapter = zfcp_adapter_enqueue(cdev);
if (IS_ERR(adapter)) {
dev_err(&cdev->dev,
"Setting up data structures for the "
"FCP adapter failed\n");
return PTR_ERR(adapter);
}
kref_get(&adapter->ref);
}
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_ccw_activate(cdev, 0, "ccsonl1");
/*
* We want to scan ports here, always, with some random delay and
* without rate limit - basically what zfcp_ccw_activate() has
* achieved for us. Not quite! That port scan depended on
* !no_auto_port_rescan. So let's cover the no_auto_port_rescan
* case here to make sure a port scan is done unconditionally.
* Since zfcp_ccw_activate() has waited the desired random time,
* we can immediately schedule and flush a port scan for the
* remaining cases.
*/
zfcp_fc_inverse_conditional_port_scan(adapter);
flush_delayed_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_set_offline - set_offline function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
static int zfcp_ccw_set_offline(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_set_adapter_status(adapter, 0);
zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
zfcp_erp_wait(adapter);
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_notify - ccw notify function
* @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
*
* This function gets called by the common i/o layer if an adapter has gone
* or reappeared.
*/
static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 1;
switch (event) {
case CIO_GONE:
dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
case CIO_NO_PATH:
dev_warn(&cdev->dev,
"The CHPID for the FCP device is offline\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccnoti4");
break;
case CIO_BOXED:
dev_warn(&cdev->dev, "The FCP device did not respond within "
"the specified time\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
break;
}
zfcp_ccw_adapter_put(adapter);
return 1;
}
/**
* zfcp_ccw_shutdown - handle shutdown from cio
* @cdev: device for adapter to shutdown.
*/
static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return;
zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
zfcp_ccw_adapter_put(adapter);
}
struct ccw_driver zfcp_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "zfcp",
},
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
};
| linux-master | drivers/s390/scsi/zfcp_ccw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Tracking of manually configured LUNs and helper functions to
* register the LUNs with the SCSI midlayer.
*
* Copyright IBM Corp. 2010
*/
#include "zfcp_def.h"
#include "zfcp_ext.h"
/**
* zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
* @unit: The zfcp LUN/unit to register
*
* When the SCSI midlayer is not allowed to automatically scan and
* attach SCSI devices, zfcp has to register the single devices with
* the SCSI midlayer.
*/
void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
{
struct fc_rport *rport = unit->port->rport;
u64 lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun,
SCSI_SCAN_MANUAL);
}
static void zfcp_unit_scsi_scan_work(struct work_struct *work)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);
zfcp_unit_scsi_scan(unit);
put_device(&unit->dev);
}
/**
* zfcp_unit_queue_scsi_scan - Register configured units on port
* @port: The zfcp_port where to register units
*
* After opening a port, all units configured on this port have to be
* registered with the SCSI midlayer. This function should be called
* after calling fc_remote_port_add, so that the fc_rport is already
* ONLINE and the call to scsi_scan_target runs the same way as the
* call in the FC transport class.
*/
void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
{
struct zfcp_unit *unit;
read_lock_irq(&port->unit_list_lock);
list_for_each_entry(unit, &port->unit_list, list) {
get_device(&unit->dev);
if (scsi_queue_work(port->adapter->scsi_host,
&unit->scsi_work) <= 0)
put_device(&unit->dev);
}
read_unlock_irq(&port->unit_list_lock);
}
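/*
* Must be called with port->unit_list_lock held; takes a device reference
* on the unit that is returned.
*/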
static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
get_device(&unit->dev);
return unit;
}
return NULL;
}
/**
* zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
* @port: zfcp_port where to look for the unit
* @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
*
* If zfcp_unit is found, a reference is acquired that has to be
* released later.
*
* Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
* with the specified FCP LUN.
*/
struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
read_lock_irq(&port->unit_list_lock);
unit = _zfcp_unit_find(port, fcp_lun);
read_unlock_irq(&port->unit_list_lock);
return unit;
}
/**
* zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
* @dev: pointer to device in zfcp_unit
*/
static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
atomic_dec(&unit->port->units);
kfree(unit);
}
/**
* zfcp_unit_add - add unit to unit list of a port.
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be added
* Returns: 0 success
*
* Sets up some unit internal structures and creates sysfs entry.
*/
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
int retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (zfcp_sysfs_port_is_removing(port)) {
/* port is already gone */
retval = -ENODEV;
goto out;
}
unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
retval = -EEXIST;
goto out;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit) {
retval = -ENOMEM;
goto out;
}
unit->port = port;
unit->fcp_lun = fcp_lun;
unit->dev.parent = &port->dev;
unit->dev.release = zfcp_unit_release;
unit->dev.groups = zfcp_unit_attr_groups;
INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
retval = -ENOMEM;
goto out;
}
if (device_register(&unit->dev)) {
put_device(&unit->dev);
retval = -ENOMEM;
goto out;
}
atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
/*
* lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
* due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
*/
mutex_unlock(&zfcp_sysfs_port_units_mutex);
zfcp_unit_scsi_scan(unit);
return retval;
out:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
return retval;
}
/**
* zfcp_unit_sdev - Return SCSI device for zfcp_unit
* @unit: The zfcp_unit where to get the SCSI device for
*
* Returns: scsi_device pointer on success, NULL if there is no SCSI
* device for this zfcp_unit
*
* On success, the caller also holds a reference to the SCSI device
* that must be released with scsi_device_put.
*/
struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
u64 lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
port = unit->port;
shost = port->adapter->scsi_host;
return scsi_device_lookup(shost, 0, port->starget_id, lun);
}
/**
* zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
* @unit: The unit to lookup the SCSI device for
*
* Returns the zfcp LUN status field of the SCSI device if the SCSI device
* for the zfcp_unit exists, 0 otherwise.
*/
unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
{
unsigned int status = 0;
struct scsi_device *sdev;
struct zfcp_scsi_dev *zfcp_sdev;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
scsi_device_put(sdev);
}
return status;
}
/**
* zfcp_unit_remove - Remove entry from list of configured units
* @port: The port where to remove the unit from the configuration
* @fcp_lun: The 64 bit LUN of the unit to remove
*
* Returns: -EINVAL if a unit with the specified LUN does not exist,
* 0 on success.
*/
int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
struct scsi_device *sdev;
write_lock_irq(&port->unit_list_lock);
unit = _zfcp_unit_find(port, fcp_lun);
if (unit)
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
if (!unit)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
device_unregister(&unit->dev);
put_device(&unit->dev); /* undo _zfcp_unit_find() */
return 0;
}
| linux-master | drivers/s390/scsi/zfcp_unit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Debug traces for zfcp.
*
* Copyright IBM Corp. 2002, 2023
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"
static u32 dbfsize = 4;
module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
"number of pages for each debug feature area (default 4)");
static u32 dbflevel = 3;
module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
"log level for each debug feature area "
"(default 3, range 0..6)");
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
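/*
* Split a payload of arbitrary length into consecutive "pay" trace records,
* all tagged with the same area string and FSF request id.
*/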
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
u64 req_id)
{
struct zfcp_dbf_pay *pl = &dbf->pay_buf;
u16 offset = 0, rec_length;
spin_lock(&dbf->pay_lock);
memset(pl, 0, sizeof(*pl));
pl->fsf_req_id = req_id;
memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
while (offset < length) {
rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
(u16) (length - offset));
memcpy(pl->data, data + offset, rec_length);
debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
offset += rec_length;
pl->counter++;
}
spin_unlock(&dbf->pay_lock);
}
/**
* zfcp_dbf_hba_fsf_res - trace event for fsf responses
* @tag: tag indicating which kind of FSF response has been received
* @level: trace level to be used for event
* @req: request for which a response was received
*/
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
struct fsf_qtcb_header *q_head = &req->qtcb->header;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_RES;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = q_head->fsf_command;
rec->fsf_seq_no = q_pref->req_seq_no;
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
rec->u.res.port_handle = q_head->port_handle;
rec->u.res.lun_handle = q_head->lun_handle;
memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
rec->pl_len = q_head->log_length;
zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
* FC Endpoint Security (FCES)
* @tag: tag indicating which kind of FC Endpoint Security event has occurred
* @req: request for which a response was received
* @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
* @fc_security_old: old FC Endpoint Security of FCP device or connection
* @fc_security_new: new FC Endpoint Security of FCP device or connection
*/
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
u32 fc_security_old, u32 fc_security_new)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
struct fsf_qtcb_header *q_head = &req->qtcb->header;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
static int const level = 3;
unsigned long flags;
if (unlikely(!debug_level_enabled(dbf->hba, level)))
return;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_FCES;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = q_head->fsf_command;
rec->fsf_seq_no = q_pref->req_seq_no;
rec->u.fces.req_issued = req->issued;
rec->u.fces.fsf_status = q_head->fsf_status;
rec->u.fces.port_handle = q_head->port_handle;
rec->u.fces.wwpn = wwpn;
rec->u.fces.fc_security_old = fc_security_old;
rec->u.fces.fc_security_new = fc_security_new;
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_fsf_reqid - trace only the tag and a request ID
* @tag: tag documenting the source
* @level: trace level
* @adapter: adapter instance the request ID belongs to
* @req_id: the request ID to trace
*/
void zfcp_dbf_hba_fsf_reqid(const char *const tag, const int level,
struct zfcp_adapter *const adapter,
const u64 req_id)
{
struct zfcp_dbf *const dbf = adapter->dbf;
struct zfcp_dbf_hba *const rec = &dbf->hba_buf;
struct zfcp_dbf_hba_res *const res = &rec->u.res;
unsigned long flags;
if (unlikely(!debug_level_enabled(dbf->hba, level)))
return;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_RES;
rec->fsf_req_id = req_id;
rec->fsf_req_status = ~0u;
rec->fsf_cmd = ~0u;
rec->fsf_seq_no = ~0u;
res->req_issued = ~0ull;
res->prot_status = ~0u;
memset(res->prot_status_qual, 0xff, sizeof(res->prot_status_qual));
res->fsf_status = ~0u;
memset(res->fsf_status_qual, 0xff, sizeof(res->fsf_status_qual));
res->port_handle = ~0u;
res->lun_handle = ~0u;
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request providing the unsolicited status
*/
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_status_read_buffer *srb = req->data;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
static int const level = 2;
unsigned long flags;
if (unlikely(!debug_level_enabled(dbf->hba, level)))
return;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_USS;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
if (!srb)
goto log;
rec->u.uss.status_type = srb->status_type;
rec->u.uss.status_subtype = srb->status_subtype;
rec->u.uss.d_id = ntoh24(srb->d_id);
rec->u.uss.lun = srb->fcp_lun;
memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
sizeof(rec->u.uss.queue_designator));
/* status read buffer payload length */
rec->pl_len = (!srb->length) ? 0 : srb->length -
offsetof(struct fsf_status_read_buffer, payload);
if (rec->pl_len)
zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
"fsf_uss", req->req_id);
log:
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_bit_err - trace event for bit error conditions
* @tag: tag indicating which kind of bit error unsolicited status was received
* @req: request which caused the bit_error condition
*/
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
struct fsf_status_read_buffer *sr_buf = req->data;
static int const level = 1;
unsigned long flags;
if (unlikely(!debug_level_enabled(dbf->hba, level)))
return;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_BIT;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
memcpy(&rec->u.be, &sr_buf->payload.bit_error,
sizeof(struct fsf_bit_error_payload));
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_def_err - trace event for deferred error messages
* @adapter: pointer to struct zfcp_adapter
* @req_id: request id which caused the deferred error message
* @scount: number of sbals incl. the signaling sbal
* @pl: array of all involved sbals
*/
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
void **pl)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_pay *payload = &dbf->pay_buf;
unsigned long flags;
static int const level = 1;
u16 length;
if (unlikely(!debug_level_enabled(dbf->pay, level)))
return;
if (!pl)
return;
spin_lock_irqsave(&dbf->pay_lock, flags);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, "def_err", 7);
payload->fsf_req_id = req_id;
payload->counter = 0;
length = min((u16)sizeof(struct qdio_buffer),
(u16)ZFCP_DBF_PAY_MAX_REC);
while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
payload->counter++;
}
spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
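/*
* Fill the status fields common to all recovery trace records for the given
* adapter and, if present, port and SCSI device.
*/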
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
rec->adapter_status = atomic_read(&adapter->status);
if (port) {
rec->port_status = atomic_read(&port->status);
rec->wwpn = port->wwpn;
rec->d_id = port->d_id;
}
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
} else
rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
* zfcp_dbf_rec_trig - trace event related to triggered recovery
* @tag: identifier for event
* @adapter: adapter on which the erp_action should run
* @port: remote port involved in the erp_action
* @sdev: scsi device involved in the erp_action
* @want: wanted erp_action
* @need: required erp_action
*
* The adapter->erp_lock has to be held.
*/
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
struct zfcp_port *port, struct scsi_device *sdev,
u8 want, u8 need)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
static int const level = 1;
struct list_head *entry;
unsigned long flags;
lockdep_assert_held(&adapter->erp_lock);
if (unlikely(!debug_level_enabled(dbf->rec, level)))
return;
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = ZFCP_DBF_REC_TRIG;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
zfcp_dbf_set_common(rec, adapter, port, sdev);
list_for_each(entry, &adapter->erp_ready_head)
rec->u.trig.ready++;
list_for_each(entry, &adapter->erp_running_head)
rec->u.trig.running++;
rec->u.trig.want = want;
rec->u.trig.need = need;
debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
* zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
* @tag: identifier for event
* @adapter: adapter on which the erp_action should run
* @port: remote port involved in the erp_action
* @sdev: scsi device involved in the erp_action
* @want: wanted erp_action
* @need: required erp_action
*
* The adapter->erp_lock must not be held.
*/
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
struct zfcp_port *port, struct scsi_device *sdev,
u8 want, u8 need)
{
unsigned long flags;
read_lock_irqsave(&adapter->erp_lock, flags);
zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
read_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
* zfcp_dbf_rec_run_lvl - trace event related to running recovery
* @level: trace level to be used for event
* @tag: identifier for event
* @erp: erp_action running
*/
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
unsigned long flags;
if (!debug_level_enabled(dbf->rec, level))
return;
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = ZFCP_DBF_REC_RUN;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
rec->u.run.fsf_req_id = erp->fsf_req_id;
rec->u.run.rec_status = erp->status;
rec->u.run.rec_step = erp->step;
rec->u.run.rec_action = erp->type;
if (erp->sdev)
rec->u.run.rec_count =
atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
else if (erp->port)
rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
* zfcp_dbf_rec_run - trace event related to running recovery
* @tag: identifier for event
* @erp: erp_action running
*/
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
zfcp_dbf_rec_run_lvl(1, tag, erp);
}
/**
* zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
* @tag: identifier for event
* @wka_port: well known address port
* @req_id: request ID to correlate with potential HBA trace record
*/
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
u64 req_id)
{
struct zfcp_dbf *dbf = wka_port->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
static int const level = 1;
unsigned long flags;
if (unlikely(!debug_level_enabled(dbf->rec, level)))
return;
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = ZFCP_DBF_REC_RUN;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->port_status = wka_port->status;
rec->d_id = wka_port->d_id;
rec->lun = ZFCP_DBF_INVALID_LUN;
rec->u.run.fsf_req_id = req_id;
rec->u.run.rec_status = ~0;
rec->u.run.rec_step = ~0;
rec->u.run.rec_action = ~0;
rec->u.run.rec_count = ~0;
debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
#define ZFCP_DBF_SAN_LEVEL 1
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
char *paytag, struct scatterlist *sg, u8 id, u16 len,
u64 req_id, u32 d_id, u16 cap_len)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
struct zfcp_dbf_pay *payload = &dbf->pay_buf;
u16 pay_sum = 0;
spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->pl_len = len; /* full length even if we cap pay below */
if (!sg)
goto out;
rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
if (len <= rec_len)
goto out; /* skip pay record if full content in rec->payload */
/* if (len > rec_len):
* dump data up to cap_len ignoring small duplicate in rec->payload
*/
spin_lock(&dbf->pay_lock);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
payload->fsf_req_id = req_id;
payload->counter = 0;
for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
u16 pay_len, offset = 0;
while (offset < sg->length && pay_sum < cap_len) {
pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
(u16)(sg->length - offset));
/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
memcpy(payload->data, sg_virt(sg) + offset, pay_len);
debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
zfcp_dbf_plen(pay_len));
payload->counter++;
offset += pay_len;
pay_sum += pay_len;
}
}
spin_unlock(&dbf->pay_lock);
out:
debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
/**
* zfcp_dbf_san_req - trace event for issued SAN request
* @tag: identifier for event
* @fsf: request containing issued CT or ELS data
* @d_id: N_Port_ID where SAN request is sent to
*/
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
return;
length = (u16)zfcp_qdio_real_bytes(ct_els->req);
zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
length, fsf->req_id, d_id, length);
}
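/*
* If the traced response belongs to a GPN_FT nameserver request (detected by
* matching the CT header of the original request), cap the traced length
* after the last port entry to keep the SAN trace small.
*/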
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct zfcp_fsf_req *fsf,
u16 len)
{
struct zfcp_fsf_ct_els *ct_els = fsf->data;
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
if (!(memcmp(tag, "fsscth2", 7) == 0
&& ct_els->d_id == FC_FID_DIR_SERV
&& reqh->ct_rev == FC_CT_REV
&& reqh->ct_in_id[0] == 0
&& reqh->ct_in_id[1] == 0
&& reqh->ct_in_id[2] == 0
&& reqh->ct_fs_type == FC_FST_DIR
&& reqh->ct_fs_subtype == FC_NS_SUBTYPE
&& reqh->ct_options == 0
&& reqh->_ct_resvd1 == 0
&& reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
/* reqh->ct_mr_size can vary so do not match but read below */
&& reqh->_ct_resvd2 == 0
&& reqh->ct_reason == 0
&& reqh->ct_explan == 0
&& reqh->ct_vendor == 0
&& reqn->fn_resvd == 0
&& reqn->fn_domain_id_scope == 0
&& reqn->fn_area_id_scope == 0
&& reqn->fn_fc4_type == FC_TYPE_FCP))
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
/* cap non-accept CT responses, but trace at least the CT header */
resph = (struct fc_ct_hdr *)acc;
if ((ct_els->status) ||
(resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
* response, allowing us to skip special handling for it - just skip it
*/
for (x = 1; x < max_entries && !last; x++) {
if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
acc++;
else
acc = sg_virt(++resp_entry);
last = acc->fp_flags & FC_NS_FID_LAST;
}
len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
return len; /* cap after last entry */
}
/**
* zfcp_dbf_san_res - trace event for received SAN request
* @tag: identifier for event
* @fsf: request containing received CT or ELS data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
return;
length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
length, fsf->req_id, ct_els->d_id,
zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
* @tag: identifier for event
* @fsf: request containing received ELS data
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
struct scatterlist sg;
if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
return;
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
sg_init_one(&sg, srb->payload.data, length);
zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
fsf->req_id, ntoh24(srb->d_id), length);
}
/**
* zfcp_dbf_scsi_common() - Common trace event helper for scsi.
* @tag: Identifier for event.
* @level: trace level of event.
* @sdev: Pointer to SCSI device as context for this event.
* @sc: Pointer to SCSI command, or NULL with task management function (TMF).
* @fsf: Pointer to FSF request, or NULL.
*/
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
struct fcp_resp_with_ext *fcp_rsp;
struct fcp_resp_rsp_info *fcp_rsp_info;
unsigned long flags;
spin_lock_irqsave(&dbf->scsi_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
if (sc) {
rec->scsi_result = sc->result;
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (u64)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
} else {
rec->scsi_result = ~0;
rec->scsi_retries = ~0;
rec->scsi_allowed = ~0;
rec->scsi_id = sdev->id;
rec->scsi_lun = (u32)sdev->lun;
rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
rec->host_scribble = ~0;
memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
}
if (fsf) {
rec->fsf_req_id = fsf->req_id;
rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
/* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
/* complete FCP_RSP IU in associated PAYload record
* but only if there are optional parts
*/
if (fcp_rsp->resp.fr_flags != 0)
zfcp_dbf_pl_write(
dbf, fcp_rsp,
/* at least one full PAY record
* but not beyond hardware response field
*/
min_t(u16, max_t(u16, rec->pl_len,
ZFCP_DBF_PAY_MAX_REC),
FSF_FCP_RSP_SIZE),
"fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
/**
* zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
* @tag: Identifier for event.
* @adapter: Pointer to zfcp adapter as context for this event.
* @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
* @ret: Return value of calling function.
*
* This SCSI trace variant does not depend on any of:
* scsi_cmnd, zfcp_fsf_req, scsi_device.
*/
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
unsigned int scsi_id, int ret)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
unsigned long flags;
static int const level = 1;
if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
return;
spin_lock_irqsave(&dbf->scsi_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
rec->scsi_retries = ~0;
rec->scsi_allowed = ~0;
rec->fcp_rsp_info = ~0;
rec->scsi_id = scsi_id;
rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
rec->host_scribble = ~0;
memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;
d = debug_register(name, size, 1, rec_size);
if (!d)
return NULL;
debug_register_view(d, &debug_hex_ascii_view);
debug_set_level(d, dbflevel);
return d;
}
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
if (!dbf)
return;
debug_unregister(dbf->scsi);
debug_unregister(dbf->san);
debug_unregister(dbf->hba);
debug_unregister(dbf->pay);
debug_unregister(dbf->rec);
kfree(dbf);
}
/**
* zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
char name[DEBUG_MAX_NAME_LEN];
struct zfcp_dbf *dbf;
dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
if (!dbf)
return -ENOMEM;
spin_lock_init(&dbf->pay_lock);
spin_lock_init(&dbf->hba_lock);
spin_lock_init(&dbf->san_lock);
spin_lock_init(&dbf->scsi_lock);
spin_lock_init(&dbf->rec_lock);
/* debug feature area which records recovery activity */
sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
if (!dbf->rec)
goto err_out;
/* debug feature area which records HBA (FSF and QDIO) conditions */
sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
if (!dbf->hba)
goto err_out;
/* debug feature area which records payload info */
sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
if (!dbf->pay)
goto err_out;
/* debug feature area which records SAN command failures and recovery */
sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
if (!dbf->san)
goto err_out;
/* debug feature area which records SCSI command failures and recovery */
sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
if (!dbf->scsi)
goto err_out;
adapter->dbf = dbf;
return 0;
err_out:
zfcp_dbf_unregister(dbf);
return -ENOMEM;
}
/**
* zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
struct zfcp_dbf *dbf = adapter->dbf;
adapter->dbf = NULL;
zfcp_dbf_unregister(dbf);
}
| linux-master | drivers/s390/scsi/zfcp_dbf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Setup and helper functions to access QDIO.
*
* Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
#define ZFCP_QDIO_REQUEST_RESCAN_MSECS (MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS MSEC_PER_SEC
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
return;
}
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}
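/* Clear a possibly wrapping range of SBALs so they can be reused. */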
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
int i, sbal_idx;
for (i = first; i < first + cnt; i++) {
sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
unsigned long long now, span;
int used;
now = get_tod_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
}
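/*
 * Illustrative sketch (not part of zfcp): the time-weighted fill-level
 * accounting done by zfcp_qdio_account() above, written against plain C
 * types. The ">> 12" mirrors the driver's conversion of TOD-clock deltas to
 * an (approximately microsecond) time unit, and 128 stands in for
 * QDIO_MAX_BUFFERS_PER_Q; the struct and function names are hypothetical.
 */
#include <stdint.h>
struct sketch_queue_stats {
	uint64_t last_sample;	/* TOD-clock value of the previous sample */
	uint64_t util;		/* accumulated "used buffers * time units" */
};
/* fold the interval since the last sample into the utilization counter */
static void sketch_account(struct sketch_queue_stats *st, uint64_t now_tod,
			   int free_buffers)
{
	uint64_t span = (now_tod - st->last_sample) >> 12;
	int used = 128 - free_buffers;
	st->util += (uint64_t)used * span;
	st->last_sample = now_tod;
}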
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
struct ccw_device *cdev = qdio->adapter->ccw_device;
unsigned int start, error;
int completed;
completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
if (completed > 0) {
if (error) {
zfcp_qdio_handler_error(qdio, "qdreqt1", error);
} else {
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(qdio->req_q, start, completed);
spin_lock_irq(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock_irq(&qdio->stat_lock);
atomic_add(completed, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}
}
if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
timer_reduce(&qdio->request_timer,
jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
tasklet_schedule(&qdio->request_tasklet);
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
int sbal_no, sbal_idx;
if (unlikely(qdio_err)) {
if (zfcp_adapter_multi_buffer_active(adapter)) {
void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
struct qdio_buffer_element *sbale;
u64 req_id;
u8 scount;
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
req_id = sbale->addr;
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %
QDIO_MAX_BUFFERS_PER_Q;
pl[sbal_no] = qdio->res_q[sbal_idx];
}
zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
}
zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
/*
* go through all SBALs from input queue currently
* returned by QDIO layer
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
* put SBALs back to response queue
*/
if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
struct ccw_device *cdev = qdio->adapter->ccw_device;
unsigned int start, error;
int completed;
if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
tasklet_schedule(&qdio->request_tasklet);
/* Check the Response Queue: */
completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
if (completed < 0)
return;
if (completed > 0)
zfcp_qdio_int_resp(cdev, error, 0, start, completed,
(unsigned long) qdio);
if (qdio_start_irq(cdev))
/* More work pending: */
tasklet_schedule(&qdio->irq_tasklet);
}
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;
tasklet_schedule(&qdio->irq_tasklet);
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
if (q_req->sbal_last == q_req->sbal_limit)
return NULL;
/* set chaining flag in first SBALE of current SBAL */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
q_req->sbal_last++;
q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
	/* keep this request's number of SBALs up-to-date */
q_req->sbal_number++;
BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
/* start at first SBALE of new SBAL */
q_req->sbale_curr = 0;
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->sflags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
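/*
 * Illustrative sketch (not part of zfcp): the ring arithmetic used by
 * zfcp_qdio_sbal_chain()/zfcp_qdio_sbale_next() above when a request grows
 * beyond one SBAL. 128 stands in for QDIO_MAX_BUFFERS_PER_Q; the structure
 * and function names are hypothetical.
 */
struct sketch_req_pos {
	int sbal_last;		/* index of the SBAL currently being filled */
	int sbal_limit;		/* last SBAL index this request may use */
	int sbal_number;	/* number of SBALs used by the request so far */
	int sbale_curr;		/* entry index inside the current SBAL */
};
/* advance to the first entry of the next SBAL; return -1 if out of room */
static int sketch_chain_to_next_sbal(struct sketch_req_pos *pos)
{
	if (pos->sbal_last == pos->sbal_limit)
		return -1;	/* request would exceed its SBAL budget */
	pos->sbal_last = (pos->sbal_last + 1) % 128;
	pos->sbal_number++;
	pos->sbale_curr = 0;
	return pos->sbal_last;
}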
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
atomic_inc(&qdio->req_q_full);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
q_req->sbal_number);
return -EINVAL;
}
sbale->addr = sg_phys(sg);
sbale->length = sg->length;
}
return 0;
}
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
return 0;
}
/**
* zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
* @qdio: pointer to struct zfcp_qdio
*
* The req_q_lock must be held by the caller of this function, and
* this function may only be called from process context; it will
* sleep when waiting for a free sbal.
*
* Returns: 0 on success, -EIO if there is no free sbal after waiting.
*/
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
if (ret > 0)
return 0;
if (!ret) {
atomic_inc(&qdio->req_q_full);
/* assume hanging outbound queue, try queue recovery */
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
return -EIO;
}
/**
* zfcp_qdio_send - send req to QDIO
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
int retval;
u8 sbal_number = q_req->sbal_number;
/*
* This should actually be a spin_lock_bh(stat_lock), to protect against
* Request Queue completion processing in tasklet context.
* But we can't do so (and are safe), as we always get called with IRQs
* disabled by spin_lock_irq[save](req_q_lock).
*/
lockdep_assert_irqs_disabled();
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
atomic_sub(sbal_number, &qdio->req_q_free);
retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
q_req->sbal_first, sbal_number,
NULL);
if (unlikely(retval)) {
/* Failed to submit the IO, roll back our modifications. */
atomic_add(sbal_number, &qdio->req_q_free);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
sbal_number);
return retval;
}
if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
tasklet_schedule(&qdio->request_tasklet);
else
timer_reduce(&qdio->request_timer,
jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));
/* account for transferred buffers */
qdio->req_q_idx += sbal_number;
qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
return 0;
}
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
int ret;
ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
if (ret)
return -ENOMEM;
ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
if (ret)
goto free_req_q;
init_waitqueue_head(&qdio->req_q_wq);
ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
if (ret)
goto free_res_q;
return 0;
free_res_q:
qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
return ret;
}
/**
* zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
struct zfcp_adapter *adapter = qdio->adapter;
int idx, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/*
* Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
* during qdio_shutdown().
*/
spin_lock_irq(&qdio->req_q_lock);
atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);
tasklet_disable(&qdio->irq_tasklet);
tasklet_disable(&qdio->request_tasklet);
del_timer_sync(&qdio->request_timer);
qdio_stop_irq(adapter->ccw_device);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, 0);
}
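/*
 * Illustrative sketch (not part of zfcp): the index math at the end of
 * zfcp_qdio_close() above. With "next_idx" being the next slot the driver
 * would submit from and "free" the current free count, the still-outstanding
 * buffers start right after the free window and span 128 - free slots
 * (128 stands in for QDIO_MAX_BUFFERS_PER_Q). Names are hypothetical.
 */
static void sketch_outstanding_range(int next_idx, int free,
				     int *start, int *count)
{
	*start = (next_idx + free) % 128;
	*count = 128 - free;
}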
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
const struct zfcp_qdio *const qdio)
{
struct Scsi_Host *const shost = adapter->scsi_host;
if (shost == NULL)
return;
shost->sg_tablesize = qdio->max_sbale_per_req;
shost->max_sectors = qdio->max_sbale_per_req * 8;
}
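/*
 * Illustrative sketch (not part of zfcp): the unit conversion behind
 * zfcp_qdio_shost_update() above, assuming each scatter-gather entry (SBALE)
 * covers at most one 4 KiB page while the SCSI midlayer counts transfer
 * limits in 512-byte sectors, hence the factor of 8. The function name and
 * the page-size assumption belong to the sketch, not the driver.
 */
static unsigned int sketch_max_sectors(unsigned int max_sg_entries)
{
	return max_sg_entries * (4096 / 512);
}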
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, otherwise -EIO
*/
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer **input_sbals[1] = {qdio->res_q};
struct qdio_buffer **output_sbals[1] = {qdio->req_q};
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data = {0};
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
int cc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
init_data.q_format = QDIO_ZFCP_QFMT;
init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
if (enable_multibuffer)
init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
init_data.no_input_qs = 1;
init_data.no_output_qs = 1;
init_data.input_handler = zfcp_qdio_int_resp;
init_data.output_handler = zfcp_qdio_int_req;
init_data.irq_poll = zfcp_qdio_poll;
init_data.int_parm = (unsigned long) qdio;
init_data.input_sbal_addr_array = input_sbals;
init_data.output_sbal_addr_array = output_sbals;
if (qdio_establish(cdev, &init_data))
goto failed_establish;
if (qdio_get_ssqd_desc(cdev, &ssqd))
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);
if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
} else {
atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
}
qdio->max_sbale_per_req =
ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
- 2;
if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
sbale->sflags = 0;
sbale->addr = 0;
}
if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
	/* set index of first available SBAL / number of available SBALs */
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
/* Enable processing for Request Queue completions: */
tasklet_enable(&qdio->request_tasklet);
/* Enable processing for QDIO interrupts: */
tasklet_enable(&qdio->irq_tasklet);
/* This results in a qdio_start_irq(): */
tasklet_schedule(&qdio->irq_tasklet);
zfcp_qdio_shost_update(adapter, qdio);
return 0;
failed_qdio:
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&cdev->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
if (!qdio)
return;
tasklet_kill(&qdio->irq_tasklet);
tasklet_kill(&qdio->request_tasklet);
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
kfree(qdio);
}
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
struct zfcp_qdio *qdio;
qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
if (!qdio)
return -ENOMEM;
qdio->adapter = adapter;
if (zfcp_qdio_allocate(qdio)) {
kfree(qdio);
return -ENOMEM;
}
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
tasklet_disable(&qdio->irq_tasklet);
tasklet_disable(&qdio->request_tasklet);
adapter->qdio = qdio;
return 0;
}
/**
* zfcp_qdio_siosl - Trigger logging in FCP channel
* @adapter: The zfcp_adapter where to trigger logging
*
* Call the cio siosl function to trigger hardware logging. This
* wrapper function sets a flag to ensure hardware logging is only
* triggered once before going through qdio shutdown.
*
* The triggers are always run from qdio tasklet context, so no
* additional synchronization is necessary.
*/
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
int rc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
return;
rc = ccw_device_siosl(adapter->ccw_device);
if (!rc)
atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status);
}
| linux-master | drivers/s390/scsi/zfcp_qdio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corp. 2008, 2017
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
struct kmem_cache *zfcp_fc_req_cache;
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
[ELS_ADDR_FMT_DOM] = 0xFF0000,
[ELS_ADDR_FMT_FAB] = 0x000000,
};
static bool no_auto_port_rescan;
module_param(no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
static unsigned int port_scan_backoff = 500;
module_param(port_scan_backoff, uint, 0600);
MODULE_PARM_DESC(port_scan_backoff,
"upper limit of port scan random backoff in msecs (default 500)");
static unsigned int port_scan_ratelimit = 60000;
module_param(port_scan_ratelimit, uint, 0600);
MODULE_PARM_DESC(port_scan_ratelimit,
"minimum interval between port scans in msecs (default 60000)");
unsigned int zfcp_fc_port_scan_backoff(void)
{
if (!port_scan_backoff)
return 0;
return get_random_u32_below(port_scan_backoff);
}
static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
{
unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());
adapter->next_port_scan = jiffies + interval + backoff;
}
static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
{
unsigned long now = jiffies;
unsigned long next = adapter->next_port_scan;
unsigned long delay = 0, max;
/* delay only needed within waiting period */
if (time_before(now, next)) {
delay = next - now;
/* paranoia: never ever delay scans longer than specified */
max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
delay = min(delay, max);
}
queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
}
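/*
 * Illustrative sketch (not part of zfcp): the scheduling decision made by
 * zfcp_fc_port_scan() above, expressed in plain milliseconds instead of
 * jiffies. A scan is delayed only while the rate-limit window opened by the
 * previous scan is still running, and never longer than ratelimit + backoff.
 * The function name and plain-integer interface are assumptions of the
 * sketch.
 */
static unsigned long sketch_scan_delay_ms(unsigned long now_ms,
					  unsigned long next_allowed_ms,
					  unsigned long ratelimit_ms,
					  unsigned long backoff_ms)
{
	unsigned long delay = 0, max = ratelimit_ms + backoff_ms;
	if (now_ms < next_allowed_ms)
		delay = next_allowed_ms - now_ms;
	if (delay > max)	/* paranoia cap, as in the driver */
		delay = max;
	return delay;
}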
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (no_auto_port_rescan)
return;
zfcp_fc_port_scan(adapter);
}
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (!no_auto_port_rescan)
return;
zfcp_fc_port_scan(adapter);
}
/**
* zfcp_fc_post_event - post event to userspace via fc_transport
* @work: work struct with enqueued events
*/
void zfcp_fc_post_event(struct work_struct *work)
{
struct zfcp_fc_event *event = NULL, *tmp = NULL;
LIST_HEAD(tmp_lh);
struct zfcp_fc_events *events = container_of(work,
struct zfcp_fc_events, work);
struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
events);
spin_lock_bh(&events->list_lock);
list_splice_init(&events->list, &tmp_lh);
spin_unlock_bh(&events->list_lock);
list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
event->code, event->data);
list_del(&event->list);
kfree(event);
}
}
/**
* zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
* @adapter: The adapter where to enqueue the event
* @event_code: The event code (as defined in fc_host_event_code in
* scsi_transport_fc.h)
* @event_data: The event data (e.g. n_port page in case of els)
*/
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
enum fc_host_event_code event_code, u32 event_data)
{
struct zfcp_fc_event *event;
event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
if (!event)
return;
event->code = event_code;
event->data = event_data;
spin_lock(&adapter->events.list_lock);
list_add_tail(&event->list, &adapter->events.list);
spin_unlock(&adapter->events.list_lock);
queue_work(adapter->work_queue, &adapter->events.work);
}
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
int ret = -EIO;
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port)) {
/* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
}
wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
ret = 0;
goto out;
}
out:
mutex_unlock(&wka_port->mutex);
return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct zfcp_fc_wka_port *wka_port =
container_of(dw, struct zfcp_fc_wka_port, work);
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
(wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
goto out;
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
/* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
wait_event(wka_port->closed,
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
msecs_to_jiffies(10));
}
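/*
 * Illustrative sketch (not part of zfcp): the get/put discipline used for the
 * well-known-address ports above, modelled without locks or work queues. The
 * last put does not close the port immediately; a deferred close runs later
 * and only closes the port if nobody grabbed it again in the meantime. The
 * types and names are hypothetical.
 */
struct sketch_wka {
	int refcount;
	int open;	/* nonzero while the port is open */
};
static void sketch_wka_get(struct sketch_wka *w)
{
	if (!w->open)
		w->open = 1;	/* open on first use */
	w->refcount++;
}
static void sketch_wka_put(struct sketch_wka *w)
{
	--w->refcount;	/* the deferred close below runs a bit later */
}
/* deferred close: only close if the port is still unused */
static void sketch_wka_deferred_close(struct sketch_wka *w)
{
	if (w->refcount == 0)
		w->open = 0;
}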
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
init_waitqueue_head(&wka_port->opened);
init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
mutex_unlock(&wka->mutex);
}
void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
if (!gs)
return;
zfcp_fc_wka_port_force_offline(&gs->ms);
zfcp_fc_wka_port_force_offline(&gs->ts);
zfcp_fc_wka_port_force_offline(&gs->ds);
zfcp_fc_wka_port_force_offline(&gs->as);
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fc_els_rscn_page *page)
{
unsigned long flags;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
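/*
 * Illustrative sketch (not part of zfcp): how the zfcp_fc_rscn_range_mask[]
 * table at the top of this file selects the ports affected by an RSCN. A
 * port-format RSCN must match all 24 bits of the D_ID, an area-format RSCN
 * only the upper 16, a domain-format RSCN only the upper 8, and a
 * fabric-format RSCN matches every port. The function name is hypothetical.
 */
#include <stdint.h>
static int sketch_rscn_affects_port(uint32_t port_d_id, uint32_t rscn_fid,
				    uint32_t range_mask)
{
	return (port_d_id & range_mask) == (rscn_fid & range_mask);
}
/* e.g. an area-format RSCN uses mask 0xFFFF00, as in the table above */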
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
u16 no_entries;
unsigned int afmt;
head = (struct fc_els_rscn *) status_buffer->payload.data;
page = (struct fc_els_rscn_page *) head;
/* see FC-FS */
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
if (no_entries > 1) {
/* handle failed ports */
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
if (port->d_id)
continue;
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
page);
zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
*(u32 *)page);
}
zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
unsigned long flags;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
break;
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer;
struct fc_els_flogi *plogi;
status_buffer = (struct fsf_status_read_buffer *) req->data;
plogi = (struct fc_els_flogi *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
struct fc_els_logo *logo =
(struct fc_els_logo *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
}
/**
* zfcp_fc_incoming_els - handle incoming ELS
* @fsf_req: request which contains incoming ELS
*/
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *) fsf_req->data;
unsigned int els_type = status_buffer->payload.data[0];
zfcp_dbf_san_in_els("fciels1", fsf_req);
if (els_type == ELS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
else if (els_type == ELS_LOGO)
zfcp_fc_incoming_logo(fsf_req);
else if (els_type == ELS_RSCN)
zfcp_fc_incoming_rscn(fsf_req);
}
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
if (ct_els->status)
return;
if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
return;
/* looks like a valid d_id */
ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
{
complete(data);
}
static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
ct_hdr->ct_rev = FC_CT_REV;
ct_hdr->ct_fs_type = FC_FST_DIR;
ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
ct_hdr->ct_cmd = cpu_to_be16(cmd);
ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
}
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
fc_req->ct_els.port = port;
fc_req->ct_els.handler = zfcp_fc_complete;
fc_req->ct_els.handler_data = &completion;
fc_req->ct_els.req = &fc_req->sg_req;
fc_req->ct_els.resp = &fc_req->sg_rsp;
sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
* zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
if (!fc_req)
return -ENOMEM;
memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
int ret;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
goto out;
}
if (!port->d_id) {
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
goto out;
}
zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
put_device(&port->dev);
}
/**
* zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
* @port: The zfcp_port to lookup the d_id for.
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
put_device(&port->dev);
}
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
* @port: zfcp_port structure
* @plogi: plogi payload
*
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
*/
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
port->d_id = 0;
dev_warn(&port->adapter->ccw_device->dev,
"A port opened with WWPN 0x%016Lx returned data that "
"identifies it as WWPN 0x%016Lx\n",
(unsigned long long) port->wwpn,
(unsigned long long) be64_to_cpu(plogi->fl_wwpn));
return;
}
port->wwnn = be64_to_cpu(plogi->fl_wwnn);
port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);
if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS1;
if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS2;
if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS3;
if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
port->supported_classes |= FC_COS_CLASS4;
}
static void zfcp_fc_adisc_handler(void *data)
{
struct zfcp_fc_req *fc_req = data;
struct zfcp_port *port = fc_req->ct_els.port;
struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
goto out;
}
if (!port->wwnn)
port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);
if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_2");
goto out;
}
/* re-init to undo drop from zfcp_fc_adisc() */
port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
u32 d_id;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
if (!fc_req)
return -ENOMEM;
fc_req->ct_els.port = port;
fc_req->ct_els.req = &fc_req->sg_req;
fc_req->ct_els.resp = &fc_req->sg_rsp;
sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
fc_req->ct_els.handler = zfcp_fc_adisc_handler;
fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
d_id = port->d_id; /* remember as destination for send els below */
/*
* Force fresh GID_PN lookup on next port recovery.
* Must happen after request setup and before sending request,
* to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
*/
port->d_id = 0;
ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
void zfcp_fc_link_test_work(struct work_struct *work)
{
struct zfcp_port *port =
container_of(work, struct zfcp_port, test_link_work);
int retval;
set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
/* only issue one test command at one time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
goto out;
atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
retval = zfcp_fc_adisc(port);
if (retval == 0)
return;
/* send of ADISC was not possible */
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
out:
put_device(&port->dev);
}
/**
* zfcp_fc_test_link - lightweight link test procedure
* @port: port to be tested
*
* Test status of a link to a remote port using the ELS command ADISC.
* If there is a problem with the remote port, error recovery steps
* will be triggered.
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
put_device(&port->dev);
}
/**
* zfcp_fc_sg_free_table - free memory used by scatterlists
* @sg: pointer to scatterlist
 * @count: number of scatterlist entries which are to be freed
 * The scatterlists are expected to always reference pages.
*/
static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
{
int i;
for (i = 0; i < count; i++, sg = sg_next(sg))
if (sg)
free_page((unsigned long) sg_virt(sg));
else
break;
}
/**
* zfcp_fc_sg_setup_table - init scatterlist and allocate, assign buffers
* @sg: pointer to struct scatterlist
* @count: number of scatterlists which should be assigned with buffers
* of size page
*
* Returns: 0 on success, -ENOMEM otherwise
*/
static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
{
void *addr;
int i;
sg_init_table(sg, count);
for (i = 0; i < count; i++, sg = sg_next(sg)) {
addr = (void *) get_zeroed_page(GFP_KERNEL);
if (!addr) {
zfcp_fc_sg_free_table(sg, i);
return -ENOMEM;
}
sg_set_buf(sg, addr, PAGE_SIZE);
}
return 0;
}
static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
if (!fc_req)
return NULL;
if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return NULL;
}
sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
sizeof(struct zfcp_fc_gpn_ft_req));
return fc_req;
}
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
return ret;
}
static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
!list_empty(&port->unit_list))
return;
list_move_tail(&port->list, lh);
}
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
unsigned long flags;
LIST_HEAD(remove_lh);
u32 d_id;
int ret = 0, x, last = 0;
if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
if (hdr->ct_reason == FC_FS_RJT_UNABL)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
if (hdr->ct_mr_size) {
dev_warn(&adapter->ccw_device->dev,
"The name server reported %d words residual data\n",
hdr->ct_mr_size);
return -E2BIG;
}
/* first entry is the header */
for (x = 1; x < max_entries && !last; x++) {
if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
acc++;
else
acc = sg_virt(++sg);
last = acc->fp_flags & FC_NS_FID_LAST;
d_id = ntoh24(acc->fp_fid);
/* don't attach ports with a well known address */
if (d_id >= FC_FID_WELL_KNOWN_BASE)
continue;
/* skip the adapter's port and known remote ports */
if (be64_to_cpu(acc->fp_wwpn) ==
fc_host_port_name(adapter->scsi_host))
continue;
port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
ZFCP_STATUS_COMMON_NOESC, d_id);
if (!IS_ERR(port))
zfcp_erp_port_reopen(port, 0, "fcegpf1");
else if (PTR_ERR(port) != -EEXIST)
ret = PTR_ERR(port);
}
zfcp_erp_wait(adapter);
write_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
zfcp_fc_validate_port(port, &remove_lh);
write_unlock_irqrestore(&adapter->port_list_lock, flags);
list_for_each_entry_safe(port, tmp, &remove_lh, list) {
zfcp_erp_port_shutdown(port, 0, "fcegpf2");
device_unregister(&port->dev);
}
return ret;
}
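/*
 * Illustrative sketch (not part of zfcp): walking a GPN_FT accept payload
 * that is spread over several page-sized buffers, as zfcp_fc_eval_gpn_ft()
 * above does with its response scatterlist. Slot 0 of the first page holds
 * the CT header, so the walk starts at index 1 and stops at the entry
 * flagged as last. The entry layout, the 255 entries per page and all names
 * are assumptions of the sketch.
 */
#include <stdint.h>
struct sketch_gpn_ft_entry {
	uint8_t flags;		/* top bit set on the final entry */
	uint8_t fid[3];		/* 24-bit N_Port ID */
	uint64_t wwpn;
};
#define SKETCH_LAST_FLAG	0x80
#define SKETCH_ENT_PER_PAGE	255
/* visit every entry; returns the number of entries seen */
static int sketch_walk_gpn_ft(struct sketch_gpn_ft_entry *pages[],
			      int max_entries,
			      void (*visit)(const struct sketch_gpn_ft_entry *))
{
	int x, seen = 0;
	for (x = 1; x < max_entries; x++) {
		const struct sketch_gpn_ft_entry *e =
			&pages[x / (SKETCH_ENT_PER_PAGE + 1)]
			      [x % (SKETCH_ENT_PER_PAGE + 1)];
		visit(e);
		seen++;
		if (e->flags & SKETCH_LAST_FLAG)
			break;
	}
	return seen;
}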
/**
* zfcp_fc_scan_ports - scan remote ports and attach new ports
* @work: reference to scheduled work
*/
void zfcp_fc_scan_ports(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
scan_work);
int ret, i;
struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
zfcp_fc_port_scan_time(adapter);
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return;
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
fc_req = zfcp_fc_alloc_sg_env(buf_num);
if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
struct zfcp_fc_req *fc_req)
{
DECLARE_COMPLETION_ONSTACK(completion);
char devno[] = "DEVNO:";
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
int ret;
zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
FC_SYMBOLIC_NAME_SIZE);
hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (ret)
return ret;
wait_for_completion(&completion);
if (ct_els->status)
return ct_els->status;
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
!(strstr(gspn_rsp->gspn.fp_name, devno)))
snprintf(fc_host_symbolic_name(adapter->scsi_host),
FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
gspn_rsp->gspn.fp_name, devno,
dev_name(&adapter->ccw_device->dev),
init_utsname()->nodename);
else
strscpy(fc_host_symbolic_name(adapter->scsi_host),
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
return 0;
}
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
struct zfcp_fc_req *fc_req)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct Scsi_Host *shost = adapter->scsi_host;
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
int ret, len;
zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
FC_SYMBOLIC_NAME_SIZE);
hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
FC_SYMBOLIC_NAME_SIZE);
rspn_req->rspn.fr_name_len = len;
sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
}
/**
* zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
* @work: ns_up_work of the adapter where to update the symbolic port name
*
* Retrieve the current symbolic port name that may have been set by
* the hardware using the GSPN request and update the fc_host
* symbolic_name sysfs attribute. When running in NPIV mode (and hence
* the port name is unique for this system), update the symbolic port
* name to add Linux specific information and update the FC nameserver
* using the RSPN request.
*/
void zfcp_fc_sym_name_update(struct work_struct *work)
{
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
ns_up_work);
int ret;
struct zfcp_fc_req *fc_req;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
if (!fc_req)
return;
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out_free;
ret = zfcp_fc_gspn(adapter, fc_req);
if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
goto out_ds_put;
memset(fc_req, 0, sizeof(*fc_req));
zfcp_fc_rspn(adapter, fc_req);
out_ds_put:
zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
struct fc_bsg_request *bsg_request = job->request;
struct fc_rport *rport = fc_bsg_to_rport(job);
struct Scsi_Host *shost;
preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *) shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
return &adapter->gs->as;
case FC_FST_MGMT:
return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
default:
return NULL;
}
}
static void zfcp_fc_ct_job_handler(void *data)
{
struct bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
zfcp_fc_wka_port_put(wka_port);
zfcp_fc_ct_els_job_handler(data);
}
static int zfcp_fc_exec_els_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_bsg_request *bsg_request = job->request;
struct zfcp_port *port;
u32 d_id;
if (rport) {
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -EINVAL;
d_id = port->d_id;
put_device(&port->dev);
} else
d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->timeout / HZ);
}
static int zfcp_fc_exec_ct_job(struct bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
struct zfcp_fsf_ct_els *ct = job->dd_data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
if (!wka_port)
return -EINVAL;
ret = zfcp_fc_wka_port_get(wka_port);
if (ret)
return ret;
ct->handler = zfcp_fc_ct_job_handler;
ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ);
if (ret)
zfcp_fc_wka_port_put(wka_port);
return ret;
}
int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
struct fc_bsg_request *bsg_request = job->request;
struct fc_rport *rport = fc_bsg_to_rport(job);
shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
return -EINVAL;
ct_els->req = job->request_payload.sg_list;
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
case FC_BSG_RPT_CT:
case FC_BSG_HST_CT:
return zfcp_fc_exec_ct_job(job, adapter);
default:
return -EINVAL;
}
}
int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
}
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
struct zfcp_fc_wka_ports *wka_ports;
wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
if (!wka_ports)
return -ENOMEM;
adapter->gs = wka_ports;
zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
return 0;
}
void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
kfree(adapter->gs);
adapter->gs = NULL;
}
| linux-master | drivers/s390/scsi/zfcp_fc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Functions to handle diagnostics.
*
* Copyright IBM Corp. 2018
*/
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include "zfcp_diag.h"
#include "zfcp_ext.h"
#include "zfcp_def.h"
static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
/**
* zfcp_diag_adapter_setup() - Setup storage for adapter diagnostics.
* @adapter: the adapter to setup diagnostics for.
*
* Creates the data-structures to store the diagnostics for an adapter. This
* overwrites whatever was stored before at &zfcp_adapter->diagnostics!
*
* Return:
 * * 0 - Everything is OK
* * -ENOMEM - Could not allocate all/parts of the data-structures;
* &zfcp_adapter->diagnostics remains unchanged
*/
int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
{
struct zfcp_diag_adapter *diag;
struct zfcp_diag_header *hdr;
diag = kzalloc(sizeof(*diag), GFP_KERNEL);
if (diag == NULL)
return -ENOMEM;
diag->max_age = (5 * 1000); /* default value: 5 s */
/* setup header for port_data */
hdr = &diag->port_data.header;
spin_lock_init(&hdr->access_lock);
hdr->buffer = &diag->port_data.data;
hdr->buffer_size = sizeof(diag->port_data.data);
/* set the timestamp so that the first test on age will always fail */
hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
/* setup header for config_data */
hdr = &diag->config_data.header;
spin_lock_init(&hdr->access_lock);
hdr->buffer = &diag->config_data.data;
hdr->buffer_size = sizeof(diag->config_data.data);
/* set the timestamp so that the first test on age will always fail */
hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
adapter->diagnostics = diag;
return 0;
}
/**
* zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
* @adapter: the adapter whose diagnostic structures should be freed.
*
* Frees all data-structures in the given adapter that store diagnostics
 * information. Can safely be called with partially set up diagnostics.
*/
void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
{
kfree(adapter->diagnostics);
adapter->diagnostics = NULL;
}
/**
* zfcp_diag_update_xdata() - Update a diagnostics buffer.
* @hdr: the meta data to update.
* @data: data to use for the update.
* @incomplete: flag stating whether the data in @data is incomplete.
*/
void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
const void *const data, const bool incomplete)
{
const unsigned long capture_timestamp = jiffies;
unsigned long flags;
spin_lock_irqsave(&hdr->access_lock, flags);
/* make sure we never go into the past with an update */
if (!time_after_eq(capture_timestamp, hdr->timestamp))
goto out;
hdr->timestamp = capture_timestamp;
hdr->incomplete = incomplete;
memcpy(hdr->buffer, data, hdr->buffer_size);
out:
spin_unlock_irqrestore(&hdr->access_lock, flags);
}
/**
* zfcp_diag_update_port_data_buffer() - Implementation of
* &typedef zfcp_diag_update_buffer_func
* to collect and update Port Data.
* @adapter: Adapter to collect Port Data from.
*
* This call is SYNCHRONOUS ! It blocks till the respective command has
* finished completely, or has failed in some way.
*
* Return:
* * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
* this also includes cases where data was retrieved, but
* incomplete; you'll have to check the flag ``incomplete``
* of &struct zfcp_diag_header.
* * see zfcp_fsf_exchange_port_data_sync() for possible error-codes (
* excluding -EAGAIN)
*/
int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
{
int rc;
rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
if (rc == -EAGAIN)
rc = 0; /* signaling incomplete via struct zfcp_diag_header */
/* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
return rc;
}
/**
* zfcp_diag_update_config_data_buffer() - Implementation of
* &typedef zfcp_diag_update_buffer_func
* to collect and update Config Data.
* @adapter: Adapter to collect Config Data from.
*
* This call is SYNCHRONOUS ! It blocks till the respective command has
* finished completely, or has failed in some way.
*
* Return:
* * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
* this also includes cases where data was retrieved, but
* incomplete; you'll have to check the flag ``incomplete``
* of &struct zfcp_diag_header.
* * see zfcp_fsf_exchange_config_data_sync() for possible error-codes (
* excluding -EAGAIN)
*/
int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
{
int rc;
rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
if (rc == -EAGAIN)
rc = 0; /* signaling incomplete via struct zfcp_diag_header */
/* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
return rc;
}
static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
struct zfcp_diag_header *const hdr,
zfcp_diag_update_buffer_func buffer_update,
unsigned long *const flags)
__must_hold(hdr->access_lock)
{
int rc;
if (hdr->updating == 1) {
rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
hdr->updating == 0,
hdr->access_lock);
rc = (rc == 0 ? -EAGAIN : -EINTR);
} else {
hdr->updating = 1;
spin_unlock_irqrestore(&hdr->access_lock, *flags);
/* unlocked, because update function sleeps */
rc = buffer_update(adapter);
spin_lock_irqsave(&hdr->access_lock, *flags);
hdr->updating = 0;
/*
* every thread waiting here went via an interruptible wait,
		 * so it's fine to only wake those
*/
wake_up_interruptible_all(&__zfcp_diag_publish_wait);
}
return rc;
}
static bool
__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
const struct zfcp_diag_header *const hdr)
__must_hold(hdr->access_lock)
{
const unsigned long now = jiffies;
/*
	 * Should not happen (data is from the future); if it does, still
* signal that it needs refresh
*/
if (!time_after_eq(now, hdr->timestamp))
return false;
if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
return false;
return true;
}
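/*
 * Illustrative sketch (not part of zfcp): the freshness test implemented by
 * __zfcp_diag_test_buffer_age_isfresh() above, using plain milliseconds
 * instead of jiffies. A buffer counts as fresh only if its timestamp is not
 * in the future and younger than max_age. Names are hypothetical.
 */
#include <stdbool.h>
static bool sketch_buffer_is_fresh(unsigned long now_ms,
				   unsigned long captured_ms,
				   unsigned long max_age_ms)
{
	if (captured_ms > now_ms)	/* timestamp from the future */
		return false;
	return (now_ms - captured_ms) < max_age_ms;
}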
/**
* zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
* diagnostics buffer rate limited.
* @adapter: Adapter to collect the diagnostics from.
* @hdr: buffer-header for which to update with the collected diagnostics.
* @buffer_update: Specific implementation for collecting and updating.
*
* This function will cause an update of the given @hdr by calling the also
* given @buffer_update function. If called by multiple sources at the same
 * time, it will synchronize the update by only allowing one source to call
* @buffer_update and the others to wait for that source to complete instead
* (the wait is interruptible).
*
* Additionally this version is rate-limited and will only exit if either the
* buffer is fresh enough (within the limit) - it will do nothing if the buffer
 * is fresh enough to begin with - or if the source/thread that started this
* update is the one that made the update (to prevent endless loops).
*
* Return:
* * 0 - If the update was successfully published and/or the buffer is
* fresh enough
* * -EINTR - If the thread went into the wait-state and was interrupted
* * whatever @buffer_update returns
*/
int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
struct zfcp_diag_header *const hdr,
zfcp_diag_update_buffer_func buffer_update)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&hdr->access_lock, flags);
for (rc = 0;
!__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
rc = 0) {
rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
&flags);
if (rc != -EAGAIN)
break;
}
spin_unlock_irqrestore(&hdr->access_lock, flags);
return rc;
}
| linux-master | drivers/s390/scsi/zfcp_diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Module interface and handling of zfcp data structures.
*
* Copyright IBM Corp. 2002, 2020
*/
/*
* Driver authors:
* Martin Peschke (originator of the driver)
* Raimund Schroeder
* Aron Zeh
* Wolfgang Taphorn
* Stefan Bader
* Heiko Carstens (kernel 2.6 port of the driver)
* Andreas Herrmann
* Maxim Shchetynin
* Volker Sameske
* Ralph Wuerthner
* Michael Loehr
* Swen Schillig
* Christof Schmitt
* Martin Petermann
* Sven Schuetz
* Steffen Maier
* Benjamin Block
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
#include "zfcp_diag.h"
#define ZFCP_BUS_ID_SIZE 20
MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - [email protected]");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");
static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
unsigned long size)
{
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
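/*
 * Illustrative sketch (not part of zfcp): the alignment rule applied by
 * zfcp_cache_hw_align() above, which aligns each object to the next power of
 * two of its size. This helper shows that computation in plain C; the
 * function name is hypothetical.
 */
#include <stddef.h>
static size_t sketch_roundup_pow_of_two(size_t n)
{
	size_t p = 1;
	while (p < n)
		p <<= 1;
	return p;
}
/* e.g. a 296-byte object would get 512-byte alignment */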
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct ccw_device *cdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
if (!cdev)
return;
if (ccw_device_set_online(cdev))
goto out_ccw_device;
adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
goto out_ccw_device;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out_port;
flush_work(&port->rport_work);
zfcp_unit_add(port, lun);
put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
put_device(&cdev->dev);
return;
}
static void __init zfcp_init_device_setup(char *devstr)
{
char *token;
char *str, *str_saved;
char busid[ZFCP_BUS_ID_SIZE];
u64 wwpn, lun;
	/* duplicate devstr and keep the original for sysfs presentation */
str_saved = kstrdup(devstr, GFP_KERNEL);
str = str_saved;
if (!str)
return;
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
strscpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
goto err_out;
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
goto err_out;
kfree(str_saved);
zfcp_init_device_configure(busid, wwpn, lun);
return;
err_out:
kfree(str_saved);
pr_err("%s is not a valid SCSI device\n", devstr);
}
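/*
 * Illustrative sketch (not part of zfcp): parsing the "device=" module
 * parameter format handled by zfcp_init_device_setup() above, i.e.
 * "<busid>,<wwpn>,<lun>", with user-space string routines instead of
 * strsep()/kstrtoull(). The function name, buffer sizes and return values
 * are assumptions of the sketch.
 */
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
static int sketch_parse_device(const char *spec, char busid[20],
			       uint64_t *wwpn, uint64_t *lun)
{
	char copy[80], *save = NULL, *tok;
	if (strlen(spec) >= sizeof(copy))
		return -1;
	strcpy(copy, spec);
	tok = strtok_r(copy, ",", &save);
	if (!tok || strlen(tok) >= 20)
		return -1;
	strcpy(busid, tok);
	tok = strtok_r(NULL, ",", &save);
	if (!tok)
		return -1;
	*wwpn = strtoull(tok, NULL, 0);
	tok = strtok_r(NULL, ",", &save);
	if (!tok)
		return -1;
	*lun = strtoull(tok, NULL, 0);
	return 0;
}
/* e.g. sketch_parse_device("0.0.1900,0x5005076300c00000,0x0", ...) */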
static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
if (zfcp_experimental_dix)
pr_warn("DIX is enabled. It is experimental and might cause problems\n");
zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
sizeof(struct fsf_qtcb));
if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
sizeof(struct zfcp_fc_req));
if (!zfcp_fc_req_cache)
goto out_fc_cache;
zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
if (!zfcp_scsi_transport_template)
goto out_transport;
scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
retval = ccw_driver_register(&zfcp_ccw_driver);
if (retval) {
pr_err("The zfcp device driver could not register with "
"the common I/O layer\n");
goto out_ccw_register;
}
if (init_device)
zfcp_init_device_setup(init_device);
return 0;
out_ccw_register:
fc_release_transport(zfcp_scsi_transport_template);
out_transport:
kmem_cache_destroy(zfcp_fc_req_cache);
out_fc_cache:
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
return retval;
}
module_init(zfcp_module_init);
static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
fc_release_transport(zfcp_scsi_transport_template);
kmem_cache_destroy(zfcp_fc_req_cache);
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
/**
* zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
* @adapter: pointer to adapter to search for port
* @wwpn: wwpn to search for
*
* Returns: pointer to zfcp_port or NULL
*/
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
u64 wwpn)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
if (!get_device(&port->dev))
port = NULL;
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return NULL;
}
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
adapter->pool.erp_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.erp_req)
return -ENOMEM;
adapter->pool.gid_pn_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.gid_pn_req)
return -ENOMEM;
adapter->pool.scsi_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_req)
return -ENOMEM;
adapter->pool.scsi_abort =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_abort)
return -ENOMEM;
adapter->pool.status_read_req =
mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
sizeof(struct zfcp_fsf_req));
if (!adapter->pool.status_read_req)
return -ENOMEM;
adapter->pool.qtcb_pool =
mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
adapter->pool.sr_data =
mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
return 0;
}
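/*
 * Allocation failures above are unwound later by zfcp_free_low_mem_buffers()
 * on the adapter release path; mempool_destroy() tolerates NULL, so pools
 * that were never created need no special handling there.
 */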
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
mempool_destroy(adapter->pool.erp_req);
mempool_destroy(adapter->pool.scsi_req);
mempool_destroy(adapter->pool.scsi_abort);
mempool_destroy(adapter->pool.qtcb_pool);
mempool_destroy(adapter->pool.status_read_req);
mempool_destroy(adapter->pool.sr_data);
mempool_destroy(adapter->pool.gid_pn);
}
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
*
* Return:
* * 0 on success meaning at least one status read is pending
* * 1 if posting failed and not a single status read buffer is pending,
* also triggers adapter reopen recovery
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (zfcp_fsf_status_read(adapter->qdio)) {
atomic_inc(&adapter->stat_miss); /* undo add -1 */
if (atomic_read(&adapter->stat_miss) >=
adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
}
return 0;
}
static void _zfcp_status_read_scheduler(struct work_struct *work)
{
zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
stat_work));
}
static void zfcp_version_change_lost_work(struct work_struct *work)
{
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
version_change_lost_work);
zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
}
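/*
 * zfcp_print_sl() is installed as adapter->service_level.seq_print in
 * zfcp_adapter_enqueue() below; the s390 service-level infrastructure calls
 * it to report the adapter's FCP microcode (LIC) level.
 */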
static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
{
struct zfcp_adapter *adapter =
container_of(sl, struct zfcp_adapter, service_level);
seq_printf(m, "zfcp: %s microcode level %x\n",
dev_name(&adapter->ccw_device->dev),
adapter->fsf_lic_version);
}
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
char name[TASK_COMM_LEN];
snprintf(name, sizeof(name), "zfcp_q_%s",
dev_name(&adapter->ccw_device->dev));
adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (adapter->work_queue)
return 0;
return -ENOMEM;
}
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
{
if (adapter->work_queue)
destroy_workqueue(adapter->work_queue);
adapter->work_queue = NULL;
}
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct ccw_device
*
* Returns: struct zfcp_adapter*
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
*/
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
if (!get_device(&ccw_device->dev))
return ERR_PTR(-ENODEV);
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
if (!adapter) {
put_device(&ccw_device->dev);
return ERR_PTR(-ENOMEM);
}
kref_init(&adapter->ref);
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
INIT_WORK(&adapter->version_change_lost_work,
zfcp_version_change_lost_work);
adapter->next_port_scan = jiffies;
adapter->erp_action.adapter = adapter;
if (zfcp_diag_adapter_setup(adapter))
goto failed;
if (zfcp_qdio_setup(adapter))
goto failed;
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed;
adapter->req_list = zfcp_reqlist_alloc();
if (!adapter->req_list)
goto failed;
if (zfcp_dbf_adapter_register(adapter))
goto failed;
if (zfcp_setup_adapter_work_queue(adapter))
goto failed;
if (zfcp_fc_gs_setup(adapter))
goto failed;
rwlock_init(&adapter->port_list_lock);
INIT_LIST_HEAD(&adapter->port_list);
INIT_LIST_HEAD(&adapter->events.list);
INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
spin_lock_init(&adapter->events.list_lock);
init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
if (zfcp_erp_thread_setup(adapter))
goto failed;
adapter->service_level.seq_print = zfcp_print_sl;
dev_set_drvdata(&ccw_device->dev, adapter);
if (device_add_groups(&ccw_device->dev, zfcp_sysfs_adapter_attr_groups))
goto err_sysfs;
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
return adapter;
err_sysfs:
failed:
/* TODO: make this more fine-granular */
cancel_delayed_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_work_sync(&adapter->ns_up_work);
cancel_work_sync(&adapter->version_change_lost_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
zfcp_erp_thread_kill(adapter);
zfcp_dbf_adapter_unregister(adapter);
zfcp_qdio_destroy(adapter->qdio);
zfcp_ccw_adapter_put(adapter); /* final put to release */
return ERR_PTR(-ENOMEM);
}
void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
{
struct ccw_device *cdev = adapter->ccw_device;
cancel_delayed_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_work_sync(&adapter->ns_up_work);
cancel_work_sync(&adapter->version_change_lost_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
device_remove_groups(&cdev->dev, zfcp_sysfs_adapter_attr_groups);
zfcp_erp_thread_kill(adapter);
zfcp_dbf_adapter_unregister(adapter);
zfcp_qdio_destroy(adapter->qdio);
zfcp_ccw_adapter_put(adapter); /* final put to release */
}
/**
* zfcp_adapter_release - remove the adapter from the resource list
* @ref: pointer to struct kref
* locks: adapter list write lock is assumed to be held by caller
*/
void zfcp_adapter_release(struct kref *ref)
{
struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
ref);
struct ccw_device *cdev = adapter->ccw_device;
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
zfcp_diag_adapter_free(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter);
put_device(&cdev->dev);
}
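/*
 * zfcp_port_release() is the struct device release callback installed by
 * zfcp_port_enqueue(); once the last reference to port->dev is dropped it
 * returns the adapter reference taken at enqueue time and frees the port.
 */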
static void zfcp_port_release(struct device *dev)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
zfcp_ccw_adapter_put(port->adapter);
kfree(port);
}
/**
* zfcp_port_enqueue - enqueue port to port list of adapter
* @adapter: adapter where remote port is added
* @wwpn: WWPN of the remote port to be enqueued
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
* Service for nameserver lookup.
*/
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval = -ENOMEM;
kref_get(&adapter->ref);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
goto err_out;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
atomic_set(&port->units, 0);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
port->adapter = adapter;
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
port->dev.parent = &adapter->ccw_device->dev;
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
port->erp_action.adapter = adapter;
port->erp_action.port = port;
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
if (device_register(&port->dev)) {
put_device(&port->dev);
goto err_out;
}
write_lock_irq(&adapter->port_list_lock);
list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock);
atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
return port;
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
}
| linux-master | drivers/s390/scsi/zfcp_aux.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
bool zfcp_experimental_dix;
module_param_named(dix, zfcp_experimental_dix, bool, 0400);
MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");
static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
/* if previous slave_alloc returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_change_queue_depth(sdp, default_depth);
return 0;
}
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
zfcp_dbf_scsi_fail_send(scpnt);
scsi_done(scpnt);
}
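/*
 * Per SCSI midlayer convention, zfcp_scsi_queuecommand() returns 0 when the
 * command was accepted or already completed via scsi_done(), and one of the
 * SCSI_MLQUEUE_* codes when the midlayer should retry queueing it later.
 */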
static
int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
int status, scsi_result, ret;
/* reset the status for this request */
scpnt->result = 0;
scpnt->host_scribble = NULL;
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_fail_send(scpnt);
scsi_done(scpnt);
return 0;
}
status = atomic_read(&zfcp_sdev->status);
if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
!(atomic_read(&zfcp_sdev->port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED)) {
/* only the LUN access is denied while the port itself is fine;
 * this is not covered by the FC transport, so we have to fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
return 0;
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
/* A call to rport_delete may still be pending: mimic the retry
 * behaviour of fc_remote_port_chkready() until the rport is
 * BLOCKED
 */
zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
return 0;
}
ret = zfcp_fsf_fcp_cmnd(scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
return SCSI_MLQUEUE_HOST_BUSY;
return ret;
}
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port;
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
zfcp_sdev->erp_action.adapter = adapter;
zfcp_sdev->erp_action.sdev = sdev;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
zfcp_sdev->erp_action.port = port;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (zfcp_sysfs_port_is_removing(port)) {
/* port is already gone */
mutex_unlock(&zfcp_sysfs_port_units_mutex);
put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
return -ENXIO;
}
mutex_unlock(&zfcp_sysfs_port_units_mutex);
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
if (!unit && !(allow_lun_scan && npiv)) {
put_device(&port->dev);
return -ENXIO;
}
zfcp_sdev->port = port;
zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
spin_lock_init(&zfcp_sdev->latencies.lock);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
zfcp_erp_wait(port->adapter);
return 0;
}
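/*
 * The abort handler retries issuing the abort FSF request up to three times;
 * between attempts it waits for error recovery to finish and gives up early
 * if the FC transport blocks the command or the adapter stopped running.
 */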
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
struct Scsi_Host *scsi_host = scpnt->device->host;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
u64 old_reqid = (u64) scpnt->host_scribble;
int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
while (retry--) {
abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
if (abrt_req)
break;
zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
return SUCCESS;
}
}
if (!abrt_req) {
zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
return FAILED;
}
wait_for_completion(&abrt_req->completion);
if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
dbf_tag = "abrt_ok";
else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
dbf_tag = "abrt_nn";
else {
dbf_tag = "abrt_fa";
retval = FAILED;
}
zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
zfcp_fsf_req_free(abrt_req);
return retval;
}
struct zfcp_scsi_req_filter {
u8 tmf_scope;
u32 lun_handle;
u32 port_handle;
};
static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
{
struct zfcp_scsi_req_filter *filter =
(struct zfcp_scsi_req_filter *)data;
/* already aborted - prevent side-effects - or not a SCSI command */
if (old_req->data == NULL ||
zfcp_fsf_req_is_status_read_buffer(old_req) ||
old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
return;
/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
if (old_req->qtcb->header.port_handle != filter->port_handle)
return;
if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
old_req->qtcb->header.lun_handle != filter->lun_handle)
return;
zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
old_req->data = NULL;
}
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
struct zfcp_adapter *adapter = zsdev->port->adapter;
struct zfcp_scsi_req_filter filter = {
.tmf_scope = FCP_TMF_TGT_RESET,
.port_handle = zsdev->port->handle,
};
unsigned long flags;
if (tm_flags == FCP_TMF_LUN_RESET) {
filter.tmf_scope = FCP_TMF_LUN_RESET;
filter.lun_handle = zsdev->lun_handle;
}
/*
 * abort_lock protects (struct zfcp_fsf_req *)->data against concurrent
 * access from the abort handler and the normal command handler
 */
write_lock_irqsave(&adapter->abort_lock, flags);
zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
&filter);
write_unlock_irqrestore(&adapter->abort_lock, flags);
}
/**
* zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
* @sdev: Pointer to SCSI device to send the task management command to.
* @tm_flags: Task management flags,
* here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
*/
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
if (fsf_req)
break;
zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_rport(rport);
if (ret) {
zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
return SUCCESS;
}
}
if (!fsf_req) {
zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
return FAILED;
}
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
retval = FAILED;
} else {
zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
zfcp_fsf_req_free(fsf_req);
return retval;
}
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
struct scsi_device *sdev = scpnt->device;
return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
struct scsi_target *starget = scsi_target(scpnt->device);
struct fc_rport *rport = starget_to_rport(starget);
struct Scsi_Host *shost = rport_to_shost(rport);
struct scsi_device *sdev = NULL, *tmp_sdev;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int ret;
shost_for_each_device(tmp_sdev, shost) {
if (tmp_sdev->id == starget->id) {
sdev = tmp_sdev;
break;
}
}
if (!sdev) {
ret = FAILED;
zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
return ret;
}
ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);
/* release reference from above shost_for_each_device */
if (sdev)
scsi_device_put(tmp_sdev);
return ret;
}
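/*
 * Host reset: without NPIV all remote ports get a forced reopen first (see
 * the note at zfcp_scsi_terminate_rport_io() regarding cached PLOGI data),
 * then the adapter is reopened; a non-zero result from fc_block_scsi_eh()
 * overrides the default SUCCESS verdict.
 */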
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
zfcp_erp_wait(adapter);
}
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
if (fc_ret)
ret = fc_ret;
zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
return ret;
}
/**
* zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
* @shost: Pointer to Scsi_Host to perform action on.
* @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
*
* Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
*
* This is similar to zfcp_sysfs_adapter_failed_store().
*/
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int ret = 0;
if (reset_type != SCSI_ADAPTER_RESET) {
ret = -EOPNOTSUPP;
zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
return ret;
}
zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
return ret;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
static const struct scsi_host_template zfcp_scsi_host_template = {
.module = THIS_MODULE,
.name = "zfcp",
.queuecommand = zfcp_scsi_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.change_queue_depth = scsi_change_queue_depth,
.host_reset = zfcp_scsi_sysfs_host_reset,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
.sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
/* GCD, adjusted later */
.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
/* report size limit per scatter-gather segment */
.max_segment_size = ZFCP_QDIO_SBALE_LEN,
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_groups = zfcp_sysfs_shost_attr_groups,
.sdev_groups = zfcp_sysfs_sdev_attr_groups,
.track_queue_depth = 1,
.supported_mode = MODE_INITIATOR,
};
/**
* zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
* SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
*
* Allocates the SCSI host object for the given adapter, sets basic properties
* (such as the transport template, QDIO limits, ...), and registers it with
* the midlayer.
*
* During registration with the midlayer the corresponding FC host object for
* the referenced transport class is also implicitly allocated.
*
* Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
* adapter->scsi_host is already set, nothing is done.
*
* Return:
* * 0 - Allocation and registration was successful
* * -EEXIST - SCSI and FC host already existed; nothing was done or changed
* * -EIO - Allocation or registration failed
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
return -EEXIST;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host)
goto err_out;
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
adapter->scsi_host->max_lun = 0xFFFFFFFF;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
/* make all basic properties known at registration time */
zfcp_qdio_shost_update(adapter, adapter->qdio);
zfcp_scsi_set_prot(adapter);
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
goto err_out;
}
return 0;
err_out:
adapter->scsi_host = NULL;
dev_err(&adapter->ccw_device->dev,
"Registering the FCP device with the SCSI stack failed\n");
return -EIO;
}
/**
* zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
* @adapter: The zfcp adapter to unregister.
*/
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
shost = adapter->scsi_host;
if (!shost)
return;
read_lock_irq(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
port->rport = NULL;
read_unlock_irq(&adapter->port_list_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
}
static struct fc_host_statistics*
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
struct fc_host_statistics *fc_stats;
if (!adapter->fc_stats) {
fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return NULL;
adapter->fc_stats = fc_stats; /* freed in adapter_release */
}
memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
return adapter->fc_stats;
}
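/*
 * If statistics were reset through the FC transport sysfs interface,
 * zfcp_scsi_get_fc_host_stats() reports deltas against the snapshot kept in
 * adapter->stats_reset_data (computed below); otherwise the raw adapter
 * counters are copied as-is by zfcp_scsi_set_fc_host_stats().
 */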
static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data,
struct fsf_qtcb_bottom_port *old)
{
fc_stats->seconds_since_last_reset =
data->seconds_since_last_reset - old->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames - old->tx_frames;
fc_stats->tx_words = data->tx_words - old->tx_words;
fc_stats->rx_frames = data->rx_frames - old->rx_frames;
fc_stats->rx_words = data->rx_words - old->rx_words;
fc_stats->lip_count = data->lip - old->lip;
fc_stats->nos_count = data->nos - old->nos;
fc_stats->error_frames = data->error_frames - old->error_frames;
fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
fc_stats->link_failure_count = data->link_failure - old->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
fc_stats->loss_of_signal_count =
data->loss_of_signal - old->loss_of_signal;
fc_stats->prim_seq_protocol_err_count =
data->psp_error_counts - old->psp_error_counts;
fc_stats->invalid_tx_word_count =
data->invalid_tx_words - old->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
fc_stats->fcp_input_requests =
data->input_requests - old->input_requests;
fc_stats->fcp_output_requests =
data->output_requests - old->output_requests;
fc_stats->fcp_control_requests =
data->control_requests - old->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}
static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data)
{
fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames;
fc_stats->tx_words = data->tx_words;
fc_stats->rx_frames = data->rx_frames;
fc_stats->rx_words = data->rx_words;
fc_stats->lip_count = data->lip;
fc_stats->nos_count = data->nos;
fc_stats->error_frames = data->error_frames;
fc_stats->dumped_frames = data->dumped_frames;
fc_stats->link_failure_count = data->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync;
fc_stats->loss_of_signal_count = data->loss_of_signal;
fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
fc_stats->invalid_tx_word_count = data->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs;
fc_stats->fcp_input_requests = data->input_requests;
fc_stats->fcp_output_requests = data->output_requests;
fc_stats->fcp_control_requests = data->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb;
}
static struct fc_host_statistics *
zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
{
struct zfcp_adapter *adapter;
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)host->hostdata[0];
fc_stats = zfcp_scsi_init_fc_host_stats(adapter);
if (!fc_stats)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret != 0 && ret != -EAGAIN) {
kfree(data);
return NULL;
}
if (adapter->stats_reset &&
((jiffies/HZ - adapter->stats_reset) <
data->seconds_since_last_reset))
zfcp_scsi_adjust_fc_host_stats(fc_stats, data,
adapter->stats_reset_data);
else
zfcp_scsi_set_fc_host_stats(fc_stats, data);
kfree(data);
return fc_stats;
}
static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret != 0 && ret != -EAGAIN)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
kfree(adapter->stats_reset_data);
adapter->stats_reset_data = data; /* finally freed in
adapter_release */
}
}
static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int status = atomic_read(&adapter->status);
if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
!(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
else
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}
static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport,
u32 timeout)
{
rport->dev_loss_tmo = timeout;
}
/**
* zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
* @rport: The FC rport on which to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
* port. Using a reopen avoids a conflict with a shutdown
* overwriting a reopen. The "forced" ensures that a disappeared port
* is not opened again as valid due to the cached plogi data in
* non-NPIV mode.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port;
struct Scsi_Host *shost = rport_to_shost(rport);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
} else {
zfcp_erp_port_forced_no_port_dbf(
"sctrpin", adapter,
rport->port_name /* zfcp_scsi_rport_register */,
rport->port_id /* zfcp_scsi_rport_register */);
}
}
static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
struct fc_rport *rport;
if (port->rport)
return;
ids.node_name = port->wwnn;
ids.port_name = port->wwpn;
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
"Registering port 0x%016Lx failed\n",
(unsigned long long)port->wwpn);
return;
}
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
port->starget_id = rport->scsi_target_id;
zfcp_unit_queue_scsi_scan(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
struct fc_rport *rport = port->rport;
if (rport) {
zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
}
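/*
 * The rport helpers below pin the port with get_device() before queueing
 * rport_work; if the work item could not be queued because it is already
 * pending, the reference is dropped immediately, otherwise the worker drops
 * it at the end of zfcp_scsi_rport_work().
 */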
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
zfcp_scsi_schedule_rport_block(port);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_scsi_rport_work(struct work_struct *work)
{
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
set_worker_desc("zrp%c-%16llx",
(port->rport_task == RPORT_ADD) ? 'a' : 'd',
port->wwpn); /* < WORKER_DESC_LEN=24 */
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_register(port);
} else {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_block(port);
}
}
put_device(&port->dev);
}
/**
* zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
* @adapter: The adapter where to configure DIF/DIX for the SCSI host
*/
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
unsigned int mask = 0;
unsigned int data_div;
struct Scsi_Host *shost = adapter->scsi_host;
data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
if ((enable_dif || zfcp_experimental_dix) &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;
if (zfcp_experimental_dix && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->max_sectors = shost->sg_tablesize * 8;
}
scsi_host_set_prot(shost, mask);
}
/**
* zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
* @scmd: The SCSI command to report the error for
* @ascq: The ASCQ to put in the sense buffer
*
* See the error handling in sd_done for the sense codes used here.
* Set DID_SOFT_ERROR to retry the request, if possible.
*/
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
scsi_build_sense(scmd, 1, ILLEGAL_REQUEST, 0x10, ascq);
set_host_byte(scmd, DID_SOFT_ERROR);
}
void zfcp_scsi_shost_update_config_data(
struct zfcp_adapter *const adapter,
const struct fsf_qtcb_bottom_config *const bottom,
const bool bottom_incomplete)
{
struct Scsi_Host *const shost = adapter->scsi_host;
const struct fc_els_flogi *nsp, *plogi;
if (shost == NULL)
return;
snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
"0x%08x", bottom->lic_version);
if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
snprintf(fc_host_hardware_version(shost),
FC_VERSION_STRING_SIZE,
"0x%08x", bottom->hardware_version);
memcpy(fc_host_serial_number(shost), bottom->serial_number,
min(FC_SERIAL_NUMBER_SIZE, 17));
EBCASC(fc_host_serial_number(shost),
min(FC_SERIAL_NUMBER_SIZE, 17));
}
/* adjust pointers for missing command code */
nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
- sizeof(u32));
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
"IBM");
fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
zfcp_scsi_set_prot(adapter);
/* do not evaluate invalid fields */
if (bottom_incomplete)
return;
fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) =
zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
else
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
fallthrough;
default:
fc_host_fabric_name(shost) = 0;
break;
}
}
void zfcp_scsi_shost_update_port_data(
struct zfcp_adapter *const adapter,
const struct fsf_qtcb_bottom_port *const bottom)
{
struct Scsi_Host *const shost = adapter->scsi_host;
if (shost == NULL)
return;
fc_host_permanent_port_name(shost) = bottom->wwpn;
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) =
zfcp_fsf_convert_portspeed(bottom->supported_speed);
memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
FC_FC4_LIST_SIZE);
}
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
.show_starget_node_name = 1,
.show_rport_supported_classes = 1,
.show_rport_maxframe_size = 1,
.show_rport_dev_loss_tmo = 1,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_permanent_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.show_host_manufacturer = 1,
.show_host_model = 1,
.show_host_hardware_version = 1,
.show_host_firmware_version = 1,
.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_scsi_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.bsg_request = zfcp_fc_exec_bsg_job,
.bsg_timeout = zfcp_fc_timeout_bsg_job,
/* no functions are registered for the following dynamic attributes;
 * they are set directly by the LLDD */
.show_host_port_type = 1,
.show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.show_host_fabric_name = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
| linux-master | drivers/s390/scsi/zfcp_scsi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Implementation of FSF commands.
*
* Copyright IBM Corp. 2002, 2023
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
#include "zfcp_diag.h"
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
struct kmem_cache *zfcp_fsf_qtcb_cache;
static bool ber_stop = true;
module_param(ber_stop, bool, 0600);
MODULE_PARM_DESC(ber_stop,
"Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
struct zfcp_adapter *adapter = fsf_req->adapter;
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsrth_1");
}
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
unsigned long timeout)
{
fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
fsf_req->timer.expires = jiffies + timeout;
add_timer(&fsf_req->timer);
}
static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
BUG_ON(!fsf_req->erp_action);
fsf_req->timer.function = zfcp_erp_timeout_handler;
fsf_req->timer.expires = jiffies + 30 * HZ;
add_timer(&fsf_req->timer);
}
/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
[FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
[FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
[FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};
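/* This mapping is consulted when the QTCB prefix of an outgoing FSF request
 * is filled in, so that each command carries its matching QTCB type.
 */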
static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
dev_err(&req->adapter->ccw_device->dev, "FCP device not "
"operational because of an unsupported FC class\n");
zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
/**
* zfcp_fsf_req_free - free memory used by fsf request
* @req: pointer to struct zfcp_fsf_req
*/
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
if (likely(req->pool)) {
if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
mempool_free(req, req->pool);
return;
}
if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
unsigned long flags;
struct fsf_status_read_buffer *sr_buf = req->data;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
int d_id = ntoh24(sr_buf->d_id);
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->d_id == d_id) {
zfcp_erp_port_reopen(port, 0, "fssrpc1");
break;
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost = adapter->scsi_host;
adapter->hydra_version = 0;
adapter->peer_wwpn = 0;
adapter->peer_wwnn = 0;
adapter->peer_d_id = 0;
/* if there is no shost yet, we have nothing to zero-out */
if (shost == NULL)
return;
fc_host_port_id(shost) = 0;
fc_host_fabric_name(shost) = 0;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
struct fsf_link_down_info *link_down)
{
struct zfcp_adapter *adapter = req->adapter;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return;
atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
zfcp_scsi_schedule_rports_block(adapter);
zfcp_fsf_fc_host_link_down(adapter);
if (!link_down)
goto out;
switch (link_down->error_code) {
case FSF_PSQ_LINK_NO_LIGHT:
dev_warn(&req->adapter->ccw_device->dev,
"There is no light signal from the local "
"fibre channel cable\n");
break;
case FSF_PSQ_LINK_WRAP_PLUG:
dev_warn(&req->adapter->ccw_device->dev,
"There is a wrap plug instead of a fibre "
"channel cable\n");
break;
case FSF_PSQ_LINK_NO_FCP:
dev_warn(&req->adapter->ccw_device->dev,
"The adjacent fibre channel node does not "
"support FCP\n");
break;
case FSF_PSQ_LINK_FIRMWARE_UPDATE:
dev_warn(&req->adapter->ccw_device->dev,
"The FCP device is suspended because of a "
"firmware update\n");
break;
case FSF_PSQ_LINK_INVALID_WWPN:
dev_warn(&req->adapter->ccw_device->dev,
"The FCP device detected a WWPN that is "
"duplicate or not valid\n");
break;
case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
dev_warn(&req->adapter->ccw_device->dev,
"The fibre channel fabric does not support NPIV\n");
break;
case FSF_PSQ_LINK_NO_FCP_RESOURCES:
dev_warn(&req->adapter->ccw_device->dev,
"The FCP adapter cannot support more NPIV ports\n");
break;
case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
dev_warn(&req->adapter->ccw_device->dev,
"The adjacent switch cannot support "
"more NPIV ports\n");
break;
case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
dev_warn(&req->adapter->ccw_device->dev,
"The FCP adapter could not log in to the "
"fibre channel fabric\n");
break;
case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
dev_warn(&req->adapter->ccw_device->dev,
"The WWPN assignment file on the FCP adapter "
"has been damaged\n");
break;
case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
dev_warn(&req->adapter->ccw_device->dev,
"The mode table on the FCP adapter "
"has been damaged\n");
break;
case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
dev_warn(&req->adapter->ccw_device->dev,
"All NPIV ports on the FCP adapter have "
"been assigned\n");
break;
default:
dev_warn(&req->adapter->ccw_device->dev,
"The link between the FCP adapter and "
"the FC fabric is down\n");
}
out:
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *sr_buf = req->data;
struct fsf_link_down_info *ldi =
(struct fsf_link_down_info *) &sr_buf->payload;
switch (sr_buf->status_subtype) {
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
case FSF_STATUS_READ_SUB_FDISC_FAILED:
zfcp_fsf_link_down_info_eval(req, ldi);
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
zfcp_fsf_link_down_info_eval(req, NULL);
}
}
static void
zfcp_fsf_status_read_version_change(struct zfcp_adapter *adapter,
struct fsf_status_read_buffer *sr_buf)
{
if (sr_buf->status_subtype == FSF_STATUS_READ_SUB_LIC_CHANGE) {
u32 version = sr_buf->payload.version_change.current_version;
WRITE_ONCE(adapter->fsf_lic_version, version);
snprintf(fc_host_firmware_version(adapter->scsi_host),
FC_VERSION_STRING_SIZE, "%#08x", version);
}
}
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_status_read_buffer *sr_buf = req->data;
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
zfcp_dbf_hba_fsf_uss("fssrh_4", req);
switch (sr_buf->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
zfcp_fsf_status_read_port_closed(req);
break;
case FSF_STATUS_READ_INCOMING_ELS:
zfcp_fc_incoming_els(req);
break;
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
zfcp_dbf_hba_bit_err("fssrh_3", req);
if (ber_stop) {
dev_warn(&adapter->ccw_device->dev,
"All paths over this FCP device are disused because of excessive bit errors\n");
zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
} else {
dev_warn(&adapter->ccw_device->dev,
"The error threshold for checksum statistics has been exceeded\n");
}
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
break;
case FSF_STATUS_READ_LINK_UP:
dev_info(&adapter->ccw_device->dev,
"The local link has been restored\n");
/* All ports should be marked as ready to run again */
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
"fssrh_2");
zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
break;
case FSF_STATUS_READ_NOTIFICATION_LOST:
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
zfcp_fc_conditional_port_scan(adapter);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_VERSION_CHANGE)
queue_work(adapter->work_queue,
&adapter->version_change_lost_work);
break;
case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
adapter->adapter_features = sr_buf->payload.word[0];
break;
case FSF_STATUS_READ_VERSION_CHANGE:
zfcp_fsf_status_read_version_change(adapter, sr_buf);
break;
}
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
queue_work(adapter->work_queue, &adapter->stat_work);
}
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_FCP_RSP_AVAILABLE:
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
case FSF_SQ_NO_RETRY_POSSIBLE:
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
return;
case FSF_SQ_COMMAND_ABORTED:
break;
case FSF_SQ_NO_RECOM:
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter reported a problem "
"that cannot be recovered\n");
zfcp_qdio_siosl(req->adapter);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
break;
}
/* all cases that did not return above set FSFREQ_ERROR */
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
{
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
return;
switch (req->qtcb->header.fsf_status) {
case FSF_UNKNOWN_COMMAND:
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter does not recognize the command 0x%x\n",
req->qtcb->header.fsf_command);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
zfcp_fsf_fsfstatus_qual_eval(req);
break;
}
}
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb *qtcb = req->qtcb;
union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
zfcp_dbf_hba_fsf_response(req);
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
return;
}
switch (qtcb->prefix.prot_status) {
case FSF_PROT_GOOD:
case FSF_PROT_FSF_STATUS_PRESENTED:
return;
case FSF_PROT_QTCB_VERSION_ERROR:
dev_err(&adapter->ccw_device->dev,
"QTCB version 0x%x not supported by FCP adapter "
"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
psq->word[0], psq->word[1]);
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
break;
case FSF_PROT_ERROR_STATE:
case FSF_PROT_SEQ_NUMB_ERROR:
zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PROT_UNSUPP_QTCB_TYPE:
dev_err(&adapter->ccw_device->dev,
"The QTCB type is not supported by the FCP adapter\n");
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
break;
case FSF_PROT_DUPLICATE_REQUEST_ID:
dev_err(&adapter->ccw_device->dev,
"0x%Lx is an ambiguous request identifier\n",
(unsigned long long)qtcb->bottom.support.req_handle);
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
break;
case FSF_PROT_LINK_DOWN:
zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
/* go through reopen to flush pending requests */
zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
break;
case FSF_PROT_REEST_QUEUE:
/* All ports should be marked as ready to run again */
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
"fspse_8");
break;
default:
dev_err(&adapter->ccw_device->dev,
"0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
}
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
/**
* zfcp_fsf_req_complete - process completion of a FSF request
* @req: The FSF request that has been completed.
*
* When a request has been completed either from the FCP adapter,
* or it has been dismissed due to a queue shutdown, this function
* is called to process the completion status and trigger further
* events related to the FSF request.
* Caller must ensure that the request has been removed from
* adapter->req_list, to protect against concurrent modification
* by zfcp_erp_strategy_check_fsfreq().
*/
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
struct zfcp_erp_action *erp_action;
if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
zfcp_fsf_status_read_handler(req);
return;
}
del_timer_sync(&req->timer);
zfcp_fsf_protstatus_eval(req);
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
erp_action = req->erp_action;
if (erp_action)
zfcp_erp_notify(erp_action, 0);
if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
zfcp_fsf_req_free(req);
else
complete(&req->completion);
}
/**
* zfcp_fsf_req_dismiss_all - dismiss all fsf requests
* @adapter: pointer to struct zfcp_adapter
*
* Never ever call this without shutting down the adapter first.
* Otherwise the adapter would continue using and corrupting s390 storage.
* Included BUG_ON() call to ensure this is done.
* ERP is supposed to be the only user of this function.
*/
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *req, *tmp;
LIST_HEAD(remove_queue);
BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
zfcp_reqlist_move(adapter->req_list, &remove_queue);
list_for_each_entry_safe(req, tmp, &remove_queue, list) {
list_del(&req->list);
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_fsf_req_complete(req);
}
}
#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
#define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6)
#define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
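/*
 * Translate the FSF port-speed bit mask reported by the adapter into the
 * FC_PORTSPEED_* mask used by the FC transport class; bits without a known
 * mapping are dropped.
 */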
u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
fdmi_speed |= FC_PORTSPEED_1GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
fdmi_speed |= FC_PORTSPEED_2GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
fdmi_speed |= FC_PORTSPEED_4GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
fdmi_speed |= FC_PORTSPEED_10GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
fdmi_speed |= FC_PORTSPEED_8GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
fdmi_speed |= FC_PORTSPEED_16GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
fdmi_speed |= FC_PORTSPEED_32GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
fdmi_speed |= FC_PORTSPEED_64GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
fdmi_speed |= FC_PORTSPEED_128GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
return fdmi_speed;
}
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
struct fc_els_flogi *plogi;
/* adjust pointers for missing command code */
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
/* no error return above here, otherwise must fix call chains */
/* do not evaluate invalid fields */
if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
return 0;
adapter->hydra_version = bottom->adapter_type;
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
break;
case FSF_TOPO_FABRIC:
break;
case FSF_TOPO_AL:
default:
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
return -EIO;
}
return 0;
}
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_diag_header *const diag_hdr =
&adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
adapter->peer_wwpn = 0;
adapter->peer_wwnn = 0;
adapter->peer_d_id = 0;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
/*
 * usually we wait with an update until the cached data is too old,
 * but since the data is available here anyway, update it right away
 */
zfcp_diag_update_xdata(diag_hdr, bottom, false);
zfcp_scsi_shost_update_config_data(adapter, bottom, false);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
dev_err(&adapter->ccw_device->dev,
"FCP adapter maximum QTCB size (%d bytes) "
"is too small\n",
bottom->max_qtcb_size);
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
return;
}
atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
/* avoid an adapter shutdown so that events such as
 * LINK UP can still be recognized */
atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
zfcp_scsi_shost_update_config_data(adapter, bottom, true);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
break;
default:
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
return;
}
if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
adapter->hardware_version = bottom->hardware_version;
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports newer "
"control block versions\n");
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
return;
}
if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
"The FCP adapter only supports older "
"control block versions\n");
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
}
}
/*
* Mapping of FC Endpoint Security flag masks to mnemonics
*
* NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
* changes.
*/
static const struct {
u32 mask;
char *name;
} zfcp_fsf_fc_security_mnemonics[] = {
{ FSF_FC_SECURITY_AUTH, "Authentication" },
{ FSF_FC_SECURITY_ENC_FCSP2 |
FSF_FC_SECURITY_ENC_ERAS, "Encryption" },
};
/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
/**
* zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
* mnemonics and place in a buffer
* @buf : the buffer to place the translated FC Endpoint Security flag(s)
* into
* @size : the size of the buffer, including the trailing null space
* @fc_security: one or more FC Endpoint Security flags, or zero
* @fmt : specifies whether a list or a single item is to be put into the
* buffer
*
* The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
* If the FC Endpoint Security flags are zero "none" is placed into the buffer.
*
* With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
* a comma followed by a space into the buffer. If one or more FC Endpoint
* Security flags cannot be translated into a mnemonic, as they are undefined
* in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
* representation is placed into the buffer.
*
* With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
* the buffer. If the FC Endpoint Security flag cannot be translated, as it is
* undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
* representation is placed into the buffer. If more than one FC Endpoint
* Security flag was specified, their value in hexadecimal representation is
* placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
* can be used to define a buffer that is large enough to hold one mnemonic.
*
* Return: The number of characters written into buf not including the trailing
* '\0'. If size is 0, the function returns 0.
*/
ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
enum zfcp_fsf_print_fmt fmt)
{
const char *prefix = "";
ssize_t len = 0;
int i;
if (fc_security == 0)
return scnprintf(buf, size, "none");
if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
return scnprintf(buf, size, "0x%08x", fc_security);
for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
continue;
len += scnprintf(buf + len, size - len, "%s%s", prefix,
zfcp_fsf_fc_security_mnemonics[i].name);
prefix = ", ";
fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
}
if (fc_security != 0)
len += scnprintf(buf + len, size - len, "%s0x%08x",
prefix, fc_security);
return len;
}
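/*
 * Illustrative use of zfcp_fsf_scnprint_fc_security() (sketch, not part of
 * this driver): format an adapter's negotiated FC Endpoint Security
 * capabilities as a list, assuming a caller-provided device pointer "dev"
 * and a sufficiently large buffer:
 *
 *	char buf[80];
 *
 *	zfcp_fsf_scnprint_fc_security(buf, sizeof(buf),
 *				      adapter->fc_security_algorithms,
 *				      ZFCP_FSF_PRINT_FMT_LIST);
 *	dev_info(dev, "FC Endpoint Security: %s\n", buf);
 */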
static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *req)
{
if (adapter->fc_security_algorithms ==
adapter->fc_security_algorithms_old) {
/* no change, no trace */
return;
}
zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
adapter->fc_security_algorithms_old,
adapter->fc_security_algorithms);
adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
}
static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
adapter->fc_security_algorithms =
bottom->fc_security_algorithms;
else
adapter->fc_security_algorithms = 0;
zfcp_fsf_dbf_adapter_fc_security(adapter, req);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_diag_header *const diag_hdr =
&req->adapter->diagnostics->port_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
/*
* usually we defer the update until the cached data is too old,
* but since the data is available right now, update it anyway
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
}
}
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
struct zfcp_fsf_req *req;
if (likely(pool))
req = mempool_alloc(pool, GFP_ATOMIC);
else
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (unlikely(!req))
return NULL;
memset(req, 0, sizeof(*req));
req->pool = pool;
return req;
}
static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
struct fsf_qtcb *qtcb;
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
if (unlikely(!qtcb))
return NULL;
memset(qtcb, 0, sizeof(*qtcb));
return qtcb;
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
u32 fsf_cmd, u8 sbtype,
mempool_t *pool)
{
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
if (unlikely(!req))
return ERR_PTR(-ENOMEM);
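/* never hand out request id 0; skip it when the counter wraps around */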
if (adapter->req_no == 0)
adapter->req_no++;
timer_setup(&req->timer, NULL, 0);
init_completion(&req->completion);
req->adapter = adapter;
req->req_id = adapter->req_no;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
req->qtcb = zfcp_fsf_qtcb_alloc(
adapter->pool.qtcb_pool);
else
req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
if (unlikely(!req->qtcb)) {
zfcp_fsf_req_free(req);
return ERR_PTR(-ENOMEM);
}
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = fsf_cmd;
}
zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
req->qtcb, sizeof(struct fsf_qtcb));
return req;
}
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
u64 req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer_sync(&req->timer);
/* lookup request again, list might have changed */
if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
zfcp_dbf_hba_fsf_reqid("fsrsrmf", 1, adapter, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
return -EIO;
}
/*
* NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
* ONLY TOUCH SYNC req AGAIN ON req->completion.
*
* The request might complete and be freed concurrently at any point
* now. This is not protected by the QDIO-lock (req_q_lock). So any
* uncontrolled access after this might result in a use-after-free bug.
* Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
* when it is completed via req->completion, is it safe to use req
* again.
*/
/* Don't increase for unsolicited status */
if (!is_srb)
adapter->fsf_req_seq_no++;
adapter->req_no++;
return 0;
}
/**
* zfcp_fsf_status_read - send status read request
* @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, ERROR otherwise
*/
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
struct page *page;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
SBAL_SFLAGS0_TYPE_STATUS,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
goto failed_req_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
failed_req_send:
req->data = NULL;
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev;
union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
"fsafch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
case FSF_LUN_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
break;
case FSF_FCP_COMMAND_DOES_NOT_EXIST:
req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
break;
case FSF_PORT_BOXED:
zfcp_erp_set_port_status(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsafch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_GOOD:
req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
break;
}
}
/**
* zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
* @scmnd: The SCSI command to abort
* Returns: pointer to struct zfcp_fsf_req
*/
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
struct zfcp_fsf_req *req = NULL;
struct scsi_device *sdev = scmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
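/* zfcp_fsf_fcp_cmnd() stashed the FSF request id of the command to be
 * aborted in host_scribble; it becomes the abort request handle below
 */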
u64 old_req_id = (u64) scmnd->host_scribble;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
goto out;
}
if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = sdev;
req->handler = zfcp_fsf_abort_fcp_command_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.support.req_handle = old_req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req)) {
/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
}
out_error_free:
zfcp_fsf_req_free(req);
req = NULL;
out:
spin_unlock_irq(&qdio->req_q_lock);
return req;
}
static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_fsf_ct_els *ct = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
ct->status = -EINVAL;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto skip_fsfstatus;
switch (header->fsf_status) {
case FSF_GOOD:
ct->status = 0;
zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_PORT_BOXED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
fallthrough;
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
case FSF_RESPONSE_SIZE_TOO_LARGE:
case FSF_SBAL_MISMATCH:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
skip_fsfstatus:
if (ct->handler)
ct->handler(ct->handler_data);
}
static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct fsf_qtcb *qtcb = req->qtcb;
u32 feat = adapter->adapter_features;
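/* With QDIO data division (multibuffer) active, request and response each
 * get their own set of SBALs; otherwise try a single unchained SBAL first
 * and fall back to chained SBALs if the adapter supports them.
 */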
if (zfcp_adapter_multi_buffer_active(adapter)) {
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
qtcb->bottom.support.req_buf_length =
zfcp_qdio_real_bytes(sg_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
qtcb->bottom.support.resp_buf_length =
zfcp_qdio_real_bytes(sg_resp);
zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
zfcp_qdio_set_scount(qdio, &req->qdio_req);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
sg_req, sg_resp);
return 0;
}
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
return -EOPNOTSUPP;
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
return 0;
}
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
unsigned int timeout)
{
int ret;
ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
if (ret)
return ret;
/* common settings for ct/gs and els requests */
if (timeout > 255)
timeout = 255; /* max value accepted by hardware */
req->qtcb->bottom.support.service_class = FSF_CLASS_3;
req->qtcb->bottom.support.timeout = timeout;
zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
return 0;
}
/**
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
* @wka_port: pointer to zfcp WKA port to send CT/GS to
* @ct: pointer to struct zfcp_send_ct with data for request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
* @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
struct zfcp_fsf_ct_els *ct, mempool_t *pool,
unsigned int timeout)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int ret = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
if (ret)
goto failed_send;
req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
ct->d_id = wka_port->d_id;
req->data = ct;
zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
failed_send:
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
return ret;
}
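/*
 * Illustrative use of zfcp_fsf_send_ct() (hedged sketch, not from this file):
 * a CT/GS user fills a struct zfcp_fsf_ct_els with request/response
 * scatterlists and a completion callback, then submits it to a
 * well-known-address port; "wka_port", "sg_req", "sg_resp", "my_ct_done" and
 * "my_context" are placeholder names:
 *
 *	ct->req = &sg_req;
 *	ct->resp = &sg_resp;
 *	ct->handler = my_ct_done;
 *	ct->handler_data = my_context;
 *	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, 10);
 *
 * If zfcp_fsf_send_ct() returns 0 the callback runs once the request
 * completes; if it returns an error the callback is never invoked.
 */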
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
struct zfcp_fsf_ct_els *send_els = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
send_els->status = -EINVAL;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto skip_fsfstatus;
switch (header->fsf_status) {
case FSF_GOOD:
send_els->status = 0;
zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_RETRY_IF_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_ELS_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
case FSF_RESPONSE_SIZE_TOO_LARGE:
break;
case FSF_SBAL_MISMATCH:
/* should never occur, avoided in zfcp_fsf_send_els */
fallthrough;
default:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
skip_fsfstatus:
if (send_els->handler)
send_els->handler(send_els->handler_data);
}
/**
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
* @adapter: pointer to zfcp adapter
* @d_id: N_Port_ID to send ELS to
* @els: pointer to struct zfcp_send_els with data for the command
* @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = adapter->qdio;
int ret = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
if (!zfcp_adapter_multi_buffer_active(adapter))
zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
if (ret)
goto failed_send;
hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
els->d_id = d_id;
req->data = els;
zfcp_dbf_san_req("fssels1", req, d_id);
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
failed_send:
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
return ret;
}
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
FSF_FEATURE_REQUEST_SFP_DATA |
FSF_FEATURE_FC_SECURITY;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
/**
* zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
* @qdio: pointer to the QDIO-Queue to use for sending the command.
* @data: pointer to the QTCB-Bottom for storing the result of the command,
* might be %NULL.
*
* Returns:
* * 0 - Exchange Config Data was successful, @data is complete
* * -EIO - Exchange Config Data was not successful, @data is invalid
* * -EAGAIN - @data contains incomplete data
* * -ENOMEM - Some memory allocation failed along the way
*/
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
SBAL_SFLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
FSF_FEATURE_REQUEST_SFP_DATA |
FSF_FEATURE_FC_SECURITY;
if (data)
req->data = data;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
if (req->status &
(ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
retval = -EIO;
else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
return retval;
out_unlock:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
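/*
 * Illustrative use of zfcp_fsf_exchange_config_data_sync() (hedged sketch):
 * a synchronous caller typically allocates a config bottom, issues the
 * exchange and distinguishes complete (0) from incomplete (-EAGAIN) data via
 * the return code; use_config() is a placeholder for consuming the result:
 *
 *	struct fsf_qtcb_bottom_config *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 *	int rc;
 *
 *	if (!cfg)
 *		return -ENOMEM;
 *	rc = zfcp_fsf_exchange_config_data_sync(qdio, cfg);
 *	if (rc == 0 || rc == -EAGAIN)
 *		use_config(cfg);
 *	kfree(cfg);
 */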
/**
* zfcp_fsf_exchange_port_data - request information about local port
* @erp_action: ERP action for the adapter for which port data is requested
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
/**
* zfcp_fsf_exchange_port_data_sync() - Request information about local port.
* @qdio: pointer to the QDIO-Queue to use for sending the command.
* @data: pointer to the QTCB-Bottom for storing the result of the command,
* might be %NULL.
*
* Returns:
* * 0 - Exchange Port Data was successful, @data is complete
* * -EIO - Exchange Port Data was not successful, @data is invalid
* * -EAGAIN - @data contains incomplete data
* * -ENOMEM - Some memory allocation failed along the way
* * -EOPNOTSUPP - This operation is not supported
*/
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
SBAL_SFLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
if (data)
req->data = data;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
if (req->status &
(ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
retval = -EIO;
else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
return retval;
out_unlock:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
struct zfcp_fsf_req *req)
{
char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
if (port->connection_info == port->connection_info_old) {
/* no change, no log nor trace */
return;
}
zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
port->connection_info_old,
port->connection_info);
zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
port->connection_info_old,
ZFCP_FSF_PRINT_FMT_SINGLEITEM);
zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
port->connection_info,
ZFCP_FSF_PRINT_FMT_SINGLEITEM);
if (strncmp(mnemonic_old, mnemonic_new,
ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
/* no change in string representation, no log */
goto out;
}
if (port->connection_info_old == 0) {
/* activation */
dev_info(&port->adapter->ccw_device->dev,
"FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
port->wwpn, mnemonic_new);
} else if (port->connection_info == 0) {
/* deactivation */
dev_warn(&port->adapter->ccw_device->dev,
"FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
port->wwpn, mnemonic_old);
} else {
/* change */
dev_warn(&port->adapter->ccw_device->dev,
"FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
port->wwpn, mnemonic_old, mnemonic_new);
}
out:
port->connection_info_old = port->connection_info;
}
static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
u64 wwpn)
{
switch (fsf_sqw0) {
/*
* Open Port command error codes
*/
case FSF_SQ_SECURITY_REQUIRED:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
wwpn);
break;
case FSF_SQ_SECURITY_TIMEOUT:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
wwpn);
break;
case FSF_SQ_SECURITY_KM_UNAVAILABLE:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
wwpn);
break;
case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
wwpn);
break;
case FSF_SQ_SECURITY_AUTH_FAILURE:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
wwpn);
break;
/*
* Send FCP command error codes
*/
case FSF_SQ_SECURITY_ENC_FAILURE:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
wwpn);
break;
/*
* Unknown error codes
*/
default:
dev_warn_ratelimited(dev,
"FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
fsf_sqw0, wwpn);
}
}
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
struct fc_els_flogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
goto out;
switch (header->fsf_status) {
case FSF_PORT_ALREADY_OPEN:
break;
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&adapter->ccw_device->dev,
"Not enough FCP adapter resources to open "
"remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
zfcp_erp_set_port_status(port,
ZFCP_STATUS_COMMON_ERP_FAILED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SECURITY_ERROR:
zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
header->fsf_status_qual.word[0],
port->wwpn);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
/* no zfcp_fc_test_link() with failed open port */
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_NO_RETRY_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_GOOD:
port->handle = header->port_handle;
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
port->connection_info = bottom->connection_info;
else
port->connection_info = 0;
zfcp_fsf_log_port_fc_security(port, req);
atomic_or(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
&port->status);
/* check whether D_ID has changed during open */
/*
* FIXME: This check is not airtight, as the FCP channel does
* not monitor closures of target port connections caused on
* the remote side. Thus, it might miss out on invalidating
* locally cached WWPNs (and other N_Port parameters) of gone
* target ports. So, our heroic attempt to make things safe
* could be undermined by 'open port' response data tagged with
* obsolete WWPNs. Another reason to monitor potential
* connection closures ourselves at least (by interpreting
* incoming ELS' and unsolicited status). It just crosses my
* mind that one should be able to cross-check by means of
* another GID_PN straight after a port has been opened.
* Alternatively, an ADISC/PDISC ELS should suffice as well.
*/
plogi = (struct fc_els_flogi *) bottom->els;
if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
zfcp_fc_plogi_evaluate(port, plogi);
break;
case FSF_UNKNOWN_OP_SUBTYPE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
out:
put_device(&port->dev);
}
/**
* zfcp_fsf_open_port - create and send open port request
* @erp_action: pointer to struct zfcp_erp_action
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
hton24(req->qtcb->bottom.support.d_id, port->d_id);
req->data = port;
req->erp_action = erp_action;
erp_action->fsf_req_id = req->req_id;
get_device(&port->dev);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_port *port = req->data;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
break;
case FSF_GOOD:
zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
break;
}
}
/**
* zfcp_fsf_close_port - create and send close port request
* @erp_action: pointer to struct zfcp_erp_action
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_fc_wka_port *wka_port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
goto out;
}
switch (header->fsf_status) {
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
fallthrough;
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
fallthrough;
case FSF_PORT_ALREADY_OPEN:
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
wake_up(&wka_port->opened);
}
/**
* zfcp_fsf_open_wka_port - create and send open wka-port request
* @wka_port: pointer to struct zfcp_fc_wka_port
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
u64 req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
req_id = req->req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
return retval;
}
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_fc_wka_port *wka_port = req->data;
if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->closed);
}
/**
* zfcp_fsf_close_wka_port - create and send close wka port request
* @wka_port: WKA port to close
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
u64 req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
req->qtcb->header.port_handle = wka_port->handle;
req_id = req->req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
return retval;
}
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
struct scsi_device *sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fscpph2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_GOOD:
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/
atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
break;
}
}
/**
* zfcp_fsf_close_physical_port - close physical port
* @erp_action: pointer to struct zfcp_erp_action
* Returns: 0 on success
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
union fsf_status_qual *qual = &header->fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED,
&zfcp_sdev->status);
switch (header->fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
fallthrough;
case FSF_LUN_ALREADY_OPEN:
break;
case FSF_PORT_BOXED:
zfcp_erp_set_port_status(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_SHARING_VIOLATION:
if (qual->word[0])
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
"LUN 0x%016Lx on port 0x%016Lx is already in "
"use by CSS%d, MIF Image ID %x\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn,
qual->fsf_queue_designator.cssid,
qual->fsf_queue_designator.hla);
zfcp_erp_set_lun_status(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
dev_warn(&adapter->ccw_device->dev,
"No handle is available for LUN "
"0x%016Lx on port 0x%016Lx\n",
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
fallthrough;
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_GOOD:
zfcp_sdev->lun_handle = header->lun_handle;
atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
/**
* zfcp_fsf_open_lun - open LUN
* @erp_action: pointer to struct zfcp_erp_action
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
SBAL_SFLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
req->handler = zfcp_fsf_open_lun_handler;
req->data = erp_action->sdev;
req->erp_action = erp_action;
erp_action->fsf_req_id = req->req_id;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_HANDLE_NOT_VALID:
zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
zfcp_erp_set_port_status(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_GOOD:
atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
/**
* zfcp_fsf_close_lun - close LUN
* @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->handler = zfcp_fsf_close_lun_handler;
req->data = erp_action->sdev;
req->erp_action = erp_action;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
}
static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
lat_rec->sum += lat;
lat_rec->min = min(lat_rec->min, lat);
lat_rec->max = max(lat_rec->max, lat);
}
static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
struct zfcp_latency_cont *lat = NULL;
struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;
lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
blktrc.flags = 0;
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
blktrc.inb_usage = 0;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
!(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
zfcp_sdev = sdev_to_zfcp(scsi->device);
blktrc.flags |= ZFCP_BLK_LAT_VALID;
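/* the channel reports latencies as multiples of the adapter timer
 * interval (adapter->timer_ticks from Exchange Config Data); scale them
 * before handing the trace data to the block layer
 */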
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
switch (req->qtcb->bottom.io.data_direction) {
case FSF_DATADIR_DIF_READ_STRIP:
case FSF_DATADIR_DIF_READ_CONVERT:
case FSF_DATADIR_READ:
lat = &zfcp_sdev->latencies.read;
break;
case FSF_DATADIR_DIF_WRITE_INSERT:
case FSF_DATADIR_DIF_WRITE_CONVERT:
case FSF_DATADIR_WRITE:
lat = &zfcp_sdev->latencies.write;
break;
case FSF_DATADIR_CMND:
lat = &zfcp_sdev->latencies.cmd;
break;
}
if (lat) {
spin_lock(&zfcp_sdev->latencies.lock);
zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
lat->counter++;
spin_unlock(&zfcp_sdev->latencies.lock);
}
}
blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
}
/**
* zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
* @req: Pointer to FSF request.
* @sdev: Pointer to SCSI device as request context.
*/
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCPLUN_NOT_VALID:
case FSF_LUN_HANDLE_NOT_VALID:
zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
break;
case FSF_DIRECTION_INDICATOR_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
"Incorrect direction %d, LUN 0x%016Lx on port "
"0x%016Lx closed\n",
req->qtcb->bottom.io.data_direction,
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
"Incorrect FCP_CMND length %d, FCP device closed\n",
req->qtcb->bottom.io.fcp_cmnd_length);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
zfcp_erp_set_port_status(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(zfcp_sdev->port,
ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_LUN_BOXED:
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"fssfch6");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_ADAPTER_STATUS_AVAILABLE:
if (header->fsf_status_qual.word[0] ==
FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
zfcp_fc_test_link(zfcp_sdev->port);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SECURITY_ERROR:
zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
header->fsf_status_qual.word[0],
zfcp_sdev->port->wwpn);
zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
}
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
struct scsi_cmnd *scpnt;
struct fcp_resp_with_ext *fcp_rsp;
unsigned long flags;
read_lock_irqsave(&req->adapter->abort_lock, flags);
scpnt = req->data;
if (unlikely(!scpnt)) {
read_unlock_irqrestore(&req->adapter->abort_lock, flags);
return;
}
zfcp_fsf_fcp_handler_common(req, scpnt->device);
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
goto skip_fsfstatus;
}
switch (req->qtcb->header.fsf_status) {
case FSF_INCONSISTENT_PROT_DATA:
case FSF_INVALID_PROT_PARM:
set_host_byte(scpnt, DID_ERROR);
goto skip_fsfstatus;
case FSF_BLOCK_GUARD_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x1);
goto skip_fsfstatus;
case FSF_APP_TAG_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x2);
goto skip_fsfstatus;
case FSF_REF_TAG_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x3);
goto skip_fsfstatus;
}
BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
skip_fsfstatus:
zfcp_fsf_req_trace(req, scpnt);
zfcp_dbf_scsi_result(scpnt, req);
scpnt->host_scribble = NULL;
scsi_done(scpnt);
/*
* We must hold this lock until scsi_done has been called.
* Otherwise we may call scsi_done after an abort for this
* command has already completed.
* Note: scsi_done must not block!
*/
read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}
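/* Map the SCSI data/protection direction of a command to an FSF data
 * direction code for the QTCB I/O bottom; unsupported combinations such as
 * bidirectional transfers yield -EINVAL.
 */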
static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_NORMAL:
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
*data_dir = FSF_DATADIR_CMND;
break;
case DMA_FROM_DEVICE:
*data_dir = FSF_DATADIR_READ;
break;
case DMA_TO_DEVICE:
*data_dir = FSF_DATADIR_WRITE;
break;
case DMA_BIDIRECTIONAL:
return -EINVAL;
}
break;
case SCSI_PROT_READ_STRIP:
*data_dir = FSF_DATADIR_DIF_READ_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
break;
case SCSI_PROT_READ_PASS:
*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
break;
case SCSI_PROT_WRITE_PASS:
*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
break;
default:
return -EINVAL;
}
return 0;
}
/**
* zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
* @scsi_cmnd: scsi command to be sent
*/
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
struct zfcp_fsf_req *req;
struct fcp_cmnd *fcp_cmnd;
u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
int retval = -EIO;
struct scsi_device *sdev = scsi_cmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct fsf_qtcb_bottom_io *io;
unsigned long flags;
if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
spin_lock_irqsave(&qdio->req_q_lock, flags);
if (atomic_read(&qdio->req_q_free) <= 0) {
atomic_inc(&qdio->req_q_full);
goto out;
}
if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
sbtype = SBAL_SFLAGS0_TYPE_WRITE;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
sbtype, adapter->pool.scsi_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
BUILD_BUG_ON(sizeof(scsi_cmnd->host_scribble) < sizeof(req->req_id));
scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
io = &req->qtcb->bottom.io;
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_fcp_cmnd_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
io->service_class = FSF_CLASS_3;
io->fcp_cmnd_length = FCP_CMND_LEN;
if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
io->data_block_length = scsi_prot_interval(scsi_cmnd);
io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
}
if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
goto failed_scsi_cmnd;
BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_prot_sglist(scsi_cmnd));
if (retval)
goto failed_scsi_cmnd;
io->prot_data_length = zfcp_qdio_real_bytes(
scsi_prot_sglist(scsi_cmnd));
}
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd));
if (unlikely(retval))
goto failed_scsi_cmnd;
zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
if (zfcp_adapter_multi_buffer_active(adapter))
zfcp_qdio_set_scount(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
failed_scsi_cmnd:
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
spin_unlock_irqrestore(&qdio->req_q_lock, flags);
return retval;
}
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct fcp_resp_with_ext *fcp_rsp;
struct fcp_resp_rsp_info *rsp_info;
zfcp_fsf_fcp_handler_common(req, sdev);
fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
(req->status & ZFCP_STATUS_FSFREQ_ERROR))
req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}
/**
* zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
* @sdev: Pointer to SCSI device to send the task management command to.
* @tm_flags: Unsigned byte for task management flags.
*
* Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
*/
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags)
{
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
if (unlikely(!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return NULL;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
SBAL_SFLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
req = NULL;
goto out;
}
req->data = sdev;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req)) {
/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
}
zfcp_fsf_req_free(req);
req = NULL;
out:
spin_unlock_irq(&qdio->req_q_lock);
return req;
}
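/*
 * Illustrative use of zfcp_fsf_fcp_task_mgmt() (hedged sketch): SCSI
 * error-handling paths build task management functions with flags from
 * <scsi/fc/fc_fcp.h>, e.g. a LUN reset; the wait/free pattern below is a
 * simplification of what the real handlers do:
 *
 *	struct zfcp_fsf_req *req;
 *	int ret = SUCCESS;
 *
 *	req = zfcp_fsf_fcp_task_mgmt(sdev, FCP_TMF_LUN_RESET);
 *	if (!req)
 *		return FAILED;
 *	wait_for_completion(&req->completion);
 *	if (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
 *		ret = FAILED;
 *	zfcp_fsf_req_free(req);
 *	return ret;
 */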
/**
* zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
* @qdio: pointer to struct zfcp_qdio
* @sbal_idx: response queue index of SBAL to be processed
*/
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
struct zfcp_adapter *adapter = qdio->adapter;
struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
u64 req_id;
int idx;
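/* each used SBALE carries the id of the originating FSF request in
 * its address field; look the request up, remove it from the request
 * list and complete it
 */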
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
req_id = sbale->addr;
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req) {
/*
* An unknown request means that we potentially have memory
* corruption and must stop the machine immediately.
*/
zfcp_qdio_siosl(adapter);
panic("error: unknown req_id (%llx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
}
zfcp_fsf_req_complete(fsf_req);
if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
break;
}
}
| linux-master | drivers/s390/scsi/zfcp_fsf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
* Error Recovery Procedures (ERP).
*
* Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
#include "zfcp_diag.h"
#define ZFCP_MAX_ERPS 3
enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000,
ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000,
ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
ZFCP_STATUS_ERP_NO_REF = 0x00800000,
};
/*
* Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
* Used to indicate that an ERP action could not be set up despite a detected
* need for some recovery.
*/
#define ZFCP_ERP_ACTION_NONE 0xc0
/*
* Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
 * Used to indicate that ERP is not needed because the object has
* ZFCP_STATUS_COMMON_ERP_FAILED.
*/
#define ZFCP_ERP_ACTION_FAILED 0xe0
enum zfcp_erp_act_result {
ZFCP_ERP_SUCCEEDED = 0,
ZFCP_ERP_FAILED = 1,
ZFCP_ERP_CONTINUES = 2,
ZFCP_ERP_EXIT = 3,
ZFCP_ERP_DISMISSED = 4,
ZFCP_ERP_NOMEM = 5,
};
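/*
 * Blocking an adapter clears ZFCP_STATUS_COMMON_UNBLOCKED (plus any
 * caller-supplied bits) so that no new I/O is started on it while
 * error recovery is in progress.
 */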
static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
{
zfcp_erp_clear_adapter_status(adapter,
ZFCP_STATUS_COMMON_UNBLOCKED | mask);
}
static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act)
{
struct zfcp_erp_action *curr_act;
list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
if (act == curr_act)
return true;
return false;
}
static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
list_move(&act->list, &adapter->erp_ready_head);
zfcp_dbf_rec_run("erardy1", act);
wake_up(&adapter->erp_ready_wq);
zfcp_dbf_rec_run("erardy2", act);
}
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
{
act->status |= ZFCP_STATUS_ERP_DISMISSED;
if (zfcp_erp_action_is_running(act))
zfcp_erp_action_ready(act);
}
static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
}
static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
{
struct scsi_device *sdev;
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
else {
spin_lock(port->adapter->scsi_host->host_lock);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
spin_unlock(port->adapter->scsi_host->host_lock);
}
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
{
struct zfcp_port *port;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&adapter->erp_action);
else {
read_lock(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
zfcp_erp_action_dismiss_port(port);
read_unlock(&adapter->port_list_lock);
}
}
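/*
 * If the object targeted by the wanted recovery is already marked
 * ZFCP_STATUS_COMMON_ERP_FAILED, no new ERP action is set up; for port
 * and adapter reopens the failed status is set again so that it also
 * propagates to newly added child objects.
 */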
static enum zfcp_erp_act_type zfcp_erp_handle_failed(
enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
struct zfcp_port *port, struct scsi_device *sdev)
{
enum zfcp_erp_act_type need = want;
struct zfcp_scsi_dev *zsdev;
switch (want) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zsdev = sdev_to_zfcp(sdev);
if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_port_status(
port, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_adapter_status(
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
}
return need;
}
static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
enum zfcp_erp_act_type need = want;
int l_status, p_status, a_status;
struct zfcp_scsi_dev *zfcp_sdev;
switch (want) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
l_status = atomic_read(&zfcp_sdev->status);
if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT:
p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (p_status & ZFCP_STATUS_COMMON_NOESC)
return need;
if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
!(a_status & ZFCP_STATUS_COMMON_OPEN))
return 0; /* shutdown requested for closed adapter */
}
return need;
}
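/*
 * Prepare the erp_action embedded in the target object: take a
 * reference on the object, mark it ZFCP_STATUS_COMMON_ERP_INUSE and
 * reset the action's list entry, timer, step and FSF request id.
 */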
static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
u32 act_status,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
need != ZFCP_ERP_ACTION_REOPEN_PORT &&
need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
return NULL;
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
if (scsi_device_get(sdev))
return NULL;
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
WARN_ON_ONCE(erp_action->port != port);
WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (!get_device(&port->dev))
return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
WARN_ON_ONCE(erp_action->port != port);
WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
kref_get(&adapter->ref);
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
WARN_ON_ONCE(erp_action->port != NULL);
WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
}
WARN_ON_ONCE(erp_action->adapter != adapter);
memset(&erp_action->list, 0, sizeof(erp_action->list));
memset(&erp_action->timer, 0, sizeof(erp_action->timer));
erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
erp_action->fsf_req_id = 0;
erp_action->type = need;
erp_action->status = act_status;
return erp_action;
}
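/*
 * Central trigger point for recovery: determine whether and which
 * action is really needed, set it up, queue it on erp_ready_head and
 * wake the ERP thread; a trigger trace record is written in any case.
 */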
static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev,
char *dbftag, u32 act_status)
{
enum zfcp_erp_act_type need;
struct zfcp_erp_action *act;
need = zfcp_erp_handle_failed(want, adapter, port, sdev);
if (!need) {
need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
goto out;
}
if (!adapter->erp_thread) {
need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
}
need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act) {
need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
}
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
out:
zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
}
void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
struct zfcp_adapter *adapter,
u64 port_name, u32 port_id)
{
unsigned long flags;
static /* don't waste stack */ struct zfcp_port tmpport;
write_lock_irqsave(&adapter->erp_lock, flags);
/* Stand-in zfcp port with fields just good enough for
* zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
* Under lock because tmpport is static.
*/
atomic_set(&tmpport.status, -1); /* unknown */
tmpport.wwpn = port_name;
tmpport.d_id = port_id;
zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
ZFCP_ERP_ACTION_NONE);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
int clear_mask, char *dbftag)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
adapter, NULL, NULL, dbftag, 0);
}
/**
* zfcp_erp_adapter_reopen - Reopen adapter.
* @adapter: Adapter to reopen.
* @clear: Status flags to clear.
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
char *dbftag)
{
unsigned long flags;
zfcp_erp_adapter_block(adapter, clear);
zfcp_scsi_schedule_rports_block(adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
NULL, NULL, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
* zfcp_erp_adapter_shutdown - Shutdown adapter.
* @adapter: Adapter to shut down.
* @clear: Status flags to clear.
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
}
/**
* zfcp_erp_port_shutdown - Shutdown port
* @port: Port to shut down.
* @clear: Status flags to clear.
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_port_reopen(port, clear | flags, dbftag);
}
static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
{
zfcp_erp_clear_port_status(port,
ZFCP_STATUS_COMMON_UNBLOCKED | clear);
}
static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_forced_reopen - Forced close of port and open again
* @port: Port to force close and to reopen.
* @clear: Status flags to clear.
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_port_forced_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
char *dbftag)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
port->adapter, port, NULL, dbftag, 0);
}
/**
* zfcp_erp_port_reopen - trigger remote port recovery
* @port: port to recover
* @clear: flags in port status to be cleared
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_port_reopen(port, clear, dbftag);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
{
zfcp_erp_clear_lun_status(sdev,
ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
}
static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
char *dbftag, u32 act_status)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
zfcp_erp_lun_block(sdev, clear);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
zfcp_sdev->port, sdev, dbftag, act_status);
}
/**
* zfcp_erp_lun_reopen - initiate reopen of a LUN
* @sdev: SCSI device / LUN to be reopened
* @clear: specifies flags in LUN status to be cleared
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port = zfcp_sdev->port;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
* zfcp_erp_lun_shutdown - Shutdown LUN
* @sdev: SCSI device / LUN to shut down.
* @clear: Status flags to clear.
* @dbftag: Tag for debug trace event.
*/
void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
{
int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
}
/**
* zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
* @sdev: SCSI device / LUN to shut down.
* @dbftag: Tag for debug trace event.
*
 * Do not acquire a reference for the LUN when creating the ERP
 * action. This is safe because the function waits for the ERP to
 * complete first, which allows shutting down the LUN even when the
 * SCSI device is in state SDEV_DEL, where scsi_device_get() would fail.
*/
void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
{
unsigned long flags;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port = zfcp_sdev->port;
struct zfcp_adapter *adapter = port->adapter;
int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
write_lock_irqsave(&adapter->erp_lock, flags);
_zfcp_erp_lun_reopen(sdev, clear, dbftag, ZFCP_STATUS_ERP_NO_REF);
write_unlock_irqrestore(&adapter->erp_lock, flags);
zfcp_erp_wait(adapter);
}
static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status)
{
return (atomic_read(status) ^ mask) & mask;
}
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->status))
zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&port->status))
zfcp_dbf_rec_run("erpubl1", &port->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&zfcp_sdev->status))
zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
zfcp_dbf_rec_run("erator1", erp_action);
}
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_fsf_req *req;
if (!act->fsf_req_id)
return;
spin_lock(&adapter->req_list->lock);
req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
if (req && req->erp_action == act) {
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_run("erscf_1", act);
/* lock-free concurrent access with
* zfcp_erp_timeout_handler()
*/
WRITE_ONCE(req->erp_action, NULL);
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_run("erscf_2", act);
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
act->fsf_req_id = 0;
} else
act->fsf_req_id = 0;
spin_unlock(&adapter->req_list->lock);
}
/**
* zfcp_erp_notify - Trigger ERP action.
* @erp_action: ERP action to continue.
* @set_mask: ERP action status flags to set.
*/
void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
{
struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
write_lock_irqsave(&adapter->erp_lock, flags);
if (zfcp_erp_action_is_running(erp_action)) {
erp_action->status |= set_mask;
zfcp_erp_action_ready(erp_action);
}
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
* zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
* @t: timer list entry embedded in zfcp FSF request
*/
void zfcp_erp_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
struct zfcp_erp_action *act;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
return;
/* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
act = READ_ONCE(fsf_req->erp_action);
if (!act)
return;
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
static void zfcp_erp_memwait_handler(struct timer_list *t)
{
struct zfcp_erp_action *act = from_timer(act, t, timer);
zfcp_erp_notify(act, 0);
}
static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
{
timer_setup(&erp_action->timer, zfcp_erp_memwait_handler, 0);
erp_action->timer.expires = jiffies + HZ;
add_timer(&erp_action->timer);
}
void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
int clear, char *dbftag)
{
unsigned long flags;
struct zfcp_port *port;
write_lock_irqsave(&adapter->erp_lock, flags);
read_lock(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
_zfcp_erp_port_forced_reopen(port, clear, dbftag);
read_unlock(&adapter->port_list_lock);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *dbftag)
{
struct zfcp_port *port;
read_lock(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
_zfcp_erp_port_reopen(port, clear, dbftag);
read_unlock(&adapter->port_list_lock);
}
static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
char *dbftag)
{
struct scsi_device *sdev;
spin_lock(port->adapter->scsi_host->host_lock);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
{
switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
break;
case ZFCP_ERP_ACTION_REOPEN_LUN:
_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
break;
}
}
static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
{
switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
break;
case ZFCP_ERP_ACTION_REOPEN_LUN:
/* NOP */
break;
}
}
static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
unsigned long flags;
read_lock_irqsave(&adapter->erp_lock, flags);
if (list_empty(&adapter->erp_ready_head) &&
list_empty(&adapter->erp_running_head)) {
atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status);
wake_up(&adapter->erp_done_wqh);
}
read_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
{
struct zfcp_port *port;
port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
zfcp_erp_port_reopen(port, 0, "ereptp1");
}
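/*
 * Exchange config data with the FCP channel, retrying up to seven
 * times with exponentially growing sleeps while the connection to the
 * host is still being initialized.
 */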
static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
struct zfcp_erp_action *erp_action)
{
int retries;
int sleep = 1;
struct zfcp_adapter *adapter = erp_action->adapter;
atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
for (retries = 7; retries; retries--) {
atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action);
write_unlock_irq(&adapter->erp_lock);
if (zfcp_fsf_exchange_config_data(erp_action)) {
atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
return ZFCP_ERP_FAILED;
}
wait_event(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head));
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
break;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
break;
ssleep(sleep);
sleep *= 2;
}
atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
return ZFCP_ERP_FAILED;
return ZFCP_ERP_SUCCEEDED;
}
static void
zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
{
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
zfcp_erp_enqueue_ptp_port(adapter);
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
struct zfcp_erp_action *act)
{
int ret;
struct zfcp_adapter *adapter = act->adapter;
write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(act);
write_unlock_irq(&adapter->erp_lock);
ret = zfcp_fsf_exchange_port_data(act);
if (ret == -EOPNOTSUPP)
return ZFCP_ERP_SUCCEEDED;
if (ret)
return ZFCP_ERP_FAILED;
zfcp_dbf_rec_run("erasox1", act);
wait_event(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head));
zfcp_dbf_rec_run("erasox2", act);
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result
zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
{
struct zfcp_diag_adapter_config_data *const config_data =
&adapter->diagnostics->config_data;
struct zfcp_diag_adapter_port_data *const port_data =
&adapter->diagnostics->port_data;
unsigned long flags;
int rc;
rc = zfcp_scsi_adapter_register(adapter);
if (rc == -EEXIST)
return ZFCP_ERP_SUCCEEDED;
else if (rc)
return ZFCP_ERP_FAILED;
/*
* We allocated the shost for the first time. Before it was NULL,
* and so we deferred all updates in the xconf- and xport-data
* handlers. We need to make up for that now, and make all the updates
* that would have been done before.
*
* We can be sure that xconf- and xport-data succeeded, because
* otherwise this function is not called. But they might have been
* incomplete.
*/
spin_lock_irqsave(&config_data->header.access_lock, flags);
zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
!!config_data->header.incomplete);
spin_unlock_irqrestore(&config_data->header.access_lock, flags);
if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
spin_lock_irqsave(&port_data->header.access_lock, flags);
zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
spin_unlock_irqrestore(&port_data->header.access_lock, flags);
}
/*
* There is a remote possibility that the 'Exchange Port Data' request
* reports a different connectivity status than 'Exchange Config Data'.
* But any change to the connectivity status of the local optic that
* happens after the initial xconf request is expected to be reported
* to us, as soon as we post Status Read Buffers to the FCP channel
* firmware after this function. So any resulting inconsistency will
* only be momentary.
*/
if (config_data->header.incomplete)
zfcp_fsf_fc_host_link_down(adapter);
return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
struct zfcp_erp_action *act)
{
if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
if (mempool_resize(act->adapter->pool.status_read_req,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
return ZFCP_ERP_SUCCEEDED;
}
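/*
 * Closing the adapter shuts down the QDIO queues, dismisses all
 * outstanding FSF requests, forces the well-known-address ports
 * offline and clears the open and exchange-config status bits.
 */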
static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
/* close queues to ensure that buffers are not accessed by adapter */
zfcp_qdio_close(adapter->qdio);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
zfcp_fc_wka_ports_force_offline(adapter->gs);
/* all ports and LUNs are closed */
zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
if (zfcp_qdio_open(adapter->qdio)) {
atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status);
return ZFCP_ERP_FAILED;
}
if (zfcp_erp_adapter_strategy_open_fsf(act)) {
zfcp_erp_adapter_strategy_close(act);
return ZFCP_ERP_FAILED;
}
atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
zfcp_erp_adapter_strategy_close(act);
if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
}
if (zfcp_erp_adapter_strategy_open(act)) {
ssleep(8);
return ZFCP_ERP_FAILED;
}
return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
struct zfcp_erp_action *act)
{
int retval;
retval = zfcp_fsf_close_physical_port(act);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
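/*
 * Forced port recovery only closes the physical port; the action
 * succeeds once ZFCP_STATUS_PORT_PHYS_OPEN is gone and fails in every
 * other case.
 */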
static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int status = atomic_read(&port->status);
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
(status & ZFCP_STATUS_COMMON_OPEN))
return zfcp_erp_port_forced_strategy_close(erp_action);
else
return ZFCP_ERP_FAILED;
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
return ZFCP_ERP_SUCCEEDED;
break;
case ZFCP_ERP_STEP_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_OPENING:
case ZFCP_ERP_STEP_LUN_CLOSING:
case ZFCP_ERP_STEP_LUN_OPENING:
/* NOP */
break;
}
return ZFCP_ERP_FAILED;
}
static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
struct zfcp_erp_action *erp_action)
{
int retval;
retval = zfcp_fsf_close_port(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
struct zfcp_erp_action *erp_action)
{
int retval;
retval = zfcp_fsf_open_port(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
if (port->wwpn != adapter->peer_wwpn) {
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
return ZFCP_ERP_FAILED;
}
port->d_id = adapter->peer_d_id;
return zfcp_erp_port_strategy_open_port(act);
}
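/*
 * Open a remote port step by step: in point-to-point topology reuse
 * the peer's D_ID, otherwise trigger a nameserver lookup if the D_ID
 * is unknown, then issue the open and re-check the D_ID afterwards.
 */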
static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
int p_status = atomic_read(&port->status);
switch (act->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_CLOSING:
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
if (!port->d_id) {
zfcp_fc_trigger_did_lookup(port);
return ZFCP_ERP_EXIT;
}
return zfcp_erp_port_strategy_open_port(act);
case ZFCP_ERP_STEP_PORT_OPENING:
/* D_ID might have changed during open */
if (p_status & ZFCP_STATUS_COMMON_OPEN) {
if (!port->d_id) {
zfcp_fc_trigger_did_lookup(port);
return ZFCP_ERP_EXIT;
}
return ZFCP_ERP_SUCCEEDED;
}
if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
port->d_id = 0;
return ZFCP_ERP_FAILED;
}
/* no early return otherwise, continue after switch case */
break;
case ZFCP_ERP_STEP_LUN_CLOSING:
case ZFCP_ERP_STEP_LUN_OPENING:
/* NOP */
break;
}
return ZFCP_ERP_FAILED;
}
static enum zfcp_erp_act_result zfcp_erp_port_strategy(
struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
int p_status = atomic_read(&port->status);
if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
!(p_status & ZFCP_STATUS_COMMON_OPEN))
goto close_init_done;
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
if (p_status & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_port_strategy_close(erp_action);
break;
case ZFCP_ERP_STEP_PORT_CLOSING:
if (p_status & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
break;
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_OPENING:
case ZFCP_ERP_STEP_LUN_CLOSING:
case ZFCP_ERP_STEP_LUN_OPENING:
/* NOP */
break;
}
close_init_done:
if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
return zfcp_erp_port_strategy_open_common(erp_action);
}
static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
&zfcp_sdev->status);
}
static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_close_lun(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
struct zfcp_erp_action *erp_action)
{
int retval = zfcp_fsf_open_lun(erp_action);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
struct zfcp_erp_action *erp_action)
{
struct scsi_device *sdev = erp_action->sdev;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
zfcp_erp_lun_strategy_clearstati(sdev);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
/* already closed */
fallthrough;
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
return zfcp_erp_lun_strategy_open(erp_action);
case ZFCP_ERP_STEP_LUN_OPENING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_SUCCEEDED;
break;
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_OPENING:
/* NOP */
break;
}
return ZFCP_ERP_FAILED;
}
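/*
 * The *_check_* helpers evaluate the outcome of an action: on success
 * the object is unblocked and its retry counter reset; on failure the
 * counter is incremented and, after ZFCP_MAX_ERPS attempts, the object
 * is marked ZFCP_STATUS_COMMON_ERP_FAILED.
 */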
static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
struct scsi_device *sdev, enum zfcp_erp_act_result result)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
switch (result) {
case ZFCP_ERP_SUCCEEDED :
atomic_set(&zfcp_sdev->erp_counter, 0);
zfcp_erp_lun_unblock(sdev);
break;
case ZFCP_ERP_FAILED :
atomic_inc(&zfcp_sdev->erp_counter);
if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
"ERP failed for LUN 0x%016Lx on "
"port 0x%016Lx\n",
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_CONTINUES:
case ZFCP_ERP_EXIT:
case ZFCP_ERP_DISMISSED:
case ZFCP_ERP_NOMEM:
/* NOP */
break;
}
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
zfcp_erp_lun_block(sdev, 0);
result = ZFCP_ERP_EXIT;
}
return result;
}
static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
struct zfcp_port *port, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
atomic_set(&port->erp_counter, 0);
zfcp_erp_port_unblock(port);
break;
case ZFCP_ERP_FAILED :
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
zfcp_erp_port_block(port, 0);
result = ZFCP_ERP_EXIT;
}
atomic_inc(&port->erp_counter);
if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&port->adapter->ccw_device->dev,
"ERP failed for remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
zfcp_erp_set_port_status(port,
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_CONTINUES:
case ZFCP_ERP_EXIT:
case ZFCP_ERP_DISMISSED:
case ZFCP_ERP_NOMEM:
/* NOP */
break;
}
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
zfcp_erp_port_block(port, 0);
result = ZFCP_ERP_EXIT;
}
return result;
}
static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
{
switch (result) {
case ZFCP_ERP_SUCCEEDED :
atomic_set(&adapter->erp_counter, 0);
zfcp_erp_adapter_unblock(adapter);
break;
case ZFCP_ERP_FAILED :
atomic_inc(&adapter->erp_counter);
if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&adapter->ccw_device->dev,
"ERP cannot recover an error "
"on the FCP device\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_CONTINUES:
case ZFCP_ERP_EXIT:
case ZFCP_ERP_DISMISSED:
case ZFCP_ERP_NOMEM:
/* NOP */
break;
}
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
zfcp_erp_adapter_block(adapter, 0);
result = ZFCP_ERP_EXIT;
}
return result;
}
static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_port *port = erp_action->port;
struct scsi_device *sdev = erp_action->sdev;
switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
result = zfcp_erp_strategy_check_lun(sdev, result);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
result = zfcp_erp_strategy_check_port(port, result);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
result = zfcp_erp_strategy_check_adapter(adapter, result);
break;
}
return result;
}
static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
{
int status = atomic_read(target_status);
if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
return 1; /* take it online */
if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
!(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
return 1; /* take it offline */
return 0;
}
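/*
 * If the desired online/offline state of the target changed while the
 * action was running, trigger a follow-up reopen and finish this
 * action with ZFCP_ERP_EXIT.
 */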
static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
{
enum zfcp_erp_act_type type = act->type;
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
struct zfcp_scsi_dev *zfcp_sdev;
u32 erp_status = act->status;
switch (type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
_zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED,
"ersscg1");
return ZFCP_ERP_EXIT;
}
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
_zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"ersscg2");
return ZFCP_ERP_EXIT;
}
break;
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
_zfcp_erp_lun_reopen(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED,
"ersscg3", 0);
return ZFCP_ERP_EXIT;
}
break;
}
return result;
}
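/*
 * Undo the bookkeeping done at enqueue time: remove the action from
 * its list, adjust the low-memory and total counters and clear
 * ZFCP_STATUS_COMMON_ERP_INUSE on the target object.
 */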
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_scsi_dev *zfcp_sdev;
adapter->erp_total_count--;
if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
adapter->erp_low_mem_count--;
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
}
list_del(&erp_action->list);
zfcp_dbf_rec_run("eractd1", erp_action);
switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->port->status);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->adapter->status);
break;
}
}
/**
* zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
* @port: zfcp_port whose fc_rport we should try to unblock
*/
static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
{
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
int port_status;
struct Scsi_Host *shost = adapter->scsi_host;
struct scsi_device *sdev;
write_lock_irqsave(&adapter->erp_lock, flags);
port_status = atomic_read(&port->status);
if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
(port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
/* new ERP of severity >= port triggered elsewhere meanwhile or
* local link down (adapter erp_failed but not clear unblock)
*/
zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
write_unlock_irqrestore(&adapter->erp_lock, flags);
return;
}
spin_lock(shost->host_lock);
__shost_for_each_device(sdev, shost) {
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
if (sdev->sdev_state == SDEV_DEL ||
sdev->sdev_state == SDEV_CANCEL)
continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
lun_status = atomic_read(&zsdev->status);
if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
continue; /* unblock rport despite failed LUNs */
/* LUN recovery not given up yet [maybe follow-up pending] */
if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
(lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
/* LUN blocked:
* not yet unblocked [LUN recovery pending]
* or meanwhile blocked [new LUN recovery triggered]
*/
zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
spin_unlock(shost->host_lock);
write_unlock_irqrestore(&adapter->erp_lock, flags);
return;
}
}
/* now port has no child or all children have completed recovery,
* and no ERP of severity >= port was meanwhile triggered elsewhere
*/
zfcp_scsi_schedule_rport_register(port);
spin_unlock(shost->host_lock);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
enum zfcp_erp_act_result result)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct scsi_device *sdev = act->sdev;
switch (act->type) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
zfcp_erp_try_rport_unblock(port);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
/* This switch case might also happen after a forced reopen
* was successfully done and thus overwritten with a new
* non-forced reopen at `ersfs_2'. In this case, we must not
* do the clean-up of the non-forced version.
*/
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_try_rport_unblock(port);
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
zfcp_fc_conditional_port_scan(adapter);
queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
}
static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
struct zfcp_erp_action *erp_action)
{
switch (erp_action->type) {
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
return zfcp_erp_adapter_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
return zfcp_erp_port_forced_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_PORT:
return zfcp_erp_port_strategy(erp_action);
case ZFCP_ERP_ACTION_REOPEN_LUN:
return zfcp_erp_lun_strategy(erp_action);
}
return ZFCP_ERP_FAILED;
}
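/*
 * Run one ERP action up to its next blocking point or to completion:
 * the action is moved to the running list, its type-specific strategy
 * is executed without the erp_lock held, and the result selects retry,
 * memory wait, follow-up actions or final cleanup.
 */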
static enum zfcp_erp_act_result zfcp_erp_strategy(
struct zfcp_erp_action *erp_action)
{
enum zfcp_erp_act_result result;
unsigned long flags;
struct zfcp_adapter *adapter = erp_action->adapter;
kref_get(&adapter->ref);
write_lock_irqsave(&adapter->erp_lock, flags);
zfcp_erp_strategy_check_fsfreq(erp_action);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
zfcp_erp_action_dequeue(erp_action);
result = ZFCP_ERP_DISMISSED;
goto unlock;
}
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
result = ZFCP_ERP_FAILED;
goto check_target;
}
zfcp_erp_action_to_running(erp_action);
/* no lock to allow for blocking operations */
write_unlock_irqrestore(&adapter->erp_lock, flags);
result = zfcp_erp_strategy_do_action(erp_action);
write_lock_irqsave(&adapter->erp_lock, flags);
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
result = ZFCP_ERP_CONTINUES;
switch (result) {
case ZFCP_ERP_NOMEM:
if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
++adapter->erp_low_mem_count;
erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
}
if (adapter->erp_total_count == adapter->erp_low_mem_count)
_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
else {
zfcp_erp_strategy_memwait(erp_action);
result = ZFCP_ERP_CONTINUES;
}
goto unlock;
case ZFCP_ERP_CONTINUES:
if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
--adapter->erp_low_mem_count;
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
}
goto unlock;
case ZFCP_ERP_SUCCEEDED:
case ZFCP_ERP_FAILED:
case ZFCP_ERP_EXIT:
case ZFCP_ERP_DISMISSED:
/* NOP */
break;
}
check_target:
result = zfcp_erp_strategy_check_target(erp_action, result);
zfcp_erp_action_dequeue(erp_action);
result = zfcp_erp_strategy_statechange(erp_action, result);
if (result == ZFCP_ERP_EXIT)
goto unlock;
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_strategy_followup_success(erp_action);
if (result == ZFCP_ERP_FAILED)
zfcp_erp_strategy_followup_failed(erp_action);
unlock:
write_unlock_irqrestore(&adapter->erp_lock, flags);
if (result != ZFCP_ERP_CONTINUES)
zfcp_erp_action_cleanup(erp_action, result);
kref_put(&adapter->ref, zfcp_adapter_release);
return result;
}
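/*
 * Per-adapter ERP kernel thread: wait for work on erp_ready_head and
 * process one action at a time until kthread_stop() is called.
 */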
static int zfcp_erp_thread(void *data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
struct zfcp_erp_action *act;
unsigned long flags;
for (;;) {
wait_event_interruptible(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head) ||
kthread_should_stop());
if (kthread_should_stop())
break;
write_lock_irqsave(&adapter->erp_lock, flags);
act = list_first_entry_or_null(&adapter->erp_ready_head,
struct zfcp_erp_action, list);
write_unlock_irqrestore(&adapter->erp_lock, flags);
if (act) {
			/* there is more to come after a dismissal, no notify */
if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
zfcp_erp_wakeup(adapter);
}
}
return 0;
}
/**
* zfcp_erp_thread_setup - Start ERP thread for adapter
* @adapter: Adapter to start the ERP thread for
*
* Return: 0 on success, or error code from kthread_run().
*/
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
struct task_struct *thread;
thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
dev_name(&adapter->ccw_device->dev));
if (IS_ERR(thread)) {
dev_err(&adapter->ccw_device->dev,
"Creating an ERP thread for the FCP device failed.\n");
return PTR_ERR(thread);
}
adapter->erp_thread = thread;
return 0;
}
/**
* zfcp_erp_thread_kill - Stop ERP thread.
* @adapter: Adapter where the ERP thread should be stopped.
*
* The caller of this routine ensures that the specified adapter has
* been shut down and that this operation has been completed. Thus,
* there are no pending erp_actions which would need to be handled
* here.
*/
void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
kthread_stop(adapter->erp_thread);
adapter->erp_thread = NULL;
WARN_ON(!list_empty(&adapter->erp_ready_head));
WARN_ON(!list_empty(&adapter->erp_running_head));
}
/**
* zfcp_erp_wait - wait for completion of error recovery on an adapter
* @adapter: adapter for which to wait for completion of its error recovery
*/
void zfcp_erp_wait(struct zfcp_adapter *adapter)
{
wait_event(adapter->erp_done_wqh,
!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_PENDING));
}
/**
* zfcp_erp_set_adapter_status - set adapter status bits
* @adapter: adapter to change the status
* @mask: status bits to change
*
* Changes in common status bits are propagated to attached ports and LUNs.
*/
void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
struct zfcp_port *port;
struct scsi_device *sdev;
unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
atomic_or(mask, &adapter->status);
if (!common_mask)
return;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
/*
* if `scsi_host` is missing, xconfig/xport data has never completed
* yet, so we can't access it, but there are also no SDEVs yet
*/
if (adapter->scsi_host == NULL)
return;
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
* zfcp_erp_clear_adapter_status - clear adapter status bits
* @adapter: adapter to change the status
* @mask: status bits to change
*
* Changes in common status bits are propagated to attached ports and LUNs.
*/
void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
{
struct zfcp_port *port;
struct scsi_device *sdev;
unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
atomic_andnot(mask, &adapter->status);
if (!common_mask)
return;
if (clear_counter)
atomic_set(&adapter->erp_counter, 0);
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
atomic_andnot(common_mask, &port->status);
if (clear_counter)
atomic_set(&port->erp_counter, 0);
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
/*
* if `scsi_host` is missing, xconfig/xport data has never completed
* yet, so we can't access it, but there are also no SDEVs yet
*/
if (adapter->scsi_host == NULL)
return;
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
* zfcp_erp_set_port_status - set port status bits
* @port: port to change the status
* @mask: status bits to change
*
* Changes in common status bits are propagated to attached LUNs.
*/
void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
unsigned long flags;
atomic_or(mask, &port->status);
if (!common_mask)
return;
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_or(common_mask,
&sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
* zfcp_erp_clear_port_status - clear port status bits
* @port: adapter to change the status
* @mask: status bits to change
*
* Changes in common status bits are propagated to attached LUNs.
*/
void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
unsigned long flags;
atomic_andnot(mask, &port->status);
if (!common_mask)
return;
if (clear_counter)
atomic_set(&port->erp_counter, 0);
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_andnot(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
* zfcp_erp_set_lun_status - set lun status bits
* @sdev: SCSI device / lun to set the status bits
* @mask: status bits to change
*/
void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_or(mask, &zfcp_sdev->status);
}
/**
* zfcp_erp_clear_lun_status - clear lun status bits
* @sdev: SCSi device / lun to clear the status bits
* @mask: status bits to change
*/
void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_andnot(mask, &zfcp_sdev->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
atomic_set(&zfcp_sdev->erp_counter, 0);
}
/**
* zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
* @adapter: Pointer to zfcp_adapter to reopen.
* @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
*/
void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
{
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
zfcp_erp_wait(adapter);
}
| linux-master | drivers/s390/scsi/zfcp_erp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* finite state machine for device handling
*
* Copyright IBM Corp. 2002, 2008
* Author(s): Cornelia Huck ([email protected])
* Martin Schwidefsky ([email protected])
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"
static int timeout_log_enabled;
static int __init ccw_timeout_log_setup(char *unused)
{
timeout_log_enabled = 1;
return 1;
}
__setup("ccw_timeout_log", ccw_timeout_log_setup);
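/*
 * Dump ORB, channel program, SCHIB and device state to the kernel log
 * when a ccw device timeout occurs and the ccw_timeout_log kernel
 * parameter is set.
 */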
static void ccw_timeout_log(struct ccw_device *cdev)
{
struct schib schib;
struct subchannel *sch;
struct io_subchannel_private *private;
union orb *orb;
int cc;
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
cc = stsch(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
"device information:\n", get_tod_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
printk(KERN_WARNING "cio: ccw device bus id: %s\n",
dev_name(&cdev->dev));
printk(KERN_WARNING "cio: subchannel bus id: %s\n",
dev_name(&sch->dev));
printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
"vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
if (orb->tm.b) {
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
phys_to_virt(orb->tm.tcw),
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
if ((void *)(addr_t)orb->cmd.cpa ==
&private->dma_area->sense_ccw ||
(void *)(addr_t)orb->cmd.cpa ==
cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
phys_to_virt(orb->cmd.cpa),
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
cdev->private->state);
printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
printk(KERN_WARNING "cio: schib:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
&schib, sizeof(schib), 0);
printk(KERN_WARNING "cio: ccw device flags:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
&cdev->private->flags, sizeof(cdev->private->flags), 0);
}
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
void
ccw_device_timeout(struct timer_list *t)
{
struct ccw_device_private *priv = from_timer(priv, t, timer);
struct ccw_device *cdev = priv->cdev;
spin_lock_irq(cdev->ccwlock);
if (timeout_log_enabled)
ccw_timeout_log(cdev);
dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
spin_unlock_irq(cdev->ccwlock);
}
/*
* Set timeout
*/
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
if (expires == 0)
del_timer(&cdev->private->timer);
else
mod_timer(&cdev->private->timer, jiffies + expires);
}
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(cdev->dev.parent);
ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
if (ret == -EIO)
CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
return ret;
}
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
return cdev->id.cu_type ==
cdev->private->dma_area->senseid.cu_type &&
cdev->id.cu_model ==
cdev->private->dma_area->senseid.cu_model &&
cdev->id.dev_type ==
cdev->private->dma_area->senseid.dev_type &&
cdev->id.dev_model ==
cdev->private->dma_area->senseid.dev_model;
}
/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE, so we have to find out by magic (i.e. by
 * driving the channel subsystem to device selection and updating our path
 * masks).
*/
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
int mask, i;
struct chp_id chpid;
chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(sch->lpm & mask))
continue;
if (old_lpm & mask)
continue;
chpid.id = sch->schib.pmcw.chpid[i];
if (!chp_is_registered(chpid))
css_schedule_eval_all();
}
}
/*
* Stop device recognition.
*/
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
int old_lpm;
sch = to_subchannel(cdev->dev.parent);
if (cio_disable_subchannel(sch))
state = DEV_STATE_NOT_OPER;
/*
* Now that we tried recognition, we have performed device selection
* through ssch() and the path information is up to date.
*/
old_lpm = sch->lpm;
/* Check since device may again have become not operational. */
if (cio_update_schib(sch))
state = DEV_STATE_NOT_OPER;
else
sch->lpm = sch->schib.pmcw.pam & sch->opm;
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
/* Force reprobe on all chpids. */
old_lpm = 0;
if (sch->lpm != old_lpm)
__recover_lost_chpids(sch, old_lpm);
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
(state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_DISCONNECTED;
wake_up(&cdev->private->wait_q);
return;
}
switch (state) {
case DEV_STATE_NOT_OPER:
break;
case DEV_STATE_OFFLINE:
if (!cdev->online) {
ccw_device_update_sense_data(cdev);
break;
}
cdev->private->state = DEV_STATE_OFFLINE;
cdev->private->flags.recog_done = 1;
if (ccw_device_test_sense_data(cdev)) {
cdev->private->flags.donotify = 1;
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
} else {
ccw_device_update_sense_data(cdev);
ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
return;
case DEV_STATE_BOXED:
if (cdev->id.cu_type != 0) { /* device was recognized before */
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_BOXED;
wake_up(&cdev->private->wait_q);
return;
}
break;
}
cdev->private->state = state;
io_subchannel_recog_done(cdev);
wake_up(&cdev->private->wait_q);
}
/*
* Function called from device_id.c after sense id has completed.
*/
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
switch (err) {
case 0:
ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
break;
case -ETIME: /* Sense id stopped by timeout. */
ccw_device_recog_done(cdev, DEV_STATE_BOXED);
break;
default:
ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
/**
* ccw_device_notify() - inform the device's driver about an event
* @cdev: device for which an event occurred
* @event: event that occurred
*
* Returns:
* -%EINVAL if the device is offline or has no driver.
* -%EOPNOTSUPP if the device's driver has no notifier registered.
* %NOTIFY_OK if the driver wants to keep the device.
* %NOTIFY_BAD if the driver doesn't want to keep the device.
*/
int ccw_device_notify(struct ccw_device *cdev, int event)
{
int ret = -EINVAL;
if (!cdev->drv)
goto out;
if (!cdev->online)
goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event);
if (!cdev->drv->notify) {
ret = -EOPNOTSUPP;
goto out;
}
if (cdev->drv->notify(cdev, event))
ret = NOTIFY_OK;
else
ret = NOTIFY_BAD;
out:
return ret;
}
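/*
 * A device became operational again: if the driver wants to keep it,
 * re-enable channel measurements and remember the newly available
 * paths; otherwise schedule the device for rebinding.
 */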
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Re-enable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
/* Save indication for new paths. */
cdev->private->path_new_mask = sch->vpm;
return;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
/*
* Finished with online/offline processing.
*/
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
if (state != DEV_STATE_ONLINE)
cio_disable_subchannel(sch);
/* Reset device status. */
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
cdev->private->state = state;
switch (state) {
case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->online &&
ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
cdev->private->state = DEV_STATE_NOT_OPER;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
} else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
break;
default:
break;
}
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
ccw_device_oper_notify(cdev);
}
wake_up(&cdev->private->wait_q);
}
/*
* Start device recognition.
*/
void ccw_device_recognition(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
* We used to start here with a sense pgid to find out whether a device
* is locked by someone else. Unfortunately, the sense pgid command
* code has other meanings on devices predating the path grouping
 * algorithm, so we start with sense id and box the device after a
* timeout (or if sense pgid during path verification detects the device
* is locked, as may happen on newer devices).
*/
cdev->private->flags.recog_done = 0;
cdev->private->state = DEV_STATE_SENSE_ID;
if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) {
ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
return;
}
ccw_device_sense_id_start(cdev);
}
/*
* Handle events for states that use the ccw request infrastructure.
*/
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
switch (e) {
case DEV_EVENT_NOTOPER:
ccw_request_notoper(cdev);
break;
case DEV_EVENT_INTERRUPT:
ccw_request_handler(cdev);
break;
case DEV_EVENT_TIMEOUT:
ccw_request_timeout(cdev);
break;
default:
break;
}
}
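/*
 * Translate the accumulated path masks into a per-CHPID event array
 * and report it to the driver's path_event callback.
 */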
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
int path_event[8];
int chp, mask;
for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
path_event[chp] = PE_NONE;
if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
path_event[chp] |= PE_PATH_GONE;
if (mask & cdev->private->path_new_mask & sch->vpm)
path_event[chp] |= PE_PATH_AVAILABLE;
if (mask & cdev->private->pgid_reset_mask & sch->vpm)
path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
}
if (cdev->online && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
}
static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
cdev->private->path_gone_mask = 0;
cdev->private->path_new_mask = 0;
cdev->private->pgid_reset_mask = 0;
}
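/*
 * Build a fake IRB that reports a still pending start function with
 * deferred condition code 1, in command or transport mode depending
 * on @type.
 */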
static void create_fake_irb(struct irb *irb, int type)
{
memset(irb, 0, sizeof(*irb));
if (type == FAKE_CMD_IRB) {
struct cmd_scsw *scsw = &irb->scsw.cmd;
scsw->cc = 1;
scsw->fctl = SCSW_FCTL_START_FUNC;
scsw->actl = SCSW_ACTL_START_PEND;
scsw->stctl = SCSW_STCTL_STATUS_PEND;
} else if (type == FAKE_TM_IRB) {
struct tm_scsw *scsw = &irb->scsw.tm;
scsw->x = 1;
scsw->cc = 1;
scsw->fctl = SCSW_FCTL_START_FUNC;
scsw->actl = SCSW_ACTL_START_PEND;
scsw->stctl = SCSW_STCTL_STATUS_PEND;
}
}
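/*
 * Compare the usable paths (pam & opm) with the verified path mask and
 * schedule the recovery worker if new broken paths have shown up.
 */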
static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
ccw_device_schedule_recovery();
cdev->private->path_broken_mask = broken_paths;
}
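/*
 * Path verification has finished: refresh the path masks, restart the
 * verification if another run was requested, and move the device to
 * its final state based on @err.
 */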
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
if (cio_update_schib(sch)) {
err = -ENODEV;
goto callback;
}
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
if (cdev->private->flags.doverify) {
ccw_device_verify_start(cdev);
return;
}
callback:
switch (err) {
case 0:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->dma_area->irb);
memset(&cdev->private->dma_area->irb, 0,
sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
ccw_device_handle_broken_paths(cdev);
break;
case -ETIME:
case -EUSERS:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
case -EACCES:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
break;
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
ccw_device_reset_path_events(cdev);
}
/*
* Get device online.
*/
int
ccw_device_online(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
if ((cdev->private->state != DEV_STATE_OFFLINE) &&
(cdev->private->state != DEV_STATE_BOXED))
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret != 0) {
/* Couldn't enable the subchannel for i/o. Sick device. */
if (ret == -ENODEV)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
/* Start initial path verification. */
cdev->private->state = DEV_STATE_VERIFY;
ccw_device_verify_start(cdev);
return 0;
}
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
switch (err) {
case 0:
ccw_device_done(cdev, DEV_STATE_OFFLINE);
break;
case -ETIME:
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
default:
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
/*
* Shutdown device.
*/
int
ccw_device_offline(struct ccw_device *cdev)
{
struct subchannel *sch;
/* Allow ccw_device_offline while disconnected. */
if (cdev->private->state == DEV_STATE_DISCONNECTED ||
cdev->private->state == DEV_STATE_NOT_OPER) {
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
return 0;
}
if (cdev->private->state == DEV_STATE_BOXED) {
ccw_device_done(cdev, DEV_STATE_BOXED);
return 0;
}
if (ccw_device_is_orphan(cdev)) {
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
}
sch = to_subchannel(cdev->dev.parent);
if (cio_update_schib(sch))
return -ENODEV;
if (scsw_actl(&sch->schib.scsw) != 0)
return -EBUSY;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
if (!cdev->private->flags.pgroup) {
/* No, set state offline immediately. */
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
}
/* Start Set Path Group commands. */
cdev->private->state = DEV_STATE_DISBAND_PGID;
ccw_device_disband_start(cdev);
return 0;
}
/*
* Handle not operational event in non-special state.
*/
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
}
/*
* Handle path verification event in offline state.
*/
static void ccw_device_offline_verify(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
css_schedule_eval(sch->schid);
}
/*
* Handle path verification event.
*/
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
if (cdev->private->state == DEV_STATE_W4SENSE) {
cdev->private->flags.doverify = 1;
return;
}
sch = to_subchannel(cdev->dev.parent);
/*
* Since we might not just be coming from an interrupt from the
 * subchannel, we have to update the schib.
*/
if (cio_update_schib(sch)) {
ccw_device_verify_done(cdev, -ENODEV);
return;
}
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
(scsw_stctl(&cdev->private->dma_area->irb.scsw) &
SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
* delay until final status was delivered.
*/
cdev->private->flags.doverify = 1;
return;
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
ccw_device_verify_start(cdev);
}
/*
* Handle path verification event in boxed state.
*/
static void ccw_device_boxed_verify(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (cdev->online) {
if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)))
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
else
ccw_device_online_verify(cdev, dev_event);
} else
css_schedule_eval(sch->schid);
}
/*
* Pass interrupt to device driver.
*/
static int ccw_device_call_handler(struct ccw_device *cdev)
{
unsigned int stctl;
int ending_status;
/*
 * we allow for the device action handler if:
* - we received ending status
* - the action handler requested to see all interrupts
* - we received an intermediate status
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
if (!ending_status &&
!cdev->private->options.repall &&
!(stctl & SCSW_STCTL_INTER_STATUS) &&
!(cdev->private->options.fast &&
(stctl & SCSW_STCTL_PRIM_STATUS)))
return 0;
if (ending_status)
ccw_device_set_timeout(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->dma_area->irb);
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
return 1;
}
/*
* Got an interrupt for a normal io (state online).
*/
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
int is_cmd;
irb = this_cpu_ptr(&cio_irb);
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!irb->esw.esw0.erw.cons) {
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
memcpy(&cdev->private->dma_area->irb, irb,
sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
}
call_handler_unsol:
if (cdev->handler)
			cdev->handler(cdev, 0, irb);
if (cdev->private->flags.doverify)
ccw_device_online_verify(cdev, 0);
return;
}
/* Accumulate status and find out if a basic sense is needed. */
ccw_device_accumulate_irb(cdev, irb);
if (is_cmd && cdev->private->flags.dosense) {
if (ccw_device_do_sense(cdev, irb) == 0) {
cdev->private->state = DEV_STATE_W4SENSE;
}
return;
}
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
}
/*
 * Got a timeout in online state.
*/
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ccw_device_set_timeout(cdev, 0);
cdev->private->iretry = 255;
cdev->private->async_kill_io_rc = -ETIMEDOUT;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
if (ret)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
/*
* Got an interrupt for a basic sense.
*/
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
irb = this_cpu_ptr(&cio_irb);
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (scsw_cc(&irb->scsw) == 1)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
"interrupt during w4sense...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (cdev->handler)
				cdev->handler(cdev, 0, irb);
}
return;
}
/*
* Check if a halt or clear has been issued in the meanwhile. If yes,
* only deliver the halt/clear interrupt to the device driver as if it
* had killed the original request.
*/
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
/* Add basic sense info to irb. */
ccw_device_accumulate_basic_sense(cdev, irb);
if (cdev->private->flags.dosense) {
/* Another basic sense is needed. */
ccw_device_do_sense(cdev, irb);
return;
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
/* In case sensing interfered with setting the device online */
wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
/* OK, i/o is dead now. Call interrupt handler. */
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(cdev->private->async_kill_io_rc));
}
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
return;
}
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(cdev->private->async_kill_io_rc));
}
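/*
 * Terminate the current I/O with cancel/halt/clear and deliver -EIO to
 * the driver's interrupt handler once the I/O is gone.
 */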
void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
ccw_device_set_timeout(cdev, 0);
cdev->private->iretry = 255;
cdev->private->async_kill_io_rc = -EIO;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
/* Start verification after current task finished. */
cdev->private->flags.doverify = 1;
}
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)) != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return;
cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
ccw_device_sense_id_start(cdev);
}
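/*
 * Restart device recognition for a disconnected device. The schib and
 * path mask are refreshed first; if a different device now appears on
 * the subchannel, the evaluation is left to the css layer instead.
 */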
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
struct subchannel *sch;
if (cdev->private->state != DEV_STATE_DISCONNECTED)
return;
sch = to_subchannel(cdev->dev.parent);
/* Update some values. */
if (cio_update_schib(sch))
return;
/*
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
*/
sch->lpm = sch->schib.pmcw.pam & sch->opm;
/*
* Use the initial configuration since we can't be sure that the old
* paths are valid.
*/
io_subchannel_init_config(sch);
if (cio_commit_config(sch))
return;
	/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
css_schedule_eval(sch->schid);
else
ccw_device_start_id(cdev, 0);
}
static void ccw_device_disabled_irq(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
* An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
}
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
retry_set_schib(cdev);
cdev->private->state = DEV_STATE_ONLINE;
dev_fsm_event(cdev, dev_event);
}
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
enum dev_event dev_event)
{
cmf_retry_copy_block(cdev);
cdev->private->state = DEV_STATE_ONLINE;
dev_fsm_event(cdev, dev_event);
}
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
} else {
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
}
/*
* No operation action. This is used e.g. to ignore a timeout event in
* state offline.
*/
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
* device statemachine
*/
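/*
 * The table below is indexed by device state and event. Roughly, the
 * dispatch (which lives outside this file) amounts to something like:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */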
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_W4SENSE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_nop,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
},
/* states to wait for i/o completion before doing something */
[DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
},
[DEV_STATE_QUIESCE] = {
[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
[DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
[DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
/* special states for devices gone not operational */
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_CMFCHANGE] = {
[DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
[DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
[DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
[DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
},
[DEV_STATE_CMFUPDATE] = {
[DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
[DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
},
[DEV_STATE_STEAL_LOCK] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
};
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
| linux-master | drivers/s390/cio/device_fsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 chsc subchannels
*
* Copyright IBM Corp. 2008, 2011
*
* Author(s): Cornelia Huck <[email protected]>
*
*/
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;
static struct chsc_request *on_close_request;
static struct chsc_async_area *on_close_chsc_area;
static DEFINE_MUTEX(on_close_mutex);
#define CHSC_MSG(imp, args...) do { \
	debug_sprintf_event(chsc_debug_msg_id, imp, ##args); \
} while (0)
#define CHSC_LOG(imp, txt) do { \
	debug_text_event(chsc_debug_log_id, imp, txt); \
} while (0)
static void CHSC_LOG_HEX(int level, void *data, int length)
{
debug_event(chsc_debug_log_id, level, data, length);
}
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");
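/*
 * Interrupt handler for chsc subchannels: copy the irb into the
 * pending request, mark it complete and drop the device reference.
 */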
static void chsc_subchannel_irq(struct subchannel *sch)
{
struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
struct irb *irb = this_cpu_ptr(&cio_irb);
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
inc_irq_stat(IRQIO_CSC);
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
sch->schid.ssid, sch->schid.sch_no);
return;
}
private->request = NULL;
memcpy(&request->irb, irb, sizeof(*irb));
cio_update_schib(sch);
complete(&request->completion);
put_device(&sch->dev);
}
static int chsc_subchannel_probe(struct subchannel *sch)
{
struct chsc_private *private;
int ret;
CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
sch->schid.ssid, sch->schid.sch_no);
sch->isc = CHSC_SCH_ISC;
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
dev_set_drvdata(&sch->dev, private);
ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
dev_set_drvdata(&sch->dev, NULL);
kfree(private);
}
return ret;
}
static void chsc_subchannel_remove(struct subchannel *sch)
{
struct chsc_private *private;
cio_disable_subchannel(sch);
private = dev_get_drvdata(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
}
kfree(private);
}
static void chsc_subchannel_shutdown(struct subchannel *sch)
{
cio_disable_subchannel(sch);
}
static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "chsc_subchannel",
},
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
.remove = chsc_subchannel_remove,
.shutdown = chsc_subchannel_shutdown,
};
static int __init chsc_init_dbfs(void)
{
chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
debug_set_level(chsc_debug_msg_id, 2);
chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
if (!chsc_debug_log_id)
goto out;
debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
debug_set_level(chsc_debug_log_id, 2);
return 0;
out:
debug_unregister(chsc_debug_msg_id);
return -ENOMEM;
}
static void chsc_remove_dbfs(void)
{
debug_unregister(chsc_debug_log_id);
debug_unregister(chsc_debug_msg_id);
}
static int __init chsc_init_sch_driver(void)
{
return css_driver_register(&chsc_subchannel_driver);
}
static void chsc_cleanup_sch_driver(void)
{
css_driver_unregister(&chsc_subchannel_driver);
}
static DEFINE_SPINLOCK(chsc_lock);
static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
{
struct subchannel *sch = to_subchannel(dev);
return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}
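/*
 * Find the next enabled chsc subchannel with no function in progress,
 * continuing the search after @sch (or from the start if @sch is NULL).
 */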
static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
struct device *dev;
dev = driver_find_device(&chsc_subchannel_driver.drv,
sch ? &sch->dev : NULL, NULL,
chsc_subchannel_match_next_free);
return dev ? to_subchannel(dev) : NULL;
}
/**
* chsc_async() - try to start a chsc request asynchronously
* @chsc_area: request to be started
* @request: request structure to associate
*
* Tries to start a chsc request on one of the existing chsc subchannels.
* Returns:
* %0 if the request was performed synchronously
* %-EINPROGRESS if the request was successfully started
* %-EBUSY if all chsc subchannels are busy
* %-ENODEV if no chsc subchannels are available
* Context:
* interrupts disabled, chsc_lock held
*/
static int chsc_async(struct chsc_async_area *chsc_area,
struct chsc_request *request)
{
int cc;
struct chsc_private *private;
struct subchannel *sch = NULL;
int ret = -ENODEV;
char dbf[10];
chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
private = dev_get_drvdata(&sch->dev);
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
continue;
}
chsc_area->header.sid = sch->schid;
CHSC_LOG(2, "schid");
CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
cc = chsc(chsc_area);
snprintf(dbf, sizeof(dbf), "cc:%d", cc);
CHSC_LOG(2, dbf);
switch (cc) {
case 0:
ret = 0;
break;
case 1:
sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
ret = -EINPROGRESS;
private->request = request;
break;
case 2:
ret = -EBUSY;
break;
default:
ret = -ENODEV;
}
spin_unlock(sch->lock);
CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
sch->schid.ssid, sch->schid.sch_no, cc);
if (ret == -EINPROGRESS)
return -EINPROGRESS;
put_device(&sch->dev);
if (ret == 0)
return 0;
}
return ret;
}
static void chsc_log_command(void *chsc_area)
{
char dbf[10];
snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
CHSC_LOG(0, dbf);
CHSC_LOG_HEX(0, chsc_area, 32);
}
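/*
 * Inspect the irb stored for a completed request and map the
 * subchannel status to an errno value.
 */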
static int chsc_examine_irb(struct chsc_request *request)
{
int backed_up;
if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
return -EIO;
backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
if (scsw_cstat(&request->irb.scsw) == 0)
return 0;
if (!backed_up)
return 0;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
return -EIO;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
return -EPERM;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
return -EAGAIN;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
return -EAGAIN;
return -EIO;
}
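/*
 * Handle the CHSC_START ioctl: copy the command from user space, issue
 * it asynchronously, wait for completion and copy the result back.
 */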
static int chsc_ioctl_start(void __user *user_area)
{
struct chsc_request *request;
struct chsc_async_area *chsc_area;
int ret;
char dbf[10];
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request) {
ret = -ENOMEM;
goto out_free;
}
init_completion(&request->completion);
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
ret = -EFAULT;
goto out_free;
}
chsc_log_command(chsc_area);
spin_lock_irq(&chsc_lock);
ret = chsc_async(chsc_area, request);
spin_unlock_irq(&chsc_lock);
if (ret == -EINPROGRESS) {
wait_for_completion(&request->completion);
ret = chsc_examine_irb(request);
}
/* copy area back to user */
if (!ret)
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
out_free:
snprintf(dbf, sizeof(dbf), "ret:%d", ret);
CHSC_LOG(0, dbf);
kfree(request);
free_page((unsigned long)chsc_area);
return ret;
}
static int chsc_ioctl_on_close_set(void __user *user_area)
{
char dbf[13];
int ret;
mutex_lock(&on_close_mutex);
if (on_close_chsc_area) {
ret = -EBUSY;
goto out_unlock;
}
on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
if (!on_close_request) {
ret = -ENOMEM;
goto out_unlock;
}
on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
if (!on_close_chsc_area) {
ret = -ENOMEM;
goto out_free_request;
}
if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
ret = -EFAULT;
goto out_free_chsc;
}
ret = 0;
goto out_unlock;
out_free_chsc:
free_page((unsigned long)on_close_chsc_area);
on_close_chsc_area = NULL;
out_free_request:
kfree(on_close_request);
on_close_request = NULL;
out_unlock:
mutex_unlock(&on_close_mutex);
snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
CHSC_LOG(0, dbf);
return ret;
}
static int chsc_ioctl_on_close_remove(void)
{
char dbf[13];
int ret;
mutex_lock(&on_close_mutex);
if (!on_close_chsc_area) {
ret = -ENOENT;
goto out_unlock;
}
free_page((unsigned long)on_close_chsc_area);
on_close_chsc_area = NULL;
kfree(on_close_request);
on_close_request = NULL;
ret = 0;
out_unlock:
mutex_unlock(&on_close_mutex);
snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
CHSC_LOG(0, dbf);
return ret;
}
static int chsc_ioctl_start_sync(void __user *user_area)
{
struct chsc_sync_area *chsc_area;
int ret, ccode;
chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!chsc_area)
return -ENOMEM;
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
ret = -EFAULT;
goto out_free;
}
if (chsc_area->header.code & 0x4000) {
ret = -EINVAL;
goto out_free;
}
chsc_log_command(chsc_area);
ccode = chsc(chsc_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
else
ret = 0;
out_free:
free_page((unsigned long)chsc_area);
return ret;
}
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
struct chsc_chp_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scpcd_area->request.length = 0x0010;
scpcd_area->request.code = 0x0028;
scpcd_area->m = cd->m;
scpcd_area->fmt1 = cd->fmt;
scpcd_area->cssid = cd->chpid.cssid;
scpcd_area->first_chpid = cd->chpid.id;
scpcd_area->last_chpid = cd->chpid.id;
ccode = chsc(scpcd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scpcd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scpcd: response code=%x\n",
scpcd_area->response.code);
goto out_free;
}
memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scpcd_area);
return ret;
}
static int chsc_ioctl_info_cu(void __user *user_cd)
{
struct chsc_cu_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_cun : 8;
u32 : 24;
u32 last_cun : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scucd_area->request.length = 0x0010;
scucd_area->request.code = 0x0026;
scucd_area->m = cd->m;
scucd_area->fmt1 = cd->fmt;
scucd_area->cssid = cd->cssid;
scucd_area->first_cun = cd->cun;
scucd_area->last_cun = cd->cun;
ccode = chsc(scucd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scucd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scucd: response code=%x\n",
scucd_area->response.code);
goto out_free;
}
memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scucd_area);
return ret;
}
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
struct chsc_sch_cud *cud;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 5;
u32 fmt1 : 4;
u32 : 2;
u32 ssid : 2;
u32 first_sch : 16;
u32 : 8;
u32 cssid : 8;
u32 last_sch : 16;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
if (!cud) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cud, user_cud, sizeof(*cud))) {
ret = -EFAULT;
goto out_free;
}
sscud_area->request.length = 0x0010;
sscud_area->request.code = 0x0006;
sscud_area->m = cud->schid.m;
sscud_area->fmt1 = cud->fmt;
sscud_area->ssid = cud->schid.ssid;
sscud_area->first_sch = cud->schid.sch_no;
sscud_area->cssid = cud->schid.cssid;
sscud_area->last_sch = cud->schid.sch_no;
ccode = chsc(sscud_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sscud_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sscud: response code=%x\n",
sscud_area->response.code);
goto out_free;
}
memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
if (copy_to_user(user_cud, cud, sizeof(*cud)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cud);
free_page((unsigned long)sscud_area);
return ret;
}
static int chsc_ioctl_conf_info(void __user *user_ci)
{
struct chsc_conf_info *ci;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 6;
u32 ssid : 2;
u32 : 8;
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
if (!ci) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ci, user_ci, sizeof(*ci))) {
ret = -EFAULT;
goto out_free;
}
sci_area->request.length = 0x0010;
sci_area->request.code = 0x0012;
sci_area->m = ci->id.m;
sci_area->fmt1 = ci->fmt;
sci_area->cssid = ci->id.cssid;
sci_area->ssid = ci->id.ssid;
ccode = chsc(sci_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sci_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sci: response code=%x\n",
sci_area->response.code);
goto out_free;
}
memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
if (copy_to_user(user_ci, ci, sizeof(*ci)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ci);
free_page((unsigned long)sci_area);
return ret;
}
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
struct chsc_comp_list *ccl;
int ret, ccode;
struct {
struct chsc_header request;
u32 ctype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u64 : 64;
u32 list_parm[2];
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sccl_area;
struct {
u32 m : 1;
u32 : 31;
u32 cssid : 8;
u32 : 16;
u32 chpid : 8;
} __attribute__ ((packed)) *chpid_parm;
struct {
u32 f_cssid : 8;
u32 l_cssid : 8;
u32 : 16;
u32 res;
} __attribute__ ((packed)) *cssids_parm;
sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
if (!ccl) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
ret = -EFAULT;
goto out_free;
}
sccl_area->request.length = 0x0020;
sccl_area->request.code = 0x0030;
sccl_area->fmt = ccl->req.fmt;
sccl_area->ctype = ccl->req.ctype;
switch (sccl_area->ctype) {
case CCL_CU_ON_CHP:
case CCL_IOP_CHP:
chpid_parm = (void *)&sccl_area->list_parm;
chpid_parm->m = ccl->req.chpid.m;
chpid_parm->cssid = ccl->req.chpid.chp.cssid;
chpid_parm->chpid = ccl->req.chpid.chp.id;
break;
case CCL_CSS_IMG:
case CCL_CSS_IMG_CONF_CHAR:
cssids_parm = (void *)&sccl_area->list_parm;
cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
break;
}
ccode = chsc(sccl_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sccl_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sccl: response code=%x\n",
sccl_area->response.code);
goto out_free;
}
memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ccl);
free_page((unsigned long)sccl_area);
return ret;
}
static int chsc_ioctl_chpd(void __user *user_chpd)
{
struct chsc_scpd *scpd_area;
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpd_area || !chpd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
scpd_area);
if (ret)
goto out_free;
memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
free_page((unsigned long)scpd_area);
return ret;
}
static int chsc_ioctl_dcal(void __user *user_dcal)
{
struct chsc_dcal *dcal;
int ret, ccode;
struct {
struct chsc_header request;
u32 atype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u32 res0[2];
u32 list_parm[2];
u32 res1[2];
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
if (!dcal) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
ret = -EFAULT;
goto out_free;
}
sdcal_area->request.length = 0x0020;
sdcal_area->request.code = 0x0034;
sdcal_area->atype = dcal->req.atype;
sdcal_area->fmt = dcal->req.fmt;
memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
sizeof(sdcal_area->list_parm));
ccode = chsc(sdcal_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sdcal_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sdcal: response code=%x\n",
sdcal_area->response.code);
goto out_free;
}
memcpy(&dcal->sdcal, &sdcal_area->response,
sdcal_area->response.length);
if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(dcal);
free_page((unsigned long)sdcal_area);
return ret;
}
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
void __user *argp;
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
return chsc_ioctl_start(argp);
case CHSC_START_SYNC:
return chsc_ioctl_start_sync(argp);
case CHSC_INFO_CHANNEL_PATH:
return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
return chsc_ioctl_dcal(argp);
case CHSC_ON_CLOSE_SET:
return chsc_ioctl_on_close_set(argp);
case CHSC_ON_CLOSE_REMOVE:
return chsc_ioctl_on_close_remove();
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
}
static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
static int chsc_open(struct inode *inode, struct file *file)
{
if (!atomic_dec_and_test(&chsc_ready_for_use)) {
atomic_inc(&chsc_ready_for_use);
return -EBUSY;
}
return nonseekable_open(inode, file);
}
static int chsc_release(struct inode *inode, struct file *filp)
{
char dbf[13];
int ret;
mutex_lock(&on_close_mutex);
if (!on_close_chsc_area)
goto out_unlock;
init_completion(&on_close_request->completion);
CHSC_LOG(0, "on_close");
chsc_log_command(on_close_chsc_area);
spin_lock_irq(&chsc_lock);
ret = chsc_async(on_close_chsc_area, on_close_request);
spin_unlock_irq(&chsc_lock);
if (ret == -EINPROGRESS) {
wait_for_completion(&on_close_request->completion);
ret = chsc_examine_irb(on_close_request);
}
snprintf(dbf, sizeof(dbf), "relret:%d", ret);
CHSC_LOG(0, dbf);
free_page((unsigned long)on_close_chsc_area);
on_close_chsc_area = NULL;
kfree(on_close_request);
on_close_request = NULL;
out_unlock:
mutex_unlock(&on_close_mutex);
atomic_inc(&chsc_ready_for_use);
return 0;
}
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
.open = chsc_open,
.release = chsc_release,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
.llseek = no_llseek,
};
static struct miscdevice chsc_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "chsc",
.fops = &chsc_fops,
};
static int __init chsc_misc_init(void)
{
return misc_register(&chsc_misc_device);
}
static void chsc_misc_cleanup(void)
{
misc_deregister(&chsc_misc_device);
}
static int __init chsc_sch_init(void)
{
int ret;
ret = chsc_init_dbfs();
if (ret)
return ret;
isc_register(CHSC_SCH_ISC);
ret = chsc_init_sch_driver();
if (ret)
goto out_dbf;
ret = chsc_misc_init();
if (ret)
goto out_driver;
return ret;
out_driver:
chsc_cleanup_sch_driver();
out_dbf:
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
return ret;
}
static void __exit chsc_sch_exit(void)
{
chsc_misc_cleanup();
chsc_cleanup_sch_driver();
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
}
module_init(chsc_sch_init);
module_exit(chsc_sch_exit);
| linux-master | drivers/s390/cio/chsc_sch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- channel subsystem call
*
* Copyright IBM Corp. 1999,2012
* Author(s): Ingo Adlung ([email protected])
* Cornelia Huck ([email protected])
* Arnd Bergmann ([email protected])
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
#include <asm/ap.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
#define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */
#define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */
/**
* chsc_error_from_response() - convert a chsc response to an error
* @response: chsc response code
*
* Returns an appropriate Linux error code for @response.
*/
int chsc_error_from_response(int response)
{
switch (response) {
case 0x0001:
return 0;
case 0x0002:
case 0x0003:
case 0x0006:
case 0x0007:
case 0x0008:
case 0x000a:
case 0x0104:
return -EINVAL;
case 0x0004:
case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */
return -EOPNOTSUPP;
case 0x000b:
case 0x0107: /* "Channel busy" for the op 0x003d */
return -EBUSY;
case 0x0100:
case 0x0102:
return -ENOMEM;
case 0x0108: /* "HW limit exceeded" for the op 0x003d */
return -EUSERS;
default:
return -EIO;
}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
struct chsc_ssd_area {
struct chsc_header request;
u16 :10;
u16 ssid:2;
u16 :4;
u16 f_sch; /* first subchannel */
u16 :16;
u16 l_sch; /* last subchannel */
u32 :32;
struct chsc_header response;
u32 :32;
u8 sch_valid : 1;
u8 dev_valid : 1;
u8 st : 3; /* subchannel type */
u8 zeroes : 3;
u8 unit_addr; /* unit address */
u16 devno; /* device number */
u8 path_mask;
u8 fla_valid_mask;
u16 sch; /* subchannel */
u8 chpid[8]; /* chpids 0-7 */
u16 fla[8]; /* full link addresses 0-7 */
} __packed __aligned(PAGE_SIZE);
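/*
 * Issue a store-subchannel-description chsc for @schid and copy the
 * path mask, CHPIDs and full link addresses into @ssd.
 */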
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
struct chsc_ssd_area *ssd_area;
unsigned long flags;
int ccode;
int ret;
int i;
int mask;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
ssd_area->f_sch = schid.sch_no;
ssd_area->l_sch = schid.sch_no;
ccode = chsc(ssd_area);
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
goto out;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
goto out;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd_area->path_mask & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = ssd_area->chpid[i];
}
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
/**
* chsc_ssqd() - store subchannel QDIO data (SSQD)
* @schid: id of the subchannel on which SSQD is performed
* @ssqd: request and response block for SSQD
*
* Returns 0 on success.
*/
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
memset(ssqd, 0, sizeof(*ssqd));
ssqd->request.length = 0x0010;
ssqd->request.code = 0x0024;
ssqd->first_sch = schid.sch_no;
ssqd->last_sch = schid.sch_no;
ssqd->ssid = schid.ssid;
if (chsc(ssqd))
return -EIO;
return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);
/**
* chsc_sadc() - set adapter device controls (SADC)
* @schid: id of the subchannel on which SADC is performed
* @scssc: request and response block for SADC
* @summary_indicator_addr: summary indicator address
* @subchannel_indicator_addr: subchannel indicator address
* @isc: Interruption Subclass for this subchannel
*
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
scssc->request.code = 0x0021;
scssc->operation_code = 0;
scssc->summary_indicator_addr = summary_indicator_addr;
scssc->subchannel_indicator_addr = subchannel_indicator_addr;
scssc->ks = PAGE_DEFAULT_KEY >> 4;
scssc->kc = PAGE_DEFAULT_KEY >> 4;
scssc->isc = isc;
scssc->schid = schid;
/* enable the time delay disablement facility */
if (css_general_characteristics.aif_tdd)
scssc->word_with_d_bit = 0x10000000;
if (chsc(scssc))
return -EIO;
return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
if (sch->driver && sch->driver->chp_event)
if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
goto out_unreg;
spin_unlock_irq(sch->lock);
return 0;
out_unreg:
sch->lpm = 0;
spin_unlock_irq(sch->lock);
css_schedule_eval(sch->schid);
return 0;
}
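/*
 * A channel path has gone offline: refresh its description and give
 * all subchannels using it a chance to react.
 */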
void chsc_chp_offline(struct chp_id chpid)
{
struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
char dbf_txt[15];
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) <= 0)
return;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
mutex_lock(&chp->lock);
chp_update_desc(chp);
mutex_unlock(&chp->lock);
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_ONLINE);
spin_unlock_irq(sch->lock);
return 0;
}
static void s390_process_res_acc(struct chp_link *link)
{
char dbf_txt[15];
sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
if (link->fla != 0) {
sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
}
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* I/O resources may have become accessible.
* Scan through all subchannels that may be concerned and
* do a validation on those.
* The more information we have (info), the less scanning
* will we have to do.
*/
for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
css_schedule_reprobe();
}
static int process_fces_event(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_FCES_EVENT);
spin_unlock_irq(sch->lock);
return 0;
}
struct chsc_sei_nt0_area {
u8 flags;
u8 vf; /* validity flags */
u8 rs; /* reporting source */
u8 cc; /* content code */
u16 fla; /* full link address */
u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
/* ccdf has to be big enough for a link-incident record */
u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
} __packed;
struct chsc_sei_nt2_area {
u8 flags; /* p and v bit */
u8 reserved1;
u8 reserved2;
u8 cc; /* content code */
u32 reserved3[13];
u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
} __packed;
#define CHSC_SEI_NT0 (1ULL << 63)
#define CHSC_SEI_NT2 (1ULL << 61)
struct chsc_sei {
struct chsc_header request;
u32 reserved1;
u64 ntsm; /* notification type mask */
struct chsc_header response;
u32 :24;
u8 nt;
union {
struct chsc_sei_nt0_area nt0_area;
struct chsc_sei_nt2_area nt2_area;
u8 nt_area[PAGE_SIZE - 24];
} u;
} __packed __aligned(PAGE_SIZE);
/*
* Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
*/
#define LIR_IQ_CLASS_INFO 0
#define LIR_IQ_CLASS_DEGRADED 1
#define LIR_IQ_CLASS_NOT_OPERATIONAL 2
struct lir {
struct {
u32 null:1;
u32 reserved:3;
u32 class:2;
u32 reserved2:2;
} __packed iq;
u32 ic:8;
u32 reserved:16;
struct node_descriptor incident_node;
struct node_descriptor attached_node;
u8 reserved2[32];
} __packed;
#define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */
#define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
static char *store_ebcdic(char *dest, const char *src, unsigned long len,
char delim)
{
memcpy(dest, src, len);
EBCASC(dest, len);
if (delim)
dest[len++] = delim;
return dest + len;
}
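/* Extract the full or partial link address from an NT0 event, if valid. */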
static void chsc_link_from_sei(struct chp_link *link,
struct chsc_sei_nt0_area *sei_area)
{
if ((sei_area->vf & SEI_VF_FLA) != 0) {
link->fla = sei_area->fla;
link->fla_mask = ((sei_area->vf & SEI_VF_FLA) == SEI_VF_FLA) ?
0xffff : 0xff00;
}
}
/* Format node ID and parameters for output in LIR log message. */
static void format_node_data(char *params, char *id, struct node_descriptor *nd)
{
memset(params, 0, PARAMS_LEN);
memset(id, 0, NODEID_LEN);
if (nd->validity != ND_VALIDITY_VALID) {
strncpy(params, "n/a", PARAMS_LEN - 1);
strncpy(id, "n/a", NODEID_LEN - 1);
return;
}
/* PARAMS=xx,xxxxxx */
snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
sprintf(id, "%04X", nd->tag);
}
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct lir *lir = (struct lir *) &sei_area->ccdf;
char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
aunodeid[NODEID_LEN];
CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
/* Ignore NULL Link Incident Records. */
if (lir->iq.null)
return;
/* Inform user that a link requires maintenance actions because it has
* become degraded or not operational. Note that this log message is
* the primary intention behind a Link Incident Record. */
format_node_data(iuparams, iunodeid, &lir->incident_node);
format_node_data(auparams, aunodeid, &lir->attached_node);
switch (lir->iq.class) {
case LIR_IQ_CLASS_DEGRADED:
pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
sei_area->rs, sei_area->rsid, lir->ic, iuparams,
iunodeid, auparams, aunodeid);
break;
case LIR_IQ_CLASS_NOT_OPERATIONAL:
pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
sei_area->rs, sei_area->rsid, lir->ic, iuparams,
iunodeid, auparams, aunodeid);
break;
default:
break;
}
}
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
"rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
return;
chp_id_init(&chpid);
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
if (!status)
return;
if (status < 0) {
chp_new(chpid);
} else {
chp = chpid_to_chp(chpid);
mutex_lock(&chp->lock);
chp_update_desc(chp);
mutex_unlock(&chp->lock);
}
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
chsc_link_from_sei(&link, sei_area);
s390_process_res_acc(&link);
}
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
struct channel_path *chp;
struct chp_id chpid;
u8 *data;
int num;
CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
if (sei_area->rs != 0)
return;
data = sei_area->ccdf;
chp_id_init(&chpid);
for (num = 0; num <= __MAX_CHPID; num++) {
if (!chp_test_bit(data, num))
continue;
chpid.id = num;
CIO_CRW_EVENT(4, "Update information for channel path "
"%x.%02x\n", chpid.cssid, chpid.id);
chp = chpid_to_chp(chpid);
if (!chp) {
chp_new(chpid);
continue;
}
mutex_lock(&chp->lock);
chp_update_desc(chp);
mutex_unlock(&chp->lock);
}
}
struct chp_config_data {
u8 map[32];
u8 op;
u8 pc;
};
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
int num;
char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
if (sei_area->rs != 0)
return;
data = (struct chp_config_data *) &(sei_area->ccdf);
chp_id_init(&chpid);
for (num = 0; num <= __MAX_CHPID; num++) {
if (!chp_test_bit(data->map, num))
continue;
chpid.id = num;
pr_notice("Processing %s for channel path %x.%02x\n",
events[data->op], chpid.cssid, chpid.id);
switch (data->op) {
case 0:
chp_cfg_schedule(chpid, 1);
break;
case 1:
chp_cfg_schedule(chpid, 0);
break;
case 2:
chp_cfg_cancel_deconfigure(chpid);
break;
}
}
}
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
int ret;
CIO_CRW_EVENT(4, "chsc: scm change notification\n");
if (sei_area->rs != 7)
return;
ret = scm_update_information();
if (ret)
CIO_CRW_EVENT(0, "chsc: updating change notification"
" failed (rc=%d).\n", ret);
}
static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
int ret;
CIO_CRW_EVENT(4, "chsc: scm available information\n");
if (sei_area->rs != 7)
return;
ret = scm_process_availability_information();
if (ret)
CIO_CRW_EVENT(0, "chsc: process availability information"
" failed (rc=%d).\n", ret);
}
static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
{
CIO_CRW_EVENT(3, "chsc: ap config changed\n");
if (sei_area->rs != 5)
return;
ap_bus_cfg_chg();
}
static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
struct channel_path *chp;
CIO_CRW_EVENT(4,
"chsc: FCES status notification (rs=%02x, rs_id=%04x, FCES-status=%x)\n",
sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
if (sei_area->rs != SEI_RS_CHPID)
return;
chp_id_init(&chpid);
chpid.id = sei_area->rsid;
/* Ignore the event on unknown/invalid chp */
chp = chpid_to_chp(chpid);
if (!chp)
return;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
chsc_link_from_sei(&link, sei_area);
for_each_subchannel_staged(process_fces_event, NULL, &link);
}
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
case 1:
zpci_event_error(sei_area->ccdf);
break;
case 2:
zpci_event_availability(sei_area->ccdf);
break;
default:
CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
sei_area->cc);
break;
}
}
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
/* which kind of information was stored? */
switch (sei_area->cc) {
	case 1: /* link incident */
chsc_process_sei_link_incident(sei_area);
break;
case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
case 3: /* ap config changed */
chsc_process_sei_ap_cfg_chg(sei_area);
break;
case 7: /* channel-path-availability information */
chsc_process_sei_chp_avail(sei_area);
break;
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
case 12: /* scm change notification */
chsc_process_sei_scm_change(sei_area);
break;
case 14: /* scm available notification */
chsc_process_sei_scm_avail(sei_area);
break;
case 15: /* FCES event notification */
chsc_process_sei_fces_event(sei_area);
break;
default: /* other stuff */
CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
/* Check if we might have lost some information. */
if (sei_area->flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
css_schedule_eval_all();
}
}
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
static int ntsm_unsupported;
while (true) {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e;
if (!ntsm_unsupported)
sei->ntsm = ntsm;
if (chsc(sei))
break;
if (sei->response.code != 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
sei->response.code, sei->ntsm);
if (sei->response.code == 3 && sei->ntsm) {
/* Fallback for old firmware. */
ntsm_unsupported = 1;
continue;
}
break;
}
CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
switch (sei->nt) {
case 0:
chsc_process_sei_nt0(&sei->u.nt0_area);
break;
case 2:
chsc_process_sei_nt2(&sei->u.nt2_area);
break;
default:
CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
break;
}
if (!(sei->u.nt0_area.flags & 0x80))
break;
}
}
/*
* Handle channel subsystem related CRWs.
* Use store event information to find out what's going on.
*
* Note: Access to sei_page is serialized through machine check handler
* thread, so no need for locking.
*/
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
CIO_TRACE_EVENT(2, "prcss");
chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
{
struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
char dbf_txt[15];
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) != 0) {
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
mutex_lock(&chp->lock);
chp_update_desc(chp);
mutex_unlock(&chp->lock);
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
css_schedule_reprobe();
}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
struct chp_id chpid, int on)
{
unsigned long flags;
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
spin_lock_irqsave(sch->lock, flags);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, &link,
on ? CHP_VARY_ON : CHP_VARY_OFF);
spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
struct chp_id *chpid = data;
__s390_subchannel_vary_chpid(sch, *chpid, 0);
return 0;
}
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
struct chp_id *chpid = data;
__s390_subchannel_vary_chpid(sch, *chpid, 1);
return 0;
}
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
* @on: non-zero for vary online, zero for vary offline
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
/*
* Redo PathVerification on the devices the chpid connects to
*/
if (on) {
/* Try to update the channel path description. */
chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
NULL, &chpid);
css_schedule_reprobe();
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &chpid);
return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
int i;
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
}
}
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
int i, ret;
ret = 0;
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
ret = chp_add_cmg_attr(css->chps[i]);
if (ret)
goto cleanup;
}
return ret;
cleanup:
for (--i; i >= 0; i--) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
}
return ret;
}
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
struct {
struct chsc_header request;
u32 operation_code : 2;
u32 : 30;
u32 key : 4;
u32 : 28;
u32 zeroes1;
u32 cub_addr1;
u32 zeroes2;
u32 cub_addr2;
u32 reserved[13];
struct chsc_header response;
u32 status : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
} *secm_area;
unsigned long flags;
int ret, ccode;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
secm_area = chsc_page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
switch (secm_area->response.code) {
case 0x0102:
case 0x0103:
ret = -EINVAL;
break;
default:
ret = chsc_error_from_response(secm_area->response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
int ret;
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
return -ENOMEM;
}
}
ret = __chsc_do_secm(css, enable);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
__chsc_do_secm(css, 0);
css->cm_enabled = 0;
}
} else
chsc_remove_cmg_attr(css);
}
if (!css->cm_enabled) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m, void *page)
{
struct chsc_scpd *scpd_area;
int ccode, ret;
if ((rfmt == 1 || rfmt == 0) && c == 1 &&
!css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
if ((rfmt == 3) && !css_general_characteristics.util_str)
return -EINVAL;
memset(page, 0, PAGE_SIZE);
scpd_area = page;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
scpd_area->m = m;
scpd_area->c = c;
scpd_area->fmt = fmt;
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
if (ccode > 0)
return (ccode == 3) ? -ENODEV : -EBUSY;
ret = chsc_error_from_response(scpd_area->response.code);
if (ret)
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
#define chsc_det_chp_desc(FMT, c) \
int chsc_determine_fmt##FMT##_channel_path_desc( \
struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \
{ \
struct chsc_scpd *scpd_area; \
unsigned long flags; \
int ret; \
\
spin_lock_irqsave(&chsc_page_lock, flags); \
scpd_area = chsc_page; \
ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \
scpd_area); \
if (ret) \
goto out; \
\
memcpy(desc, scpd_area->data, sizeof(*desc)); \
out: \
spin_unlock_irqrestore(&chsc_page_lock, flags); \
return ret; \
}
chsc_det_chp_desc(0, 0)
chsc_det_chp_desc(1, 1)
chsc_det_chp_desc(3, 0)
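/*
 * Illustrative sketch, not part of the original file: how a caller might
 * read the format-0 channel-path description for a single CHPID using the
 * helper generated above. The debug message is an assumption added for
 * demonstration only.
 */
static int __maybe_unused chsc_scpd_fmt0_example(struct chp_id chpid)
{
	struct channel_path_desc_fmt0 desc;
	int rc;

	rc = chsc_determine_fmt0_channel_path_desc(chpid, &desc);
	if (rc)
		CIO_CRW_EVENT(2, "chsc: scpd example failed (rc=%d)\n", rc);
	return rc;
}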
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
int i, mask;
for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
mask = 0x80 >> (i + 3);
if (cmcv & mask)
chp->cmg_chars.values[i] = chars->values[i];
else
chp->cmg_chars.values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
unsigned long flags;
int ccode, ret;
struct {
struct chsc_header request;
u32 : 24;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 zeroes1;
struct chsc_header response;
u32 zeroes2;
u32 not_valid : 1;
u32 shared : 1;
u32 : 22;
u32 chpid : 8;
u32 cmcv : 5;
u32 : 11;
u32 cmgq : 8;
u32 cmg : 8;
u32 zeroes3;
u32 data[NR_MEASUREMENT_CHARS];
} *scmc_area;
chp->shared = -1;
chp->cmg = -1;
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
return -EINVAL;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
ccode = chsc(scmc_area);
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
ret = chsc_error_from_response(scmc_area->response.code);
if (ret) {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
goto out;
}
if (scmc_area->not_valid)
goto out;
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
if (chp->cmg != 2 && chp->cmg != 3) {
/* No cmg-dependent data. */
goto out;
}
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
int __init chsc_init(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sei_page || !chsc_page) {
ret = -ENOMEM;
goto out_err;
}
ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
goto out_err;
return ret;
out_err:
free_page((unsigned long)chsc_page);
free_page((unsigned long)sei_page);
return ret;
}
void __init chsc_init_cleanup(void)
{
crw_unregister_handler(CRW_RSC_CSS);
free_page((unsigned long)chsc_page);
free_page((unsigned long)sei_page);
}
int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
{
int ret;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
ret = chsc(sda_area);
if (ret > 0) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
switch (sda_area->response.code) {
case 0x0101:
ret = -EOPNOTSUPP;
break;
default:
ret = chsc_error_from_response(sda_area->response.code);
}
out:
return ret;
}
int chsc_enable_facility(int operation_code)
{
struct chsc_sda_area *sda_area;
unsigned long flags;
int ret;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
sda_area = chsc_page;
ret = __chsc_enable_facility(sda_area, operation_code);
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
{
struct {
struct chsc_header request;
u8 atype;
u32 : 24;
u32 reserved1[6];
struct chsc_header response;
u32 reserved2[3];
struct {
u8 cssid;
u8 iid;
u32 : 16;
} list[];
} *sdcal_area;
int ret;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
sdcal_area = chsc_page;
sdcal_area->request.length = 0x0020;
sdcal_area->request.code = 0x0034;
sdcal_area->atype = 4;
ret = chsc(sdcal_area);
if (ret) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto exit;
}
ret = chsc_error_from_response(sdcal_area->response.code);
if (ret) {
CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
sdcal_area->response.code);
goto exit;
}
if ((addr_t) &sdcal_area->list[idx] <
(addr_t) &sdcal_area->response + sdcal_area->response.length) {
*cssid = sdcal_area->list[idx].cssid;
*iid = sdcal_area->list[idx].iid;
	} else {
		ret = -ENODEV;
	}
exit:
spin_unlock_irq(&chsc_page_lock);
return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
unsigned long flags;
int result;
struct {
struct chsc_header request;
u32 reserved1;
u32 reserved2;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
u32 chsc_char[508];
} *scsc_area;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
result = chsc(scsc_area);
if (result) {
result = (result == 3) ? -ENODEV : -EBUSY;
goto exit;
}
result = chsc_error_from_response(scsc_area->response.code);
if (result == 0) {
memcpy(&css_general_characteristics, scsc_area->general_char,
sizeof(css_general_characteristics));
memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
sizeof(css_chsc_characteristics));
} else
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta)
{
struct {
struct chsc_header request;
unsigned int rsvd0;
unsigned int op : 8;
unsigned int rsvd1 : 8;
unsigned int ctrl : 16;
unsigned int rsvd2[5];
struct chsc_header response;
unsigned int rsvd3[3];
s64 clock_delta;
unsigned int rsvd4[2];
} *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0020;
rr->request.code = 0x0033;
rr->op = op;
rr->ctrl = ctrl;
rc = chsc(rr);
if (rc)
return -EIO;
rc = (rr->response.code == 0x0001) ? 0 : -EIO;
if (clock_delta)
*clock_delta = rr->clock_delta;
return rc;
}
int chsc_sstpi(void *page, void *result, size_t size)
{
struct {
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
char data[];
} *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0010;
rr->request.code = 0x0038;
rc = chsc(rr);
if (rc)
return -EIO;
memcpy(result, &rr->data, size);
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
int chsc_stzi(void *page, void *result, size_t size)
{
struct {
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
char data[];
} *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0010;
rr->request.code = 0x003e;
rc = chsc(rr);
if (rc)
return -EIO;
memcpy(result, &rr->data, size);
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
int chsc_siosl(struct subchannel_id schid)
{
struct {
struct chsc_header request;
u32 word1;
struct subchannel_id sid;
u32 word3;
struct chsc_header response;
u32 word[11];
} *siosl_area;
unsigned long flags;
int ccode;
int rc;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
siosl_area = chsc_page;
siosl_area->request.length = 0x0010;
siosl_area->request.code = 0x0046;
siosl_area->word1 = 0x80000000;
siosl_area->sid = schid;
ccode = chsc(siosl_area);
if (ccode > 0) {
if (ccode == 3)
rc = -ENODEV;
else
rc = -EBUSY;
CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
schid.ssid, schid.sch_no, ccode);
goto out;
}
rc = chsc_error_from_response(siosl_area->response.code);
if (rc)
CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
siosl_area->response.code);
else
CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
schid.ssid, schid.sch_no);
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
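/*
 * Illustrative sketch, not part of the original file: request logging for
 * one subchannel via chsc_siosl(). The subchannel id is assumed to come
 * from the caller; error handling is reduced to a debug message.
 */
static void __maybe_unused chsc_siosl_example(struct subchannel_id schid)
{
	int rc;

	rc = chsc_siosl(schid);
	if (rc)
		CIO_MSG_EVENT(2, "siosl example for 0.%x.%04x failed (rc=%d)\n",
			      schid.ssid, schid.sch_no, rc);
}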
/**
* chsc_scm_info() - store SCM information (SSI)
* @scm_area: request and response block for SSI
* @token: continuation token
*
* Returns 0 on success.
*/
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
int ccode, ret;
memset(scm_area, 0, sizeof(*scm_area));
scm_area->request.length = 0x0020;
scm_area->request.code = 0x004C;
scm_area->reqtok = token;
ccode = chsc(scm_area);
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out;
}
ret = chsc_error_from_response(scm_area->response.code);
if (ret != 0)
CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
scm_area->response.code);
out:
return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
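/*
 * Illustrative sketch, not part of the original file: a single SSI request
 * with continuation token 0. Real callers loop while the response carries
 * a continuation token; that handling is omitted here.
 */
static int __maybe_unused chsc_scm_info_example(void)
{
	struct chsc_scm_info *scm_area;
	int ret;

	scm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scm_area)
		return -ENOMEM;
	ret = chsc_scm_info(scm_area, 0);
	free_page((unsigned long)scm_area);
	return ret;
}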
/**
* chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
* @pnso_area: request and response block for the operation
* @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
* pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
*
* Returns 0 on success.
*/
int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
{
memset(pnso_area, 0, sizeof(*pnso_area));
pnso_area->request.length = 0x0030;
pnso_area->request.code = 0x003d; /* network-subchannel operation */
pnso_area->m = schid.m;
pnso_area->ssid = schid.ssid;
pnso_area->sch = schid.sch_no;
pnso_area->cssid = schid.cssid;
pnso_area->oc = oc;
pnso_area->resume_token = resume_token;
pnso_area->n = (cnc != 0);
if (chsc(pnso_area))
return -EIO;
return chsc_error_from_response(pnso_area->response.code);
}
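/*
 * Illustrative sketch, not part of the original file: first call of a PNSO
 * sequence with an all-zero resume token. The operation code is passed in
 * by the caller; the buffer is a full page as required by the interface.
 */
static int __maybe_unused chsc_pnso_example(struct subchannel_id schid, u8 oc)
{
	struct chsc_pnso_resume_token rtok = { 0 };
	struct chsc_pnso_area *pnso_area;
	int ret;

	pnso_area = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pnso_area)
		return -ENOMEM;
	ret = chsc_pnso(schid, pnso_area, oc, rtok, 0);
	free_page((unsigned long)pnso_area);
	return ret;
}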
int chsc_sgib(u32 origin)
{
struct {
struct chsc_header request;
u16 op;
u8 reserved01[2];
u8 reserved02:4;
u8 fmt:4;
u8 reserved03[7];
/* operation data area begin */
u8 reserved04[4];
u32 gib_origin;
u8 reserved05[10];
u8 aix;
u8 reserved06[4029];
struct chsc_header response;
u8 reserved07[4];
} *sgib_area;
int ret;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
sgib_area = chsc_page;
sgib_area->request.length = 0x0fe0;
sgib_area->request.code = 0x0021;
sgib_area->op = 0x1;
sgib_area->gib_origin = origin;
ret = chsc(sgib_area);
if (ret == 0)
ret = chsc_error_from_response(sgib_area->response.code);
spin_unlock_irq(&chsc_page_lock);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_sgib);
#define SCUD_REQ_LEN 0x10 /* SCUD request block length */
#define SCUD_REQ_CMD 0x4b /* SCUD Command Code */
struct chse_cudb {
u16 flags:8;
u16 chp_valid:8;
u16 cu;
u32 esm_valid:8;
u32:24;
u8 chpid[8];
u32:32;
u32:32;
u8 esm[8];
u32 efla[8];
} __packed;
struct chsc_scud {
struct chsc_header request;
u16:4;
u16 fmt:4;
u16 cssid:8;
u16 first_cu;
u16:16;
u16 last_cu;
u32:32;
struct chsc_header response;
u16:4;
u16 fmt_resp:4;
u32:24;
struct chse_cudb cudb[];
} __packed;
/**
* chsc_scud() - Store control-unit description.
* @cu: number of the control-unit
* @esm: 8 1-byte endpoint security mode values
* @esm_valid: validity mask for @esm
*
* Interface to retrieve information about the endpoint security
* modes for up to 8 paths of a control unit.
*
* Returns 0 on success.
*/
int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid)
{
struct chsc_scud *scud = chsc_page;
int ret;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
scud->request.length = SCUD_REQ_LEN;
scud->request.code = SCUD_REQ_CMD;
scud->fmt = 0;
scud->cssid = 0;
scud->first_cu = cu;
scud->last_cu = cu;
ret = chsc(scud);
if (!ret)
ret = chsc_error_from_response(scud->response.code);
if (!ret && (scud->response.length <= 8 || scud->fmt_resp != 0
|| !(scud->cudb[0].flags & 0x80)
|| scud->cudb[0].cu != cu)) {
CIO_MSG_EVENT(2, "chsc: scud failed rc=%04x, L2=%04x "
"FMT=%04x, cudb.flags=%02x, cudb.cu=%04x",
scud->response.code, scud->response.length,
scud->fmt_resp, scud->cudb[0].flags, scud->cudb[0].cu);
ret = -EINVAL;
}
if (ret)
goto out;
memcpy(esm, scud->cudb[0].esm, sizeof(*esm));
*esm_valid = scud->cudb[0].esm_valid;
out:
spin_unlock_irq(&chsc_page_lock);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_scud);
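/*
 * Illustrative sketch, not part of the original file: query the endpoint
 * security modes of one control unit. The control-unit number 0x1000 is a
 * made-up example value.
 */
static void __maybe_unused chsc_scud_example(void)
{
	u64 esm;
	u8 esm_valid;

	if (chsc_scud(0x1000, &esm, &esm_valid))
		return;
	CIO_MSG_EVENT(4, "cu 0x1000: esm_valid=%02x\n", esm_valid);
}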
| linux-master | drivers/s390/cio/chsc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CCW device PGID and path verification I/O handling.
*
* Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <[email protected]>
* Martin Schwidefsky <[email protected]>
* Peter Oberparleiter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"
#define PGID_RETRIES 256
#define PGID_TIMEOUT (10 * HZ)
static void verify_start(struct ccw_device *cdev);
/*
* Process path verification data and report result.
*/
static void verify_done(struct ccw_device *cdev, int rc)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_dev_id *id = &cdev->private->dev_id;
int mpath = cdev->private->flags.mpath;
int pgroup = cdev->private->flags.pgroup;
if (rc)
goto out;
/* Ensure consistent multipathing state at device and channel. */
if (sch->config.mp != mpath) {
sch->config.mp = mpath;
rc = cio_commit_config(sch);
}
out:
CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
"vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
sch->vpm);
ccw_device_verify_done(cdev, rc);
}
/*
* Create channel program to perform a NOOP.
*/
static void nop_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->dma_area->iccws;
cp->cmd_code = CCW_CMD_NOOP;
cp->cda = 0;
cp->count = 0;
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
}
/*
* Perform NOOP on a single path.
*/
static void nop_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
nop_build_cp(cdev);
ccw_request_start(cdev);
return;
out_nopath:
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/*
* Adjust NOOP I/O status.
*/
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
struct irb *irb, enum io_status status)
{
/* Only subchannel status might indicate a path error. */
if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
return IO_DONE;
return status;
}
/*
* Process NOOP request result for a single path.
*/
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
switch (rc) {
case 0:
sch->vpm |= req->lpm;
break;
case -ETIME:
cdev->private->path_noirq_mask |= req->lpm;
break;
case -EACCES:
cdev->private->path_notoper_mask |= req->lpm;
break;
default:
goto err;
}
/* Continue on the next path. */
req->lpm >>= 1;
nop_do(cdev);
return;
err:
verify_done(cdev, rc);
}
/*
* Create channel program to perform SET PGID on a single path.
*/
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
struct pgid *pgid = &cdev->private->dma_area->pgid[i];
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
cp->cda = (u32)virt_to_phys(pgid);
cp->count = sizeof(*pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
}
static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
if (rc) {
/* We don't know the path groups' state. Abort. */
verify_done(cdev, rc);
return;
}
/*
* Path groups have been reset. Restart path verification but
* leave paths in path_noirq_mask out.
*/
cdev->private->flags.pgid_unknown = 0;
verify_start(cdev);
}
/*
* Reset pathgroups and restart path verification, leave unusable paths out.
*/
static void pgid_wipeout_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_dev_id *id = &cdev->private->dev_id;
struct ccw_request *req = &cdev->private->req;
u8 fn;
CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
id->ssid, id->devno, cdev->private->pgid_valid_mask,
cdev->private->path_noirq_mask);
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam;
req->callback = pgid_wipeout_callback;
fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath)
fn |= SPID_FUNC_MULTI_PATH;
spid_build_cp(cdev, fn);
ccw_request_start(cdev);
}
/*
* Perform establish/resign SET PGID on a single path.
*/
static void spid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
u8 fn;
/* Use next available path that is not already in correct state. */
req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
if (!req->lpm)
goto out_nopath;
/* Channel program setup. */
if (req->lpm & sch->opm)
fn = SPID_FUNC_ESTABLISH;
else
fn = SPID_FUNC_RESIGN;
if (cdev->private->flags.mpath)
fn |= SPID_FUNC_MULTI_PATH;
spid_build_cp(cdev, fn);
ccw_request_start(cdev);
return;
out_nopath:
if (cdev->private->flags.pgid_unknown) {
/* At least one SPID could be partially done. */
pgid_wipeout_start(cdev);
return;
}
verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/*
* Process SET PGID request result for a single path.
*/
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
switch (rc) {
case 0:
sch->vpm |= req->lpm & sch->opm;
break;
case -ETIME:
cdev->private->flags.pgid_unknown = 1;
cdev->private->path_noirq_mask |= req->lpm;
break;
case -EACCES:
cdev->private->path_notoper_mask |= req->lpm;
break;
case -EOPNOTSUPP:
if (cdev->private->flags.mpath) {
/* Try without multipathing. */
cdev->private->flags.mpath = 0;
goto out_restart;
}
/* Try without pathgrouping. */
cdev->private->flags.pgroup = 0;
goto out_restart;
default:
goto err;
}
req->lpm >>= 1;
spid_do(cdev);
return;
out_restart:
verify_start(cdev);
return;
err:
verify_done(cdev, rc);
}
static void spid_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
req->singlepath = 1;
req->callback = spid_callback;
spid_do(cdev);
}
static int pgid_is_reset(struct pgid *p)
{
char *c;
for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
if (*c != 0)
return 0;
}
return 1;
}
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
return memcmp((char *) p1 + 1, (char *) p2 + 1,
sizeof(struct pgid) - 1);
}
/*
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, u8 *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->dma_area->pgid[0];
struct pgid *first = NULL;
int lpm;
int i;
*mismatch = 0;
*reserved = 0;
*reset = 0;
for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
*reserved |= lpm;
if (pgid_is_reset(pgid)) {
*reset |= lpm;
continue;
}
if (!first) {
first = pgid;
continue;
}
if (pgid_cmp(pgid, first) != 0)
*mismatch = 1;
}
if (!first)
first = &channel_subsystems[0]->global_pgid;
*p = first;
}
static u8 pgid_to_donepm(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int i;
int lpm;
u8 donepm = 0;
/* Set bits for paths which are already in the target state. */
for (i = 0; i < 8; i++) {
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
pgid = &cdev->private->dma_area->pgid[i];
if (sch->opm & lpm) {
if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
continue;
} else {
if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
continue;
}
if (cdev->private->flags.mpath) {
if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
continue;
} else {
if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
continue;
}
donepm |= lpm;
}
return donepm;
}
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
int i;
for (i = 0; i < 8; i++)
memcpy(&cdev->private->dma_area->pgid[i], pgid,
sizeof(struct pgid));
}
/*
* Process SENSE PGID data and report result.
*/
static void snid_done(struct ccw_device *cdev, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int mismatch = 0;
u8 reserved = 0;
u8 reset = 0;
u8 donepm;
if (rc)
goto out;
pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
if (reserved == cdev->private->pgid_valid_mask)
rc = -EUSERS;
else if (mismatch)
rc = -EOPNOTSUPP;
else {
donepm = pgid_to_donepm(cdev);
sch->vpm = donepm & sch->opm;
cdev->private->pgid_reset_mask |= reset;
cdev->private->pgid_todo_mask &=
~(donepm | cdev->private->path_noirq_mask);
pgid_fill(cdev, pgid);
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
"todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
if (cdev->private->flags.pgid_unknown) {
pgid_wipeout_start(cdev);
return;
}
/* Anything left to do? */
if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
return;
}
/* Perform path-grouping. */
spid_start(cdev);
break;
case -EOPNOTSUPP:
/* Path-grouping not supported. */
cdev->private->flags.pgroup = 0;
cdev->private->flags.mpath = 0;
verify_start(cdev);
break;
default:
verify_done(cdev, rc);
}
}
/*
* Create channel program to perform a SENSE PGID on a single path.
*/
static void snid_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]);
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
}
/*
* Perform SENSE PGID on a single path.
*/
static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
int ret;
req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
~cdev->private->path_noirq_mask);
if (!req->lpm)
goto out_nopath;
snid_build_cp(cdev);
ccw_request_start(cdev);
return;
out_nopath:
if (cdev->private->pgid_valid_mask)
ret = 0;
else if (cdev->private->path_noirq_mask)
ret = -ETIME;
else
ret = -EACCES;
snid_done(cdev, ret);
}
/*
 * Process SENSE PGID request result for a single path.
*/
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_request *req = &cdev->private->req;
switch (rc) {
case 0:
cdev->private->pgid_valid_mask |= req->lpm;
break;
case -ETIME:
cdev->private->flags.pgid_unknown = 1;
cdev->private->path_noirq_mask |= req->lpm;
break;
case -EACCES:
cdev->private->path_notoper_mask |= req->lpm;
break;
default:
goto err;
}
/* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev);
return;
err:
snid_done(cdev, rc);
}
/*
* Perform path verification.
*/
static void verify_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
struct ccw_dev_id *devid = &cdev->private->dev_id;
sch->vpm = 0;
sch->lpm = sch->schib.pmcw.pam;
/* Initialize PGID data. */
memset(cdev->private->dma_area->pgid, 0,
sizeof(cdev->private->dma_area->pgid));
cdev->private->pgid_valid_mask = 0;
cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
cdev->private->path_notoper_mask = 0;
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = 0x80;
req->singlepath = 1;
if (cdev->private->flags.pgroup) {
CIO_TRACE_EVENT(4, "snid");
CIO_HEX_EVENT(4, devid, sizeof(*devid));
req->callback = snid_callback;
snid_do(cdev);
} else {
CIO_TRACE_EVENT(4, "nop");
CIO_HEX_EVENT(4, devid, sizeof(*devid));
req->filter = nop_filter;
req->callback = nop_callback;
nop_do(cdev);
}
}
/**
* ccw_device_verify_start - perform path verification
* @cdev: ccw device
*
* Perform an I/O on each available channel path to @cdev to determine which
* paths are operational. The resulting path mask is stored in sch->vpm.
* If device options specify pathgrouping, establish a pathgroup for the
* operational paths. When finished, call ccw_device_verify_done with a
* return code specifying the result.
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
*/
cdev->private->flags.pgroup = cdev->private->options.pgroup;
cdev->private->flags.mpath = cdev->private->options.mpath;
cdev->private->flags.doverify = 0;
cdev->private->path_noirq_mask = 0;
verify_start(cdev);
}
/*
* Process disband SET PGID request result.
*/
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_dev_id *id = &cdev->private->dev_id;
if (rc)
goto out;
/* Ensure consistent multipathing state at device and channel. */
cdev->private->flags.mpath = 0;
if (sch->config.mp) {
sch->config.mp = 0;
rc = cio_commit_config(sch);
}
out:
CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
rc);
ccw_device_disband_done(cdev, rc);
}
/**
* ccw_device_disband_start - disband pathgroup
* @cdev: ccw device
*
* Execute a SET PGID channel program on @cdev to disband a previously
* established pathgroup. When finished, call ccw_device_disband_done with
* a return code specifying the result.
*/
void ccw_device_disband_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
u8 fn;
CIO_TRACE_EVENT(4, "disb");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Request setup. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
req->singlepath = 1;
req->callback = disband_callback;
fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath)
fn |= SPID_FUNC_MULTI_PATH;
spid_build_cp(cdev, fn);
ccw_request_start(cdev);
}
struct stlck_data {
struct completion done;
int rc;
};
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
cp[0].cda = (u32)virt_to_phys(buf1);
cp[0].count = 32;
cp[0].flags = CCW_FLAG_CC;
cp[1].cmd_code = CCW_CMD_RELEASE;
cp[1].cda = (u32)virt_to_phys(buf2);
cp[1].count = 32;
cp[1].flags = 0;
req->cp = cp;
}
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
struct stlck_data *sdata = data;
sdata->rc = rc;
complete(&sdata->done);
}
/**
* ccw_device_stlck_start - perform unconditional release
* @cdev: ccw device
* @data: data pointer to be passed to ccw_device_stlck_done
* @buf1: data pointer used in channel program
* @buf2: data pointer used in channel program
*
* Execute a channel program on @cdev to release an existing PGID reservation.
*/
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
void *buf1, void *buf2)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
CIO_TRACE_EVENT(4, "stlck");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Request setup. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
req->data = data;
req->callback = stlck_callback;
stlck_build_cp(cdev, buf1, buf2);
ccw_request_start(cdev);
}
/*
* Perform unconditional reserve + release.
*/
int ccw_device_stlck(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct stlck_data data;
u8 *buffer;
int rc;
/* Check if steal lock operation is valid for this device. */
if (cdev->drv) {
if (!cdev->private->options.force)
return -EINVAL;
}
buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_completion(&data.done);
data.rc = -EIO;
spin_lock_irq(sch->lock);
rc = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (rc)
goto out_unlock;
/* Perform operation. */
cdev->private->state = DEV_STATE_STEAL_LOCK;
ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
spin_unlock_irq(sch->lock);
/* Wait for operation to finish. */
if (wait_for_completion_interruptible(&data.done)) {
/* Got a signal. */
spin_lock_irq(sch->lock);
ccw_request_cancel(cdev);
spin_unlock_irq(sch->lock);
wait_for_completion(&data.done);
}
rc = data.rc;
/* Check results. */
spin_lock_irq(sch->lock);
cio_disable_subchannel(sch);
cdev->private->state = DEV_STATE_BOXED;
out_unlock:
spin_unlock_irq(sch->lock);
kfree(buffer);
return rc;
}
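/*
 * Illustrative sketch, not part of the original file: break an existing
 * reservation on a boxed device. This only succeeds when the driver has
 * set the 'force' device option; the caller shown here is an assumption.
 */
static int __maybe_unused stlck_example(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_stlck(cdev);
	if (rc)
		CIO_MSG_EVENT(2, "stlck example failed (rc=%d)\n", rc);
	return rc;
}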
| linux-master | drivers/s390/cio/device_pgid.c |