// SPDX-License-Identifier: GPL-2.0-only
/*
* MOXA ART SoCs watchdog driver.
*
* Copyright (C) 2013 Jonas Jensen
*
* Jonas Jensen <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#define REG_COUNT 0x4
#define REG_MODE 0x8
#define REG_ENABLE 0xC
struct moxart_wdt_dev {
struct watchdog_device dev;
void __iomem *base;
unsigned int clock_frequency;
};
static int heartbeat;
static int moxart_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
writel(1, moxart_wdt->base + REG_COUNT);
writel(0x5ab9, moxart_wdt->base + REG_MODE);
writel(0x03, moxart_wdt->base + REG_ENABLE);
return 0;
}
static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
writel(0, moxart_wdt->base + REG_ENABLE);
return 0;
}
static int moxart_wdt_start(struct watchdog_device *wdt_dev)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
writel(moxart_wdt->clock_frequency * wdt_dev->timeout,
moxart_wdt->base + REG_COUNT);
writel(0x5ab9, moxart_wdt->base + REG_MODE);
writel(0x03, moxart_wdt->base + REG_ENABLE);
return 0;
}
static int moxart_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int timeout)
{
wdt_dev->timeout = timeout;
return 0;
}
static const struct watchdog_info moxart_wdt_info = {
.identity = "moxart-wdt",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops moxart_wdt_ops = {
.owner = THIS_MODULE,
.start = moxart_wdt_start,
.stop = moxart_wdt_stop,
.set_timeout = moxart_wdt_set_timeout,
.restart = moxart_wdt_restart,
};
static int moxart_wdt_probe(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt;
struct device *dev = &pdev->dev;
struct clk *clk;
int err;
unsigned int max_timeout;
bool nowayout = WATCHDOG_NOWAYOUT;
moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL);
if (!moxart_wdt)
return -ENOMEM;
platform_set_drvdata(pdev, moxart_wdt);
moxart_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(moxart_wdt->base))
return PTR_ERR(moxart_wdt->base);
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", __func__);
return PTR_ERR(clk);
}
moxart_wdt->clock_frequency = clk_get_rate(clk);
if (moxart_wdt->clock_frequency == 0) {
pr_err("%s: incorrect clock frequency\n", __func__);
return -EINVAL;
}
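/*
* Editorial note: REG_COUNT is loaded with clock_frequency * timeout in
* moxart_wdt_start(), so the largest safe timeout is
* UINT_MAX / clock_frequency -- e.g. a hypothetical 25 MHz APB clock
* caps the timeout at roughly 171 seconds.
*/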
max_timeout = UINT_MAX / moxart_wdt->clock_frequency;
moxart_wdt->dev.info = &moxart_wdt_info;
moxart_wdt->dev.ops = &moxart_wdt_ops;
moxart_wdt->dev.timeout = max_timeout;
moxart_wdt->dev.min_timeout = 1;
moxart_wdt->dev.max_timeout = max_timeout;
moxart_wdt->dev.parent = dev;
watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev);
watchdog_set_nowayout(&moxart_wdt->dev, nowayout);
watchdog_set_restart_priority(&moxart_wdt->dev, 128);
watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);
watchdog_stop_on_unregister(&moxart_wdt->dev);
err = devm_watchdog_register_device(dev, &moxart_wdt->dev);
if (err)
return err;
dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
moxart_wdt->dev.timeout, nowayout);
return 0;
}
static const struct of_device_id moxart_watchdog_match[] = {
{ .compatible = "moxa,moxart-watchdog" },
{ },
};
MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
static struct platform_driver moxart_wdt_driver = {
.probe = moxart_wdt_probe,
.driver = {
.name = "moxart-watchdog",
.of_match_table = moxart_watchdog_match,
},
};
module_platform_driver(moxart_wdt_driver);
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds");
MODULE_DESCRIPTION("MOXART watchdog driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Jensen <[email protected]>");
/* ==== end of linux-master: drivers/watchdog/moxart_wdt.c ==== */
// SPDX-License-Identifier: GPL-2.0+
/*
* Berkshire USB-PC Watchdog Card Driver
*
* (c) Copyright 2004-2007 Wim Van Sebroeck <[email protected]>.
*
* Based on source code of the following authors:
* Ken Hollis <[email protected]>,
* Alan Cox <[email protected]>,
* Matt Domsch <[email protected]>,
* Rob Radez <[email protected]>,
* Greg Kroah-Hartman <[email protected]>
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
* Thanks also to Simon Machell at Berkshire Products Inc. for
* providing the test hardware. More info is available at
* http://www.berkprod.com/ or http://www.pcwatchdog.com/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
#include <linux/usb.h> /* For USB functions */
#include <linux/slab.h> /* For kmalloc, ... */
#include <linux/mutex.h> /* For mutex locking */
#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
/* Module and Version Information */
#define DRIVER_VERSION "1.02"
#define DRIVER_AUTHOR "Wim Van Sebroeck <[email protected]>"
#define DRIVER_DESC "Berkshire USB-PC Watchdog driver"
#define DRIVER_NAME "pcwd_usb"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define WATCHDOG_HEARTBEAT 0 /* default heartbeat =
delay-time from dip-switches */
static int heartbeat = WATCHDOG_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
"(0<heartbeat<65536 or 0=delay-time from dip-switches, default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* The vendor and product id's for the USB-PC Watchdog card */
#define USB_PCWD_VENDOR_ID 0x0c98
#define USB_PCWD_PRODUCT_ID 0x1140
/* table of devices that work with this driver */
static const struct usb_device_id usb_pcwd_table[] = {
{ USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usb_pcwd_table);
/* according to documentation the max. time to process a command for the
* USB watchdog card is 100 or 200 ms, so we give it 250 ms to do its job */
#define USB_COMMAND_TIMEOUT 250
/* Watchdog's internal commands */
#define CMD_READ_TEMP 0x02 /* Read Temperature;
Re-trigger Watchdog */
#define CMD_TRIGGER CMD_READ_TEMP
#define CMD_GET_STATUS 0x04 /* Get Status Information */
#define CMD_GET_FIRMWARE_VERSION 0x08 /* Get Firmware Version */
#define CMD_GET_DIP_SWITCH_SETTINGS 0x0c /* Get Dip Switch Settings */
#define CMD_READ_WATCHDOG_TIMEOUT 0x18 /* Read Current Watchdog Time */
#define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 /* Write Current WatchdogTime */
#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */
#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
/* Watchdog's Dip Switch heartbeat values */
static const int heartbeat_tbl[] = {
5, /* OFF-OFF-OFF = 5 Sec */
10, /* OFF-OFF-ON = 10 Sec */
30, /* OFF-ON-OFF = 30 Sec */
60, /* OFF-ON-ON = 1 Min */
300, /* ON-OFF-OFF = 5 Min */
600, /* ON-OFF-ON = 10 Min */
1800, /* ON-ON-OFF = 30 Min */
3600, /* ON-ON-ON = 1 hour */
};
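/*
* Editorial example (not part of the original driver): the low three
* dip-switch bits index heartbeat_tbl, exactly as the probe routine
* does below. A minimal sketch:
*/
static inline int usb_pcwd_dip_heartbeat(unsigned char option_switches)
{
/* e.g. OFF-ON-OFF (0b010) selects heartbeat_tbl[2] == 30 seconds */
return heartbeat_tbl[option_switches & 0x07];
}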
/* We can only use 1 card due to the /dev/watchdog restriction */
static int cards_found;
/* some internal variables */
static unsigned long is_active;
static char expect_release;
/* Structure to hold all of our device specific stuff */
struct usb_pcwd_private {
/* save off the usb device pointer */
struct usb_device *udev;
/* the interface for this device */
struct usb_interface *interface;
/* the interface number used for commands */
unsigned int interface_number;
/* the buffer to intr data */
unsigned char *intr_buffer;
/* the dma address for the intr buffer */
dma_addr_t intr_dma;
/* the size of the intr buffer */
size_t intr_size;
/* the urb used for the intr pipe */
struct urb *intr_urb;
/* The command that is reported back */
unsigned char cmd_command;
/* The data MSB that is reported back */
unsigned char cmd_data_msb;
/* The data LSB that is reported back */
unsigned char cmd_data_lsb;
/* true if we received a report after a command */
atomic_t cmd_received;
/* Whether or not the device exists */
int exists;
/* locks this structure */
struct mutex mtx;
};
static struct usb_pcwd_private *usb_pcwd_device;
/* prevent races between open() and disconnect() */
static DEFINE_MUTEX(disconnect_mutex);
/* local function prototypes */
static int usb_pcwd_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void usb_pcwd_disconnect(struct usb_interface *interface);
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver usb_pcwd_driver = {
.name = DRIVER_NAME,
.probe = usb_pcwd_probe,
.disconnect = usb_pcwd_disconnect,
.id_table = usb_pcwd_table,
};
static void usb_pcwd_intr_done(struct urb *urb)
{
struct usb_pcwd_private *usb_pcwd =
(struct usb_pcwd_private *)urb->context;
unsigned char *data = usb_pcwd->intr_buffer;
struct device *dev = &usb_pcwd->interface->dev;
int retval;
switch (urb->status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d",
__func__, urb->status);
return;
/* -EPIPE: should clear the halt */
default: /* error */
dev_dbg(dev, "%s - nonzero urb status received: %d",
__func__, urb->status);
goto resubmit;
}
dev_dbg(dev, "received following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
data[0], data[1], data[2]);
usb_pcwd->cmd_command = data[0];
usb_pcwd->cmd_data_msb = data[1];
usb_pcwd->cmd_data_lsb = data[2];
/* notify anyone waiting that the cmd has finished */
atomic_set(&usb_pcwd->cmd_received, 1);
resubmit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
pr_err("can't resubmit intr, usb_submit_urb failed with result %d\n",
retval);
}
static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
unsigned char cmd, unsigned char *msb, unsigned char *lsb)
{
int got_response, count;
unsigned char *buf;
/* We will not send any commands if the USB PCWD device does
* not exist */
if ((!usb_pcwd) || (!usb_pcwd->exists))
return -1;
buf = kmalloc(6, GFP_KERNEL);
if (buf == NULL)
return 0;
/* The USB PC Watchdog uses a 6-byte report format.
* The board currently uses only 3 of the 6 bytes of the report. */
buf[0] = cmd; /* Byte 0 = CMD */
buf[1] = *msb; /* Byte 1 = Data MSB */
buf[2] = *lsb; /* Byte 2 = Data LSB */
buf[3] = buf[4] = buf[5] = 0; /* All other bytes not used */
dev_dbg(&usb_pcwd->interface->dev,
"sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
buf[0], buf[1], buf[2]);
atomic_set(&usb_pcwd->cmd_received, 0);
if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0),
HID_REQ_SET_REPORT, HID_DT_REPORT,
0x0200, usb_pcwd->interface_number, buf, 6,
USB_COMMAND_TIMEOUT) != 6) {
dev_dbg(&usb_pcwd->interface->dev,
"usb_pcwd_send_command: error in usb_control_msg for cmd 0x%x 0x%x 0x%x\n",
cmd, *msb, *lsb);
}
/* wait till the usb card processed the command,
* with a max. timeout of USB_COMMAND_TIMEOUT */
got_response = 0;
for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response);
count++) {
mdelay(1);
if (atomic_read(&usb_pcwd->cmd_received))
got_response = 1;
}
if ((got_response) && (cmd == usb_pcwd->cmd_command)) {
/* read back response */
*msb = usb_pcwd->cmd_data_msb;
*lsb = usb_pcwd->cmd_data_lsb;
}
kfree(buf);
return got_response;
}
static int usb_pcwd_start(struct usb_pcwd_private *usb_pcwd)
{
unsigned char msb = 0x00;
unsigned char lsb = 0x00;
int retval;
/* Enable Watchdog */
retval = usb_pcwd_send_command(usb_pcwd, CMD_ENABLE_WATCHDOG,
&msb, &lsb);
if ((retval == 0) || (lsb == 0)) {
pr_err("Card did not acknowledge enable attempt\n");
return -1;
}
return 0;
}
static int usb_pcwd_stop(struct usb_pcwd_private *usb_pcwd)
{
unsigned char msb = 0xA5;
unsigned char lsb = 0xC3;
int retval;
/* Disable Watchdog */
retval = usb_pcwd_send_command(usb_pcwd, CMD_DISABLE_WATCHDOG,
&msb, &lsb);
if ((retval == 0) || (lsb != 0)) {
pr_err("Card did not acknowledge disable attempt\n");
return -1;
}
return 0;
}
static int usb_pcwd_keepalive(struct usb_pcwd_private *usb_pcwd)
{
unsigned char dummy;
/* Re-trigger Watchdog */
usb_pcwd_send_command(usb_pcwd, CMD_TRIGGER, &dummy, &dummy);
return 0;
}
static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
{
unsigned char msb = t / 256;
unsigned char lsb = t % 256;
if ((t < 0x0001) || (t > 0xFFFF))
return -EINVAL;
/* Write new heartbeat to watchdog */
usb_pcwd_send_command(usb_pcwd, CMD_WRITE_WATCHDOG_TIMEOUT, &msb, &lsb);
heartbeat = t;
return 0;
}
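/*
* Editorial note: the 16-bit heartbeat is split across the report's two
* data bytes, e.g. t = 300 gives msb = 0x01 and lsb = 0x2C
* (0x01 * 256 + 0x2C == 300); usb_pcwd_get_timeleft() below recombines
* them as (msb << 8) + lsb.
*/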
static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
int *temperature)
{
unsigned char msb = 0x00;
unsigned char lsb = 0x00;
usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
/*
* Convert Celsius to Fahrenheit, since this was
* the decided 'standard' for this return value.
*/
*temperature = (lsb * 9 / 5) + 32;
return 0;
}
static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
int *time_left)
{
unsigned char msb = 0x00;
unsigned char lsb = 0x00;
/* Read the time that's left before rebooting */
/* Note: if the board is not yet armed then we will read 0xFFFF */
usb_pcwd_send_command(usb_pcwd, CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb);
*time_left = (msb << 8) + lsb;
return 0;
}
/*
* /dev/watchdog handling
*/
static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
size_t i;
/* note: just in case someone wrote the magic character
* five months ago... */
expect_release = 0;
/* scan to see whether or not we got the
* magic character */
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_release = 42;
}
}
/* someone wrote to us, we should reload the timer */
usb_pcwd_keepalive(usb_pcwd_device);
}
return len;
}
static long usb_pcwd_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = DRIVER_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_GETTEMP:
{
int temperature;
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
return put_user(temperature, p);
}
case WDIOC_SETOPTIONS:
{
int new_options, retval = -EINVAL;
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
usb_pcwd_stop(usb_pcwd_device);
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
usb_pcwd_start(usb_pcwd_device);
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
usb_pcwd_keepalive(usb_pcwd_device);
return 0;
case WDIOC_SETTIMEOUT:
{
int new_heartbeat;
if (get_user(new_heartbeat, p))
return -EFAULT;
if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat))
return -EINVAL;
usb_pcwd_keepalive(usb_pcwd_device);
}
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
case WDIOC_GETTIMELEFT:
{
int time_left;
if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left))
return -EFAULT;
return put_user(time_left, p);
}
default:
return -ENOTTY;
}
}
static int usb_pcwd_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &is_active))
return -EBUSY;
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
return stream_open(inode, file);
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
*/
if (expect_release == 42) {
usb_pcwd_stop(usb_pcwd_device);
} else {
pr_crit("Unexpected close, not stopping watchdog!\n");
usb_pcwd_keepalive(usb_pcwd_device);
}
expect_release = 0;
clear_bit(0, &is_active);
return 0;
}
/*
* /dev/temperature handling
*/
static ssize_t usb_pcwd_temperature_read(struct file *file, char __user *data,
size_t len, loff_t *ppos)
{
int temperature;
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
if (copy_to_user(data, &temperature, 1))
return -EFAULT;
return 1;
}
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
return stream_open(inode, file);
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* Notify system
*/
static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
usb_pcwd_stop(usb_pcwd_device); /* Turn the WDT off */
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations usb_pcwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = usb_pcwd_open,
.release = usb_pcwd_release,
};
static struct miscdevice usb_pcwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &usb_pcwd_fops,
};
static const struct file_operations usb_pcwd_temperature_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = usb_pcwd_temperature_read,
.open = usb_pcwd_temperature_open,
.release = usb_pcwd_temperature_release,
};
static struct miscdevice usb_pcwd_temperature_miscdev = {
.minor = TEMP_MINOR,
.name = "temperature",
.fops = &usb_pcwd_temperature_fops,
};
static struct notifier_block usb_pcwd_notifier = {
.notifier_call = usb_pcwd_notify_sys,
};
/**
* usb_pcwd_delete
*/
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
{
usb_free_urb(usb_pcwd->intr_urb);
usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
kfree(usb_pcwd);
}
/**
* usb_pcwd_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int usb_pcwd_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
struct usb_pcwd_private *usb_pcwd = NULL;
int pipe;
int retval = -ENOMEM;
int got_fw_rev;
unsigned char fw_rev_major, fw_rev_minor;
char fw_ver_str[20];
unsigned char option_switches, dummy;
cards_found++;
if (cards_found > 1) {
pr_err("This driver only supports 1 device\n");
return -ENODEV;
}
/* get the active interface descriptor */
iface_desc = interface->cur_altsetting;
/* check out that we have a HID device */
if (iface_desc->desc.bInterfaceClass != USB_CLASS_HID) {
pr_err("The device isn't a Human Interface Device\n");
return -ENODEV;
}
if (iface_desc->desc.bNumEndpoints < 1)
return -ENODEV;
/* check out the endpoint: it has to be Interrupt & IN */
endpoint = &iface_desc->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint)) {
/* we didn't find an Interrupt endpoint with direction IN */
pr_err("Couldn't find an INTR & IN endpoint\n");
return -ENODEV;
}
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
/* allocate memory for our device and initialize it */
usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
if (usb_pcwd == NULL)
goto error;
usb_pcwd_device = usb_pcwd;
mutex_init(&usb_pcwd->mtx);
usb_pcwd->udev = udev;
usb_pcwd->interface = interface;
usb_pcwd->interface_number = iface_desc->desc.bInterfaceNumber;
usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ?
le16_to_cpu(endpoint->wMaxPacketSize) : 8);
/* set up the memory buffers */
usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size,
GFP_KERNEL, &usb_pcwd->intr_dma);
if (!usb_pcwd->intr_buffer) {
pr_err("Out of memory\n");
goto error;
}
/* allocate the interrupt urb */
usb_pcwd->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!usb_pcwd->intr_urb)
goto error;
/* initialise the intr urb */
usb_fill_int_urb(usb_pcwd->intr_urb, udev, pipe,
usb_pcwd->intr_buffer, usb_pcwd->intr_size,
usb_pcwd_intr_done, usb_pcwd, endpoint->bInterval);
usb_pcwd->intr_urb->transfer_dma = usb_pcwd->intr_dma;
usb_pcwd->intr_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* register our interrupt URB with the USB system */
if (usb_submit_urb(usb_pcwd->intr_urb, GFP_KERNEL)) {
pr_err("Problem registering interrupt URB\n");
retval = -EIO; /* failure */
goto error;
}
/* The device exists and can be communicated with */
usb_pcwd->exists = 1;
/* disable card */
usb_pcwd_stop(usb_pcwd);
/* Get the Firmware Version */
got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION,
&fw_rev_major, &fw_rev_minor);
if (got_fw_rev)
sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor);
else
sprintf(fw_ver_str, "<card no answer>");
pr_info("Found card (Firmware: %s) with temp option\n", fw_ver_str);
/* Get switch settings */
usb_pcwd_send_command(usb_pcwd, CMD_GET_DIP_SWITCH_SETTINGS, &dummy,
&option_switches);
pr_info("Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n",
option_switches,
((option_switches & 0x10) ? "ON" : "OFF"),
((option_switches & 0x08) ? "ON" : "OFF"));
/* If heartbeat = 0 then we use the heartbeat from the dip-switches */
if (heartbeat == 0)
heartbeat = heartbeat_tbl[(option_switches & 0x07)];
/* Check that the heartbeat value is within its range;
* if not, reset to the default */
if (usb_pcwd_set_heartbeat(usb_pcwd, heartbeat)) {
usb_pcwd_set_heartbeat(usb_pcwd, WATCHDOG_HEARTBEAT);
pr_info("heartbeat value must be 0<heartbeat<65536, using %d\n",
WATCHDOG_HEARTBEAT);
}
retval = register_reboot_notifier(&usb_pcwd_notifier);
if (retval != 0) {
pr_err("cannot register reboot notifier (err=%d)\n", retval);
goto error;
}
retval = misc_register(&usb_pcwd_temperature_miscdev);
if (retval != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
TEMP_MINOR, retval);
goto err_out_unregister_reboot;
}
retval = misc_register(&usb_pcwd_miscdev);
if (retval != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, retval);
goto err_out_misc_deregister;
}
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, usb_pcwd);
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
err_out_misc_deregister:
misc_deregister(&usb_pcwd_temperature_miscdev);
err_out_unregister_reboot:
unregister_reboot_notifier(&usb_pcwd_notifier);
error:
if (usb_pcwd)
usb_pcwd_delete(usb_pcwd);
usb_pcwd_device = NULL;
return retval;
}
/**
* usb_pcwd_disconnect
*
* Called by the usb core when the device is removed from the system.
*
* This routine guarantees that the driver will not submit any more urbs
* by clearing dev->udev.
*/
static void usb_pcwd_disconnect(struct usb_interface *interface)
{
struct usb_pcwd_private *usb_pcwd;
/* prevent races with open() */
mutex_lock(&disconnect_mutex);
usb_pcwd = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
mutex_lock(&usb_pcwd->mtx);
/* Stop the timer before we leave */
if (!nowayout)
usb_pcwd_stop(usb_pcwd);
/* We should now stop communicating with the USB PCWD device */
usb_pcwd->exists = 0;
/* Deregister */
misc_deregister(&usb_pcwd_miscdev);
misc_deregister(&usb_pcwd_temperature_miscdev);
unregister_reboot_notifier(&usb_pcwd_notifier);
mutex_unlock(&usb_pcwd->mtx);
/* Delete the USB PCWD device */
usb_pcwd_delete(usb_pcwd);
cards_found--;
mutex_unlock(&disconnect_mutex);
pr_info("USB PC Watchdog disconnected\n");
}
module_usb_driver(usb_pcwd_driver);
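/*
* Editorial example (not part of the driver): a minimal userspace client
* for the /dev/watchdog interface implemented above. Error handling is
* elided; the trailing 'V' write relies on WDIOF_MAGICCLOSE and assumes
* the module was loaded with nowayout=0.
*/
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>
int main(void)
{
int timeout = 60;
int fd = open("/dev/watchdog", O_WRONLY);
if (fd < 0)
return 1;
ioctl(fd, WDIOC_SETTIMEOUT, &timeout); /* -> usb_pcwd_set_heartbeat() */
ioctl(fd, WDIOC_KEEPALIVE, 0); /* -> usb_pcwd_keepalive() */
write(fd, "V", 1); /* magic close: sets expect_release = 42 */
close(fd); /* release path stops the card */
return 0;
}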
/* ==== end of linux-master: drivers/watchdog/pcwd_usb.c ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* (c) Copyright 2021 Hewlett Packard Enterprise Development LP.
*/
#include <linux/hrtimer.h>
#include <linux/watchdog.h>
#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
static enum hrtimer_restart watchdog_hrtimer_pretimeout(struct hrtimer *timer)
{
struct watchdog_core_data *wd_data;
wd_data = container_of(timer, struct watchdog_core_data, pretimeout_timer);
watchdog_notify_pretimeout(wd_data->wdd);
return HRTIMER_NORESTART;
}
void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
}
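/*
* Editorial note: this software pretimeout is only armed when the
* hardware itself lacks WDIOF_PRETIMEOUT. The hrtimer then fires
* (timeout - pretimeout) seconds after the watchdog is started, e.g.
* timeout = 60 with pretimeout = 10 notifies the governor at the
* 50-second mark.
*/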
void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
{
if (!(wdd->info->options & WDIOF_PRETIMEOUT) &&
!watchdog_pretimeout_invalid(wdd, wdd->pretimeout))
hrtimer_start(&wdd->wd_data->pretimeout_timer,
ktime_set(wdd->timeout - wdd->pretimeout, 0),
HRTIMER_MODE_REL);
else
hrtimer_cancel(&wdd->wd_data->pretimeout_timer);
}
void watchdog_hrtimer_pretimeout_stop(struct watchdog_device *wdd)
{
hrtimer_cancel(&wdd->wd_data->pretimeout_timer);
}
/* ==== end of linux-master: drivers/watchdog/watchdog_hrtimer_pretimeout.c ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2L WDT Watchdog Driver
*
* Copyright (C) 2021 Renesas Electronics Corporation
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/units.h>
#include <linux/watchdog.h>
#define WDTCNT 0x00
#define WDTSET 0x04
#define WDTTIM 0x08
#define WDTINT 0x0C
#define PECR 0x10
#define PEEN 0x14
#define WDTCNT_WDTEN BIT(0)
#define WDTINT_INTDISP BIT(0)
#define PEEN_FORCE BIT(0)
#define WDT_DEFAULT_TIMEOUT 60U
/* The timeout period is a 12-bit value programmed into WDTSET[31:20] */
#define WDTSET_COUNTER_MASK (0xFFF00000)
#define WDTSET_COUNTER_VAL(f) ((f) << 20)
#define F2CYCLE_NSEC(f) (1000000000 / (f))
#define RZV2M_A_NSEC 730
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
enum rz_wdt_type {
WDT_RZG2L,
WDT_RZV2M,
};
struct rzg2l_wdt_priv {
void __iomem *base;
struct watchdog_device wdev;
struct reset_control *rstc;
unsigned long osc_clk_rate;
unsigned long delay;
unsigned long minimum_assertion_period;
struct clk *pclk;
struct clk *osc_clk;
enum rz_wdt_type devtype;
};
static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
{
int err, status;
if (priv->devtype == WDT_RZV2M) {
/* WDT needs TYPE-B reset control */
err = reset_control_assert(priv->rstc);
if (err)
return err;
ndelay(priv->minimum_assertion_period);
err = reset_control_deassert(priv->rstc);
if (err)
return err;
err = read_poll_timeout(reset_control_status, status,
status != 1, 0, 1000, false,
priv->rstc);
} else {
err = reset_control_reset(priv->rstc);
}
return err;
}
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
{
/* delay timer when change the setting register */
ndelay(priv->delay);
}
static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime)
{
u64 timer_cycle_us = 1024 * 1024ULL * (wdttime + 1) * MICRO;
return div64_ul(timer_cycle_us, cycle);
}
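/*
* Editorial note: one counter period is 2^20 * (wdttime + 1) / osc_clk_rate
* seconds. With a hypothetical 24 MHz OSC clock the maximum wdttime of
* 0xfff gives 1024 * 1024 * 4096 / 24000000 ~= 179 seconds, which is
* what bounds wdev.max_timeout in the probe routine below.
*/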
static void rzg2l_wdt_write(struct rzg2l_wdt_priv *priv, u32 val, unsigned int reg)
{
if (reg == WDTSET)
val &= WDTSET_COUNTER_MASK;
writel_relaxed(val, priv->base + reg);
/* Registers other than WDTINT are always synchronized with WDT_CLK */
if (reg != WDTINT)
rzg2l_wdt_wait_delay(priv);
}
static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
u32 time_out;
/* Clear Lapsed Time Register and clear Interrupt */
rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
/* 2 consecutive overflow cycles are needed to trigger a reset */
time_out = (wdev->timeout * (MICRO / 2)) /
rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0);
rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(time_out), WDTSET);
}
static int rzg2l_wdt_start(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
pm_runtime_get_sync(wdev->parent);
/* Initialize time out */
rzg2l_wdt_init_timeout(wdev);
/* Initialize watchdog counter register */
rzg2l_wdt_write(priv, 0, WDTTIM);
/* Enable watchdog timer */
rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
return 0;
}
static int rzg2l_wdt_stop(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
rzg2l_wdt_reset(priv);
pm_runtime_put(wdev->parent);
return 0;
}
static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
{
wdev->timeout = timeout;
/*
* If the watchdog is active, reset the module so that the WDTSET
* register is reprogrammed with the new timeout: rzg2l_wdt_stop()
* resets the module (via rzg2l_wdt_reset()) and rzg2l_wdt_start()
* writes the new value.
*/
if (watchdog_active(wdev)) {
rzg2l_wdt_stop(wdev);
rzg2l_wdt_start(wdev);
}
return 0;
}
static int rzg2l_wdt_restart(struct watchdog_device *wdev,
unsigned long action, void *data)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
clk_prepare_enable(priv->pclk);
clk_prepare_enable(priv->osc_clk);
if (priv->devtype == WDT_RZG2L) {
/* Generate Reset (WDTRSTB) Signal on parity error */
rzg2l_wdt_write(priv, 0, PECR);
/* Force parity error */
rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
} else {
/* RZ/V2M doesn't have parity error registers */
rzg2l_wdt_reset(priv);
wdev->timeout = 0;
/* Initialize time out */
rzg2l_wdt_init_timeout(wdev);
/* Initialize watchdog counter register */
rzg2l_wdt_write(priv, 0, WDTTIM);
/* Enable watchdog timer */
rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
/* Wait 2 consecutive overflow cycles for reset */
mdelay(DIV_ROUND_UP(2 * 0xFFFFF * 1000, priv->osc_clk_rate));
}
return 0;
}
static const struct watchdog_info rzg2l_wdt_ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
.identity = "Renesas RZ/G2L WDT Watchdog",
};
static int rzg2l_wdt_ping(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
return 0;
}
static const struct watchdog_ops rzg2l_wdt_ops = {
.owner = THIS_MODULE,
.start = rzg2l_wdt_start,
.stop = rzg2l_wdt_stop,
.ping = rzg2l_wdt_ping,
.set_timeout = rzg2l_wdt_set_timeout,
.restart = rzg2l_wdt_restart,
};
static void rzg2l_wdt_reset_assert_pm_disable(void *data)
{
struct watchdog_device *wdev = data;
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
pm_runtime_disable(wdev->parent);
reset_control_assert(priv->rstc);
}
static int rzg2l_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rzg2l_wdt_priv *priv;
unsigned long pclk_rate;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
/* Get watchdog main clock */
priv->osc_clk = devm_clk_get(&pdev->dev, "oscclk");
if (IS_ERR(priv->osc_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->osc_clk), "no oscclk");
priv->osc_clk_rate = clk_get_rate(priv->osc_clk);
if (!priv->osc_clk_rate)
return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0");
/* Get Peripheral clock */
priv->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(priv->pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
pclk_rate = clk_get_rate(priv->pclk);
if (!pclk_rate)
return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0");
priv->delay = F2CYCLE_NSEC(priv->osc_clk_rate) * 6 + F2CYCLE_NSEC(pclk_rate) * 9;
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
"failed to get cpg reset");
ret = reset_control_deassert(priv->rstc);
if (ret)
return dev_err_probe(dev, ret, "failed to deassert");
priv->devtype = (uintptr_t)of_device_get_match_data(dev);
if (priv->devtype == WDT_RZV2M) {
priv->minimum_assertion_period = RZV2M_A_NSEC +
3 * F2CYCLE_NSEC(pclk_rate) + 5 *
max(F2CYCLE_NSEC(priv->osc_clk_rate),
F2CYCLE_NSEC(pclk_rate));
}
pm_runtime_enable(&pdev->dev);
priv->wdev.info = &rzg2l_wdt_ident;
priv->wdev.ops = &rzg2l_wdt_ops;
priv->wdev.parent = dev;
priv->wdev.min_timeout = 1;
priv->wdev.max_timeout = rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0xfff) /
USEC_PER_SEC;
priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
watchdog_set_drvdata(&priv->wdev, priv);
ret = devm_add_action_or_reset(&pdev->dev,
rzg2l_wdt_reset_assert_pm_disable,
&priv->wdev);
if (ret < 0)
return ret;
watchdog_set_nowayout(&priv->wdev, nowayout);
watchdog_stop_on_unregister(&priv->wdev);
ret = watchdog_init_timeout(&priv->wdev, 0, dev);
if (ret)
dev_warn(dev, "Specified timeout invalid, using default\n");
return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
}
static const struct of_device_id rzg2l_wdt_ids[] = {
{ .compatible = "renesas,rzg2l-wdt", .data = (void *)WDT_RZG2L },
{ .compatible = "renesas,rzv2m-wdt", .data = (void *)WDT_RZV2M },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
static struct platform_driver rzg2l_wdt_driver = {
.driver = {
.name = "rzg2l_wdt",
.of_match_table = rzg2l_wdt_ids,
},
.probe = rzg2l_wdt_probe,
};
module_platform_driver(rzg2l_wdt_driver);
MODULE_DESCRIPTION("Renesas RZ/G2L WDT Watchdog Driver");
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_LICENSE("GPL v2");
/* ==== end of linux-master: drivers/watchdog/rzg2l_wdt.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PIC32 deadman timer driver
*
* Purna Chandra Mandal <[email protected]>
* Copyright (c) 2016, Microchip Technology Inc.
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
#include <asm/mach-pic32/pic32.h>
/* Deadman Timer Regs */
#define DMTCON_REG 0x00
#define DMTPRECLR_REG 0x10
#define DMTCLR_REG 0x20
#define DMTSTAT_REG 0x30
#define DMTCNT_REG 0x40
#define DMTPSCNT_REG 0x60
#define DMTPSINTV_REG 0x70
/* Deadman Timer Regs fields */
#define DMT_ON BIT(15)
#define DMT_STEP1_KEY BIT(6)
#define DMT_STEP2_KEY BIT(3)
#define DMTSTAT_WINOPN BIT(0)
#define DMTSTAT_EVENT BIT(5)
#define DMTSTAT_BAD2 BIT(6)
#define DMTSTAT_BAD1 BIT(7)
/* Reset Control Register fields for watchdog */
#define RESETCON_DMT_TIMEOUT BIT(5)
struct pic32_dmt {
void __iomem *regs;
struct clk *clk;
};
static inline void dmt_enable(struct pic32_dmt *dmt)
{
writel(DMT_ON, PIC32_SET(dmt->regs + DMTCON_REG));
}
static inline void dmt_disable(struct pic32_dmt *dmt)
{
writel(DMT_ON, PIC32_CLR(dmt->regs + DMTCON_REG));
/*
* Cannot touch registers in the CPU cycle following clearing the
* ON bit.
*/
nop();
}
static inline int dmt_bad_status(struct pic32_dmt *dmt)
{
u32 val;
val = readl(dmt->regs + DMTSTAT_REG);
val &= (DMTSTAT_BAD1 | DMTSTAT_BAD2 | DMTSTAT_EVENT);
if (val)
return -EAGAIN;
return 0;
}
static inline int dmt_keepalive(struct pic32_dmt *dmt)
{
u32 v;
u32 timeout = 500;
/* set pre-clear key */
writel(DMT_STEP1_KEY << 8, dmt->regs + DMTPRECLR_REG);
/* wait for DMT window to open */
while (--timeout) {
v = readl(dmt->regs + DMTSTAT_REG) & DMTSTAT_WINOPN;
if (v == DMTSTAT_WINOPN)
break;
}
/* apply key2 */
writel(DMT_STEP2_KEY, dmt->regs + DMTCLR_REG);
/* check whether keys are latched correctly */
return dmt_bad_status(dmt);
}
static inline u32 pic32_dmt_get_timeout_secs(struct pic32_dmt *dmt)
{
unsigned long rate;
rate = clk_get_rate(dmt->clk);
if (rate)
return readl(dmt->regs + DMTPSCNT_REG) / rate;
return 0;
}
static inline u32 pic32_dmt_bootstatus(struct pic32_dmt *dmt)
{
u32 v;
void __iomem *rst_base;
rst_base = ioremap(PIC32_BASE_RESET, 0x10);
if (!rst_base)
return 0;
v = readl(rst_base);
writel(RESETCON_DMT_TIMEOUT, PIC32_CLR(rst_base));
iounmap(rst_base);
return v & RESETCON_DMT_TIMEOUT;
}
static int pic32_dmt_start(struct watchdog_device *wdd)
{
struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
dmt_enable(dmt);
return dmt_keepalive(dmt);
}
static int pic32_dmt_stop(struct watchdog_device *wdd)
{
struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
dmt_disable(dmt);
return 0;
}
static int pic32_dmt_ping(struct watchdog_device *wdd)
{
struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
return dmt_keepalive(dmt);
}
static const struct watchdog_ops pic32_dmt_fops = {
.owner = THIS_MODULE,
.start = pic32_dmt_start,
.stop = pic32_dmt_stop,
.ping = pic32_dmt_ping,
};
static const struct watchdog_info pic32_dmt_ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
.identity = "PIC32 Deadman Timer",
};
static struct watchdog_device pic32_dmt_wdd = {
.info = &pic32_dmt_ident,
.ops = &pic32_dmt_fops,
};
static int pic32_dmt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
struct pic32_dmt *dmt;
struct watchdog_device *wdd = &pic32_dmt_wdd;
dmt = devm_kzalloc(dev, sizeof(*dmt), GFP_KERNEL);
if (!dmt)
return -ENOMEM;
dmt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmt->regs))
return PTR_ERR(dmt->regs);
dmt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(dmt->clk)) {
dev_err(dev, "clk not found\n");
return PTR_ERR(dmt->clk);
}
wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
if (!wdd->timeout) {
dev_err(dev, "failed to read watchdog register timeout\n");
return -EINVAL;
}
dev_info(dev, "timeout %d\n", wdd->timeout);
wdd->bootstatus = pic32_dmt_bootstatus(dmt) ? WDIOF_CARDRESET : 0;
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdd, dmt);
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
platform_set_drvdata(pdev, wdd);
return 0;
}
static const struct of_device_id pic32_dmt_of_ids[] = {
{ .compatible = "microchip,pic32mzda-dmt",},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pic32_dmt_of_ids);
static struct platform_driver pic32_dmt_driver = {
.probe = pic32_dmt_probe,
.driver = {
.name = "pic32-dmt",
.of_match_table = of_match_ptr(pic32_dmt_of_ids),
}
};
module_platform_driver(pic32_dmt_driver);
MODULE_AUTHOR("Purna Chandra Mandal <[email protected]>");
MODULE_DESCRIPTION("Microchip PIC32 DMT Driver");
MODULE_LICENSE("GPL");
/* ==== end of linux-master: drivers/watchdog/pic32-dmt.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Intel 21285 watchdog driver
* Copyright (c) Phil Blundell <[email protected]>, 1998
*
* based on
*
* SoftDog 0.05: A Software Watchdog Device
*
* (c) Copyright 1996 Alan Cox <[email protected]>,
* All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/system_info.h>
#include <asm/hardware/dec21285.h>
/*
* Define this to stop the watchdog actually rebooting the machine.
*/
#undef ONLY_TESTING
static unsigned int soft_margin = 60; /* in seconds */
static unsigned int reload;
static unsigned long timer_alive;
#ifdef ONLY_TESTING
/*
* If the timer expires..
*/
static void watchdog_fire(int irq, void *dev_id)
{
pr_crit("Would Reboot\n");
*CSR_TIMER4_CNTL = 0;
*CSR_TIMER4_CLR = 0;
}
#endif
/*
* Refresh the timer.
*/
static void watchdog_ping(void)
{
*CSR_TIMER4_LOAD = reload;
}
/*
* Allow only one person to hold it open
*/
static int watchdog_open(struct inode *inode, struct file *file)
{
int ret;
if (*CSR_SA110_CNTL & (1 << 13))
return -EBUSY;
if (test_and_set_bit(1, &timer_alive))
return -EBUSY;
reload = soft_margin * (mem_fclk_21285 / 256);
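/*
* Editorial note: timer 4 counts down at mem_fclk_21285 / 256 Hz, so
* reload holds soft_margin seconds' worth of ticks -- e.g. a
* hypothetical 48 MHz memory clock gives 187500 ticks/s, i.e. a reload
* of 11250000 for the default 60 s margin.
*/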
*CSR_TIMER4_CLR = 0;
watchdog_ping();
*CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD
| TIMER_CNTL_DIV256;
#ifdef ONLY_TESTING
ret = request_irq(IRQ_TIMER4, watchdog_fire, 0, "watchdog", NULL);
if (ret) {
*CSR_TIMER4_CNTL = 0;
clear_bit(1, &timer_alive);
}
#else
/*
* Setting this bit is irreversible; once enabled, there is
* no way to disable the watchdog.
*/
*CSR_SA110_CNTL |= 1 << 13;
ret = 0;
#endif
stream_open(inode, file);
return ret;
}
/*
* Shut off the timer.
* Note: if we really have enabled the watchdog, there
* is no way to turn off.
*/
static int watchdog_release(struct inode *inode, struct file *file)
{
#ifdef ONLY_TESTING
free_irq(IRQ_TIMER4, NULL);
clear_bit(1, &timer_alive);
#endif
return 0;
}
static ssize_t watchdog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/*
* Refresh the timer.
*/
if (len)
watchdog_ping();
return len;
}
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT,
.identity = "Footbridge Watchdog",
};
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int __user *int_arg = (int __user *)arg;
int new_margin, ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = 0;
if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, int_arg);
break;
case WDIOC_KEEPALIVE:
watchdog_ping();
ret = 0;
break;
case WDIOC_SETTIMEOUT:
ret = get_user(new_margin, int_arg);
if (ret)
break;
/* Arbitrary, can't find the card's limits */
if (new_margin < 0 || new_margin > 60) {
ret = -EINVAL;
break;
}
soft_margin = new_margin;
reload = soft_margin * (mem_fclk_21285 / 256);
watchdog_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(soft_margin, int_arg);
break;
}
return ret;
}
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = watchdog_open,
.release = watchdog_release,
};
static struct miscdevice watchdog_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &watchdog_fops,
};
static int __init footbridge_watchdog_init(void)
{
int retval;
if (machine_is_netwinder())
return -ENODEV;
retval = misc_register(&watchdog_miscdev);
if (retval < 0)
return retval;
pr_info("Footbridge Watchdog Timer: 0.01, timer margin: %d sec\n",
soft_margin);
return 0;
}
static void __exit footbridge_watchdog_exit(void)
{
misc_deregister(&watchdog_miscdev);
}
MODULE_AUTHOR("Phil Blundell <[email protected]>");
MODULE_DESCRIPTION("Footbridge watchdog driver");
MODULE_LICENSE("GPL");
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
module_init(footbridge_watchdog_init);
module_exit(footbridge_watchdog_exit);
/* ==== end of linux-master: drivers/watchdog/wdt285.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nv_tco 0.01: TCO timer driver for NV chipsets
*
* (c) Copyright 2005 Google Inc., All Rights Reserved.
*
* Based off i8xx_tco.c:
* (c) Copyright 2000 kernel concepts <[email protected]>, All Rights
* Reserved.
* https://www.kernelconcepts.de
*
* TCO timer driver for NV chipsets
* based on softdog.c by Alan Cox <[email protected]>
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "nv_tco.h"
/* Module and version information */
#define TCO_VERSION "0.01"
#define TCO_MODULE_NAME "NV_TCO"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
/* internal variables */
static unsigned int tcobase;
static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
static unsigned long timer_alive;
static char tco_expect_close;
static struct pci_dev *tco_pci;
/* the watchdog platform device */
static struct platform_device *nv_tco_platform_device;
/* module parameters */
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (2<heartbeat<39) */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
"default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Some TCO specific functions
*/
static inline unsigned char seconds_to_ticks(int seconds)
{
/* the internal timer is stored as ticks which decrement
* every 0.6 seconds */
return (seconds * 10) / 6;
}
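/*
* Editorial note: one tick is 0.6 s, hence the * 10 / 6 scaling --
* e.g. 30 seconds becomes 50 ticks, and 50 ticks * 0.6 s == 30 s.
*/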
static void tco_timer_start(void)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&tco_lock, flags);
val = inl(TCO_CNT(tcobase));
val &= ~TCO_CNT_TCOHALT;
outl(val, TCO_CNT(tcobase));
spin_unlock_irqrestore(&tco_lock, flags);
}
static void tco_timer_stop(void)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&tco_lock, flags);
val = inl(TCO_CNT(tcobase));
val |= TCO_CNT_TCOHALT;
outl(val, TCO_CNT(tcobase));
spin_unlock_irqrestore(&tco_lock, flags);
}
static void tco_timer_keepalive(void)
{
unsigned long flags;
spin_lock_irqsave(&tco_lock, flags);
outb(0x01, TCO_RLD(tcobase));
spin_unlock_irqrestore(&tco_lock, flags);
}
static int tco_timer_set_heartbeat(int t)
{
int ret = 0;
unsigned char tmrval;
unsigned long flags;
u8 val;
/*
* note seconds_to_ticks(t) > t, so if t > 0x3f then so is
* tmrval = seconds_to_ticks(t). Check that the count in seconds isn't
* out of range on its own (to avoid overflow in tmrval).
*/
if (t < 0 || t > 0x3f)
return -EINVAL;
tmrval = seconds_to_ticks(t);
/* "Values of 0h-3h are ignored and should not be attempted" */
if (tmrval > 0x3f || tmrval < 0x04)
return -EINVAL;
/* Write new heartbeat to watchdog */
spin_lock_irqsave(&tco_lock, flags);
val = inb(TCO_TMR(tcobase));
val &= 0xc0;
val |= tmrval;
outb(val, TCO_TMR(tcobase));
val = inb(TCO_TMR(tcobase));
if ((val & 0x3f) != tmrval)
ret = -EINVAL;
spin_unlock_irqrestore(&tco_lock, flags);
if (ret)
return ret;
heartbeat = t;
return 0;
}
/*
* /dev/watchdog handling
*/
static int nv_tco_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &timer_alive))
return -EBUSY;
/* Reload and activate timer */
tco_timer_keepalive();
tco_timer_start();
return stream_open(inode, file);
}
static int nv_tco_release(struct inode *inode, struct file *file)
{
/* Shut off the timer */
if (tco_expect_close == 42) {
tco_timer_stop();
} else {
pr_crit("Unexpected close, not stopping watchdog!\n");
tco_timer_keepalive();
}
clear_bit(0, &timer_alive);
tco_expect_close = 0;
return 0;
}
static ssize_t nv_tco_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
size_t i;
/*
* note: just in case someone wrote the magic character
* five months ago...
*/
tco_expect_close = 0;
/*
* scan to see whether or not we got the magic
* character
*/
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
tco_expect_close = 42;
}
}
/* someone wrote to us, we should reload the timer */
tco_timer_keepalive();
}
return len;
}
static long nv_tco_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int new_options, retval = -EINVAL;
int new_heartbeat;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
.firmware_version = 0,
.identity = TCO_MODULE_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
tco_timer_stop();
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
tco_timer_keepalive();
tco_timer_start();
retval = 0;
}
return retval;
case WDIOC_KEEPALIVE:
tco_timer_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, p))
return -EFAULT;
if (tco_timer_set_heartbeat(new_heartbeat))
return -EINVAL;
tco_timer_keepalive();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
return -ENOTTY;
}
}
/*
* Kernel Interfaces
*/
static const struct file_operations nv_tco_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = nv_tco_open,
.release = nv_tco_release,
};
static struct miscdevice nv_tco_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &nv_tco_fops,
};
/*
* Data for PCI driver interface
*
* This data only exists for exporting the supported
* PCI ids via MODULE_DEVICE_TABLE. We do not actually
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id tco_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
/*
* Init & exit routines
*/
static unsigned char nv_tco_getdevice(void)
{
struct pci_dev *dev = NULL;
u32 val;
/* Find the PCI device */
for_each_pci_dev(dev) {
if (pci_match_id(tco_pci_tbl, dev) != NULL) {
tco_pci = dev;
break;
}
}
if (!tco_pci)
return 0;
/* Find the base io port */
pci_read_config_dword(tco_pci, 0x64, &val);
val &= 0xffff;
if (val == 0x0001 || val == 0x0000) {
/* Something is wrong here, the BAR isn't set up */
pr_err("failed to get tcobase address\n");
return 0;
}
val &= 0xff00;
tcobase = val + 0x40;
if (!request_region(tcobase, 0x10, "NV TCO")) {
pr_err("I/O address 0x%04x already in use\n", tcobase);
return 0;
}
/* Set a reasonable heartbeat before we stop the timer */
tco_timer_set_heartbeat(30);
/*
* Stop the TCO before we change anything so we don't race with
* a zeroed timer.
*/
tco_timer_keepalive();
tco_timer_stop();
/* Disable SMI caused by TCO */
if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
pr_err("I/O address 0x%04x already in use\n",
MCP51_SMI_EN(tcobase));
goto out;
}
val = inl(MCP51_SMI_EN(tcobase));
val &= ~MCP51_SMI_EN_TCO;
outl(val, MCP51_SMI_EN(tcobase));
val = inl(MCP51_SMI_EN(tcobase));
release_region(MCP51_SMI_EN(tcobase), 4);
if (val & MCP51_SMI_EN_TCO) {
pr_err("Could not disable SMI caused by TCO\n");
goto out;
}
/* Check chipset's NO_REBOOT bit */
pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware\n");
goto out;
}
return 1;
out:
release_region(tcobase, 0x10);
return 0;
}
static int nv_tco_init(struct platform_device *dev)
{
int ret;
/* Check whether or not the hardware watchdog is there */
if (!nv_tco_getdevice())
return -ENODEV;
/* Check to see if last reboot was due to watchdog timeout */
pr_info("Watchdog reboot %sdetected\n",
inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");
/* Clear out the old status */
outl(TCO_STS_RESET, TCO_STS(tcobase));
/*
* Check that the heartbeat value is within its range.
* If not, reset to the default.
*/
if (tco_timer_set_heartbeat(heartbeat)) {
heartbeat = WATCHDOG_HEARTBEAT;
tco_timer_set_heartbeat(heartbeat);
pr_info("heartbeat value must be 2<heartbeat<39, using %d\n",
heartbeat);
}
ret = misc_register(&nv_tco_miscdev);
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto unreg_region;
}
clear_bit(0, &timer_alive);
tco_timer_stop();
pr_info("initialized (0x%04x). heartbeat=%d sec (nowayout=%d)\n",
tcobase, heartbeat, nowayout);
return 0;
unreg_region:
release_region(tcobase, 0x10);
return ret;
}
static void nv_tco_cleanup(void)
{
u32 val;
/* Stop the timer before we leave */
if (!nowayout)
tco_timer_stop();
/* Set the NO_REBOOT bit to prevent later reboots, just to be sure */
pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
pr_crit("Couldn't unset REBOOT bit. Machine may soon reset\n");
}
/* Deregister */
misc_deregister(&nv_tco_miscdev);
release_region(tcobase, 0x10);
}
static void nv_tco_remove(struct platform_device *dev)
{
if (tcobase)
nv_tco_cleanup();
}
static void nv_tco_shutdown(struct platform_device *dev)
{
u32 val;
tco_timer_stop();
/* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
* unset during shutdown. */
pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
}
static struct platform_driver nv_tco_driver = {
.probe = nv_tco_init,
.remove_new = nv_tco_remove,
.shutdown = nv_tco_shutdown,
.driver = {
.name = TCO_MODULE_NAME,
},
};
static int __init nv_tco_init_module(void)
{
int err;
pr_info("NV TCO WatchDog Timer Driver v%s\n", TCO_VERSION);
err = platform_driver_register(&nv_tco_driver);
if (err)
return err;
nv_tco_platform_device = platform_device_register_simple(
TCO_MODULE_NAME, -1, NULL, 0);
if (IS_ERR(nv_tco_platform_device)) {
err = PTR_ERR(nv_tco_platform_device);
goto unreg_platform_driver;
}
return 0;
unreg_platform_driver:
platform_driver_unregister(&nv_tco_driver);
return err;
}
static void __exit nv_tco_cleanup_module(void)
{
platform_device_unregister(nv_tco_platform_device);
platform_driver_unregister(&nv_tco_driver);
pr_info("NV TCO Watchdog Module Unloaded\n");
}
module_init(nv_tco_init_module);
module_exit(nv_tco_cleanup_module);
MODULE_AUTHOR("Mike Waychison");
MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
MODULE_LICENSE("GPL");
/* ==== end of linux-master: drivers/watchdog/nv_tco.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MixCom Watchdog: A Simple Hardware Watchdog Device
* Based on Softdog driver by Alan Cox and PC Watchdog driver by Ken Hollis
*
* Author: Gergely Madarasz <[email protected]>
*
* Copyright (c) 1999 ITConsult-Pro Co. <[email protected]>
*
* Version 0.1 (99/04/15):
* - first version
*
* Version 0.2 (99/06/16):
* - added kernel timer watchdog ping after close
* since the hardware does not support watchdog shutdown
*
* Version 0.3 (99/06/21):
* - added WDIOC_GETSTATUS and WDIOC_GETSUPPORT ioctl calls
*
* Version 0.3.1 (99/06/22):
* - allow module removal while internal timer is active,
* print warning about probable reset
*
* Version 0.4 (99/11/15):
* - support for one more type board
*
* Version 0.5 (2001/12/14) Matt Domsch <[email protected]>
* - added nowayout module option to override
* CONFIG_WATCHDOG_NOWAYOUT
*
* Version 0.6 (2002/04/12): Rob Radez <[email protected]>
* - make mixcomwd_opened unsigned,
* removed lock_kernel/unlock_kernel from mixcomwd_release,
* modified ioctl a bit to conform to API
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define VERSION "0.6"
#define WATCHDOG_NAME "mixcomwd"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/*
* We have two types of cards that can be probed:
* 1) The Mixcom cards: these cards can be found at addresses
* 0x180, 0x280, 0x380 with an additional offset of 0xc10.
* (Or 0xd90, 0xe90, 0xf90).
* 2) The FlashCOM cards: these cards can be set up at
* 0x300 -> 0x378, in 0x8 jumps with an offset of 0x04.
* (Or 0x304 -> 0x37c in 0x8 jumps).
* Each card has its own ID.
*/
#define MIXCOM_ID 0x11
#define FLASHCOM_ID 0x18
static struct {
int ioport;
int id;
} mixcomwd_io_info[] = {
/* The Mixcom cards */
{0x0d90, MIXCOM_ID},
{0x0e90, MIXCOM_ID},
{0x0f90, MIXCOM_ID},
/* The FlashCOM cards */
{0x0304, FLASHCOM_ID},
{0x030c, FLASHCOM_ID},
{0x0314, FLASHCOM_ID},
{0x031c, FLASHCOM_ID},
{0x0324, FLASHCOM_ID},
{0x032c, FLASHCOM_ID},
{0x0334, FLASHCOM_ID},
{0x033c, FLASHCOM_ID},
{0x0344, FLASHCOM_ID},
{0x034c, FLASHCOM_ID},
{0x0354, FLASHCOM_ID},
{0x035c, FLASHCOM_ID},
{0x0364, FLASHCOM_ID},
{0x036c, FLASHCOM_ID},
{0x0374, FLASHCOM_ID},
{0x037c, FLASHCOM_ID},
/* The end of the list */
{0x0000, 0},
};
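/*
 * Worked example (illustrative): the first Mixcom probe address above
 * is 0x180 + 0xc10 = 0xd90, and the first FlashCOM address is
 * 0x300 + 0x04 = 0x304, per the base/offset scheme described above.
 */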
static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
static int watchdog_port;
static int mixcomwd_timer_alive;
static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun);
static char expect_close;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static void mixcomwd_ping(void)
{
outb_p(55, watchdog_port);
}
static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
}
/*
* Allow only one person to hold it open
*/
static int mixcomwd_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &mixcomwd_opened))
return -EBUSY;
mixcomwd_ping();
if (nowayout)
/*
* fops_get() code via open() has already done
* a try_module_get() so it is safe to do the
* __module_get().
*/
__module_get(THIS_MODULE);
else {
if (mixcomwd_timer_alive) {
del_timer(&mixcomwd_timer);
mixcomwd_timer_alive = 0;
}
}
return stream_open(inode, file);
}
static int mixcomwd_release(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
if (mixcomwd_timer_alive) {
pr_err("release called while internal timer alive\n");
return -EBUSY;
}
mixcomwd_timer_alive = 1;
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
} else
pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
clear_bit(0, &mixcomwd_opened);
expect_close = 0;
return 0;
}
static ssize_t mixcomwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
mixcomwd_ping();
}
return len;
}
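/*
 * Illustrative userspace sketch (not part of this driver): a minimal
 * client of the ping / magic-close protocol implemented above. The
 * device path and the 2-second ping period are assumptions made for
 * the example, not requirements of the driver.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int i, fd = open("/dev/watchdog", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		for (i = 0; i < 10; i++) {
 *			write(fd, "\0", 1);	// any write pings the card
 *			sleep(2);
 *		}
 *		write(fd, "V", 1);		// arm expect_close
 *		return close(fd);		// release starts the soft timer
 *	}
 */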
static long mixcomwd_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int status;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "MixCOM watchdog",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
status = mixcomwd_opened;
if (!nowayout)
status |= mixcomwd_timer_alive;
return put_user(status, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
mixcomwd_ping();
break;
default:
return -ENOTTY;
}
return 0;
}
static const struct file_operations mixcomwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mixcomwd_open,
.release = mixcomwd_release,
};
static struct miscdevice mixcomwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mixcomwd_fops,
};
static int __init checkcard(int port, int card_id)
{
int id;
if (!request_region(port, 1, "MixCOM watchdog"))
return 0;
id = inb_p(port);
if (card_id == MIXCOM_ID)
id &= 0x3f;
if (id != card_id) {
release_region(port, 1);
return 0;
}
return 1;
}
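/*
 * Illustrative note: for Mixcom cards only the low six bits of the ID
 * byte are significant, hence the 0x3f mask above before comparing
 * against MIXCOM_ID (0x11); FlashCOM IDs are compared unmasked.
 */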
static int __init mixcomwd_init(void)
{
int i, ret, found = 0;
for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
if (checkcard(mixcomwd_io_info[i].ioport,
mixcomwd_io_info[i].id)) {
found = 1;
watchdog_port = mixcomwd_io_info[i].ioport;
}
}
if (!found) {
pr_err("No card detected, or port not available\n");
return -ENODEV;
}
ret = misc_register(&mixcomwd_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto error_misc_register_watchdog;
}
pr_info("MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
VERSION, watchdog_port);
return 0;
error_misc_register_watchdog:
release_region(watchdog_port, 1);
watchdog_port = 0x0000;
return ret;
}
static void __exit mixcomwd_exit(void)
{
if (!nowayout) {
if (mixcomwd_timer_alive) {
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer_sync(&mixcomwd_timer);
mixcomwd_timer_alive = 0;
}
}
misc_deregister(&mixcomwd_miscdev);
release_region(watchdog_port, 1);
}
module_init(mixcomwd_init);
module_exit(mixcomwd_exit);
MODULE_AUTHOR("Gergely Madarasz <[email protected]>");
MODULE_DESCRIPTION("MixCom Watchdog driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/mixcomwd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* W83877F Computer Watchdog Timer driver
*
* Based on acquirewdt.c by Alan Cox,
* and sbc60xxwdt.c by Jakob Oestergaard <[email protected]>
*
* The authors do NOT admit liability nor provide warranty for
* any of this software. This material is provided "AS-IS" in
* the hope that it may be useful for others.
*
* (c) Copyright 2001 Scott Jennings <[email protected]>
*
* 4/19 - 2001 [Initial revision]
* 9/27 - 2001 Added spinlocking
* 4/12 - 2002 [[email protected]] Eliminate extra comments
* Eliminate fop_read
* Eliminate extra spin_unlock
* Added KERN_* tags to printks
* add CONFIG_WATCHDOG_NOWAYOUT support
* fix possible wdt_is_open race
* changed watchdog_info to correctly reflect what
* the driver offers
* added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS,
* WDIOC_SETTIMEOUT,
* WDIOC_GETTIMEOUT, and WDIOC_SETOPTIONS ioctls
* 09/8 - 2003 [[email protected]] cleanup of trailing spaces
* added extra printk's for startup problems
* use module_param
* made timeout (the emulated heartbeat) a
* module_param
* made the keepalive ping an internal subroutine
*
* This WDT driver is different from most other Linux WDT
* drivers in that the driver will ping the watchdog by itself,
* because this particular WDT has a very short timeout (1.6
* seconds) and it would be insane to count on any userspace
* daemon always getting scheduled within that time frame.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#define OUR_NAME "w83877f_wdt"
#define ENABLE_W83877F_PORT 0x3F0
#define ENABLE_W83877F 0x87
#define DISABLE_W83877F 0xAA
#define WDT_PING 0x443
#define WDT_REGISTER 0x14
#define WDT_ENABLE 0x9C
#define WDT_DISABLE 0x8C
/*
* The W83877F seems to be fixed at 1.6s timeout (at least on the
* EMACS PC-104 board I'm using). If we reset the watchdog every
* ~250ms we should be safe. */
#define WDT_INTERVAL (HZ/4+1)
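/*
 * Worked example (illustrative, assuming HZ = 250): WDT_INTERVAL
 * evaluates to 250/4 + 1 = 63 jiffies, i.e. a hardware ping roughly
 * every 252 ms -- comfortably inside the ~1.6 s hardware window.
 */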
/*
* We must not require too good response from the userspace daemon.
* Here we require the userspace daemon to send us a heartbeat
* char to /dev/watchdog every 30 seconds.
*/
#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
/* in seconds, will be multiplied by HZ to get seconds to wait for a ping */
static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. (1<=timeout<=3600, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
static DEFINE_SPINLOCK(wdt_spinlock);
/*
* Whack the dog
*/
static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a userspace heartbeat before the next_heartbeat
* deadline, we agree to ping the WDT
*/
if (time_before(jiffies, next_heartbeat)) {
/* Ping the WDT */
spin_lock(&wdt_spinlock);
/* Ping the WDT by reading from WDT_PING */
inb_p(WDT_PING);
/* Re-set the timer interval */
mod_timer(&timer, jiffies + WDT_INTERVAL);
spin_unlock(&wdt_spinlock);
} else
pr_warn("Heartbeat lost! Will not ping the watchdog\n");
}
/*
* Utility routines
*/
static void wdt_change(int writeval)
{
unsigned long flags;
spin_lock_irqsave(&wdt_spinlock, flags);
/* buy some time */
inb_p(WDT_PING);
/* make W83877F available */
outb_p(ENABLE_W83877F, ENABLE_W83877F_PORT);
outb_p(ENABLE_W83877F, ENABLE_W83877F_PORT);
/* enable watchdog */
outb_p(WDT_REGISTER, ENABLE_W83877F_PORT);
outb_p(writeval, ENABLE_W83877F_PORT+1);
/* lock the W8387FF away */
outb_p(DISABLE_W83877F, ENABLE_W83877F_PORT);
spin_unlock_irqrestore(&wdt_spinlock, flags);
}
static void wdt_startup(void)
{
next_heartbeat = jiffies + (timeout * HZ);
/* Start the timer */
mod_timer(&timer, jiffies + WDT_INTERVAL);
wdt_change(WDT_ENABLE);
pr_info("Watchdog timer is now enabled\n");
}
static void wdt_turnoff(void)
{
/* Stop the timer */
del_timer_sync(&timer);
wdt_change(WDT_DISABLE);
pr_info("Watchdog timer is now disabled...\n");
}
static void wdt_keepalive(void)
{
/* user land ping */
next_heartbeat = jiffies + (timeout * HZ);
}
/*
* /dev/watchdog handling
*/
static ssize_t fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
size_t ofs;
/* note: just in case someone wrote the magic
character five months ago... */
wdt_expect_close = 0;
/* scan to see whether or not we got the
magic character */
for (ofs = 0; ofs != count; ofs++) {
char c;
if (get_user(c, buf + ofs))
return -EFAULT;
if (c == 'V')
wdt_expect_close = 42;
}
}
/* someone wrote to us, we should restart timer */
wdt_keepalive();
}
return count;
}
static int fop_open(struct inode *inode, struct file *file)
{
/* Just in case we're already talking to someone... */
if (test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
/* Good, fire up the show */
wdt_startup();
return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
{
if (wdt_expect_close == 42)
wdt_turnoff();
else {
del_timer(&timer);
pr_crit("device file closed unexpectedly. Will not stop the WDT!\n");
}
clear_bit(0, &wdt_is_open);
wdt_expect_close = 0;
return 0;
}
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
| WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "W83877F",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int new_options, retval = -EINVAL;
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
wdt_turnoff();
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
wdt_startup();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_timeout;
if (get_user(new_timeout, p))
return -EFAULT;
/* arbitrary upper limit */
if (new_timeout < 1 || new_timeout > 3600)
return -EINVAL;
timeout = new_timeout;
wdt_keepalive();
}
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_turnoff();
return NOTIFY_DONE;
}
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static void __exit w83877f_wdt_unload(void)
{
wdt_turnoff();
/* Deregister */
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(WDT_PING, 1);
release_region(ENABLE_W83877F_PORT, 2);
}
static int __init w83877f_wdt_init(void)
{
int rc = -EBUSY;
if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */
timeout = WATCHDOG_TIMEOUT;
pr_info("timeout value must be 1 <= x <= 3600, using %d\n",
timeout);
}
if (!request_region(ENABLE_W83877F_PORT, 2, "W83877F WDT")) {
pr_err("I/O address 0x%04x already in use\n",
ENABLE_W83877F_PORT);
rc = -EIO;
goto err_out;
}
if (!request_region(WDT_PING, 1, "W83877F WDT")) {
pr_err("I/O address 0x%04x already in use\n", WDT_PING);
rc = -EIO;
goto err_out_region1;
}
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
pr_err("cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region2;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot;
}
pr_info("WDT driver for W83877F initialised. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
return 0;
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out_region2:
release_region(WDT_PING, 1);
err_out_region1:
release_region(ENABLE_W83877F_PORT, 2);
err_out:
return rc;
}
module_init(w83877f_wdt_init);
module_exit(w83877f_wdt_unload);
MODULE_AUTHOR("Scott and Bill Jennings");
MODULE_DESCRIPTION("Driver for watchdog timer in w83877f chip");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/w83877f_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* W83977F Watchdog Timer Driver for Winbond W83977F I/O Chip
*
* (c) Copyright 2005 Jose Goncalves <[email protected]>
*
* Based on w83877f_wdt.c by Scott Jennings,
* and wdt977.c by Woody Suwalski
*
* -----------------------
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#define WATCHDOG_VERSION "1.00"
#define WATCHDOG_NAME "W83977F WDT"
#define IO_INDEX_PORT 0x3F0
#define IO_DATA_PORT (IO_INDEX_PORT+1)
#define UNLOCK_DATA 0x87
#define LOCK_DATA 0xAA
#define DEVICE_REGISTER 0x07
#define DEFAULT_TIMEOUT 45 /* default timeout in seconds */
static int timeout = DEFAULT_TIMEOUT;
static int timeoutW; /* timeout in watchdog counter units */
static unsigned long timer_alive;
static int testmode;
static char expect_close;
static DEFINE_SPINLOCK(spinlock);
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds (15..7635), default="
__MODULE_STRING(DEFAULT_TIMEOUT) ")");
module_param(testmode, int, 0);
MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Start the watchdog
*/
static int wdt_start(void)
{
unsigned long flags;
spin_lock_irqsave(&spinlock, flags);
/* Unlock the SuperIO chip */
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/*
* Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
* F2 has the timeout in watchdog counter units.
* F3 is set to enable watchdog LED blink at timeout.
* F4 is used to just clear the TIMEOUT'ed state (bit 0).
*/
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
outb_p(0xF2, IO_INDEX_PORT);
outb_p(timeoutW, IO_DATA_PORT);
outb_p(0xF3, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
outb_p(0xF4, IO_INDEX_PORT);
outb_p(0x00, IO_DATA_PORT);
/* Set device Aux2 active */
outb_p(0x30, IO_INDEX_PORT);
outb_p(0x01, IO_DATA_PORT);
/*
* Select device Aux1 (dev=7) to set GP16 as the watchdog output
* (in reg E6) and GP13 as the watchdog LED output (in reg E3).
* Map GP16 at pin 119.
* In test mode watch the bit 0 on F4 to indicate "triggered" or
* check watchdog LED on SBC.
*/
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x07, IO_DATA_PORT);
if (!testmode) {
unsigned pin_map;
outb_p(0xE6, IO_INDEX_PORT);
outb_p(0x0A, IO_DATA_PORT);
outb_p(0x2C, IO_INDEX_PORT);
pin_map = inb_p(IO_DATA_PORT);
pin_map |= 0x10;
pin_map &= ~(0x20);
outb_p(0x2C, IO_INDEX_PORT);
outb_p(pin_map, IO_DATA_PORT);
}
outb_p(0xE3, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
/* Set device Aux1 active */
outb_p(0x30, IO_INDEX_PORT);
outb_p(0x01, IO_DATA_PORT);
/* Lock the SuperIO chip */
outb_p(LOCK_DATA, IO_INDEX_PORT);
spin_unlock_irqrestore(&spinlock, flags);
pr_info("activated\n");
return 0;
}
/*
* Stop the watchdog
*/
static int wdt_stop(void)
{
unsigned long flags;
spin_lock_irqsave(&spinlock, flags);
/* Unlock the SuperIO chip */
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/*
* Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
* F2 is reset to its default value (watchdog timer disabled).
* F3 is reset to its default state.
* F4 clears the TIMEOUT'ed state (bit 0) - back to default.
*/
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
outb_p(0xF2, IO_INDEX_PORT);
outb_p(0xFF, IO_DATA_PORT);
outb_p(0xF3, IO_INDEX_PORT);
outb_p(0x00, IO_DATA_PORT);
outb_p(0xF4, IO_INDEX_PORT);
outb_p(0x00, IO_DATA_PORT);
outb_p(0xF2, IO_INDEX_PORT);
outb_p(0x00, IO_DATA_PORT);
/*
* Select device Aux1 (dev=7) to set GP16 (in reg E6) and
* Gp13 (in reg E3) as inputs.
*/
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x07, IO_DATA_PORT);
if (!testmode) {
outb_p(0xE6, IO_INDEX_PORT);
outb_p(0x01, IO_DATA_PORT);
}
outb_p(0xE3, IO_INDEX_PORT);
outb_p(0x01, IO_DATA_PORT);
/* Lock the SuperIO chip */
outb_p(LOCK_DATA, IO_INDEX_PORT);
spin_unlock_irqrestore(&spinlock, flags);
pr_info("shutdown\n");
return 0;
}
/*
* Send a keepalive ping to the watchdog
* This is done by simply re-writing the timeout to reg. 0xF2
*/
static int wdt_keepalive(void)
{
unsigned long flags;
spin_lock_irqsave(&spinlock, flags);
/* Unlock the SuperIO chip */
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* Select device Aux2 (device=8) to kick watchdog reg F2 */
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
outb_p(0xF2, IO_INDEX_PORT);
outb_p(timeoutW, IO_DATA_PORT);
/* Lock the SuperIO chip */
outb_p(LOCK_DATA, IO_INDEX_PORT);
spin_unlock_irqrestore(&spinlock, flags);
return 0;
}
/*
* Set the watchdog timeout value
*/
static int wdt_set_timeout(int t)
{
unsigned int tmrval;
/*
* Convert seconds to watchdog counter time units, rounding up.
* On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup
* value. This information is supplied in the PCM-5335 manual and was
* verified on a real board. This is a bit strange because the W83977F
* datasheet says the counter unit is in minutes!
*/
if (t < 15)
return -EINVAL;
tmrval = ((t + 15) + 29) / 30;
if (tmrval > 255)
return -EINVAL;
/*
* timeout is the timeout in seconds,
* timeoutW is the timeout in watchdog counter units.
*/
timeoutW = tmrval;
timeout = (timeoutW * 30) - 15;
return 0;
}
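/*
 * Worked example (illustrative): t = 45 gives
 * tmrval = ((45 + 15) + 29) / 30 = 2, so timeout reads back as
 * 2 * 30 - 15 = 45 s, while t = 46 rounds up to tmrval = 3 (75 s).
 */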
/*
* Get the watchdog status
*/
static int wdt_get_status(int *status)
{
int new_status;
unsigned long flags;
spin_lock_irqsave(&spinlock, flags);
/* Unlock the SuperIO chip */
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* Select device Aux2 (device=8) to read watchdog reg F4 */
outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
outb_p(0x08, IO_DATA_PORT);
outb_p(0xF4, IO_INDEX_PORT);
new_status = inb_p(IO_DATA_PORT);
/* Lock the SuperIO chip */
outb_p(LOCK_DATA, IO_INDEX_PORT);
spin_unlock_irqrestore(&spinlock, flags);
*status = 0;
if (new_status & 1)
*status |= WDIOF_CARDRESET;
return 0;
}
/*
* /dev/watchdog handling
*/
static int wdt_open(struct inode *inode, struct file *file)
{
/* If the watchdog is alive we don't need to start it again */
if (test_and_set_bit(0, &timer_alive))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
wdt_start();
return stream_open(inode, file);
}
static int wdt_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
* Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
wdt_stop();
clear_bit(0, &timer_alive);
} else {
wdt_keepalive();
pr_crit("unexpected close, not stopping watchdog!\n");
}
expect_close = 0;
return 0;
}
/*
* wdt_write:
* @file: file handle to the watchdog
* @buf: buffer to write (unused, as data does not matter here)
* @count: count of bytes
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
* write of data will do, as we don't define content meaning.
*/
static ssize_t wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
size_t ofs;
/* note: just in case someone wrote the
magic character long ago */
expect_close = 0;
/* scan to see whether or not we got the
magic character */
for (ofs = 0; ofs != count; ofs++) {
char c;
if (get_user(c, buf + ofs))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
/* someone wrote to us, we should restart timer */
wdt_keepalive();
}
return count;
}
/*
* wdt_ioctl:
* @inode: inode of the device
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
*
* The watchdog API defines a common set of functions for all watchdogs
* according to their available features.
*/
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.firmware_version = 1,
.identity = WATCHDOG_NAME,
};
static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int status;
int new_options, retval = -EINVAL;
int new_timeout;
union {
struct watchdog_info __user *ident;
int __user *i;
} uarg;
uarg.i = (int __user *)arg;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(uarg.ident, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
wdt_get_status(&status);
return put_user(status, uarg.i);
case WDIOC_GETBOOTSTATUS:
return put_user(0, uarg.i);
case WDIOC_SETOPTIONS:
if (get_user(new_options, uarg.i))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
wdt_stop();
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
wdt_start();
retval = 0;
}
return retval;
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_timeout, uarg.i))
return -EFAULT;
if (wdt_set_timeout(new_timeout))
return -EINVAL;
wdt_keepalive();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
default:
return -ENOTTY;
}
}
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_stop();
return NOTIFY_DONE;
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = wdt_open,
.release = wdt_release,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static int __init w83977f_wdt_init(void)
{
int rc;
pr_info("driver v%s\n", WATCHDOG_VERSION);
/*
* Check that the timeout value is within its range;
* if not, reset to the default
*/
if (wdt_set_timeout(timeout)) {
wdt_set_timeout(DEFAULT_TIMEOUT);
pr_info("timeout value must be 15 <= timeout <= 7635, using %d\n",
DEFAULT_TIMEOUT);
}
if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) {
pr_err("I/O address 0x%04x already in use\n", IO_INDEX_PORT);
rc = -EIO;
goto err_out;
}
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
pr_err("cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot;
}
pr_info("initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
timeout, nowayout, testmode);
return 0;
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
release_region(IO_INDEX_PORT, 2);
err_out:
return rc;
}
static void __exit w83977f_wdt_exit(void)
{
wdt_stop();
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(IO_INDEX_PORT, 2);
}
module_init(w83977f_wdt_init);
module_exit(w83977f_wdt_exit);
MODULE_AUTHOR("Jose Goncalves <[email protected]>");
MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/w83977f_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for Watchdogs
*
* Copyright (c) Siemens AG, 2020-2021
*
* Authors:
* Gerd Haeussler <[email protected]>
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/util_macros.h>
#include <linux/watchdog.h>
#define WD_ENABLE_IOADR 0x62
#define WD_TRIGGER_IOADR 0x66
#define GPIO_COMMUNITY0_PORT_ID 0xaf
#define PAD_CFG_DW0_GPP_A_23 0x4b8
#define SAFE_EN_N_427E 0x01
#define SAFE_EN_N_227E 0x04
#define WD_ENABLED 0x01
#define WD_TRIGGERED 0x80
#define WD_MACROMODE 0x02
#define TIMEOUT_MIN 2
#define TIMEOUT_DEF 64
#define TIMEOUT_MAX 64
#define GP_STATUS_REG_227E 0x404D /* IO PORT for SAFE_EN_N on 227E */
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0000);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static struct resource gp_status_reg_227e_res =
DEFINE_RES_IO_NAMED(GP_STATUS_REG_227E, SZ_1, KBUILD_MODNAME);
static struct resource io_resource_enable =
DEFINE_RES_IO_NAMED(WD_ENABLE_IOADR, SZ_1,
KBUILD_MODNAME " WD_ENABLE_IOADR");
static struct resource io_resource_trigger =
DEFINE_RES_IO_NAMED(WD_TRIGGER_IOADR, SZ_1,
KBUILD_MODNAME " WD_TRIGGER_IOADR");
/* the actual start will be discovered with p2sb, 0 is a placeholder */
static struct resource mem_resource =
DEFINE_RES_MEM_NAMED(0, 0, "WD_RESET_BASE_ADR");
static u32 wd_timeout_table[] = { 2, 4, 6, 8, 16, 32, 48, 64 };
static void __iomem *wd_reset_base_addr;
static int wd_start(struct watchdog_device *wdd)
{
outb(inb(WD_ENABLE_IOADR) | WD_ENABLED, WD_ENABLE_IOADR);
return 0;
}
static int wd_stop(struct watchdog_device *wdd)
{
outb(inb(WD_ENABLE_IOADR) & ~WD_ENABLED, WD_ENABLE_IOADR);
return 0;
}
static int wd_ping(struct watchdog_device *wdd)
{
inb(WD_TRIGGER_IOADR);
return 0;
}
static int wd_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
int timeout_idx = find_closest(t, wd_timeout_table,
ARRAY_SIZE(wd_timeout_table));
outb((inb(WD_ENABLE_IOADR) & 0xc7) | timeout_idx << 3, WD_ENABLE_IOADR);
wdd->timeout = wd_timeout_table[timeout_idx];
return 0;
}
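/*
 * Worked example (illustrative): a request of t = 10 selects table
 * entry 8 (index 3, the closest value), so bits 5:3 of the enable
 * register become 3 and wdd->timeout reads back as 8 seconds.
 */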
static const struct watchdog_info wdt_ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT,
.identity = KBUILD_MODNAME,
};
static const struct watchdog_ops wdt_ops = {
.owner = THIS_MODULE,
.start = wd_start,
.stop = wd_stop,
.ping = wd_ping,
.set_timeout = wd_set_timeout,
};
static void wd_secondary_enable(u32 wdtmode)
{
u16 resetbit;
/* set safe_en_n so we are not just WDIOF_ALARMONLY */
if (wdtmode == SIMATIC_IPC_DEVICE_227E) {
/* enable SAFE_EN_N on GP_STATUS_REG_227E */
resetbit = inb(GP_STATUS_REG_227E);
outb(resetbit & ~SAFE_EN_N_227E, GP_STATUS_REG_227E);
} else {
/* enable SAFE_EN_N on PCH D1600 */
resetbit = ioread16(wd_reset_base_addr);
iowrite16(resetbit & ~SAFE_EN_N_427E, wd_reset_base_addr);
}
}
static int wd_setup(u32 wdtmode)
{
unsigned int bootstatus = 0;
int timeout_idx;
timeout_idx = find_closest(TIMEOUT_DEF, wd_timeout_table,
ARRAY_SIZE(wd_timeout_table));
if (inb(WD_ENABLE_IOADR) & WD_TRIGGERED)
bootstatus |= WDIOF_CARDRESET;
/* reset alarm bit, set macro mode, and set timeout */
outb(WD_TRIGGERED | WD_MACROMODE | timeout_idx << 3, WD_ENABLE_IOADR);
wd_secondary_enable(wdtmode);
return bootstatus;
}
static struct watchdog_device wdd_data = {
.info = &wdt_ident,
.ops = &wdt_ops,
.min_timeout = TIMEOUT_MIN,
.max_timeout = TIMEOUT_MAX
};
static int simatic_ipc_wdt_probe(struct platform_device *pdev)
{
struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227E:
res = &gp_status_reg_227e_res;
if (!request_muxed_region(res->start, resource_size(res), res->name)) {
dev_err(dev,
"Unable to register IO resource at %pR\n",
&gp_status_reg_227e_res);
return -EBUSY;
}
fallthrough;
case SIMATIC_IPC_DEVICE_427E:
wdd_data.parent = dev;
break;
default:
return -EINVAL;
}
if (!devm_request_region(dev, io_resource_enable.start,
resource_size(&io_resource_enable),
io_resource_enable.name)) {
dev_err(dev,
"Unable to register IO resource at %#x\n",
WD_ENABLE_IOADR);
return -EBUSY;
}
if (!devm_request_region(dev, io_resource_trigger.start,
resource_size(&io_resource_trigger),
io_resource_trigger.name)) {
dev_err(dev,
"Unable to register IO resource at %#x\n",
WD_TRIGGER_IOADR);
return -EBUSY;
}
if (plat->devmode == SIMATIC_IPC_DEVICE_427E) {
res = &mem_resource;
ret = p2sb_bar(NULL, 0, res);
if (ret)
return ret;
/* do the final address calculation */
res->start = res->start + (GPIO_COMMUNITY0_PORT_ID << 16) +
PAD_CFG_DW0_GPP_A_23;
res->end = res->start + SZ_4 - 1;
wd_reset_base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(wd_reset_base_addr))
return PTR_ERR(wd_reset_base_addr);
}
wdd_data.bootstatus = wd_setup(plat->devmode);
if (wdd_data.bootstatus)
dev_warn(dev, "last reboot caused by watchdog reset\n");
if (plat->devmode == SIMATIC_IPC_DEVICE_227E)
release_region(gp_status_reg_227e_res.start,
resource_size(&gp_status_reg_227e_res));
watchdog_set_nowayout(&wdd_data, nowayout);
watchdog_stop_on_reboot(&wdd_data);
return devm_watchdog_register_device(dev, &wdd_data);
}
static struct platform_driver simatic_ipc_wdt_driver = {
.probe = simatic_ipc_wdt_probe,
.driver = {
.name = KBUILD_MODNAME,
},
};
module_platform_driver(simatic_ipc_wdt_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Gerd Haeussler <[email protected]>");
| linux-master | drivers/watchdog/simatic-ipc-wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/* cpwd.c - driver implementation for hardware watchdog
* timers found on Sun Microsystems CP1400 and CP1500 boards.
*
* This device supports both the generic Linux watchdog
* interface and Solaris-compatible ioctls as best it is
* able.
*
* NOTE: CP1400 systems appear to have a defective intr_mask
* register on the PLD, preventing the disabling of
* timer interrupts. We use a timer to periodically
* reset 'stopped' watchdogs on affected platforms.
*
* Copyright (c) 2000 Eric Brower ([email protected])
* Copyright (C) 2008 David S. Miller <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/watchdog.h>
#define DRIVER_NAME "cpwd"
#define WD_OBPNAME "watchdog"
#define WD_BADMODEL "SUNW,501-5336"
#define WD_BTIMEOUT (jiffies + (HZ * 1000))
#define WD_BLIMIT 0xFFFF
#define WD0_MINOR 212
#define WD1_MINOR 213
#define WD2_MINOR 214
/* Internal driver definitions. */
#define WD0_ID 0
#define WD1_ID 1
#define WD2_ID 2
#define WD_NUMDEVS 3
#define WD_INTR_OFF 0
#define WD_INTR_ON 1
#define WD_STAT_INIT 0x01 /* Watchdog timer is initialized */
#define WD_STAT_BSTOP 0x02 /* Watchdog timer is brokenstopped */
#define WD_STAT_SVCD 0x04 /* Watchdog interrupt occurred */
/* Register value definitions
*/
#define WD0_INTR_MASK 0x01 /* Watchdog device interrupt masks */
#define WD1_INTR_MASK 0x02
#define WD2_INTR_MASK 0x04
#define WD_S_RUNNING 0x01 /* Watchdog device status running */
#define WD_S_EXPIRED 0x02 /* Watchdog device status expired */
struct cpwd {
void __iomem *regs;
spinlock_t lock;
unsigned int irq;
unsigned long timeout;
bool enabled;
bool reboot;
bool broken;
bool initialized;
struct {
struct miscdevice misc;
void __iomem *regs;
u8 intr_mask;
u8 runstatus;
u16 timeout;
} devs[WD_NUMDEVS];
};
static DEFINE_MUTEX(cpwd_mutex);
static struct cpwd *cpwd_device;
/* Sun uses Altera PLD EPF8820ATC144-4
* providing three hardware watchdogs:
*
* 1) RIC - sends an interrupt when triggered
* 2) XIR - asserts XIR_B_RESET when triggered, resets CPU
* 3) POR - asserts POR_B_RESET when triggered, resets CPU, backplane, board
*
*** Timer register block definition (struct wd_timer_regblk)
*
* dcntr and limit registers (halfword access):
* -------------------
* | 15 | ...| 1 | 0 |
* -------------------
* |- counter val -|
* -------------------
* dcntr - Current 16-bit downcounter value.
* When downcounter reaches '0' watchdog expires.
* Reading this register resets downcounter with
* 'limit' value.
* limit - 16-bit countdown value in 1/10th second increments.
* Writing this register begins countdown with input value.
* Reading from this register does not affect counter.
* NOTES: After watchdog reset, dcntr and limit contain '1'
*
* status register (byte access):
* ---------------------------
* | 7 | ... | 2 | 1 | 0 |
* --------------+------------
* |- UNUSED -| EXP | RUN |
* ---------------------------
* status- Bit 0 - Watchdog is running
* Bit 1 - Watchdog has expired
*
*** PLD register block definition (struct wd_pld_regblk)
*
* intr_mask register (byte access):
* ---------------------------------
* | 7 | ... | 3 | 2 | 1 | 0 |
* +-------------+------------------
* |- UNUSED -| WD3 | WD2 | WD1 |
* ---------------------------------
* WD3 - 1 == Interrupt disabled for watchdog 3
* WD2 - 1 == Interrupt disabled for watchdog 2
* WD1 - 1 == Interrupt disabled for watchdog 1
*
* pld_status register (byte access):
* UNKNOWN, MAGICAL MYSTERY REGISTER
*
*/
#define WD_TIMER_REGSZ 16
#define WD0_OFF 0
#define WD1_OFF (WD_TIMER_REGSZ * 1)
#define WD2_OFF (WD_TIMER_REGSZ * 2)
#define PLD_OFF (WD_TIMER_REGSZ * 3)
#define WD_DCNTR 0x00
#define WD_LIMIT 0x04
#define WD_STATUS 0x08
#define PLD_IMASK (PLD_OFF + 0x00)
#define PLD_STATUS (PLD_OFF + 0x04)
static struct timer_list cpwd_timer;
static int wd0_timeout;
static int wd1_timeout;
static int wd2_timeout;
module_param(wd0_timeout, int, 0);
MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs");
module_param(wd1_timeout, int, 0);
MODULE_PARM_DESC(wd1_timeout, "Default watchdog1 timeout in 1/10secs");
module_param(wd2_timeout, int, 0);
MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs");
MODULE_AUTHOR("Eric Brower <[email protected]>");
MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500");
MODULE_LICENSE("GPL");
static void cpwd_writew(u16 val, void __iomem *addr)
{
writew(cpu_to_le16(val), addr);
}
static u16 cpwd_readw(void __iomem *addr)
{
u16 val = readw(addr);
return le16_to_cpu(val);
}
static void cpwd_writeb(u8 val, void __iomem *addr)
{
writeb(val, addr);
}
static u8 cpwd_readb(void __iomem *addr)
{
return readb(addr);
}
/* Enable or disable watchdog interrupts
* Because of the CP1400 defect this should only be
* called during initialization or by wd_[start|stop]timer()
*
* index - sub-device index, or -1 for 'all'
* enable - non-zero to enable interrupts, zero to disable
*/
static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
{
unsigned char curregs = cpwd_readb(p->regs + PLD_IMASK);
unsigned char setregs =
(index == -1) ?
(WD0_INTR_MASK | WD1_INTR_MASK | WD2_INTR_MASK) :
(p->devs[index].intr_mask);
if (enable == WD_INTR_ON)
curregs &= ~setregs;
else
curregs |= setregs;
cpwd_writeb(curregs, p->regs + PLD_IMASK);
}
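/*
 * Worked example (illustrative, from the masks above): enabling
 * watchdog 0 clears bit 0 of PLD_IMASK (a set bit means the interrupt
 * is masked), while index == -1 sets or clears all three mask bits in
 * a single register write.
 */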
/* Restarts timer with maximum limit value and
* does not unset 'brokenstop' value.
*/
static void cpwd_resetbrokentimer(struct cpwd *p, int index)
{
cpwd_toggleintr(p, index, WD_INTR_ON);
cpwd_writew(WD_BLIMIT, p->devs[index].regs + WD_LIMIT);
}
/* Timer method called to reset stopped watchdogs--
* because of the PLD bug on CP1400, we cannot mask
* interrupts within the PLD so we must continually
* reset the timers ad infinitum.
*/
static void cpwd_brokentimer(struct timer_list *unused)
{
struct cpwd *p = cpwd_device;
int id, tripped = 0;
/* kill a running timer instance, in case we
* were called directly instead of by kernel timer
*/
if (timer_pending(&cpwd_timer))
del_timer(&cpwd_timer);
for (id = 0; id < WD_NUMDEVS; id++) {
if (p->devs[id].runstatus & WD_STAT_BSTOP) {
++tripped;
cpwd_resetbrokentimer(p, id);
}
}
if (tripped) {
/* there is at least one timer brokenstopped-- reschedule */
cpwd_timer.expires = WD_BTIMEOUT;
add_timer(&cpwd_timer);
}
}
/* Reset countdown timer with 'limit' value and continue countdown.
* This will not start a stopped timer.
*/
static void cpwd_pingtimer(struct cpwd *p, int index)
{
if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING)
cpwd_readw(p->devs[index].regs + WD_DCNTR);
}
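/*
 * Illustrative note: per the register description above, the read of
 * WD_DCNTR is the ping itself -- reading the downcounter reloads it
 * with the 'limit' value, so no write is needed here.
 */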
/* Stop a running watchdog timer-- the timer actually keeps
* running, but the interrupt is masked so that no action is
* taken upon expiration.
*/
static void cpwd_stoptimer(struct cpwd *p, int index)
{
if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) {
cpwd_toggleintr(p, index, WD_INTR_OFF);
if (p->broken) {
p->devs[index].runstatus |= WD_STAT_BSTOP;
cpwd_brokentimer(NULL);
}
}
}
/* Start a watchdog timer with the specified limit value
* If the watchdog is running, it will be restarted with
* the provided limit value.
*
* This function will enable interrupts on the specified
* watchdog.
*/
static void cpwd_starttimer(struct cpwd *p, int index)
{
if (p->broken)
p->devs[index].runstatus &= ~WD_STAT_BSTOP;
p->devs[index].runstatus &= ~WD_STAT_SVCD;
cpwd_writew(p->devs[index].timeout, p->devs[index].regs + WD_LIMIT);
cpwd_toggleintr(p, index, WD_INTR_ON);
}
static int cpwd_getstatus(struct cpwd *p, int index)
{
unsigned char stat = cpwd_readb(p->devs[index].regs + WD_STATUS);
unsigned char intr = cpwd_readb(p->devs[index].regs + PLD_IMASK);
unsigned char ret = WD_STOPPED;
/* determine STOPPED */
if (!stat)
return ret;
/* determine EXPIRED vs FREERUN vs RUNNING */
else if (WD_S_EXPIRED & stat) {
ret = WD_EXPIRED;
} else if (WD_S_RUNNING & stat) {
if (intr & p->devs[index].intr_mask) {
ret = WD_FREERUN;
} else {
/* Fudge WD_EXPIRED status for defective CP1400--
* IF timer is running
* AND brokenstop is set
* AND an interrupt has been serviced
* we are WD_EXPIRED.
*
* IF timer is running
* AND brokenstop is set
* AND no interrupt has been serviced
* we are WD_FREERUN.
*/
if (p->broken &&
(p->devs[index].runstatus & WD_STAT_BSTOP)) {
if (p->devs[index].runstatus & WD_STAT_SVCD) {
ret = WD_EXPIRED;
} else {
/* we could as well pretend
* we are expired */
ret = WD_FREERUN;
}
} else {
ret = WD_RUNNING;
}
}
}
/* determine SERVICED */
if (p->devs[index].runstatus & WD_STAT_SVCD)
ret |= WD_SERVICED;
return ret;
}
static irqreturn_t cpwd_interrupt(int irq, void *dev_id)
{
struct cpwd *p = dev_id;
/* Only WD0 will interrupt-- others are NMI and we won't
* see them here....
*/
spin_lock_irq(&p->lock);
cpwd_stoptimer(p, WD0_ID);
p->devs[WD0_ID].runstatus |= WD_STAT_SVCD;
spin_unlock_irq(&p->lock);
return IRQ_HANDLED;
}
static int cpwd_open(struct inode *inode, struct file *f)
{
struct cpwd *p = cpwd_device;
mutex_lock(&cpwd_mutex);
switch (iminor(inode)) {
case WD0_MINOR:
case WD1_MINOR:
case WD2_MINOR:
break;
default:
mutex_unlock(&cpwd_mutex);
return -ENODEV;
}
/* Register IRQ on first open of device */
if (!p->initialized) {
if (request_irq(p->irq, &cpwd_interrupt,
IRQF_SHARED, DRIVER_NAME, p)) {
pr_err("Cannot register IRQ %d\n", p->irq);
mutex_unlock(&cpwd_mutex);
return -EBUSY;
}
p->initialized = true;
}
mutex_unlock(&cpwd_mutex);
return stream_open(inode, f);
}
static int cpwd_release(struct inode *inode, struct file *file)
{
return 0;
}
static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
static const struct watchdog_info info = {
.options = WDIOF_SETTIMEOUT,
.firmware_version = 1,
.identity = DRIVER_NAME,
};
void __user *argp = (void __user *)arg;
struct inode *inode = file_inode(file);
int index = iminor(inode) - WD0_MINOR;
struct cpwd *p = cpwd_device;
int setopt = 0;
switch (cmd) {
/* Generic Linux IOCTLs */
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &info, sizeof(struct watchdog_info)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
if (put_user(0, (int __user *)argp))
return -EFAULT;
break;
case WDIOC_KEEPALIVE:
cpwd_pingtimer(p, index);
break;
case WDIOC_SETOPTIONS:
if (copy_from_user(&setopt, argp, sizeof(unsigned int)))
return -EFAULT;
if (setopt & WDIOS_DISABLECARD) {
if (p->enabled)
return -EINVAL;
cpwd_stoptimer(p, index);
} else if (setopt & WDIOS_ENABLECARD) {
cpwd_starttimer(p, index);
} else {
return -EINVAL;
}
break;
/* Solaris-compatible IOCTLs */
case WIOCGSTAT:
setopt = cpwd_getstatus(p, index);
if (copy_to_user(argp, &setopt, sizeof(unsigned int)))
return -EFAULT;
break;
case WIOCSTART:
cpwd_starttimer(p, index);
break;
case WIOCSTOP:
if (p->enabled)
return -EINVAL;
cpwd_stoptimer(p, index);
break;
default:
return -EINVAL;
}
return 0;
}
static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return cpwd_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
static ssize_t cpwd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
struct cpwd *p = cpwd_device;
int index = iminor(inode) - WD0_MINOR;
if (count) {
cpwd_pingtimer(p, index);
return 1;
}
return 0;
}
static ssize_t cpwd_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
return -EINVAL;
}
static const struct file_operations cpwd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cpwd_ioctl,
.compat_ioctl = cpwd_compat_ioctl,
.open = cpwd_open,
.write = cpwd_write,
.read = cpwd_read,
.release = cpwd_release,
.llseek = no_llseek,
};
static int cpwd_probe(struct platform_device *op)
{
struct device_node *options;
const char *str_prop;
const void *prop_val;
int i, err = -EINVAL;
struct cpwd *p;
if (cpwd_device)
return -EINVAL;
p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->irq = op->archdata.irqs[0];
spin_lock_init(&p->lock);
p->regs = of_ioremap(&op->resource[0], 0,
4 * WD_TIMER_REGSZ, DRIVER_NAME);
if (!p->regs) {
pr_err("Unable to map registers\n");
return -ENOMEM;
}
options = of_find_node_by_path("/options");
if (!options) {
err = -ENODEV;
pr_err("Unable to find /options node\n");
goto out_iounmap;
}
prop_val = of_get_property(options, "watchdog-enable?", NULL);
p->enabled = (prop_val ? true : false);
prop_val = of_get_property(options, "watchdog-reboot?", NULL);
p->reboot = (prop_val ? true : false);
str_prop = of_get_property(options, "watchdog-timeout", NULL);
if (str_prop)
p->timeout = simple_strtoul(str_prop, NULL, 10);
of_node_put(options);
/* CP1400s seem to have broken PLD implementations-- the
* interrupt_mask register cannot be written, so no timer
* interrupts can be masked within the PLD.
*/
str_prop = of_get_property(op->dev.of_node, "model", NULL);
p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL));
if (!p->enabled)
cpwd_toggleintr(p, -1, WD_INTR_OFF);
for (i = 0; i < WD_NUMDEVS; i++) {
static const char *cpwd_names[] = { "RIC", "XIR", "POR" };
static int *parms[] = { &wd0_timeout,
&wd1_timeout,
&wd2_timeout };
struct miscdevice *mp = &p->devs[i].misc;
mp->minor = WD0_MINOR + i;
mp->name = cpwd_names[i];
mp->fops = &cpwd_fops;
p->devs[i].regs = p->regs + (i * WD_TIMER_REGSZ);
p->devs[i].intr_mask = (WD0_INTR_MASK << i);
p->devs[i].runstatus &= ~WD_STAT_BSTOP;
p->devs[i].runstatus |= WD_STAT_INIT;
p->devs[i].timeout = p->timeout;
if (*parms[i])
p->devs[i].timeout = *parms[i];
err = misc_register(&p->devs[i].misc);
if (err) {
pr_err("Could not register misc device for dev %d\n",
i);
goto out_unregister;
}
}
if (p->broken) {
timer_setup(&cpwd_timer, cpwd_brokentimer, 0);
cpwd_timer.expires = WD_BTIMEOUT;
pr_info("PLD defect workaround enabled for model %s\n",
WD_BADMODEL);
}
platform_set_drvdata(op, p);
cpwd_device = p;
return 0;
out_unregister:
for (i--; i >= 0; i--)
misc_deregister(&p->devs[i].misc);
out_iounmap:
of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
return err;
}
static void cpwd_remove(struct platform_device *op)
{
struct cpwd *p = platform_get_drvdata(op);
int i;
for (i = 0; i < WD_NUMDEVS; i++) {
misc_deregister(&p->devs[i].misc);
if (!p->enabled) {
cpwd_stoptimer(p, i);
if (p->devs[i].runstatus & WD_STAT_BSTOP)
cpwd_resetbrokentimer(p, i);
}
}
if (p->broken)
del_timer_sync(&cpwd_timer);
if (p->initialized)
free_irq(p->irq, p);
of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
cpwd_device = NULL;
}
static const struct of_device_id cpwd_match[] = {
{
.name = "watchdog",
},
{},
};
MODULE_DEVICE_TABLE(of, cpwd_match);
static struct platform_driver cpwd_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = cpwd_match,
},
.probe = cpwd_probe,
.remove_new = cpwd_remove,
};
module_platform_driver(cpwd_driver);
| linux-master | drivers/watchdog/cpwd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IB700 Single Board Computer WDT driver
*
* (c) Copyright 2001 Charles Howes <[email protected]>
*
* Based on advantechwdt.c which is based on acquirewdt.c which
* is based on wdt.c.
*
* (c) Copyright 2000-2001 Marek Michalkiewicz <[email protected]>
*
* Based on acquirewdt.c which is based on wdt.c.
* Original copyright messages:
*
* (c) Copyright 1996 Alan Cox <[email protected]>,
* All Rights Reserved.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <[email protected]>
*
* 14-Dec-2001 Matt Domsch <[email protected]>
* Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
* Added timeout module option to override default
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/ioport.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
static struct platform_device *ibwdt_platform_device;
static unsigned long ibwdt_is_open;
static DEFINE_SPINLOCK(ibwdt_lock);
static char expect_close;
/* Module information */
#define DRV_NAME "ib700wdt"
/*
*
* Watchdog Timer Configuration
*
* The function of the watchdog timer is to reset the system
* automatically and is defined at I/O port 0443H. To enable the
* watchdog timer and allow the system to reset, write I/O port 0443H.
* To disable the timer, write I/O port 0441H for the system to stop the
* watchdog function. The timer has a tolerance of 20% for its
* intervals.
*
* The following describes how the timer should be programmed.
*
* Enabling Watchdog:
* MOV AX,000FH (Choose the values from 0 to F)
* MOV DX,0443H
* OUT DX,AX
*
* Disabling Watchdog:
* MOV AX,000FH (Any value is fine.)
* MOV DX,0441H
* OUT DX,AX
*
* Watchdog timer control table:
* Level Value Time/sec | Level Value Time/sec
* 1 F 0 | 9 7 16
* 2 E 2 | 10 6 18
* 3 D 4 | 11 5 20
* 4 C 6 | 12 4 22
* 5 B 8 | 13 3 24
* 6 A 10 | 14 2 26
* 7 9 12 | 15 1 28
* 8 8 14 | 16 0 30
*
*/
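/*
 * ibwdt_ping() below maps the timeout to a register value with
 * wd_margin = 15 - ((timeout + 1) / 2). Worked examples (illustrative):
 * timeout = 30 yields 15 - 15 = 0 (level 16, 30 s) and timeout = 10
 * yields 15 - 5 = 0xA (level 6, 10 s), matching the table above.
 */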
#define WDT_STOP 0x441
#define WDT_START 0x443
/* Default timeout */
#define WATCHDOG_TIMEOUT 30 /* 30 seconds +/- 20% */
static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 0<= timeout <=30, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Watchdog Operations
*/
static void ibwdt_ping(void)
{
int wd_margin = 15 - ((timeout + 1) / 2);
spin_lock(&ibwdt_lock);
/* Write a watchdog value */
outb_p(wd_margin, WDT_START);
spin_unlock(&ibwdt_lock);
}
static void ibwdt_disable(void)
{
spin_lock(&ibwdt_lock);
outb_p(0, WDT_STOP);
spin_unlock(&ibwdt_lock);
}
static int ibwdt_set_heartbeat(int t)
{
if (t < 0 || t > 30)
return -EINVAL;
timeout = t;
return 0;
}
/*
* /dev/watchdog handling
*/
static ssize_t ibwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
ibwdt_ping();
}
return count;
}
static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int new_margin;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
| WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "IB700 WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int options, retval = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
ibwdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
ibwdt_ping();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
ibwdt_ping();
break;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, p))
return -EFAULT;
if (ibwdt_set_heartbeat(new_margin))
return -EINVAL;
ibwdt_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
static int ibwdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &ibwdt_is_open))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
/* Activate */
ibwdt_ping();
return stream_open(inode, file);
}
static int ibwdt_close(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
ibwdt_disable();
} else {
pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
ibwdt_ping();
}
clear_bit(0, &ibwdt_is_open);
expect_close = 0;
return 0;
}
/*
* Kernel Interfaces
*/
static const struct file_operations ibwdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = ibwdt_open,
.release = ibwdt_close,
};
static struct miscdevice ibwdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ibwdt_fops,
};
/*
* Init & exit routines
*/
static int __init ibwdt_probe(struct platform_device *dev)
{
int res;
#if WDT_START != WDT_STOP
if (!request_region(WDT_STOP, 1, "IB700 WDT")) {
pr_err("STOP method I/O %X is not available\n", WDT_STOP);
res = -EIO;
goto out_nostopreg;
}
#endif
if (!request_region(WDT_START, 1, "IB700 WDT")) {
pr_err("START method I/O %X is not available\n", WDT_START);
res = -EIO;
goto out_nostartreg;
}
/* Check that the heartbeat value is within its range;
* if not, reset to the default */
if (ibwdt_set_heartbeat(timeout)) {
ibwdt_set_heartbeat(WATCHDOG_TIMEOUT);
pr_info("timeout value must be 0<=x<=30, using %d\n", timeout);
}
res = misc_register(&ibwdt_miscdev);
if (res) {
pr_err("failed to register misc device\n");
goto out_nomisc;
}
return 0;
out_nomisc:
release_region(WDT_START, 1);
out_nostartreg:
#if WDT_START != WDT_STOP
release_region(WDT_STOP, 1);
#endif
out_nostopreg:
return res;
}
static void ibwdt_remove(struct platform_device *dev)
{
misc_deregister(&ibwdt_miscdev);
release_region(WDT_START, 1);
#if WDT_START != WDT_STOP
release_region(WDT_STOP, 1);
#endif
}
static void ibwdt_shutdown(struct platform_device *dev)
{
/* Turn the WDT off if we have a soft shutdown */
ibwdt_disable();
}
static struct platform_driver ibwdt_driver = {
.remove_new = ibwdt_remove,
.shutdown = ibwdt_shutdown,
.driver = {
.name = DRV_NAME,
},
};
static int __init ibwdt_init(void)
{
int err;
pr_info("WDT driver for IB700 single board computer initialising\n");
ibwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
if (IS_ERR(ibwdt_platform_device))
return PTR_ERR(ibwdt_platform_device);
err = platform_driver_probe(&ibwdt_driver, ibwdt_probe);
if (err)
goto unreg_platform_device;
return 0;
unreg_platform_device:
platform_device_unregister(ibwdt_platform_device);
return err;
}
static void __exit ibwdt_exit(void)
{
platform_device_unregister(ibwdt_platform_device);
platform_driver_unregister(&ibwdt_driver);
pr_info("Watchdog Module Unloaded\n");
}
module_init(ibwdt_init);
module_exit(ibwdt_exit);
MODULE_AUTHOR("Charles Howes <[email protected]>");
MODULE_DESCRIPTION("IB700 SBC watchdog driver");
MODULE_LICENSE("GPL");
/* end of ib700wdt.c */
| linux-master | drivers/watchdog/ib700wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xen Watchdog Driver
*
* (c) Copyright 2010 Novell, Inc.
*/
#define DRV_NAME "xen_wdt"
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <xen/xen.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/sched.h>
static struct platform_device *platform_device;
static struct sched_watchdog wdt;
static time64_t wdt_expires;
#define WATCHDOG_TIMEOUT 60 /* in seconds */
static unsigned int timeout;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
"(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, S_IRUGO);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static inline time64_t set_timeout(struct watchdog_device *wdd)
{
wdt.timeout = wdd->timeout;
return ktime_get_seconds() + wdd->timeout;
}
static int xen_wdt_start(struct watchdog_device *wdd)
{
time64_t expires;
int err;
expires = set_timeout(wdd);
if (!wdt.id)
err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
else
err = -EBUSY;
if (err > 0) {
wdt.id = err;
wdt_expires = expires;
err = 0;
} else
BUG_ON(!err);
return err;
}
static int xen_wdt_stop(struct watchdog_device *wdd)
{
int err = 0;
wdt.timeout = 0;
if (wdt.id)
err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
if (!err)
wdt.id = 0;
return err;
}
static int xen_wdt_kick(struct watchdog_device *wdd)
{
time64_t expires;
int err;
expires = set_timeout(wdd);
if (wdt.id)
err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
else
err = -ENXIO;
if (!err)
wdt_expires = expires;
return err;
}
static unsigned int xen_wdt_get_timeleft(struct watchdog_device *wdd)
{
return wdt_expires - ktime_get_seconds();
}
static struct watchdog_info xen_wdt_info = {
.identity = DRV_NAME,
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops xen_wdt_ops = {
.owner = THIS_MODULE,
.start = xen_wdt_start,
.stop = xen_wdt_stop,
.ping = xen_wdt_kick,
.get_timeleft = xen_wdt_get_timeleft,
};
static struct watchdog_device xen_wdt_dev = {
.info = &xen_wdt_info,
.ops = &xen_wdt_ops,
.timeout = WATCHDOG_TIMEOUT,
};
static int xen_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sched_watchdog wd = { .id = ~0 };
int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
if (ret == -ENOSYS) {
dev_err(dev, "watchdog not supported by hypervisor\n");
return -ENODEV;
}
if (ret != -EINVAL) {
dev_err(dev, "unexpected hypervisor error (%d)\n", ret);
return -ENODEV;
}
watchdog_init_timeout(&xen_wdt_dev, timeout, NULL);
watchdog_set_nowayout(&xen_wdt_dev, nowayout);
watchdog_stop_on_reboot(&xen_wdt_dev);
watchdog_stop_on_unregister(&xen_wdt_dev);
ret = devm_watchdog_register_device(dev, &xen_wdt_dev);
if (ret)
return ret;
dev_info(dev, "initialized (timeout=%ds, nowayout=%d)\n",
xen_wdt_dev.timeout, nowayout);
return 0;
}
static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state)
{
typeof(wdt.id) id = wdt.id;
int rc = xen_wdt_stop(&xen_wdt_dev);
wdt.id = id;
return rc;
}
static int xen_wdt_resume(struct platform_device *dev)
{
if (!wdt.id)
return 0;
wdt.id = 0;
return xen_wdt_start(&xen_wdt_dev);
}
static struct platform_driver xen_wdt_driver = {
.probe = xen_wdt_probe,
.suspend = xen_wdt_suspend,
.resume = xen_wdt_resume,
.driver = {
.name = DRV_NAME,
},
};
static int __init xen_wdt_init_module(void)
{
int err;
if (!xen_domain())
return -ENODEV;
err = platform_driver_register(&xen_wdt_driver);
if (err)
return err;
platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
if (IS_ERR(platform_device)) {
err = PTR_ERR(platform_device);
platform_driver_unregister(&xen_wdt_driver);
}
return err;
}
static void __exit xen_wdt_cleanup_module(void)
{
platform_device_unregister(platform_device);
platform_driver_unregister(&xen_wdt_driver);
}
module_init(xen_wdt_init_module);
module_exit(xen_wdt_cleanup_module);
MODULE_AUTHOR("Jan Beulich <[email protected]>");
MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/xen_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Imagination Technologies PowerDown Controller Watchdog Timer.
*
* Copyright (c) 2014 Imagination Technologies Ltd.
*
* Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione
* 2012 Henrik Nordstrom
*
* Notes
* -----
* The timeout value is rounded up to the next power-of-two number of clock cycles.
* This is configured using the PDC_WDT_CONFIG register, according to this
* formula:
*
* timeout = 2^(delay + 1) clock cycles
*
* Where 'delay' is the value written in PDC_WDT_CONFIG register.
*
* Therefore, the hardware only allows programming watchdog timeouts
* expressed as a power-of-two number of watchdog clock cycles. The current
* implementation guarantees that the actual watchdog timeout will be
* _at least_ the value programmed in the imgpdc_wdt driver.
*
* The following table shows how the user-configured timeout relates
* to the actual hardware timeout (watchdog clock @ 40000 Hz):
*
* input timeout | WD_DELAY | actual timeout
* -----------------------------------
* 10 | 18 | 13 seconds
* 20 | 19 | 26 seconds
* 30 | 20 | 52 seconds
* 60 | 21 | 104 seconds
*
* Albeit coarse, this granularity should suffice for most watchdog uses.
* If the platform allows it, the user should be able to change the watchdog
* clock rate and achieve a finer timeout granularity.
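*
* Worked example (using the 40000 Hz watchdog clock above): a requested
* timeout of 10 seconds corresponds to 10 * 40000 = 400000 clock
* cycles, so the driver programs delay = order_base_2(400000) - 1 =
* 19 - 1 = 18, and the hardware counts 2^(18 + 1) = 524288 cycles,
* i.e. ~13.1 seconds, matching the first row of the table.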
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
/* registers */
#define PDC_WDT_SOFT_RESET 0x00
#define PDC_WDT_CONFIG 0x04
#define PDC_WDT_CONFIG_ENABLE BIT(31)
#define PDC_WDT_CONFIG_DELAY_MASK 0x1f
#define PDC_WDT_TICKLE1 0x08
#define PDC_WDT_TICKLE1_MAGIC 0xabcd1234
#define PDC_WDT_TICKLE2 0x0c
#define PDC_WDT_TICKLE2_MAGIC 0x4321dcba
#define PDC_WDT_TICKLE_STATUS_MASK 0x7
#define PDC_WDT_TICKLE_STATUS_SHIFT 0
#define PDC_WDT_TICKLE_STATUS_HRESET 0x0 /* Hard reset */
#define PDC_WDT_TICKLE_STATUS_TIMEOUT 0x1 /* Timeout */
#define PDC_WDT_TICKLE_STATUS_TICKLE 0x2 /* Tickled incorrectly */
#define PDC_WDT_TICKLE_STATUS_SRESET 0x3 /* Soft reset */
#define PDC_WDT_TICKLE_STATUS_USER 0x4 /* User reset */
/* Timeout values are in seconds */
#define PDC_WDT_MIN_TIMEOUT 1
#define PDC_WDT_DEF_TIMEOUT 64
static int heartbeat;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
"(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct pdc_wdt_dev {
struct watchdog_device wdt_dev;
struct clk *wdt_clk;
struct clk *sys_clk;
void __iomem *base;
};
static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev)
{
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
writel(PDC_WDT_TICKLE1_MAGIC, wdt->base + PDC_WDT_TICKLE1);
writel(PDC_WDT_TICKLE2_MAGIC, wdt->base + PDC_WDT_TICKLE2);
return 0;
}
static int pdc_wdt_stop(struct watchdog_device *wdt_dev)
{
unsigned int val;
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
val = readl(wdt->base + PDC_WDT_CONFIG);
val &= ~PDC_WDT_CONFIG_ENABLE;
writel(val, wdt->base + PDC_WDT_CONFIG);
/* Must tickle to finish the stop */
pdc_wdt_keepalive(wdt_dev);
return 0;
}
static void __pdc_wdt_set_timeout(struct pdc_wdt_dev *wdt)
{
unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
unsigned int val;
val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
val |= order_base_2(wdt->wdt_dev.timeout * clk_rate) - 1;
writel(val, wdt->base + PDC_WDT_CONFIG);
}
static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int new_timeout)
{
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
wdt->wdt_dev.timeout = new_timeout;
__pdc_wdt_set_timeout(wdt);
return 0;
}
/* Start the watchdog timer (delay should already be set) */
static int pdc_wdt_start(struct watchdog_device *wdt_dev)
{
unsigned int val;
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
__pdc_wdt_set_timeout(wdt);
val = readl(wdt->base + PDC_WDT_CONFIG);
val |= PDC_WDT_CONFIG_ENABLE;
writel(val, wdt->base + PDC_WDT_CONFIG);
return 0;
}
static int pdc_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
/* Assert SOFT_RESET */
writel(0x1, wdt->base + PDC_WDT_SOFT_RESET);
return 0;
}
static const struct watchdog_info pdc_wdt_info = {
.identity = "IMG PDC Watchdog",
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops pdc_wdt_ops = {
.owner = THIS_MODULE,
.start = pdc_wdt_start,
.stop = pdc_wdt_stop,
.ping = pdc_wdt_keepalive,
.set_timeout = pdc_wdt_set_timeout,
.restart = pdc_wdt_restart,
};
static int pdc_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
u64 div;
int val;
unsigned long clk_rate;
struct pdc_wdt_dev *pdc_wdt;
pdc_wdt = devm_kzalloc(dev, sizeof(*pdc_wdt), GFP_KERNEL);
if (!pdc_wdt)
return -ENOMEM;
pdc_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdc_wdt->base))
return PTR_ERR(pdc_wdt->base);
pdc_wdt->sys_clk = devm_clk_get_enabled(dev, "sys");
if (IS_ERR(pdc_wdt->sys_clk)) {
dev_err(dev, "failed to get the sys clock\n");
return PTR_ERR(pdc_wdt->sys_clk);
}
pdc_wdt->wdt_clk = devm_clk_get_enabled(dev, "wdt");
if (IS_ERR(pdc_wdt->wdt_clk)) {
dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(pdc_wdt->wdt_clk);
}
/* We use the clock rate to calculate the max timeout */
clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
if (clk_rate == 0) {
dev_err(dev, "failed to get clock rate\n");
return -EINVAL;
}
if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) {
dev_err(dev, "invalid clock rate\n");
return -EINVAL;
}
if (order_base_2(clk_rate) == 0)
pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT + 1;
else
pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT;
pdc_wdt->wdt_dev.info = &pdc_wdt_info;
pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
div = 1ULL << (PDC_WDT_CONFIG_DELAY_MASK + 1);
do_div(div, clk_rate);
pdc_wdt->wdt_dev.max_timeout = div;
pdc_wdt->wdt_dev.timeout = PDC_WDT_DEF_TIMEOUT;
pdc_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, dev);
pdc_wdt_stop(&pdc_wdt->wdt_dev);
/* Find what caused the last reset */
val = readl(pdc_wdt->base + PDC_WDT_TICKLE1);
val = (val & PDC_WDT_TICKLE_STATUS_MASK) >> PDC_WDT_TICKLE_STATUS_SHIFT;
switch (val) {
case PDC_WDT_TICKLE_STATUS_TICKLE:
case PDC_WDT_TICKLE_STATUS_TIMEOUT:
pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET;
dev_info(dev, "watchdog module last reset due to timeout\n");
break;
case PDC_WDT_TICKLE_STATUS_HRESET:
dev_info(dev,
"watchdog module last reset due to hard reset\n");
break;
case PDC_WDT_TICKLE_STATUS_SRESET:
dev_info(dev,
"watchdog module last reset due to soft reset\n");
break;
case PDC_WDT_TICKLE_STATUS_USER:
dev_info(dev,
"watchdog module last reset due to user reset\n");
break;
default:
dev_info(dev, "contains an illegal status code (%08x)\n", val);
break;
}
watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&pdc_wdt->wdt_dev, 128);
platform_set_drvdata(pdev, pdc_wdt);
watchdog_stop_on_reboot(&pdc_wdt->wdt_dev);
watchdog_stop_on_unregister(&pdc_wdt->wdt_dev);
return devm_watchdog_register_device(dev, &pdc_wdt->wdt_dev);
}
static const struct of_device_id pdc_wdt_match[] = {
{ .compatible = "img,pdc-wdt" },
{}
};
MODULE_DEVICE_TABLE(of, pdc_wdt_match);
static struct platform_driver pdc_wdt_driver = {
.driver = {
.name = "imgpdc-wdt",
.of_match_table = pdc_wdt_match,
},
.probe = pdc_wdt_probe,
};
module_platform_driver(pdc_wdt_driver);
MODULE_AUTHOR("Jude Abraham <[email protected]>");
MODULE_AUTHOR("Naidu Tellapati <[email protected]>");
MODULE_DESCRIPTION("Imagination Technologies PDC Watchdog Timer Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/imgpdc_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
* Copyright (C) 2017 Hauke Mehrtens <[email protected]>
* Based on EP93xx wdt driver
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/watchdog.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <lantiq_soc.h>
#define LTQ_XRX_RCU_RST_STAT 0x0014
#define LTQ_XRX_RCU_RST_STAT_WDT BIT(31)
/* CPU0 Reset Source Register */
#define LTQ_FALCON_SYS1_CPU0RS 0x0060
/* reset cause mask */
#define LTQ_FALCON_SYS1_CPU0RS_MASK 0x0007
#define LTQ_FALCON_SYS1_CPU0RS_WDT 0x02
/*
* Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
* essentially the following two magic passwords need to be written to allow
* IO access to the WDT core
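*
* The unlock sequence, as used by ltq_wdt_start() below, is to write
* LTQ_WDT_CR_PW1 into the password field of LTQ_WDT_CR and then issue
* the actual configuration write with LTQ_WDT_CR_PW2 in the password
* field.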
*/
#define LTQ_WDT_CR_PW1 0x00BE0000
#define LTQ_WDT_CR_PW2 0x00DC0000
#define LTQ_WDT_CR 0x0 /* watchdog control register */
#define LTQ_WDT_CR_GEN BIT(31) /* enable bit */
/* Pre-warning limit set to 1/16 of max WDT period */
#define LTQ_WDT_CR_PWL (0x3 << 26)
/* set clock divider to 0x40000 */
#define LTQ_WDT_CR_CLKDIV (0x3 << 24)
#define LTQ_WDT_CR_PW_MASK GENMASK(23, 16) /* Password field */
#define LTQ_WDT_CR_MAX_TIMEOUT ((1 << 16) - 1) /* The reload field is 16 bit */
#define LTQ_WDT_SR 0x8 /* watchdog status register */
#define LTQ_WDT_SR_EN BIT(31) /* Enable */
#define LTQ_WDT_SR_VALUE_MASK GENMASK(15, 0) /* Timer value */
#define LTQ_WDT_DIVIDER 0x40000
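/*
* Illustrative numbers only (the actual IO clock rate is SoC-specific):
* assuming a 250 MHz IO clock, the effective tick rate would be
* 250000000 / 0x40000 ~= 953 Hz, so the 16-bit reload field would limit
* the timeout to roughly 65535 / 953 ~= 68 seconds.
*/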
static bool nowayout = WATCHDOG_NOWAYOUT;
struct ltq_wdt_hw {
int (*bootstatus_get)(struct device *dev);
};
struct ltq_wdt_priv {
struct watchdog_device wdt;
void __iomem *membase;
unsigned long clk_rate;
};
static u32 ltq_wdt_r32(struct ltq_wdt_priv *priv, u32 offset)
{
return __raw_readl(priv->membase + offset);
}
static void ltq_wdt_w32(struct ltq_wdt_priv *priv, u32 val, u32 offset)
{
__raw_writel(val, priv->membase + offset);
}
static void ltq_wdt_mask(struct ltq_wdt_priv *priv, u32 clear, u32 set,
u32 offset)
{
u32 val = ltq_wdt_r32(priv, offset);
val &= ~(clear);
val |= set;
ltq_wdt_w32(priv, val, offset);
}
static struct ltq_wdt_priv *ltq_wdt_get_priv(struct watchdog_device *wdt)
{
return container_of(wdt, struct ltq_wdt_priv, wdt);
}
static struct watchdog_info ltq_wdt_info = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_CARDRESET,
.identity = "ltq_wdt",
};
static int ltq_wdt_start(struct watchdog_device *wdt)
{
struct ltq_wdt_priv *priv = ltq_wdt_get_priv(wdt);
u32 timeout;
timeout = wdt->timeout * priv->clk_rate;
ltq_wdt_mask(priv, LTQ_WDT_CR_PW_MASK, LTQ_WDT_CR_PW1, LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_wdt_mask(priv, LTQ_WDT_CR_PW_MASK | LTQ_WDT_CR_MAX_TIMEOUT,
LTQ_WDT_CR_GEN | LTQ_WDT_CR_PWL | LTQ_WDT_CR_CLKDIV |
LTQ_WDT_CR_PW2 | timeout,
LTQ_WDT_CR);
return 0;
}
static int ltq_wdt_stop(struct watchdog_device *wdt)
{
struct ltq_wdt_priv *priv = ltq_wdt_get_priv(wdt);
ltq_wdt_mask(priv, LTQ_WDT_CR_PW_MASK, LTQ_WDT_CR_PW1, LTQ_WDT_CR);
ltq_wdt_mask(priv, LTQ_WDT_CR_GEN | LTQ_WDT_CR_PW_MASK,
LTQ_WDT_CR_PW2, LTQ_WDT_CR);
return 0;
}
static int ltq_wdt_ping(struct watchdog_device *wdt)
{
struct ltq_wdt_priv *priv = ltq_wdt_get_priv(wdt);
u32 timeout;
timeout = wdt->timeout * priv->clk_rate;
ltq_wdt_mask(priv, LTQ_WDT_CR_PW_MASK, LTQ_WDT_CR_PW1, LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_wdt_mask(priv, LTQ_WDT_CR_PW_MASK | LTQ_WDT_CR_MAX_TIMEOUT,
LTQ_WDT_CR_PW2 | timeout, LTQ_WDT_CR);
return 0;
}
static unsigned int ltq_wdt_get_timeleft(struct watchdog_device *wdt)
{
struct ltq_wdt_priv *priv = ltq_wdt_get_priv(wdt);
u64 timeout;
timeout = ltq_wdt_r32(priv, LTQ_WDT_SR) & LTQ_WDT_SR_VALUE_MASK;
return do_div(timeout, priv->clk_rate);
}
static const struct watchdog_ops ltq_wdt_ops = {
.owner = THIS_MODULE,
.start = ltq_wdt_start,
.stop = ltq_wdt_stop,
.ping = ltq_wdt_ping,
.get_timeleft = ltq_wdt_get_timeleft,
};
static int ltq_wdt_xrx_bootstatus_get(struct device *dev)
{
struct regmap *rcu_regmap;
u32 val;
int err;
rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
if (IS_ERR(rcu_regmap))
return PTR_ERR(rcu_regmap);
err = regmap_read(rcu_regmap, LTQ_XRX_RCU_RST_STAT, &val);
if (err)
return err;
if (val & LTQ_XRX_RCU_RST_STAT_WDT)
return WDIOF_CARDRESET;
return 0;
}
static int ltq_wdt_falcon_bootstatus_get(struct device *dev)
{
struct regmap *rcu_regmap;
u32 val;
int err;
rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"lantiq,rcu");
if (IS_ERR(rcu_regmap))
return PTR_ERR(rcu_regmap);
err = regmap_read(rcu_regmap, LTQ_FALCON_SYS1_CPU0RS, &val);
if (err)
return err;
if ((val & LTQ_FALCON_SYS1_CPU0RS_MASK) == LTQ_FALCON_SYS1_CPU0RS_WDT)
return WDIOF_CARDRESET;
return 0;
}
static int ltq_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ltq_wdt_priv *priv;
struct watchdog_device *wdt;
struct clk *clk;
const struct ltq_wdt_hw *ltq_wdt_hw;
int ret;
u32 status;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
/* we do not need to enable the clock as it is always running */
clk = clk_get_io();
priv->clk_rate = clk_get_rate(clk) / LTQ_WDT_DIVIDER;
if (!priv->clk_rate) {
dev_err(dev, "clock rate less than divider %i\n",
LTQ_WDT_DIVIDER);
return -EINVAL;
}
wdt = &priv->wdt;
wdt->info = <q_wdt_info;
wdt->ops = <q_wdt_ops;
wdt->min_timeout = 1;
wdt->max_timeout = LTQ_WDT_CR_MAX_TIMEOUT / priv->clk_rate;
wdt->timeout = wdt->max_timeout;
wdt->parent = dev;
ltq_wdt_hw = of_device_get_match_data(dev);
if (ltq_wdt_hw && ltq_wdt_hw->bootstatus_get) {
ret = ltq_wdt_hw->bootstatus_get(dev);
if (ret >= 0)
wdt->bootstatus = ret;
}
watchdog_set_nowayout(wdt, nowayout);
watchdog_init_timeout(wdt, 0, dev);
status = ltq_wdt_r32(priv, LTQ_WDT_SR);
if (status & LTQ_WDT_SR_EN) {
/*
* If the watchdog is already running overwrite it with our
* new settings. Stop is not needed as the start call will
* replace all settings anyway.
*/
ltq_wdt_start(wdt);
set_bit(WDOG_HW_RUNNING, &wdt->status);
}
return devm_watchdog_register_device(dev, wdt);
}
static const struct ltq_wdt_hw ltq_wdt_xrx100 = {
.bootstatus_get = ltq_wdt_xrx_bootstatus_get,
};
static const struct ltq_wdt_hw ltq_wdt_falcon = {
.bootstatus_get = ltq_wdt_falcon_bootstatus_get,
};
static const struct of_device_id ltq_wdt_match[] = {
{ .compatible = "lantiq,wdt", .data = NULL },
{ .compatible = "lantiq,xrx100-wdt", .data = <q_wdt_xrx100 },
{ .compatible = "lantiq,falcon-wdt", .data = <q_wdt_falcon },
{},
};
MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
.probe = ltq_wdt_probe,
.driver = {
.name = "wdt",
.of_match_table = ltq_wdt_match,
},
};
module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/lantiq_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Retu watchdog driver
*
* Copyright (C) 2004, 2005 Nokia Corporation
*
* Based on code written by Amit Kucheria and Michael Buesch.
* Rewritten by Aaro Koskinen.
*/
#include <linux/devm-helpers.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/retu.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
/* Watchdog timer values in seconds */
#define RETU_WDT_MAX_TIMER 63
struct retu_wdt_dev {
struct retu_dev *rdev;
struct device *dev;
struct delayed_work ping_work;
};
/*
* Since Retu watchdog cannot be disabled in hardware, we must kick it
* with a timer until userspace watchdog software takes over. If
* CONFIG_WATCHDOG_NOWAYOUT is set, we never start the feeding.
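*
* With RETU_WDT_MAX_TIMER = 63, the ping work below reloads the counter
* roughly every 31 seconds (63 * HZ / 2), i.e. at half the hardware
* timeout.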
*/
static void retu_wdt_ping_enable(struct retu_wdt_dev *wdev)
{
retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
schedule_delayed_work(&wdev->ping_work,
round_jiffies_relative(RETU_WDT_MAX_TIMER * HZ / 2));
}
static void retu_wdt_ping_disable(struct retu_wdt_dev *wdev)
{
retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
cancel_delayed_work_sync(&wdev->ping_work);
}
static void retu_wdt_ping_work(struct work_struct *work)
{
struct retu_wdt_dev *wdev = container_of(to_delayed_work(work),
struct retu_wdt_dev, ping_work);
retu_wdt_ping_enable(wdev);
}
static int retu_wdt_start(struct watchdog_device *wdog)
{
struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
retu_wdt_ping_disable(wdev);
return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
}
static int retu_wdt_stop(struct watchdog_device *wdog)
{
struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
retu_wdt_ping_enable(wdev);
return 0;
}
static int retu_wdt_ping(struct watchdog_device *wdog)
{
struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
}
static int retu_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int timeout)
{
struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
wdog->timeout = timeout;
return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
}
static const struct watchdog_info retu_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "Retu watchdog",
};
static const struct watchdog_ops retu_wdt_ops = {
.owner = THIS_MODULE,
.start = retu_wdt_start,
.stop = retu_wdt_stop,
.ping = retu_wdt_ping,
.set_timeout = retu_wdt_set_timeout,
};
static int retu_wdt_probe(struct platform_device *pdev)
{
struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
bool nowayout = WATCHDOG_NOWAYOUT;
struct watchdog_device *retu_wdt;
struct retu_wdt_dev *wdev;
int ret;
retu_wdt = devm_kzalloc(&pdev->dev, sizeof(*retu_wdt), GFP_KERNEL);
if (!retu_wdt)
return -ENOMEM;
wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
retu_wdt->info = &retu_wdt_info;
retu_wdt->ops = &retu_wdt_ops;
retu_wdt->timeout = RETU_WDT_MAX_TIMER;
retu_wdt->min_timeout = 0;
retu_wdt->max_timeout = RETU_WDT_MAX_TIMER;
retu_wdt->parent = &pdev->dev;
watchdog_set_drvdata(retu_wdt, wdev);
watchdog_set_nowayout(retu_wdt, nowayout);
wdev->rdev = rdev;
wdev->dev = &pdev->dev;
ret = devm_delayed_work_autocancel(&pdev->dev, &wdev->ping_work,
retu_wdt_ping_work);
if (ret)
return ret;
ret = devm_watchdog_register_device(&pdev->dev, retu_wdt);
if (ret < 0)
return ret;
if (nowayout)
retu_wdt_ping(retu_wdt);
else
retu_wdt_ping_enable(wdev);
return 0;
}
static struct platform_driver retu_wdt_driver = {
.probe = retu_wdt_probe,
.driver = {
.name = "retu-wdt",
},
};
module_platform_driver(retu_wdt_driver);
MODULE_ALIAS("platform:retu-wdt");
MODULE_DESCRIPTION("Retu watchdog");
MODULE_AUTHOR("Amit Kucheria");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/retu_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for DA9063 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: Mariusz Wojtasik <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9063/core.h>
#include <linux/property.h>
#include <linux/regmap.h>
/*
* Watchdog selector to timeout in seconds.
* 0: WDT disabled;
* others: timeout = 2048 ms * 2^(TWDSCALE-1).
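*
* For example, per the formula above: TWDSCALE=1 gives 2048 ms (~2 s),
* TWDSCALE=3 gives 2048 ms * 4 = 8192 ms (~8 s) and TWDSCALE=7 gives
* 2048 ms * 64 = 131072 ms (~131 s), which is how the wdt_timeout[]
* table below is derived.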
*/
static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
static bool use_sw_pm;
#define DA9063_TWDSCALE_DISABLE 0
#define DA9063_TWDSCALE_MIN 1
#define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1)
#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN]
#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX]
#define DA9063_WDG_TIMEOUT wdt_timeout[3]
#define DA9063_RESET_PROTECTION_MS 256
static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
{
unsigned int i;
for (i = DA9063_TWDSCALE_MIN; i <= DA9063_TWDSCALE_MAX; i++) {
if (wdt_timeout[i] >= secs)
return i;
}
return DA9063_TWDSCALE_MAX;
}
/*
* Read the currently active timeout.
* Zero means the watchdog is disabled.
*/
static unsigned int da9063_wdt_read_timeout(struct da9063 *da9063)
{
unsigned int val;
regmap_read(da9063->regmap, DA9063_REG_CONTROL_D, &val);
return wdt_timeout[val & DA9063_TWDSCALE_MASK];
}
static int da9063_wdt_disable_timer(struct da9063 *da9063)
{
return regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D,
DA9063_TWDSCALE_MASK,
DA9063_TWDSCALE_DISABLE);
}
static int
da9063_wdt_update_timeout(struct da9063 *da9063, unsigned int timeout)
{
unsigned int regval;
int ret;
/*
* Writing a timeout value while one is already programmed triggers a
* reboot, because the timeout value combines two functions in one:
* indicating the counter limit and starting the watchdog. If the
* watchdog is already running, it must therefore be disabled before
* the timeout can be changed; writing the new timeout value then
* enables the watchdog again.
*/
ret = da9063_wdt_disable_timer(da9063);
if (ret)
return ret;
usleep_range(150, 300);
regval = da9063_wdt_timeout_to_sel(timeout);
return regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D,
DA9063_TWDSCALE_MASK, regval);
}
static int da9063_wdt_start(struct watchdog_device *wdd)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
int ret;
ret = da9063_wdt_update_timeout(da9063, wdd->timeout);
if (ret)
dev_err(da9063->dev, "Watchdog failed to start (err = %d)\n",
ret);
return ret;
}
static int da9063_wdt_stop(struct watchdog_device *wdd)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
int ret;
ret = da9063_wdt_disable_timer(da9063);
if (ret)
dev_alert(da9063->dev, "Watchdog failed to stop (err = %d)\n",
ret);
return ret;
}
static int da9063_wdt_ping(struct watchdog_device *wdd)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
int ret;
/*
* Prevent pings from occurring late in the system poweroff/reboot
* sequence, which could lock the restart handler out of the i2c bus.
*/
if (system_state > SYSTEM_RUNNING)
return 0;
ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F,
DA9063_WATCHDOG);
if (ret)
dev_alert(da9063->dev, "Failed to ping the watchdog (err = %d)\n",
ret);
return ret;
}
static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
int ret = 0;
/*
* There are two cases when a set_timeout() will be called:
* 1. The watchdog is off and someone wants to set the timeout for
* later use.
* 2. The watchdog is already running and a new timeout value should be
* set.
*
* The watchdog can't store a nonzero timeout value without enabling
* the watchdog, so the timeout must be buffered by the driver.
*/
if (watchdog_active(wdd))
ret = da9063_wdt_update_timeout(da9063, timeout);
if (ret)
dev_err(da9063->dev, "Failed to set watchdog timeout (err = %d)\n",
ret);
else
wdd->timeout = wdt_timeout[da9063_wdt_timeout_to_sel(timeout)];
return ret;
}
static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
struct i2c_client *client = to_i2c_client(da9063->dev);
union i2c_smbus_data msg;
int ret;
/*
* Don't use regmap because it is not atomic-safe. Additionally, use the
* unlocked flavor of i2c_smbus_xfer to avoid a scenario where the i2c
* bus is still locked by a process that cannot release the lock because
* interrupts are already disabled at this late stage.
*/
msg.byte = DA9063_SHUTDOWN;
ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, DA9063_REG_CONTROL_F,
I2C_SMBUS_BYTE_DATA, &msg);
if (ret < 0)
dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n",
ret);
/* wait for reset to assert... */
mdelay(500);
return ret;
}
static const struct watchdog_info da9063_watchdog_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "DA9063 Watchdog",
};
static const struct watchdog_ops da9063_watchdog_ops = {
.owner = THIS_MODULE,
.start = da9063_wdt_start,
.stop = da9063_wdt_stop,
.ping = da9063_wdt_ping,
.set_timeout = da9063_wdt_set_timeout,
.restart = da9063_wdt_restart,
};
static int da9063_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da9063 *da9063;
struct watchdog_device *wdd;
unsigned int timeout;
if (!dev->parent)
return -EINVAL;
da9063 = dev_get_drvdata(dev->parent);
if (!da9063)
return -EINVAL;
wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
if (!wdd)
return -ENOMEM;
use_sw_pm = device_property_present(dev, "dlg,use-sw-pm");
wdd->info = &da9063_watchdog_info;
wdd->ops = &da9063_watchdog_ops;
wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdd->max_timeout = DA9063_WDT_MAX_TIMEOUT;
wdd->min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS;
wdd->parent = dev;
wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_restart_priority(wdd, 128);
watchdog_set_drvdata(wdd, da9063);
dev_set_drvdata(dev, wdd);
wdd->timeout = DA9063_WDG_TIMEOUT;
/* Use pre-configured timeout if watchdog is already running. */
timeout = da9063_wdt_read_timeout(da9063);
if (timeout)
wdd->timeout = timeout;
/* Set timeout, maybe override it with DT value, scale it */
watchdog_init_timeout(wdd, 0, dev);
da9063_wdt_set_timeout(wdd, wdd->timeout);
/* Update timeout if the watchdog is already running. */
if (timeout) {
da9063_wdt_update_timeout(da9063, wdd->timeout);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
return devm_watchdog_register_device(dev, wdd);
}
static int __maybe_unused da9063_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
if (!use_sw_pm)
return 0;
if (watchdog_active(wdd))
return da9063_wdt_stop(wdd);
return 0;
}
static int __maybe_unused da9063_wdt_resume(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
if (!use_sw_pm)
return 0;
if (watchdog_active(wdd))
return da9063_wdt_start(wdd);
return 0;
}
static SIMPLE_DEV_PM_OPS(da9063_wdt_pm_ops,
da9063_wdt_suspend, da9063_wdt_resume);
static struct platform_driver da9063_wdt_driver = {
.probe = da9063_wdt_probe,
.driver = {
.name = DA9063_DRVNAME_WATCHDOG,
.pm = &da9063_wdt_pm_ops,
},
};
module_platform_driver(da9063_wdt_driver);
MODULE_AUTHOR("Mariusz Wojtasik <[email protected]>");
MODULE_DESCRIPTION("Watchdog driver for Dialog DA9063");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_WATCHDOG);
| linux-master | drivers/watchdog/da9063_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sl28cpld watchdog driver
*
* Copyright 2020 Kontron Europe GmbH
*/
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
/*
* Watchdog timer block registers.
*/
#define WDT_CTRL 0x00
#define WDT_CTRL_EN BIT(0)
#define WDT_CTRL_LOCK BIT(2)
#define WDT_CTRL_ASSERT_SYS_RESET BIT(6)
#define WDT_CTRL_ASSERT_WDT_TIMEOUT BIT(7)
#define WDT_TIMEOUT 0x01
#define WDT_KICK 0x02
#define WDT_KICK_VALUE 0x6b
#define WDT_COUNT 0x03
#define WDT_DEFAULT_TIMEOUT 10
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int timeout;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds");
struct sl28cpld_wdt {
struct watchdog_device wdd;
struct regmap *regmap;
u32 offset;
bool assert_wdt_timeout;
};
static int sl28cpld_wdt_ping(struct watchdog_device *wdd)
{
struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
return regmap_write(wdt->regmap, wdt->offset + WDT_KICK,
WDT_KICK_VALUE);
}
static int sl28cpld_wdt_start(struct watchdog_device *wdd)
{
struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
unsigned int val;
val = WDT_CTRL_EN | WDT_CTRL_ASSERT_SYS_RESET;
if (wdt->assert_wdt_timeout)
val |= WDT_CTRL_ASSERT_WDT_TIMEOUT;
if (nowayout)
val |= WDT_CTRL_LOCK;
return regmap_update_bits(wdt->regmap, wdt->offset + WDT_CTRL,
val, val);
}
static int sl28cpld_wdt_stop(struct watchdog_device *wdd)
{
struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
return regmap_update_bits(wdt->regmap, wdt->offset + WDT_CTRL,
WDT_CTRL_EN, 0);
}
static unsigned int sl28cpld_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
unsigned int val;
int ret;
ret = regmap_read(wdt->regmap, wdt->offset + WDT_COUNT, &val);
if (ret)
return 0;
return val;
}
static int sl28cpld_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
int ret;
ret = regmap_write(wdt->regmap, wdt->offset + WDT_TIMEOUT, timeout);
if (ret)
return ret;
wdd->timeout = timeout;
return 0;
}
static const struct watchdog_info sl28cpld_wdt_info = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "sl28cpld watchdog",
};
static const struct watchdog_ops sl28cpld_wdt_ops = {
.owner = THIS_MODULE,
.start = sl28cpld_wdt_start,
.stop = sl28cpld_wdt_stop,
.ping = sl28cpld_wdt_ping,
.set_timeout = sl28cpld_wdt_set_timeout,
.get_timeleft = sl28cpld_wdt_get_timeleft,
};
static int sl28cpld_wdt_probe(struct platform_device *pdev)
{
struct watchdog_device *wdd;
struct sl28cpld_wdt *wdt;
unsigned int status;
unsigned int val;
int ret;
if (!pdev->dev.parent)
return -ENODEV;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!wdt->regmap)
return -ENODEV;
ret = device_property_read_u32(&pdev->dev, "reg", &wdt->offset);
if (ret)
return -EINVAL;
wdt->assert_wdt_timeout = device_property_read_bool(&pdev->dev,
"kontron,assert-wdt-timeout-pin");
/* initialize struct watchdog_device */
wdd = &wdt->wdd;
wdd->parent = &pdev->dev;
wdd->info = &sl28cpld_wdt_info;
wdd->ops = &sl28cpld_wdt_ops;
wdd->min_timeout = 1;
wdd->max_timeout = 255;
watchdog_set_drvdata(wdd, wdt);
watchdog_stop_on_reboot(wdd);
/*
* Read the status early, in case of an error, we haven't modified the
* hardware.
*/
ret = regmap_read(wdt->regmap, wdt->offset + WDT_CTRL, &status);
if (ret)
return ret;
/*
* Initial timeout value, may be overwritten by device tree or module
* parameter in watchdog_init_timeout().
*
* Reading a zero here means that either the hardware has a default
* value of zero (which is very unlikely and definitely a hardware
* bug) or the bootloader set it to zero. Either way, we handle
* this gracefully and set our own timeout.
*/
ret = regmap_read(wdt->regmap, wdt->offset + WDT_TIMEOUT, &val);
if (ret)
return ret;
if (val)
wdd->timeout = val;
else
wdd->timeout = WDT_DEFAULT_TIMEOUT;
watchdog_init_timeout(wdd, timeout, &pdev->dev);
sl28cpld_wdt_set_timeout(wdd, wdd->timeout);
/* if the watchdog is locked, we set nowayout */
if (status & WDT_CTRL_LOCK)
nowayout = true;
watchdog_set_nowayout(wdd, nowayout);
/*
* If watchdog is already running, keep it enabled, but make
* sure its mode is set correctly.
*/
if (status & WDT_CTRL_EN) {
sl28cpld_wdt_start(wdd);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
ret = devm_watchdog_register_device(&pdev->dev, wdd);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register watchdog device\n");
return ret;
}
dev_info(&pdev->dev, "initial timeout %d sec%s\n",
wdd->timeout, nowayout ? ", nowayout" : "");
return 0;
}
static const struct of_device_id sl28cpld_wdt_of_match[] = {
{ .compatible = "kontron,sl28cpld-wdt" },
{}
};
MODULE_DEVICE_TABLE(of, sl28cpld_wdt_of_match);
static struct platform_driver sl28cpld_wdt_driver = {
.probe = sl28cpld_wdt_probe,
.driver = {
.name = "sl28cpld-wdt",
.of_match_table = sl28cpld_wdt_of_match,
},
};
module_platform_driver(sl28cpld_wdt_driver);
MODULE_DESCRIPTION("sl28cpld Watchdog Driver");
MODULE_AUTHOR("Michael Walle <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sl28cpld_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 National Instruments Corp.
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#define LOCK 0xA5
#define UNLOCK 0x5A
#define WDT_CTRL_RESET_EN BIT(7)
#define WDT_RELOAD_PORT_EN BIT(7)
#define WDT_CTRL 1
#define WDT_RELOAD_CTRL 2
#define WDT_PRESET_PRESCALE 4
#define WDT_REG_LOCK 5
#define WDT_COUNT 6
#define WDT_RELOAD_PORT 7
#define WDT_MIN_TIMEOUT 1
#define WDT_MAX_TIMEOUT 464
#define WDT_DEFAULT_TIMEOUT 80
#define WDT_MAX_COUNTER 15
static unsigned int timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. (default="
__MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started. (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct nic7018_wdt {
u16 io_base;
u32 period;
struct watchdog_device wdd;
};
struct nic7018_config {
u32 period;
u8 divider;
};
static const struct nic7018_config nic7018_configs[] = {
{ 2, 4 },
{ 32, 5 },
};
static inline u32 nic7018_timeout(u32 period, u8 counter)
{
return period * counter - period / 2;
}
static const struct nic7018_config *nic7018_get_config(u32 timeout,
u8 *counter)
{
const struct nic7018_config *config;
u8 count;
if (timeout < 30 && timeout != 16) {
config = &nic7018_configs[0];
count = timeout / 2 + 1;
} else {
config = &nic7018_configs[1];
count = DIV_ROUND_UP(timeout + 16, 32);
if (count > WDT_MAX_COUNTER)
count = WDT_MAX_COUNTER;
}
*counter = count;
return config;
}
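/*
* Worked example, derived from the two configs above: for the default
* 80 second timeout, nic7018_get_config() selects the 32 s period with
* count = DIV_ROUND_UP(80 + 16, 32) = 3, and nic7018_timeout() reports
* the actual timeout as 32 * 3 - 32 / 2 = 80 seconds. The maximum
* counter of 15 likewise yields 32 * 15 - 16 = 464 seconds, matching
* WDT_MAX_TIMEOUT.
*/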
static int nic7018_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd);
const struct nic7018_config *config;
u8 counter;
config = nic7018_get_config(timeout, &counter);
outb(counter << 4 | config->divider,
wdt->io_base + WDT_PRESET_PRESCALE);
wdd->timeout = nic7018_timeout(config->period, counter);
wdt->period = config->period;
return 0;
}
static int nic7018_start(struct watchdog_device *wdd)
{
struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd);
u8 control;
nic7018_set_timeout(wdd, wdd->timeout);
control = inb(wdt->io_base + WDT_RELOAD_CTRL);
outb(control | WDT_RELOAD_PORT_EN, wdt->io_base + WDT_RELOAD_CTRL);
outb(1, wdt->io_base + WDT_RELOAD_PORT);
control = inb(wdt->io_base + WDT_CTRL);
outb(control | WDT_CTRL_RESET_EN, wdt->io_base + WDT_CTRL);
return 0;
}
static int nic7018_stop(struct watchdog_device *wdd)
{
struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd);
outb(0, wdt->io_base + WDT_CTRL);
outb(0, wdt->io_base + WDT_RELOAD_CTRL);
outb(0xF0, wdt->io_base + WDT_PRESET_PRESCALE);
return 0;
}
static int nic7018_ping(struct watchdog_device *wdd)
{
struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd);
outb(1, wdt->io_base + WDT_RELOAD_PORT);
return 0;
}
static unsigned int nic7018_get_timeleft(struct watchdog_device *wdd)
{
struct nic7018_wdt *wdt = watchdog_get_drvdata(wdd);
u8 count;
count = inb(wdt->io_base + WDT_COUNT) & 0xF;
if (!count)
return 0;
return nic7018_timeout(wdt->period, count);
}
static const struct watchdog_info nic7018_wdd_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "NIC7018 Watchdog",
};
static const struct watchdog_ops nic7018_wdd_ops = {
.owner = THIS_MODULE,
.start = nic7018_start,
.stop = nic7018_stop,
.ping = nic7018_ping,
.set_timeout = nic7018_set_timeout,
.get_timeleft = nic7018_get_timeleft,
};
static int nic7018_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct nic7018_wdt *wdt;
struct resource *io_rc;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
platform_set_drvdata(pdev, wdt);
io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!io_rc) {
dev_err(dev, "missing IO resources\n");
return -EINVAL;
}
if (!devm_request_region(dev, io_rc->start, resource_size(io_rc),
KBUILD_MODNAME)) {
dev_err(dev, "failed to get IO region\n");
return -EBUSY;
}
wdt->io_base = io_rc->start;
wdd = &wdt->wdd;
wdd->info = &nic7018_wdd_info;
wdd->ops = &nic7018_wdd_ops;
wdd->min_timeout = WDT_MIN_TIMEOUT;
wdd->max_timeout = WDT_MAX_TIMEOUT;
wdd->timeout = WDT_DEFAULT_TIMEOUT;
wdd->parent = dev;
watchdog_set_drvdata(wdd, wdt);
watchdog_set_nowayout(wdd, nowayout);
watchdog_init_timeout(wdd, timeout, dev);
/* Unlock WDT register */
outb(UNLOCK, wdt->io_base + WDT_REG_LOCK);
ret = watchdog_register_device(wdd);
if (ret) {
outb(LOCK, wdt->io_base + WDT_REG_LOCK);
return ret;
}
dev_dbg(dev, "io_base=0x%04X, timeout=%d, nowayout=%d\n",
wdt->io_base, timeout, nowayout);
return 0;
}
static void nic7018_remove(struct platform_device *pdev)
{
struct nic7018_wdt *wdt = platform_get_drvdata(pdev);
watchdog_unregister_device(&wdt->wdd);
/* Lock WDT register */
outb(LOCK, wdt->io_base + WDT_REG_LOCK);
}
static const struct acpi_device_id nic7018_device_ids[] = {
{"NIC7018", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, nic7018_device_ids);
static struct platform_driver watchdog_driver = {
.probe = nic7018_probe,
.remove_new = nic7018_remove,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(nic7018_device_ids),
},
};
module_platform_driver(watchdog_driver);
MODULE_DESCRIPTION("National Instruments NIC7018 Watchdog driver");
MODULE_AUTHOR("Hui Chun Ong <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/nic7018_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the SA11x0/PXA2xx
*
* (c) Copyright 2000 Oleg Drokin <[email protected]>
* Based on SoftDog driver by Alan Cox <[email protected]>
*
* Neither Oleg Drokin nor iXcelerator.com admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 2000 Oleg Drokin <[email protected]>
*
* 27/11/2000 Initial release
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/clk.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/timex.h>
#define REG_OSMR0 0x0000 /* OS timer Match Reg. 0 */
#define REG_OSMR1 0x0004 /* OS timer Match Reg. 1 */
#define REG_OSMR2 0x0008 /* OS timer Match Reg. 2 */
#define REG_OSMR3 0x000c /* OS timer Match Reg. 3 */
#define REG_OSCR 0x0010 /* OS timer Counter Reg. */
#define REG_OSSR 0x0014 /* OS timer Status Reg. */
#define REG_OWER 0x0018 /* OS timer Watch-dog Enable Reg. */
#define REG_OIER 0x001C /* OS timer Interrupt Enable Reg. */
#define OSSR_M3 (1 << 3) /* Match status channel 3 */
#define OSSR_M2 (1 << 2) /* Match status channel 2 */
#define OSSR_M1 (1 << 1) /* Match status channel 1 */
#define OSSR_M0 (1 << 0) /* Match status channel 0 */
#define OWER_WME (1 << 0) /* Watchdog Match Enable */
#define OIER_E3 (1 << 3) /* Interrupt enable channel 3 */
#define OIER_E2 (1 << 2) /* Interrupt enable channel 2 */
#define OIER_E1 (1 << 1) /* Interrupt enable channel 1 */
#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
static unsigned long oscr_freq;
static unsigned long sa1100wdt_users;
static unsigned int pre_margin;
static int boot_status;
static void __iomem *reg_base;
static inline void sa1100_wr(u32 val, u32 offset)
{
writel_relaxed(val, reg_base + offset);
}
static inline u32 sa1100_rd(u32 offset)
{
return readl_relaxed(reg_base + offset);
}
/*
* Allow only one person to hold it open
*/
static int sa1100dog_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(1, &sa1100wdt_users))
return -EBUSY;
/* Activate SA1100 Watchdog timer */
sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
sa1100_wr(OSSR_M3, REG_OSSR);
sa1100_wr(OWER_WME, REG_OWER);
sa1100_wr(sa1100_rd(REG_OIER) | OIER_E3, REG_OIER);
return stream_open(inode, file);
}
/*
* The watchdog cannot be disabled.
*
* Previous comments suggested that turning off the interrupt by
* clearing REG_OIER[E3] would prevent the watchdog timing out but this
* does not appear to be true (at least on the PXA255).
*/
static int sa1100dog_release(struct inode *inode, struct file *file)
{
pr_crit("Device closed - timer will not stop\n");
clear_bit(1, &sa1100wdt_users);
return 0;
}
static ssize_t sa1100dog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len)
/* Refresh OSMR3 timer. */
sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
return len;
}
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT
| WDIOF_KEEPALIVEPING,
.identity = "SA1100/PXA255 Watchdog",
.firmware_version = 1,
};
static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY;
int time;
void __user *argp = (void __user *)arg;
int __user *p = argp;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(argp, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
ret = put_user(0, p);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(boot_status, p);
break;
case WDIOC_KEEPALIVE:
sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
ret = 0;
break;
case WDIOC_SETTIMEOUT:
ret = get_user(time, p);
if (ret)
break;
if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) {
ret = -EINVAL;
break;
}
pre_margin = oscr_freq * time;
sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(pre_margin / oscr_freq, p);
break;
}
return ret;
}
static const struct file_operations sa1100dog_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = sa1100dog_write,
.unlocked_ioctl = sa1100dog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = sa1100dog_open,
.release = sa1100dog_release,
};
static struct miscdevice sa1100dog_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &sa1100dog_fops,
};
static int margin = 60; /* (secs) Default is 1 minute */
static struct clk *clk;
static int sa1100dog_probe(struct platform_device *pdev)
{
int ret;
int *platform_data;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
ret = PTR_ERR_OR_ZERO(reg_base);
if (ret)
return ret;
clk = clk_get(NULL, "OSTIMER0");
if (IS_ERR(clk)) {
pr_err("SA1100/PXA2xx Watchdog Timer: clock not found: %d\n",
(int) PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("SA1100/PXA2xx Watchdog Timer: clock failed to prepare+enable: %d\n",
ret);
goto err;
}
oscr_freq = clk_get_rate(clk);
platform_data = pdev->dev.platform_data;
if (platform_data && *platform_data)
boot_status = WDIOF_CARDRESET;
pre_margin = oscr_freq * margin;
ret = misc_register(&sa1100dog_miscdev);
if (ret == 0) {
pr_info("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
margin);
return 0;
}
clk_disable_unprepare(clk);
err:
clk_put(clk);
return ret;
}
static void sa1100dog_remove(struct platform_device *pdev)
{
misc_deregister(&sa1100dog_miscdev);
clk_disable_unprepare(clk);
clk_put(clk);
}
static struct platform_driver sa1100dog_driver = {
.driver.name = "sa1100_wdt",
.probe = sa1100dog_probe,
.remove_new = sa1100dog_remove,
};
module_platform_driver(sa1100dog_driver);
MODULE_AUTHOR("Oleg Drokin <[email protected]>");
MODULE_DESCRIPTION("SA1100/PXA2xx Watchdog");
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sa1100_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/watchdog/ar7_wdt.c
*
* Copyright (C) 2007 Nicolas Thill <[email protected]>
* Copyright (c) 2005 Enrik Berkhan <[email protected]>
*
* Some code taken from:
* National Semiconductor SCx200 Watchdog support
* Copyright (c) 2001,2002 Christer Weinigel <[email protected]>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <asm/addrspace.h>
#include <asm/mach-ar7/ar7.h>
#define LONGNAME "TI AR7 Watchdog Timer"
MODULE_AUTHOR("Nicolas Thill <[email protected]>");
MODULE_DESCRIPTION(LONGNAME);
MODULE_LICENSE("GPL");
static int margin = 60;
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
#define READ_REG(x) readl((void __iomem *)&(x))
#define WRITE_REG(x, v) writel((v), (void __iomem *)&(x))
struct ar7_wdt {
u32 kick_lock;
u32 kick;
u32 change_lock;
u32 change;
u32 disable_lock;
u32 disable;
u32 prescale_lock;
u32 prescale;
};
static unsigned long wdt_is_open;
static unsigned expect_close;
static DEFINE_SPINLOCK(wdt_lock);
/* XXX currently fixed, allows max margin ~68.72 secs */
#define prescale_value 0xffff
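/*
* That figure follows from the arithmetic in ar7_wdt_update_margin():
* assuming the usual 62.5 MHz vbus clock, the maximum change value gives
* 0xffff * 0xffff / 62500000 ~= 68.7 seconds.
*/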
/* Pointer to the remapped WDT IO space */
static struct ar7_wdt *ar7_wdt;
static struct clk *vbus_clk;
static void ar7_wdt_kick(u32 value)
{
WRITE_REG(ar7_wdt->kick_lock, 0x5555);
if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) {
WRITE_REG(ar7_wdt->kick_lock, 0xaaaa);
if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) {
WRITE_REG(ar7_wdt->kick, value);
return;
}
}
pr_err("failed to unlock WDT kick reg\n");
}
static void ar7_wdt_prescale(u32 value)
{
WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a);
if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) {
WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5);
if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) {
WRITE_REG(ar7_wdt->prescale, value);
return;
}
}
pr_err("failed to unlock WDT prescale reg\n");
}
static void ar7_wdt_change(u32 value)
{
WRITE_REG(ar7_wdt->change_lock, 0x6666);
if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) {
WRITE_REG(ar7_wdt->change_lock, 0xbbbb);
if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) {
WRITE_REG(ar7_wdt->change, value);
return;
}
}
pr_err("failed to unlock WDT change reg\n");
}
static void ar7_wdt_disable(u32 value)
{
WRITE_REG(ar7_wdt->disable_lock, 0x7777);
if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) {
WRITE_REG(ar7_wdt->disable_lock, 0xcccc);
if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) {
WRITE_REG(ar7_wdt->disable_lock, 0xdddd);
if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) {
WRITE_REG(ar7_wdt->disable, value);
return;
}
}
}
pr_err("failed to unlock WDT disable reg\n");
}
static void ar7_wdt_update_margin(int new_margin)
{
u32 change;
u32 vbus_rate;
vbus_rate = clk_get_rate(vbus_clk);
change = new_margin * (vbus_rate / prescale_value);
if (change < 1)
change = 1;
if (change > 0xffff)
change = 0xffff;
ar7_wdt_change(change);
margin = change * prescale_value / vbus_rate;
pr_info("timer margin %d seconds (prescale %d, change %d, freq %d)\n",
margin, prescale_value, change, vbus_rate);
}
static void ar7_wdt_enable_wdt(void)
{
pr_debug("enabling watchdog timer\n");
ar7_wdt_disable(1);
ar7_wdt_kick(1);
}
static void ar7_wdt_disable_wdt(void)
{
pr_debug("disabling watchdog timer\n");
ar7_wdt_disable(0);
}
static int ar7_wdt_open(struct inode *inode, struct file *file)
{
/* only allow one at a time */
if (test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
ar7_wdt_enable_wdt();
expect_close = 0;
return stream_open(inode, file);
}
static int ar7_wdt_release(struct inode *inode, struct file *file)
{
if (!expect_close)
pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n");
else if (!nowayout)
ar7_wdt_disable_wdt();
clear_bit(0, &wdt_is_open);
return 0;
}
static ssize_t ar7_wdt_write(struct file *file, const char *data,
size_t len, loff_t *ppos)
{
/* check for a magic close character */
if (len) {
size_t i;
spin_lock(&wdt_lock);
ar7_wdt_kick(1);
spin_unlock(&wdt_lock);
expect_close = 0;
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 1;
}
}
return len;
}
static long ar7_wdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
static const struct watchdog_info ident = {
.identity = LONGNAME,
.firmware_version = 1,
.options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE),
};
int new_margin;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user((struct watchdog_info *)arg, &ident,
sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
if (put_user(0, (int *)arg))
return -EFAULT;
return 0;
case WDIOC_KEEPALIVE:
ar7_wdt_kick(1);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int *)arg))
return -EFAULT;
if (new_margin < 1)
return -EINVAL;
spin_lock(&wdt_lock);
ar7_wdt_update_margin(new_margin);
ar7_wdt_kick(1);
spin_unlock(&wdt_lock);
fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, (int *)arg))
return -EFAULT;
return 0;
default:
return -ENOTTY;
}
}
static const struct file_operations ar7_wdt_fops = {
.owner = THIS_MODULE,
.write = ar7_wdt_write,
.unlocked_ioctl = ar7_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = ar7_wdt_open,
.release = ar7_wdt_release,
.llseek = no_llseek,
};
static struct miscdevice ar7_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ar7_wdt_fops,
};
static int ar7_wdt_probe(struct platform_device *pdev)
{
int rc;
ar7_wdt = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(ar7_wdt))
return PTR_ERR(ar7_wdt);
vbus_clk = clk_get(NULL, "vbus");
if (IS_ERR(vbus_clk)) {
pr_err("could not get vbus clock\n");
return PTR_ERR(vbus_clk);
}
ar7_wdt_disable_wdt();
ar7_wdt_prescale(prescale_value);
ar7_wdt_update_margin(margin);
rc = misc_register(&ar7_wdt_miscdev);
if (rc) {
pr_err("unable to register misc device\n");
goto out;
}
return 0;
out:
clk_put(vbus_clk);
vbus_clk = NULL;
return rc;
}
static void ar7_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ar7_wdt_miscdev);
clk_put(vbus_clk);
vbus_clk = NULL;
}
static void ar7_wdt_shutdown(struct platform_device *pdev)
{
if (!nowayout)
ar7_wdt_disable_wdt();
}
static struct platform_driver ar7_wdt_driver = {
.probe = ar7_wdt_probe,
.remove_new = ar7_wdt_remove,
.shutdown = ar7_wdt_shutdown,
.driver = {
.name = "ar7_wdt",
},
};
module_platform_driver(ar7_wdt_driver);
| linux-master | drivers/watchdog/ar7_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* ICP Wafer 5823 Single Board Computer WDT driver
* http://www.icpamerica.com/wafer_5823.php
* May also work on other similar models
*
* (c) Copyright 2002 Justin Cormack <[email protected]>
*
* Release 0.02
*
* Based on advantechwdt.c which is based on wdt.c.
* Original copyright messages:
*
* (c) Copyright 1996-1997 Alan Cox <[email protected]>,
* All Rights Reserved.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <[email protected]>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#define WATCHDOG_NAME "Wafer 5823 WDT"
#define PFX WATCHDOG_NAME ": "
#define WD_TIMO 60 /* 60 sec default timeout */
static unsigned long wafwdt_is_open;
static char expect_close;
static DEFINE_SPINLOCK(wafwdt_lock);
/*
* You must set these - there is no sane way to probe for this board.
*
* To enable, write the timeout value in seconds (1 to 255) to I/O
* port WDT_START, then read the port to start the watchdog. To pat
* the dog, read port WDT_STOP to stop the timer, then read WDT_START
* to restart it again.
*/
static int wdt_stop = 0x843;
static int wdt_start = 0x443;
static int timeout = WD_TIMO; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1 <= timeout <= 255, default="
__MODULE_STRING(WD_TIMO) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static void wafwdt_ping(void)
{
/* pat watchdog */
spin_lock(&wafwdt_lock);
inb_p(wdt_stop);
inb_p(wdt_start);
spin_unlock(&wafwdt_lock);
}
static void wafwdt_start(void)
{
/* start up watchdog */
outb_p(timeout, wdt_start);
inb_p(wdt_start);
}
static void wafwdt_stop(void)
{
/* stop watchdog */
inb_p(wdt_stop);
}
static ssize_t wafwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
/* scan to see whether or not we got the magic
character */
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
/* Someone wrote to us anyhow, so return the favour */
wafwdt_ping();
}
return count;
}
static long wafwdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int new_timeout;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "Wafer 5823 WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int options, retval = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
wafwdt_stop();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
wafwdt_start();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wafwdt_ping();
break;
case WDIOC_SETTIMEOUT:
if (get_user(new_timeout, p))
return -EFAULT;
if ((new_timeout < 1) || (new_timeout > 255))
return -EINVAL;
timeout = new_timeout;
wafwdt_stop();
wafwdt_start();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
static int wafwdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &wafwdt_is_open))
return -EBUSY;
/*
* Activate
*/
wafwdt_start();
return stream_open(inode, file);
}
static int wafwdt_close(struct inode *inode, struct file *file)
{
if (expect_close == 42)
wafwdt_stop();
else {
pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
wafwdt_ping();
}
clear_bit(0, &wafwdt_is_open);
expect_close = 0;
return 0;
}
/*
* Notifier for system down
*/
static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wafwdt_stop();
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations wafwdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = wafwdt_write,
.unlocked_ioctl = wafwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = wafwdt_open,
.release = wafwdt_close,
};
static struct miscdevice wafwdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wafwdt_fops,
};
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wafwdt_notifier = {
.notifier_call = wafwdt_notify_sys,
};
static int __init wafwdt_init(void)
{
int ret;
pr_info("WDT driver for Wafer 5823 single board computer initialising\n");
if (timeout < 1 || timeout > 255) {
timeout = WD_TIMO;
pr_info("timeout value must be 1 <= x <= 255, using %d\n",
timeout);
}
if (wdt_stop != wdt_start) {
if (!request_region(wdt_stop, 1, "Wafer 5823 WDT")) {
pr_err("I/O address 0x%04x already in use\n", wdt_stop);
ret = -EIO;
goto error;
}
}
if (!request_region(wdt_start, 1, "Wafer 5823 WDT")) {
pr_err("I/O address 0x%04x already in use\n", wdt_start);
ret = -EIO;
goto error2;
}
ret = register_reboot_notifier(&wafwdt_notifier);
if (ret != 0) {
pr_err("cannot register reboot notifier (err=%d)\n", ret);
goto error3;
}
ret = misc_register(&wafwdt_miscdev);
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto error4;
}
pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
return ret;
error4:
unregister_reboot_notifier(&wafwdt_notifier);
error3:
release_region(wdt_start, 1);
error2:
if (wdt_stop != wdt_start)
release_region(wdt_stop, 1);
error:
return ret;
}
static void __exit wafwdt_exit(void)
{
misc_deregister(&wafwdt_miscdev);
unregister_reboot_notifier(&wafwdt_notifier);
if (wdt_stop != wdt_start)
release_region(wdt_stop, 1);
release_region(wdt_start, 1);
}
module_init(wafwdt_init);
module_exit(wafwdt_exit);
MODULE_AUTHOR("Justin Cormack");
MODULE_DESCRIPTION("ICP Wafer 5823 Single Board Computer WDT driver");
MODULE_LICENSE("GPL");
/* end of wafer5823wdt.c */
| linux-master | drivers/watchdog/wafer5823wdt.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell GTI Watchdog driver
*
* Copyright (C) 2023 Marvell.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
/*
* The hardware supports the following modes of operation:
* 1) Interrupt only:
*    Raises an interrupt to the Arm core whenever the timeout expires.
*
* 2) Interrupt + del3t (interrupt to firmware (SCP processor)):
*    Raises an interrupt to the Arm core on the 1st timeout.
*    Raises an interrupt to the SCP processor on the 2nd timeout.
*
* 3) Interrupt + interrupt to the SCP processor (del3t) + reboot:
*    Raises an interrupt to the Arm core on the 1st timeout.
*    Raises an interrupt to the SCP processor on the 2nd timeout,
*    if that interrupt is configured.
*    Reboots on the 3rd timeout.
*
* The driver uses the hardware in mode 3 above so that the system can
* reboot in case of a hardware hang. The hardware is also configured not
* to generate the SCP interrupt, so the 2nd timeout is effectively
* ignored.
*
* The first timeout is effectively the watchdog pretimeout.
*/
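/*
* Illustrative timeline, following the pretimeout = timeout / 3 arithmetic
* used by gti_wdt_settimeout() below: with a 30 s timeout, the Arm core
* (pretimeout) interrupt fires at 10 s, the ignored SCP event at 20 s and
* the hardware reset at 30 s.
*/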
/* GTI CWD Watchdog (GTI_CWD_WDOG) Register */
#define GTI_CWD_WDOG(reg_offset) (0x8 * (reg_offset))
#define GTI_CWD_WDOG_MODE_INT_DEL3T_RST 0x3
#define GTI_CWD_WDOG_MODE_MASK GENMASK_ULL(1, 0)
#define GTI_CWD_WDOG_LEN_SHIFT 4
#define GTI_CWD_WDOG_LEN_MASK GENMASK_ULL(19, 4)
#define GTI_CWD_WDOG_CNT_SHIFT 20
#define GTI_CWD_WDOG_CNT_MASK GENMASK_ULL(43, 20)
/* GTI CWD Watchdog Interrupt (GTI_CWD_INT) Register */
#define GTI_CWD_INT 0x200
#define GTI_CWD_INT_PENDING_STATUS(bit) BIT_ULL(bit)
/* GTI CWD Watchdog Interrupt Enable Clear (GTI_CWD_INT_ENA_CLR) Register */
#define GTI_CWD_INT_ENA_CLR 0x210
#define GTI_CWD_INT_ENA_CLR_VAL(bit) BIT_ULL(bit)
/* GTI CWD Watchdog Interrupt Enable Set (GTI_CWD_INT_ENA_SET) Register */
#define GTI_CWD_INT_ENA_SET 0x218
#define GTI_CWD_INT_ENA_SET_VAL(bit) BIT_ULL(bit)
/* GTI CWD Watchdog Poke (GTI_CWD_POKE) Registers */
#define GTI_CWD_POKE(reg_offset) (0x10000 + 0x8 * (reg_offset))
#define GTI_CWD_POKE_VAL 1
struct gti_match_data {
u32 gti_num_timers;
};
static const struct gti_match_data match_data_octeontx2 = {
.gti_num_timers = 54,
};
static const struct gti_match_data match_data_cn10k = {
.gti_num_timers = 64,
};
struct gti_wdt_priv {
struct watchdog_device wdev;
void __iomem *base;
u32 clock_freq;
struct clk *sclk;
/* wdt_timer_idx used for timer to be used for system watchdog */
u32 wdt_timer_idx;
const struct gti_match_data *data;
};
static irqreturn_t gti_wdt_interrupt(int irq, void *data)
{
struct watchdog_device *wdev = data;
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
/* Clear Interrupt Pending Status */
writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
priv->base + GTI_CWD_INT);
watchdog_notify_pretimeout(wdev);
return IRQ_HANDLED;
}
static int gti_wdt_ping(struct watchdog_device *wdev)
{
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
writeq(GTI_CWD_POKE_VAL,
priv->base + GTI_CWD_POKE(priv->wdt_timer_idx));
return 0;
}
static int gti_wdt_start(struct watchdog_device *wdev)
{
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
u64 regval;
if (!wdev->pretimeout)
return -EINVAL;
set_bit(WDOG_HW_RUNNING, &wdev->status);
/* Clear any pending interrupt */
writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
priv->base + GTI_CWD_INT);
/* Enable Interrupt */
writeq(GTI_CWD_INT_ENA_SET_VAL(priv->wdt_timer_idx),
priv->base + GTI_CWD_INT_ENA_SET);
/* Set (Interrupt + SCP interrupt (DEL3T) + core domain reset) Mode */
regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
regval |= GTI_CWD_WDOG_MODE_INT_DEL3T_RST;
writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
return 0;
}
static int gti_wdt_stop(struct watchdog_device *wdev)
{
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
u64 regval;
/* Disable Interrupt */
writeq(GTI_CWD_INT_ENA_CLR_VAL(priv->wdt_timer_idx),
priv->base + GTI_CWD_INT_ENA_CLR);
/* Set GTI_CWD_WDOG.Mode = 0 to stop the timer */
regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
regval &= ~GTI_CWD_WDOG_MODE_MASK;
writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
return 0;
}
static int gti_wdt_settimeout(struct watchdog_device *wdev,
unsigned int timeout)
{
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
u64 timeout_wdog, regval;
/* Update new timeout */
wdev->timeout = timeout;
/* Pretimeout is 1/3 of timeout */
wdev->pretimeout = timeout / 3;
/* Get clock cycles from pretimeout */
timeout_wdog = (u64)priv->clock_freq * wdev->pretimeout;
/* Watchdog counts in 1024 cycle steps */
timeout_wdog = timeout_wdog >> 10;
/* GTI_CWD_WDOG.CNT: reload counter is 16-bit */
timeout_wdog = (timeout_wdog + 0xff) >> 8;
if (timeout_wdog >= 0x10000)
timeout_wdog = 0xffff;
/*
* GTI_CWD_WDOG.LEN is 24bit, lower 8-bits should be zero and
* upper 16-bits are same as GTI_CWD_WDOG.CNT
*/
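/*
* Worked example (illustrative, assuming clock_freq = 100 MHz and a 10 s
* pretimeout): 100000000 * 10 = 1000000000 cycles; >> 10 gives 976562
* steps of 1024 cycles; (976562 + 0xff) >> 8 gives 3815, which fits the
* 16-bit CNT field and also forms the upper 16 bits of the 24-bit LEN.
*/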
regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
regval &= GTI_CWD_WDOG_MODE_MASK;
regval |= (timeout_wdog << (GTI_CWD_WDOG_CNT_SHIFT + 8)) |
(timeout_wdog << GTI_CWD_WDOG_LEN_SHIFT);
writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
return 0;
}
static int gti_wdt_set_pretimeout(struct watchdog_device *wdev,
unsigned int timeout)
{
struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
struct watchdog_device *wdog_dev = &priv->wdev;
/* pretimeout is 1/3 of the timeout, so 3 * pretimeout must fit in max_timeout */
if (timeout * 3 <= wdog_dev->max_timeout)
return gti_wdt_settimeout(wdev, timeout * 3);
return -EINVAL;
}
static int gti_wdt_get_cntfrq(struct platform_device *pdev,
struct gti_wdt_priv *priv)
{
/*
* devm_clk_get_enabled() already registers devres cleanup that
* disables and unprepares the clock on detach, so no separate
* disable action is needed (adding one would unbalance the
* enable count).
*/
priv->sclk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->sclk))
return PTR_ERR(priv->sclk);
priv->clock_freq = clk_get_rate(priv->sclk);
if (!priv->clock_freq)
return -EINVAL;
return 0;
}
static const struct watchdog_info gti_wdt_ident = {
.identity = "Marvell GTI watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_PRETIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
};
static const struct watchdog_ops gti_wdt_ops = {
.owner = THIS_MODULE,
.start = gti_wdt_start,
.stop = gti_wdt_stop,
.ping = gti_wdt_ping,
.set_timeout = gti_wdt_settimeout,
.set_pretimeout = gti_wdt_set_pretimeout,
};
static int gti_wdt_probe(struct platform_device *pdev)
{
struct gti_wdt_priv *priv;
struct device *dev = &pdev->dev;
struct watchdog_device *wdog_dev;
u64 max_pretimeout;
u32 wdt_idx;
int irq;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->base),
"reg property not valid/found\n");
err = gti_wdt_get_cntfrq(pdev, priv);
if (err)
return dev_err_probe(&pdev->dev, err,
"GTI clock frequency not valid/found");
priv->data = of_device_get_match_data(dev);
/* default use last timer for watchdog */
priv->wdt_timer_idx = priv->data->gti_num_timers - 1;
err = of_property_read_u32(dev->of_node, "marvell,wdt-timer-index",
&wdt_idx);
if (!err) {
if (wdt_idx >= priv->data->gti_num_timers)
return dev_err_probe(&pdev->dev, -EINVAL,
"GTI wdog timer index not valid");
priv->wdt_timer_idx = wdt_idx;
}
wdog_dev = &priv->wdev;
wdog_dev->info = &gti_wdt_ident;
wdog_dev->ops = &gti_wdt_ops;
wdog_dev->parent = dev;
/*
* Watchdog counter is 24 bit where lower 8 bits are zeros
* This counter decrements every 1024 clock cycles.
*/
max_pretimeout = (GTI_CWD_WDOG_CNT_MASK >> GTI_CWD_WDOG_CNT_SHIFT);
max_pretimeout &= ~0xFFUL;
max_pretimeout = (max_pretimeout * 1024) / priv->clock_freq;
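/*
* Example (illustrative, assuming a 100 MHz clock): the 24-bit count with
* the low byte cleared is 0xFFFF00 = 16776960 steps of 1024 cycles, i.e.
* 16776960 * 1024 / 100000000 = 171 s max pretimeout and therefore a max
* timeout of 513 s.
*/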
wdog_dev->pretimeout = max_pretimeout;
/* Maximum timeout is 3 times the pretimeout */
wdog_dev->max_timeout = max_pretimeout * 3;
/* Minimum first timeout (pretimeout) is 1 s, so set min_timeout to 3 s */
wdog_dev->min_timeout = 3;
wdog_dev->timeout = wdog_dev->pretimeout;
watchdog_set_drvdata(wdog_dev, priv);
platform_set_drvdata(pdev, priv);
gti_wdt_settimeout(wdog_dev, wdog_dev->timeout);
watchdog_stop_on_reboot(wdog_dev);
watchdog_stop_on_unregister(wdog_dev);
err = devm_watchdog_register_device(dev, wdog_dev);
if (err)
return err;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "IRQ resource not found\n");
err = devm_request_irq(dev, irq, gti_wdt_interrupt, 0,
pdev->name, &priv->wdev);
if (err)
return dev_err_probe(dev, err, "Failed to register interrupt handler\n");
dev_info(dev, "Watchdog enabled (timeout=%d sec)\n", wdog_dev->timeout);
return 0;
}
static const struct of_device_id gti_wdt_of_match[] = {
{ .compatible = "marvell,cn9670-wdt", .data = &match_data_octeontx2},
{ .compatible = "marvell,cn10624-wdt", .data = &match_data_cn10k},
{ },
};
MODULE_DEVICE_TABLE(of, gti_wdt_of_match);
static struct platform_driver gti_wdt_driver = {
.driver = {
.name = "gti-wdt",
.of_match_table = gti_wdt_of_match,
},
.probe = gti_wdt_probe,
};
module_platform_driver(gti_wdt_driver);
MODULE_AUTHOR("Bharat Bhushan <[email protected]>");
MODULE_DESCRIPTION("Marvell GTI watchdog driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/marvell_gti_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sma cpu5 watchdog driver
*
* Copyright (C) 2003 Heiko Ronsdorf <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* adjustable parameters */
static int verbose;
static int port = 0x91;
static int ticks = 10000;
static DEFINE_SPINLOCK(cpu5wdt_lock);
#define PFX "cpu5wdt: "
#define CPU5WDT_EXTENT 0x0A
#define CPU5WDT_STATUS_REG 0x00
#define CPU5WDT_TIME_A_REG 0x02
#define CPU5WDT_TIME_B_REG 0x03
#define CPU5WDT_MODE_REG 0x04
#define CPU5WDT_TRIGGER_REG 0x07
#define CPU5WDT_ENABLE_REG 0x08
#define CPU5WDT_RESET_REG 0x09
#define CPU5WDT_INTERVAL (HZ/10+1)
/* some device data */
static struct {
struct completion stop;
int running;
struct timer_list timer;
int queue;
int default_ticks;
unsigned long inuse;
} cpu5wdt_device;
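/*
* Illustrative timing (assuming HZ = 100): CPU5WDT_INTERVAL is
* HZ/10 + 1 = 11 jiffies, i.e. the card is re-triggered roughly every
* 110 ms, so the default of 10000 ticks keeps it alive for about
* 10000 * 0.11 s, roughly 18 minutes, after the last keepalive.
*/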
/* generic helper functions */
static void cpu5wdt_trigger(struct timer_list *unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
if (cpu5wdt_device.running)
ticks--;
spin_lock(&cpu5wdt_lock);
/* keep watchdog alive */
outb(1, port + CPU5WDT_TRIGGER_REG);
/* requeue?? */
if (cpu5wdt_device.queue && ticks)
mod_timer(&cpu5wdt_device.timer, jiffies + CPU5WDT_INTERVAL);
else {
/* ticks doesn't matter anyway */
complete(&cpu5wdt_device.stop);
}
spin_unlock(&cpu5wdt_lock);
}
static void cpu5wdt_reset(void)
{
ticks = cpu5wdt_device.default_ticks;
if (verbose)
pr_debug("reset (%i ticks)\n", (int) ticks);
}
static void cpu5wdt_start(void)
{
unsigned long flags;
spin_lock_irqsave(&cpu5wdt_lock, flags);
if (!cpu5wdt_device.queue) {
cpu5wdt_device.queue = 1;
outb(0, port + CPU5WDT_TIME_A_REG);
outb(0, port + CPU5WDT_TIME_B_REG);
outb(1, port + CPU5WDT_MODE_REG);
outb(0, port + CPU5WDT_RESET_REG);
outb(0, port + CPU5WDT_ENABLE_REG);
mod_timer(&cpu5wdt_device.timer, jiffies + CPU5WDT_INTERVAL);
}
/* if process dies, counter is not decremented */
cpu5wdt_device.running++;
spin_unlock_irqrestore(&cpu5wdt_lock, flags);
}
static int cpu5wdt_stop(void)
{
unsigned long flags;
spin_lock_irqsave(&cpu5wdt_lock, flags);
if (cpu5wdt_device.running)
cpu5wdt_device.running = 0;
ticks = cpu5wdt_device.default_ticks;
spin_unlock_irqrestore(&cpu5wdt_lock, flags);
if (verbose)
pr_crit("stop not possible\n");
return -EIO;
}
/* filesystem operations */
static int cpu5wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &cpu5wdt_device.inuse))
return -EBUSY;
return stream_open(inode, file);
}
static int cpu5wdt_release(struct inode *inode, struct file *file)
{
clear_bit(0, &cpu5wdt_device.inuse);
return 0;
}
static long cpu5wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int value;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET,
.identity = "CPU5 WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
value = inb(port + CPU5WDT_STATUS_REG);
value = (value >> 2) & 1;
return put_user(value, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
if (get_user(value, p))
return -EFAULT;
if (value & WDIOS_ENABLECARD)
cpu5wdt_start();
if (value & WDIOS_DISABLECARD)
cpu5wdt_stop();
break;
case WDIOC_KEEPALIVE:
cpu5wdt_reset();
break;
default:
return -ENOTTY;
}
return 0;
}
static ssize_t cpu5wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (!count)
return -EIO;
cpu5wdt_reset();
return count;
}
static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = cpu5wdt_open,
.write = cpu5wdt_write,
.release = cpu5wdt_release,
};
static struct miscdevice cpu5wdt_misc = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &cpu5wdt_fops,
};
/* init/exit function */
static int cpu5wdt_init(void)
{
unsigned int val;
int err;
if (verbose)
pr_debug("port=0x%x, verbose=%i\n", port, verbose);
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
pr_err("request_region failed\n");
err = -EBUSY;
goto no_port;
}
/* watchdog reboot? */
val = inb(port + CPU5WDT_STATUS_REG);
val = (val >> 2) & 1;
if (!val)
pr_info("sorry, was my fault\n");
err = misc_register(&cpu5wdt_misc);
if (err < 0) {
pr_err("misc_register failed\n");
goto no_misc;
}
pr_info("init success\n");
return 0;
no_misc:
release_region(port, CPU5WDT_EXTENT);
no_port:
return err;
}
static int cpu5wdt_init_module(void)
{
return cpu5wdt_init();
}
static void cpu5wdt_exit(void)
{
if (cpu5wdt_device.queue) {
cpu5wdt_device.queue = 0;
wait_for_completion(&cpu5wdt_device.stop);
del_timer(&cpu5wdt_device.timer);
}
misc_deregister(&cpu5wdt_misc);
release_region(port, CPU5WDT_EXTENT);
}
static void cpu5wdt_exit_module(void)
{
cpu5wdt_exit();
}
/* module entry points */
module_init(cpu5wdt_init_module);
module_exit(cpu5wdt_exit_module);
MODULE_AUTHOR("Heiko Ronsdorf <[email protected]>");
MODULE_DESCRIPTION("sma cpu5 watchdog driver");
MODULE_LICENSE("GPL");
module_param_hw(port, int, ioport, 0);
MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91");
module_param(verbose, int, 0);
MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
module_param(ticks, int, 0);
MODULE_PARM_DESC(ticks, "count down ticks, default is 10000");
| linux-master | drivers/watchdog/cpu5wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP LPC18xx Watchdog Timer (WDT)
*
* Copyright (c) 2015 Ariel D'Alessandro <[email protected]>
*
* Notes
* -----
* The Watchdog consists of a fixed divide-by-4 clock pre-scaler and a 24-bit
* counter which decrements on every clock cycle.
*/
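/*
* Worked example (illustrative, assuming a 12 MHz wdtclk): the counter
* decrements at 12 MHz / 4 = 3 MHz, so the 24-bit maximum 0xffffff gives
* max_timeout = (16777215 * 4) / 12000000 = 5 s, and the 30 s default
* below would be clamped to 5 s at probe time.
*/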
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
/* Registers */
#define LPC18XX_WDT_MOD 0x00
#define LPC18XX_WDT_MOD_WDEN BIT(0)
#define LPC18XX_WDT_MOD_WDRESET BIT(1)
#define LPC18XX_WDT_TC 0x04
#define LPC18XX_WDT_TC_MIN 0xff
#define LPC18XX_WDT_TC_MAX 0xffffff
#define LPC18XX_WDT_FEED 0x08
#define LPC18XX_WDT_FEED_MAGIC1 0xaa
#define LPC18XX_WDT_FEED_MAGIC2 0x55
#define LPC18XX_WDT_TV 0x0c
/* Clock pre-scaler */
#define LPC18XX_WDT_CLK_DIV 4
/* Timeout values in seconds */
#define LPC18XX_WDT_DEF_TIMEOUT 30U
static int heartbeat;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds (default="
__MODULE_STRING(LPC18XX_WDT_DEF_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct lpc18xx_wdt_dev {
struct watchdog_device wdt_dev;
struct clk *reg_clk;
struct clk *wdt_clk;
unsigned long clk_rate;
void __iomem *base;
struct timer_list timer;
spinlock_t lock;
};
static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
/*
* An abort condition will occur if an interrupt happens during the feed
* sequence.
*/
spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
return 0;
}
static void lpc18xx_wdt_timer_feed(struct timer_list *t)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer);
struct watchdog_device *wdt_dev = &lpc18xx_wdt->wdt_dev;
lpc18xx_wdt_feed(wdt_dev);
/* Use safe value (1/2 of real timeout) */
mod_timer(&lpc18xx_wdt->timer, jiffies +
msecs_to_jiffies((wdt_dev->timeout * MSEC_PER_SEC) / 2));
}
/*
* Since LPC18xx Watchdog cannot be disabled in hardware, we must keep feeding
* it with a timer until userspace watchdog software takes over.
*/
static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
lpc18xx_wdt_timer_feed(&lpc18xx_wdt->timer);
return 0;
}
static void __lpc18xx_wdt_set_timeout(struct lpc18xx_wdt_dev *lpc18xx_wdt)
{
unsigned int val;
val = DIV_ROUND_UP(lpc18xx_wdt->wdt_dev.timeout * lpc18xx_wdt->clk_rate,
LPC18XX_WDT_CLK_DIV);
writel(val, lpc18xx_wdt->base + LPC18XX_WDT_TC);
}
static int lpc18xx_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int new_timeout)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
lpc18xx_wdt->wdt_dev.timeout = new_timeout;
__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
return 0;
}
static unsigned int lpc18xx_wdt_get_timeleft(struct watchdog_device *wdt_dev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
unsigned int val;
val = readl(lpc18xx_wdt->base + LPC18XX_WDT_TV);
return (val * LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
}
static int lpc18xx_wdt_start(struct watchdog_device *wdt_dev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
unsigned int val;
if (timer_pending(&lpc18xx_wdt->timer))
del_timer(&lpc18xx_wdt->timer);
val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
val |= LPC18XX_WDT_MOD_WDEN;
val |= LPC18XX_WDT_MOD_WDRESET;
writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
/*
* Setting the WDEN bit in the WDMOD register is not sufficient to
* enable the Watchdog. A valid feed sequence must be completed after
* setting WDEN before the Watchdog is capable of generating a reset.
*/
lpc18xx_wdt_feed(wdt_dev);
return 0;
}
static int lpc18xx_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
int val;
/*
* Incorrect feed sequence causes immediate watchdog reset if enabled.
*/
spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
val |= LPC18XX_WDT_MOD_WDEN;
val |= LPC18XX_WDT_MOD_WDRESET;
writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
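/*
* The MAGIC1/MAGIC2 pair below is a valid feed that arms the watchdog;
* the two MAGIC1 writes that follow are a deliberately invalid feed
* sequence, forcing the immediate reset described above.
*/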
writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
return 0;
}
static const struct watchdog_info lpc18xx_wdt_info = {
.identity = "NXP LPC18xx Watchdog",
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops lpc18xx_wdt_ops = {
.owner = THIS_MODULE,
.start = lpc18xx_wdt_start,
.stop = lpc18xx_wdt_stop,
.ping = lpc18xx_wdt_feed,
.set_timeout = lpc18xx_wdt_set_timeout,
.get_timeleft = lpc18xx_wdt_get_timeleft,
.restart = lpc18xx_wdt_restart,
};
static int lpc18xx_wdt_probe(struct platform_device *pdev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt;
struct device *dev = &pdev->dev;
lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
if (!lpc18xx_wdt)
return -ENOMEM;
lpc18xx_wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_wdt->base))
return PTR_ERR(lpc18xx_wdt->base);
lpc18xx_wdt->reg_clk = devm_clk_get_enabled(dev, "reg");
if (IS_ERR(lpc18xx_wdt->reg_clk)) {
dev_err(dev, "failed to get the reg clock\n");
return PTR_ERR(lpc18xx_wdt->reg_clk);
}
lpc18xx_wdt->wdt_clk = devm_clk_get_enabled(dev, "wdtclk");
if (IS_ERR(lpc18xx_wdt->wdt_clk)) {
dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(lpc18xx_wdt->wdt_clk);
}
/* We use the clock rate to calculate timeouts */
lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
if (lpc18xx_wdt->clk_rate == 0) {
dev_err(dev, "failed to get clock rate\n");
return -EINVAL;
}
lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
lpc18xx_wdt->wdt_dev.ops = &lpc18xx_wdt_ops;
lpc18xx_wdt->wdt_dev.min_timeout = DIV_ROUND_UP(LPC18XX_WDT_TC_MIN *
LPC18XX_WDT_CLK_DIV, lpc18xx_wdt->clk_rate);
lpc18xx_wdt->wdt_dev.max_timeout = (LPC18XX_WDT_TC_MAX *
LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
lpc18xx_wdt->wdt_dev.timeout = min(lpc18xx_wdt->wdt_dev.max_timeout,
LPC18XX_WDT_DEF_TIMEOUT);
spin_lock_init(&lpc18xx_wdt->lock);
lpc18xx_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
timer_setup(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, 0);
watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&lpc18xx_wdt->wdt_dev, 128);
platform_set_drvdata(pdev, lpc18xx_wdt);
watchdog_stop_on_reboot(&lpc18xx_wdt->wdt_dev);
return devm_watchdog_register_device(dev, &lpc18xx_wdt->wdt_dev);
}
static void lpc18xx_wdt_remove(struct platform_device *pdev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
del_timer_sync(&lpc18xx_wdt->timer);
}
static const struct of_device_id lpc18xx_wdt_match[] = {
{ .compatible = "nxp,lpc1850-wwdt" },
{}
};
MODULE_DEVICE_TABLE(of, lpc18xx_wdt_match);
static struct platform_driver lpc18xx_wdt_driver = {
.driver = {
.name = "lpc18xx-wdt",
.of_match_table = lpc18xx_wdt_match,
},
.probe = lpc18xx_wdt_probe,
.remove_new = lpc18xx_wdt_remove,
};
module_platform_driver(lpc18xx_wdt_driver);
MODULE_AUTHOR("Ariel D'Alessandro <[email protected]>");
MODULE_DESCRIPTION("NXP LPC18xx Watchdog Timer Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/lpc18xx_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/N1 Watchdog timer.
* This is a 12-bit timer driven from a (62.5/16384) MHz clock. It can't even
* cope with 2 seconds.
*
* Copyright 2018 Renesas Electronics Europe Ltd.
*
* Derived from Ralink RT288x watchdog timer.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
#define DEFAULT_TIMEOUT 60
#define RZN1_WDT_RETRIGGER 0x0
#define RZN1_WDT_RETRIGGER_RELOAD_VAL 0
#define RZN1_WDT_RETRIGGER_RELOAD_VAL_MASK 0xfff
#define RZN1_WDT_RETRIGGER_PRESCALE BIT(12)
#define RZN1_WDT_RETRIGGER_ENABLE BIT(13)
#define RZN1_WDT_RETRIGGER_WDSI (0x2 << 14)
#define RZN1_WDT_PRESCALER 16384
#define RZN1_WDT_MAX 4095
struct rzn1_watchdog {
struct watchdog_device wdtdev;
void __iomem *base;
unsigned long clk_rate_khz;
};
static inline uint32_t max_heart_beat_ms(unsigned long clk_rate_khz)
{
return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
}
static inline uint32_t compute_reload_value(uint32_t tick_ms,
unsigned long clk_rate_khz)
{
return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER;
}
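/*
* Example (illustrative): with the 62.5 MHz clock mentioned in the header
* (clk_rate_khz = 62500), max_heart_beat_ms() yields
* 4095 * 16384 / 62500 = 1073 ms, which is why the probe routine below
* caps max_hw_heartbeat_ms at 1000 ms.
*/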
static int rzn1_wdt_ping(struct watchdog_device *w)
{
struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
/* Any value retriggers the watchdog */
writel(0, wdt->base + RZN1_WDT_RETRIGGER);
return 0;
}
static int rzn1_wdt_start(struct watchdog_device *w)
{
struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
u32 val;
/*
* The hardware allows you to write to this reg only once.
* Since this includes the reload value, there is no way to change the
* timeout once started. Also note that the WDT clock is half the bus
* fabric clock rate, so if the bus fabric clock rate is changed after
* the WDT is started, the WDT interval will be wrong.
*/
val = RZN1_WDT_RETRIGGER_WDSI;
val |= RZN1_WDT_RETRIGGER_ENABLE;
val |= RZN1_WDT_RETRIGGER_PRESCALE;
val |= compute_reload_value(w->max_hw_heartbeat_ms, wdt->clk_rate_khz);
writel(val, wdt->base + RZN1_WDT_RETRIGGER);
return 0;
}
static irqreturn_t rzn1_wdt_irq(int irq, void *_wdt)
{
pr_crit("RZN1 Watchdog. Initiating system reboot\n");
emergency_restart();
return IRQ_HANDLED;
}
static struct watchdog_info rzn1_wdt_info = {
.identity = "RZ/N1 Watchdog",
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
};
static const struct watchdog_ops rzn1_wdt_ops = {
.owner = THIS_MODULE,
.start = rzn1_wdt_start,
.ping = rzn1_wdt_ping,
};
static int rzn1_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rzn1_watchdog *wdt;
struct device_node *np = dev->of_node;
struct clk *clk;
unsigned long clk_rate;
int ret;
int irq;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, rzn1_wdt_irq, 0,
np->name, wdt);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
return ret;
}
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get the clock\n");
return PTR_ERR(clk);
}
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
dev_err(dev, "failed to get the clock rate\n");
return -EINVAL;
}
wdt->clk_rate_khz = clk_rate / 1000;
wdt->wdtdev.info = &rzn1_wdt_info;
wdt->wdtdev.ops = &rzn1_wdt_ops;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
wdt->wdtdev.parent = dev;
/*
* The period of the watchdog cannot be changed once set
* and is limited to a very short period.
* Configure it for a 1s period once and for all, and
* rely on the heart-beat provided by the watchdog core
* to make this usable by the user-space.
*/
wdt->wdtdev.max_hw_heartbeat_ms = max_heart_beat_ms(wdt->clk_rate_khz);
if (wdt->wdtdev.max_hw_heartbeat_ms > 1000)
wdt->wdtdev.max_hw_heartbeat_ms = 1000;
wdt->wdtdev.timeout = DEFAULT_TIMEOUT;
ret = watchdog_init_timeout(&wdt->wdtdev, 0, dev);
if (ret)
return ret;
watchdog_set_drvdata(&wdt->wdtdev, wdt);
return devm_watchdog_register_device(dev, &wdt->wdtdev);
}
static const struct of_device_id rzn1_wdt_match[] = {
{ .compatible = "renesas,rzn1-wdt" },
{},
};
MODULE_DEVICE_TABLE(of, rzn1_wdt_match);
static struct platform_driver rzn1_wdt_driver = {
.probe = rzn1_wdt_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rzn1_wdt_match,
},
};
module_platform_driver(rzn1_wdt_driver);
MODULE_DESCRIPTION("Renesas RZ/N1 hardware watchdog");
MODULE_AUTHOR("Phil Edworthy <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/rzn1_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* intel TCO vendor specific watchdog driver support
*
* (c) Copyright 2006-2009 Wim Van Sebroeck <[email protected]>.
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Module and version information */
#define DRV_NAME "iTCO_vendor_support"
#define DRV_VERSION "1.04"
/* Includes */
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/ioport.h> /* For io-port access */
#include <linux/io.h> /* For inb/outb/... */
#include "iTCO_vendor.h"
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
/* SuperMicro Pentium 4 / Xeon 4 / EM64T Era Systems - no longer supported */
#define SUPERMICRO_NEW_BOARD 2
/* Broken BIOS */
#define BROKEN_BIOS 911
int iTCO_vendorsupport;
EXPORT_SYMBOL(iTCO_vendorsupport);
module_param_named(vendorsupport, iTCO_vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
"0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS");
/*
* Vendor Specific Support
*/
/*
* Vendor Support: 1
* Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE
* iTCO chipset: ICH2
*
* Code contributed by: R. Seretny <[email protected]>
* Documentation obtained by R. Seretny from SuperMicro Technical Support
*
* To enable Watchdog function:
* BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes
* This setting enables SMI to clear the watchdog expired flag.
* If BIOS or CPU fail which may cause SMI hang, then system will
* reboot. When application starts to use watchdog function,
* application has to take over the control from SMI.
*
* For P3TSSE, J36 jumper needs to be removed to enable the Watchdog
* function.
*
* Note: The system will reboot when Expire Flag is set TWICE.
* So, if the watchdog timer is 20 seconds, then the maximum hang
* time is about 40 seconds, and the minimum hang time is about
* 20.6 seconds.
*/
static void supermicro_old_pre_start(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(smires->start);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, smires->start); /* Needed to activate watchdog */
}
static void supermicro_old_pre_stop(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(smires->start);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
outl(val32, smires->start); /* Needed to deactivate watchdog */
}
/*
* Vendor Support: 911
* Board: Some Intel ICHx based motherboards
* iTCO chipset: ICH7+
*
* Some Intel motherboards have a broken BIOS implementation: i.e.
* the SMI handler clears the TIMEOUT bit in the TCO1_STS register
* and does not reload the timer. Thus the TCO watchdog does not reboot
* the system.
*
* These are the conclusions of Andriy Gapon <[email protected]> after
* debugging: the SMI handler is quite simple - it tests value in
* TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
* the handler goes into an infinite loop, apparently to allow the
* second timeout and reboot. Otherwise it simply clears TIMEOUT bit
* in TCO1_STS and that's it.
* So the logic seems to be reversed, because it is hard to see how
* TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
* (other than a transitional effect).
*
* The only fix found to get the motherboard(s) to reboot is to put
* the glb_smi_en bit to 0. This is a dirty hack that bypasses the
* broken code by disabling Global SMI.
*
* WARNING: globally disabling SMI could possibly lead to dramatic
* problems, especially on laptops! I.e. various ACPI things where
* SMI is used for communication between OS and firmware.
*
* Don't use this fix if you don't need to!!!
*/
static void broken_bios_start(struct resource *smires)
{
unsigned long val32;
val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
val32 &= 0xffffdffe;
outl(val32, smires->start);
}
static void broken_bios_stop(struct resource *smires)
{
unsigned long val32;
val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
val32 |= 0x00002001;
outl(val32, smires->start);
}
/*
* Generic Support Functions
*/
void iTCO_vendor_pre_start(struct resource *smires,
unsigned int heartbeat)
{
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_start(smires);
break;
case BROKEN_BIOS:
broken_bios_start(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
void iTCO_vendor_pre_stop(struct resource *smires)
{
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_stop(smires);
break;
case BROKEN_BIOS:
broken_bios_stop(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
int iTCO_vendor_check_noreboot_on(void)
{
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
return 0;
default:
return 1;
}
}
EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
static int __init iTCO_vendor_init_module(void)
{
if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) {
pr_warn("Option vendorsupport=%d is no longer supported, "
"please use the w83627hf_wdt driver instead\n",
SUPERMICRO_NEW_BOARD);
return -EINVAL;
}
pr_info("vendor-support=%d\n", iTCO_vendorsupport);
return 0;
}
static void __exit iTCO_vendor_exit_module(void)
{
pr_info("Module Unloaded\n");
}
module_init(iTCO_vendor_init_module);
module_exit(iTCO_vendor_exit_module);
MODULE_AUTHOR("Wim Van Sebroeck <[email protected]>, "
"R. Seretny <[email protected]>");
MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/iTCO_vendor_support.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/watchdog/m54xx_wdt.c
*
* Watchdog driver for ColdFire MCF547x & MCF548x processors
* Copyright 2010 (c) Philippe De Muyter <[email protected]>
*
* Adapted from the IXP4xx watchdog driver, which carries these notices:
*
* Author: Deepak Saxena <[email protected]>
*
* Copyright 2004 (c) MontaVista, Software, Inc.
* Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <[email protected]>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/m54xxsim.h>
#include <asm/m54xxgpt.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */
static unsigned long wdt_status;
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
static void wdt_enable(void)
{
unsigned int gms0;
/* preserve GPIO usage, if any */
gms0 = __raw_readl(MCF_GPT_GMS0);
if (gms0 & MCF_GPT_GMS_TMS_GPIO)
gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
| MCF_GPT_GMS_OD);
else
gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
__raw_writel(gms0, MCF_GPT_GMS0);
__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
MCF_GPT_GCIR_CNT(0xffff), MCF_GPT_GCIR0);
gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
__raw_writel(gms0, MCF_GPT_GMS0);
}
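/*
* Illustrative arithmetic (assuming MCF_BUSCLK = 100 MHz): the prescaler
* becomes heartbeat * (100000000 / 0xffff) = heartbeat * 1525, and with
* the counter fixed at 0xffff the period is about
* 1525 * 65535 / 100000000 ~= 1 s per heartbeat unit, i.e. ~30 s for the
* default heartbeat of 30.
*/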
static void wdt_disable(void)
{
unsigned int gms0;
/* disable watchdog */
gms0 = __raw_readl(MCF_GPT_GMS0);
gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
__raw_writel(gms0, MCF_GPT_GMS0);
}
static void wdt_keepalive(void)
{
unsigned int gms0;
gms0 = __raw_readl(MCF_GPT_GMS0);
gms0 |= MCF_GPT_GMS_OCPW(0xA5);
__raw_writel(gms0, MCF_GPT_GMS0);
}
static int m54xx_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(WDT_IN_USE, &wdt_status))
return -EBUSY;
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
wdt_enable();
return stream_open(inode, file);
}
static ssize_t m54xx_wdt_write(struct file *file, const char *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
set_bit(WDT_OK_TO_CLOSE, &wdt_status);
}
}
wdt_keepalive();
}
return len;
}
static const struct watchdog_info ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING,
.identity = "Coldfire M54xx Watchdog",
};
static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY;
int time;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user((struct watchdog_info *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
ret = put_user(0, (int *)arg);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, (int *)arg);
break;
case WDIOC_KEEPALIVE:
wdt_keepalive();
ret = 0;
break;
case WDIOC_SETTIMEOUT:
ret = get_user(time, (int *)arg);
if (ret)
break;
if (time <= 0 || time > 30) {
ret = -EINVAL;
break;
}
heartbeat = time;
wdt_enable();
fallthrough;
case WDIOC_GETTIMEOUT:
ret = put_user(heartbeat, (int *)arg);
break;
}
return ret;
}
static int m54xx_wdt_release(struct inode *inode, struct file *file)
{
if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
wdt_disable();
else {
pr_crit("Device closed unexpectedly - timer will not stop\n");
wdt_keepalive();
}
clear_bit(WDT_IN_USE, &wdt_status);
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
return 0;
}
static const struct file_operations m54xx_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = m54xx_wdt_write,
.unlocked_ioctl = m54xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = m54xx_wdt_open,
.release = m54xx_wdt_release,
};
static struct miscdevice m54xx_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &m54xx_wdt_fops,
};
static int __init m54xx_wdt_init(void)
{
if (!request_mem_region(MCF_GPT_GCIR0, 4, "Coldfire M54xx Watchdog")) {
pr_warn("I/O region busy\n");
return -EBUSY;
}
pr_info("driver is loaded\n");
return misc_register(&m54xx_wdt_miscdev);
}
static void __exit m54xx_wdt_exit(void)
{
misc_deregister(&m54xx_wdt_miscdev);
release_mem_region(MCF_GPT_GCIR0, 4);
}
module_init(m54xx_wdt_init);
module_exit(m54xx_wdt_exit);
MODULE_AUTHOR("Philippe De Muyter <[email protected]>");
MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/m54xx_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PC Watchdog Driver
* by Ken Hollis ([email protected])
*
* Permission granted from Simon Machell ([email protected])
* Written for the Linux Kernel, and GPLed by Ken Hollis
*
* 960107 Added request_region routines, modulized the whole thing.
* 960108 Fixed end-of-file pointer (Thanks to Dan Hollis), added
* WD_TIMEOUT define.
* 960216 Added eof marker on the file, and changed verbose messages.
* 960716 Made functional and cosmetic changes to the source for
* inclusion in Linux 2.0.x kernels, thanks to Alan Cox.
* 960717 Removed read/seek routines, replaced with ioctl. Also, added
* check_region command due to Alan's suggestion.
* 960821 Made changes to compile in newer 2.0.x kernels. Added
* "cold reboot sense" entry.
* 960825 Made a few changes to code, deleted some defines and made
* typedefs to replace them. Made heartbeat reset only available
* via ioctl, and removed the write routine.
* 960828 Added new items for PC Watchdog Rev.C card.
* 960829 Changed around all of the IOCTLs, added new features,
* added watchdog disable/re-enable routines. Added firmware
* version reporting. Added read routine for temperature.
* Removed some extra defines, added an autodetect Revision
* routine.
* 961006 Revised some documentation, fixed some cosmetic bugs. Made
* drivers to panic the system if it's overheating at bootup.
* 961118 Changed some verbiage on some of the output, tidied up
* code bits, and added compatibility to 2.1.x.
* 970912 Enabled board on open and disable on close.
* 971107 Took account of recent VFS changes (broke read).
* 971210 Disable board on initialisation in case board already ticking.
* 971222 Changed open/close for temperature handling
* Michael Meskes <[email protected]>.
* 980112 Used minor numbers from include/linux/miscdevice.h
* 990403 Clear reset status after reading control status register in
* pcwd_showprevstate(). [Marc Boucher <[email protected]>]
* 990605 Made changes to code to support Firmware 1.22a, added
* fairly useless proc entry.
* 990610 removed said useless proc code for the merge <alan>
* 000403 Removed last traces of proc code. <davej>
* 011214 Added nowayout module option to override
* CONFIG_WATCHDOG_NOWAYOUT <[email protected]>
* Added timeout module option to override default
*/
/*
* A bells and whistles driver is available from http://www.pcwd.de/
* More info available at http://www.berkprod.com/ or
* http://www.pcwatchdog.com/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
#include <linux/timer.h> /* For timer related operations */
#include <linux/jiffies.h> /* For jiffies stuff */
#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/reboot.h> /* For kernel_power_off() */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
#include <linux/isa.h> /* For isa devices */
#include <linux/ioport.h> /* For io-port access */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
/* Module and version information */
#define WATCHDOG_VERSION "1.20"
#define WATCHDOG_DATE "18 Feb 2007"
#define WATCHDOG_DRIVER_NAME "ISA-PC Watchdog"
#define WATCHDOG_NAME "pcwd"
#define DRIVER_VERSION WATCHDOG_DRIVER_NAME " driver, v" WATCHDOG_VERSION "\n"
/*
* It should be noted that PCWD_REVISION_B was removed because A and B
* are essentially the same types of card, with the exception that B
* has temperature reporting. Since I didn't receive a Rev.B card,
* the Rev.B card is not supported. (It's a good thing too, as they
* are no longer in production.)
*/
#define PCWD_REVISION_A 1
#define PCWD_REVISION_C 2
/*
* These are the auto-probe addresses available.
*
* Revision A only uses ports 0x270 and 0x370. Revision C introduced 0x350.
* Revision A has an address range of 2 addresses, while Revision C has 4.
*/
#define PCWD_ISA_NR_CARDS 3
static int pcwd_ioports[] = { 0x270, 0x350, 0x370, 0x000 };
/*
* These are the defines that describe the control status bits for the
* ISA-PC Watchdog card.
*/
/* Port 1 : Control Status #1 for the PC Watchdog card, revision A. */
#define WD_WDRST 0x01 /* Previously reset state */
#define WD_T110 0x02 /* Temperature overheat sense */
#define WD_HRTBT 0x04 /* Heartbeat sense */
#define WD_RLY2 0x08 /* External relay triggered */
#define WD_SRLY2 0x80 /* Software external relay triggered */
/* Port 1 : Control Status #1 for the PC Watchdog card, revision C. */
#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */
#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */
#define WD_REVC_TTRP 0x04 /* Temperature Trip status */
#define WD_REVC_RL2A 0x08 /* Relay 2 activated by
on-board processor */
#define WD_REVC_RL1A 0x10 /* Relay 1 active */
#define WD_REVC_R2DS 0x40 /* Relay 2 disable */
#define WD_REVC_RLY2 0x80 /* Relay 2 activated? */
/* Port 2 : Control Status #2 */
#define WD_WDIS 0x10 /* Watchdog Disabled */
#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */
#define WD_SSEL 0x40 /* Watchdog Switch Select
(1:SW1 <-> 0:SW2) */
#define WD_WCMD 0x80 /* Watchdog Command Mode */
/* max. time we give an ISA watchdog card to process a command */
/* 500ms for each 4 bit response (according to spec.) */
#define ISA_COMMAND_TIMEOUT 1000
/* Watchdog's internal commands */
#define CMD_ISA_IDLE 0x00
#define CMD_ISA_VERSION_INTEGER 0x01
#define CMD_ISA_VERSION_TENTH 0x02
#define CMD_ISA_VERSION_HUNDRETH 0x03
#define CMD_ISA_VERSION_MINOR 0x04
#define CMD_ISA_SWITCH_SETTINGS 0x05
#define CMD_ISA_RESET_PC 0x06
#define CMD_ISA_ARM_0 0x07
#define CMD_ISA_ARM_30 0x08
#define CMD_ISA_ARM_60 0x09
#define CMD_ISA_DELAY_TIME_2SECS 0x0A
#define CMD_ISA_DELAY_TIME_4SECS 0x0B
#define CMD_ISA_DELAY_TIME_8SECS 0x0C
#define CMD_ISA_RESET_RELAYS 0x0D
/* Watchdog's Dip Switch heartbeat values */
static const int heartbeat_tbl[] = {
20, /* OFF-OFF-OFF = 20 Sec */
40, /* OFF-OFF-ON = 40 Sec */
60, /* OFF-ON-OFF = 1 Min */
300, /* OFF-ON-ON = 5 Min */
600, /* ON-OFF-OFF = 10 Min */
1800, /* ON-OFF-ON = 30 Min */
3600, /* ON-ON-OFF = 1 Hour */
7200, /* ON-ON-ON = 2 hour */
};
/*
* We use a kernel timer to ping the watchdog every ~500 ms. The
* card's internal heartbeat is reprogrammed to 2 seconds in
* pcwd_show_card_info() below.
*/
#define WDT_INTERVAL (HZ/2+1)
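/*
* Illustrative interplay of the timescales (assuming HZ = 250): the
* kernel timer fires every HZ/2 + 1 = 126 jiffies ~= 0.5 s and
* re-triggers the card only while jiffies is still before
* next_heartbeat, so userspace must ping within the configured
* heartbeat while the card's own 2 second internal delay covers the
* gap between timer runs.
*/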
/* We can only use 1 card due to the /dev/watchdog restriction */
static int cards_found;
/* internal variables */
static unsigned long open_allowed;
static char expect_close;
static int temp_panic;
/* this is private data for each ISA-PC watchdog card */
static struct {
char fw_ver_str[6]; /* The cards firmware version */
int revision; /* The card's revision */
int supports_temp; /* Whether or not the card has
a temperature device */
int command_mode; /* Whether or not the card is in
command mode */
int boot_status; /* The card's boot status */
int io_addr; /* The cards I/O address */
spinlock_t io_lock; /* the lock for io operations */
struct timer_list timer; /* The timer that pings the watchdog */
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
} pcwd_private;
/* module parameters */
#define QUIET 0 /* Default */
#define VERBOSE 1 /* Verbose */
#define DEBUG 2 /* print fancy stuff too */
static int debug = QUIET;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug,
"Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)");
/* default heartbeat = delay-time from dip-switches */
#define WATCHDOG_HEARTBEAT 0
static int heartbeat = WATCHDOG_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
"(2 <= heartbeat <= 7200 or 0=delay-time from dip-switches, default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Internal functions
*/
static int send_isa_command(int cmd)
{
int i;
int control_status;
int port0, last_port0; /* Double read for stabilising */
if (debug >= DEBUG)
pr_debug("sending following data cmd=0x%02x\n", cmd);
/* The WCMD bit must be 1 and the command is only 4 bits in size */
control_status = (cmd & 0x0F) | WD_WCMD;
outb_p(control_status, pcwd_private.io_addr + 2);
udelay(ISA_COMMAND_TIMEOUT);
port0 = inb_p(pcwd_private.io_addr);
for (i = 0; i < 25; ++i) {
last_port0 = port0;
port0 = inb_p(pcwd_private.io_addr);
if (port0 == last_port0)
break; /* Data is stable */
udelay(250);
}
if (debug >= DEBUG)
pr_debug("received following data for cmd=0x%02x: port0=0x%02x last_port0=0x%02x\n",
cmd, port0, last_port0);
return port0;
}
static int set_command_mode(void)
{
int i, found = 0, count = 0;
/* Set the card into command mode */
spin_lock(&pcwd_private.io_lock);
while ((!found) && (count < 3)) {
i = send_isa_command(CMD_ISA_IDLE);
if (i == 0x00)
found = 1;
else if (i == 0xF3) {
/* Card does not like what we've done to it */
outb_p(0x00, pcwd_private.io_addr + 2);
udelay(1200); /* Spec says wait 1 ms; 1.2 ms adds margin */
outb_p(0x00, pcwd_private.io_addr + 2);
udelay(ISA_COMMAND_TIMEOUT);
}
count++;
}
spin_unlock(&pcwd_private.io_lock);
pcwd_private.command_mode = found;
if (debug >= DEBUG)
pr_debug("command_mode=%d\n", pcwd_private.command_mode);
return found;
}
static void unset_command_mode(void)
{
/* Set the card into normal mode */
spin_lock(&pcwd_private.io_lock);
outb_p(0x00, pcwd_private.io_addr + 2);
udelay(ISA_COMMAND_TIMEOUT);
spin_unlock(&pcwd_private.io_lock);
pcwd_private.command_mode = 0;
if (debug >= DEBUG)
pr_debug("command_mode=%d\n", pcwd_private.command_mode);
}
static inline void pcwd_check_temperature_support(void)
{
if (inb(pcwd_private.io_addr) != 0xF0)
pcwd_private.supports_temp = 1;
}
static inline void pcwd_get_firmware(void)
{
int one, ten, hund, minor;
strcpy(pcwd_private.fw_ver_str, "ERROR");
if (set_command_mode()) {
one = send_isa_command(CMD_ISA_VERSION_INTEGER);
ten = send_isa_command(CMD_ISA_VERSION_TENTH);
hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH);
minor = send_isa_command(CMD_ISA_VERSION_MINOR);
sprintf(pcwd_private.fw_ver_str, "%c.%c%c%c",
one, ten, hund, minor);
}
unset_command_mode();
return;
}
static inline int pcwd_get_option_switches(void)
{
int option_switches = 0;
if (set_command_mode()) {
/* Get switch settings */
option_switches = send_isa_command(CMD_ISA_SWITCH_SETTINGS);
}
unset_command_mode();
return option_switches;
}
static void pcwd_show_card_info(void)
{
int option_switches;
/* Get some extra info from the hardware (in command/debug/diag mode) */
if (pcwd_private.revision == PCWD_REVISION_A)
pr_info("ISA-PC Watchdog (REV.A) detected at port 0x%04x\n",
pcwd_private.io_addr);
else if (pcwd_private.revision == PCWD_REVISION_C) {
pcwd_get_firmware();
pr_info("ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n",
pcwd_private.io_addr, pcwd_private.fw_ver_str);
option_switches = pcwd_get_option_switches();
pr_info("Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n",
option_switches,
((option_switches & 0x10) ? "ON" : "OFF"),
((option_switches & 0x08) ? "ON" : "OFF"));
/* Reprogram internal heartbeat to 2 seconds */
if (set_command_mode()) {
send_isa_command(CMD_ISA_DELAY_TIME_2SECS);
unset_command_mode();
}
}
if (pcwd_private.supports_temp)
pr_info("Temperature Option Detected\n");
if (pcwd_private.boot_status & WDIOF_CARDRESET)
pr_info("Previous reboot was caused by the card\n");
if (pcwd_private.boot_status & WDIOF_OVERHEAT) {
pr_emerg("Card senses a CPU Overheat. Panicking!\n");
pr_emerg("CPU Overheat\n");
}
if (pcwd_private.boot_status == 0)
pr_info("No previous trip detected - Cold boot or reset\n");
}
static void pcwd_timer_ping(struct timer_list *unused)
{
int wdrst_stat;
/* Only ping the hardware while userspace kept next_heartbeat in
* the future, i.e. pinged us within the heartbeat interval */
if (time_before(jiffies, pcwd_private.next_heartbeat)) {
/* Ping the watchdog */
spin_lock(&pcwd_private.io_lock);
if (pcwd_private.revision == PCWD_REVISION_A) {
/* Rev A cards are reset by setting the
WD_WDRST bit in register 1 */
wdrst_stat = inb_p(pcwd_private.io_addr);
wdrst_stat &= 0x0F;
wdrst_stat |= WD_WDRST;
outb_p(wdrst_stat, pcwd_private.io_addr + 1);
} else {
/* Re-trigger watchdog by writing to port 0 */
outb_p(0x00, pcwd_private.io_addr);
}
/* Re-set the timer interval */
mod_timer(&pcwd_private.timer, jiffies + WDT_INTERVAL);
spin_unlock(&pcwd_private.io_lock);
} else {
pr_warn("Heartbeat lost! Will not ping the watchdog\n");
}
}
static int pcwd_start(void)
{
int stat_reg;
pcwd_private.next_heartbeat = jiffies + (heartbeat * HZ);
/* Start the timer */
mod_timer(&pcwd_private.timer, jiffies + WDT_INTERVAL);
/* Enable the port */
if (pcwd_private.revision == PCWD_REVISION_C) {
spin_lock(&pcwd_private.io_lock);
outb_p(0x00, pcwd_private.io_addr + 3);
udelay(ISA_COMMAND_TIMEOUT);
stat_reg = inb_p(pcwd_private.io_addr + 2);
spin_unlock(&pcwd_private.io_lock);
if (stat_reg & WD_WDIS) {
pr_info("Could not start watchdog\n");
return -EIO;
}
}
if (debug >= VERBOSE)
pr_debug("Watchdog started\n");
return 0;
}
static int pcwd_stop(void)
{
int stat_reg;
/* Stop the timer */
del_timer(&pcwd_private.timer);
/* Disable the board */
if (pcwd_private.revision == PCWD_REVISION_C) {
spin_lock(&pcwd_private.io_lock);
outb_p(0xA5, pcwd_private.io_addr + 3);
udelay(ISA_COMMAND_TIMEOUT);
outb_p(0xA5, pcwd_private.io_addr + 3);
udelay(ISA_COMMAND_TIMEOUT);
stat_reg = inb_p(pcwd_private.io_addr + 2);
spin_unlock(&pcwd_private.io_lock);
if ((stat_reg & WD_WDIS) == 0) {
pr_info("Could not stop watchdog\n");
return -EIO;
}
}
if (debug >= VERBOSE)
pr_debug("Watchdog stopped\n");
return 0;
}
static int pcwd_keepalive(void)
{
/* user land ping */
pcwd_private.next_heartbeat = jiffies + (heartbeat * HZ);
if (debug >= DEBUG)
pr_debug("Watchdog keepalive signal send\n");
return 0;
}
static int pcwd_set_heartbeat(int t)
{
if (t < 2 || t > 7200) /* arbitrary upper limit */
return -EINVAL;
heartbeat = t;
if (debug >= VERBOSE)
pr_debug("New heartbeat: %d\n", heartbeat);
return 0;
}
static int pcwd_get_status(int *status)
{
int control_status;
*status = 0;
spin_lock(&pcwd_private.io_lock);
if (pcwd_private.revision == PCWD_REVISION_A)
/* Rev A cards return status information from
* the base register, which is used for the
* temperature in other cards. */
control_status = inb(pcwd_private.io_addr);
else {
/* Rev C cards return card status in the base
* address + 1 register. And use different bits
* to indicate a card initiated reset, and an
* over-temperature condition. And the reboot
* status can be reset. */
control_status = inb(pcwd_private.io_addr + 1);
}
spin_unlock(&pcwd_private.io_lock);
if (pcwd_private.revision == PCWD_REVISION_A) {
if (control_status & WD_WDRST)
*status |= WDIOF_CARDRESET;
if (control_status & WD_T110) {
*status |= WDIOF_OVERHEAT;
if (temp_panic) {
pr_info("Temperature overheat trip!\n");
kernel_power_off();
}
}
} else {
if (control_status & WD_REVC_WTRP)
*status |= WDIOF_CARDRESET;
if (control_status & WD_REVC_TTRP) {
*status |= WDIOF_OVERHEAT;
if (temp_panic) {
pr_info("Temperature overheat trip!\n");
kernel_power_off();
}
}
}
return 0;
}
static int pcwd_clear_status(void)
{
int control_status;
if (pcwd_private.revision == PCWD_REVISION_C) {
spin_lock(&pcwd_private.io_lock);
if (debug >= VERBOSE)
pr_info("clearing watchdog trip status\n");
control_status = inb_p(pcwd_private.io_addr + 1);
if (debug >= DEBUG) {
pr_debug("status was: 0x%02x\n", control_status);
pr_debug("sending: 0x%02x\n",
(control_status & WD_REVC_R2DS));
}
/* clear reset status & Keep Relay 2 disable state as it is */
outb_p((control_status & WD_REVC_R2DS),
pcwd_private.io_addr + 1);
spin_unlock(&pcwd_private.io_lock);
}
return 0;
}
static int pcwd_get_temperature(int *temperature)
{
/* make sure port 0 is returning temperature info, not command results */
if (pcwd_private.command_mode)
return -1;
*temperature = 0;
if (!pcwd_private.supports_temp)
return -ENODEV;
/*
* Convert Celsius to Fahrenheit, since this was
* the decided 'standard' for this return value.
*/
spin_lock(&pcwd_private.io_lock);
*temperature = ((inb(pcwd_private.io_addr)) * 9 / 5) + 32;
spin_unlock(&pcwd_private.io_lock);
if (debug >= DEBUG) {
pr_debug("temperature is: %d F\n", *temperature);
}
return 0;
}
/*
* /dev/watchdog handling
*/
static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int rv;
int status;
int temperature;
int new_heartbeat;
int __user *argp = (int __user *)arg;
static const struct watchdog_info ident = {
.options = WDIOF_OVERHEAT |
WDIOF_CARDRESET |
WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "PCWD",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
pcwd_get_status(&status);
return put_user(status, argp);
case WDIOC_GETBOOTSTATUS:
return put_user(pcwd_private.boot_status, argp);
case WDIOC_GETTEMP:
if (pcwd_get_temperature(&temperature))
return -EFAULT;
return put_user(temperature, argp);
case WDIOC_SETOPTIONS:
if (pcwd_private.revision == PCWD_REVISION_C) {
if (get_user(rv, argp))
return -EFAULT;
if (rv & WDIOS_DISABLECARD) {
status = pcwd_stop();
if (status < 0)
return status;
}
if (rv & WDIOS_ENABLECARD) {
status = pcwd_start();
if (status < 0)
return status;
}
if (rv & WDIOS_TEMPPANIC)
temp_panic = 1;
}
return -EINVAL;
case WDIOC_KEEPALIVE:
pcwd_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, argp))
return -EFAULT;
if (pcwd_set_heartbeat(new_heartbeat))
return -EINVAL;
pcwd_keepalive();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, argp);
default:
return -ENOTTY;
}
return 0;
}
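/*
 * Illustrative userspace sketch (not part of this driver): the ioctl
 * handler above implements the standard Linux watchdog API, so a minimal
 * daemon feeding this card, assuming a 60 s period, could look like:
 *
 * int fd = open("/dev/watchdog", O_WRONLY);
 * int t = 60;
 * ioctl(fd, WDIOC_SETTIMEOUT, &t); // this driver clamps to 2..7200
 * for (;;) {
 * ioctl(fd, WDIOC_KEEPALIVE, 0); // or write(fd, "\0", 1)
 * sleep(t / 2);
 * }
 * // write 'V' then close to disarm, unless nowayout is set
 */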
static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
pcwd_keepalive();
}
return len;
}
static int pcwd_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &open_allowed))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
/* Activate */
pcwd_start();
pcwd_keepalive();
return stream_open(inode, file);
}
static int pcwd_close(struct inode *inode, struct file *file)
{
if (expect_close == 42)
pcwd_stop();
else {
pr_crit("Unexpected close, not stopping watchdog!\n");
pcwd_keepalive();
}
expect_close = 0;
clear_bit(0, &open_allowed);
return 0;
}
/*
* /dev/temperature handling
*/
static ssize_t pcwd_temp_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int temperature;
if (pcwd_get_temperature(&temperature))
return -EFAULT;
if (copy_to_user(buf, &temperature, 1))
return -EFAULT;
return 1;
}
static int pcwd_temp_open(struct inode *inode, struct file *file)
{
if (!pcwd_private.supports_temp)
return -ENODEV;
return stream_open(inode, file);
}
static int pcwd_temp_close(struct inode *inode, struct file *file)
{
return 0;
}
/*
* Kernel Interfaces
*/
static const struct file_operations pcwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = pcwd_write,
.unlocked_ioctl = pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = pcwd_open,
.release = pcwd_close,
};
static struct miscdevice pcwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &pcwd_fops,
};
static const struct file_operations pcwd_temp_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = pcwd_temp_read,
.open = pcwd_temp_open,
.release = pcwd_temp_close,
};
static struct miscdevice temp_miscdev = {
.minor = TEMP_MINOR,
.name = "temperature",
.fops = &pcwd_temp_fops,
};
/*
* Init & exit routines
*/
static inline int get_revision(void)
{
int r = PCWD_REVISION_C;
spin_lock(&pcwd_private.io_lock);
/* REV A cards use only 2 io ports; test
* presumes a floating bus reads as 0xff. */
if ((inb(pcwd_private.io_addr + 2) == 0xFF) ||
(inb(pcwd_private.io_addr + 3) == 0xFF))
r = PCWD_REVISION_A;
spin_unlock(&pcwd_private.io_lock);
return r;
}
/*
* The ISA cards have a heartbeat bit in one of the registers, which
* register is card dependent. The heartbeat bit is monitored, and if
* found, is considered proof that a Berkshire card has been found.
* The initial rate is once per second at board start up, then twice
* per second for normal operation.
*/
static int pcwd_isa_match(struct device *dev, unsigned int id)
{
int base_addr = pcwd_ioports[id];
int port0, last_port0; /* Reg 0, in case it's REV A */
int port1, last_port1; /* Register 1 for REV C cards */
int i;
int retval;
if (debug >= DEBUG)
pr_debug("pcwd_isa_match id=%d\n", id);
if (!request_region(base_addr, 4, "PCWD")) {
pr_info("Port 0x%04x unavailable\n", base_addr);
return 0;
}
retval = 0;
port0 = inb_p(base_addr); /* For REV A boards */
port1 = inb_p(base_addr + 1); /* For REV C boards */
if (port0 != 0xff || port1 != 0xff) {
/* Not an 'ff' from a floating bus, so must be a card! */
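/* Sample both registers at 2 Hz for up to 2 s; per the comment
 * above the heartbeat bit toggles at 1-2 Hz, so seeing it change
 * confirms a live card rather than static bus data. */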
for (i = 0; i < 4; ++i) {
msleep(500);
last_port0 = port0;
last_port1 = port1;
port0 = inb_p(base_addr);
port1 = inb_p(base_addr + 1);
/* Has either heartbeat bit changed? */
if ((port0 ^ last_port0) & WD_HRTBT ||
(port1 ^ last_port1) & WD_REVC_HRBT) {
retval = 1;
break;
}
}
}
release_region(base_addr, 4);
return retval;
}
static int pcwd_isa_probe(struct device *dev, unsigned int id)
{
int ret;
if (debug >= DEBUG)
pr_debug("pcwd_isa_probe id=%d\n", id);
cards_found++;
if (cards_found == 1)
pr_info("v%s Ken Hollis ([email protected])\n",
WATCHDOG_VERSION);
if (cards_found > 1) {
pr_err("This driver only supports 1 device\n");
return -ENODEV;
}
if (pcwd_ioports[id] == 0x0000) {
pr_err("No I/O-Address for card detected\n");
return -ENODEV;
}
pcwd_private.io_addr = pcwd_ioports[id];
spin_lock_init(&pcwd_private.io_lock);
/* Check card's revision */
pcwd_private.revision = get_revision();
if (!request_region(pcwd_private.io_addr,
(pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4, "PCWD")) {
pr_err("I/O address 0x%04x already in use\n",
pcwd_private.io_addr);
ret = -EIO;
goto error_request_region;
}
/* Initial variables */
pcwd_private.supports_temp = 0;
temp_panic = 0;
pcwd_private.boot_status = 0x0000;
/* get the boot_status */
pcwd_get_status(&pcwd_private.boot_status);
/* clear the "card caused reboot" flag */
pcwd_clear_status();
timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
/* Disable the board */
pcwd_stop();
/* Check whether or not the card supports the temperature device */
pcwd_check_temperature_support();
/* Show info about the card itself */
pcwd_show_card_info();
/* If heartbeat = 0 then we use the heartbeat from the dip-switches */
if (heartbeat == 0)
heartbeat = heartbeat_tbl[(pcwd_get_option_switches() & 0x07)];
/* Check that the heartbeat value is within its range;
if not, reset to the default */
if (pcwd_set_heartbeat(heartbeat)) {
pcwd_set_heartbeat(WATCHDOG_HEARTBEAT);
pr_info("heartbeat value must be 2 <= heartbeat <= 7200, using %d\n",
WATCHDOG_HEARTBEAT);
}
if (pcwd_private.supports_temp) {
ret = misc_register(&temp_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
TEMP_MINOR, ret);
goto error_misc_register_temp;
}
}
ret = misc_register(&pcwd_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto error_misc_register_watchdog;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
error_misc_register_watchdog:
if (pcwd_private.supports_temp)
misc_deregister(&temp_miscdev);
error_misc_register_temp:
release_region(pcwd_private.io_addr,
(pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4);
error_request_region:
pcwd_private.io_addr = 0x0000;
cards_found--;
return ret;
}
static void pcwd_isa_remove(struct device *dev, unsigned int id)
{
if (debug >= DEBUG)
pr_debug("pcwd_isa_remove id=%d\n", id);
/* Disable the board */
if (!nowayout)
pcwd_stop();
/* Deregister */
misc_deregister(&pcwd_miscdev);
if (pcwd_private.supports_temp)
misc_deregister(&temp_miscdev);
release_region(pcwd_private.io_addr,
(pcwd_private.revision == PCWD_REVISION_A) ? 2 : 4);
pcwd_private.io_addr = 0x0000;
cards_found--;
}
static void pcwd_isa_shutdown(struct device *dev, unsigned int id)
{
if (debug >= DEBUG)
pr_debug("pcwd_isa_shutdown id=%d\n", id);
pcwd_stop();
}
static struct isa_driver pcwd_isa_driver = {
.match = pcwd_isa_match,
.probe = pcwd_isa_probe,
.remove = pcwd_isa_remove,
.shutdown = pcwd_isa_shutdown,
.driver = {
.owner = THIS_MODULE,
.name = WATCHDOG_NAME,
},
};
module_isa_driver(pcwd_isa_driver, PCWD_ISA_NR_CARDS);
MODULE_AUTHOR("Ken Hollis <[email protected]>, "
"Wim Van Sebroeck <[email protected]>");
MODULE_DESCRIPTION("Berkshire ISA-PC Watchdog driver");
MODULE_VERSION(WATCHDOG_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/pcwd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016 IBM Corporation
*
* Joel Stanley <[email protected]>
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct aspeed_wdt_config {
u32 ext_pulse_width_mask;
u32 irq_shift;
u32 irq_mask;
};
struct aspeed_wdt {
struct watchdog_device wdd;
void __iomem *base;
u32 ctrl;
const struct aspeed_wdt_config *cfg;
};
static const struct aspeed_wdt_config ast2400_config = {
.ext_pulse_width_mask = 0xff,
.irq_shift = 0,
.irq_mask = 0,
};
static const struct aspeed_wdt_config ast2500_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 12,
.irq_mask = GENMASK(31, 12),
};
static const struct aspeed_wdt_config ast2600_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 0,
.irq_mask = GENMASK(31, 10),
};
static const struct of_device_id aspeed_wdt_of_table[] = {
{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
{ .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_STATUS 0x00
#define WDT_RELOAD_VALUE 0x04
#define WDT_RESTART 0x08
#define WDT_CTRL 0x0C
#define WDT_CTRL_BOOT_SECONDARY BIT(7)
#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
#define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
#define WDT_CTRL_1MHZ_CLK BIT(4)
#define WDT_CTRL_WDT_EXT BIT(3)
#define WDT_CTRL_WDT_INTR BIT(2)
#define WDT_CTRL_RESET_SYSTEM BIT(1)
#define WDT_CTRL_ENABLE BIT(0)
#define WDT_TIMEOUT_STATUS 0x10
#define WDT_TIMEOUT_STATUS_IRQ BIT(2)
#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
#define WDT_CLEAR_TIMEOUT_STATUS 0x14
#define WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION BIT(0)
/*
* WDT_RESET_WIDTH controls the characteristics of the external pulse (if
* enabled), specifically:
*
* * Pulse duration
* * Drive mode: push-pull vs open-drain
* * Polarity: Active high or active low
*
* Pulse duration configuration is available on both the AST2400 and AST2500,
* though the field changes between SoCs:
*
* AST2400: Bits 7:0
* AST2500: Bits 19:0
*
* This difference is captured in struct aspeed_wdt_config.
*
* The AST2500 exposes the drive mode and polarity options, but not in a
* regular fashion. For read purposes, bit 31 represents active high or low,
* and bit 30 represents push-pull or open-drain. With respect to write, magic
* values need to be written to the top byte to change the state of the drive
* mode and polarity bits. Any other value written to the top byte has no
* effect on the state of the drive mode or polarity bits. However, the pulse
* width value must be preserved (as desired) if written.
*/
#define WDT_RESET_WIDTH 0x18
#define WDT_RESET_WIDTH_ACTIVE_HIGH BIT(31)
#define WDT_ACTIVE_HIGH_MAGIC (0xA5 << 24)
#define WDT_ACTIVE_LOW_MAGIC (0x5A << 24)
#define WDT_RESET_WIDTH_PUSH_PULL BIT(30)
#define WDT_PUSH_PULL_MAGIC (0xA8 << 24)
#define WDT_OPEN_DRAIN_MAGIC (0x8A << 24)
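/*
 * See the AST2500/AST2600 branch of aspeed_wdt_probe() below for the
 * read-modify-write sequence that applies these magic values while
 * preserving the pulse-width field.
 */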
#define WDT_RESTART_MAGIC 0x4755
/* 32 bits at 1MHz, in milliseconds */
#define WDT_MAX_TIMEOUT_MS 4294967
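/* (2^32 counts) / (10^6 counts per second) = 4294967.296 ms, rounded down */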
#define WDT_DEFAULT_TIMEOUT 30
#define WDT_RATE_1MHZ 1000000
static struct aspeed_wdt *to_aspeed_wdt(struct watchdog_device *wdd)
{
return container_of(wdd, struct aspeed_wdt, wdd);
}
static void aspeed_wdt_enable(struct aspeed_wdt *wdt, int count)
{
wdt->ctrl |= WDT_CTRL_ENABLE;
writel(0, wdt->base + WDT_CTRL);
writel(count, wdt->base + WDT_RELOAD_VALUE);
writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
writel(wdt->ctrl, wdt->base + WDT_CTRL);
}
static int aspeed_wdt_start(struct watchdog_device *wdd)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
aspeed_wdt_enable(wdt, wdd->timeout * WDT_RATE_1MHZ);
return 0;
}
static int aspeed_wdt_stop(struct watchdog_device *wdd)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
wdt->ctrl &= ~WDT_CTRL_ENABLE;
writel(wdt->ctrl, wdt->base + WDT_CTRL);
return 0;
}
static int aspeed_wdt_ping(struct watchdog_device *wdd)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
return 0;
}
static int aspeed_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
u32 actual;
wdd->timeout = timeout;
actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000);
writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE);
writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
return 0;
}
static int aspeed_wdt_set_pretimeout(struct watchdog_device *wdd,
unsigned int pretimeout)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
u32 actual = pretimeout * WDT_RATE_1MHZ;
u32 s = wdt->cfg->irq_shift;
u32 m = wdt->cfg->irq_mask;
wdd->pretimeout = pretimeout;
wdt->ctrl &= ~m;
if (pretimeout)
wdt->ctrl |= ((actual << s) & m) | WDT_CTRL_WDT_INTR;
else
wdt->ctrl &= ~WDT_CTRL_WDT_INTR;
writel(wdt->ctrl, wdt->base + WDT_CTRL);
return 0;
}
static int aspeed_wdt_restart(struct watchdog_device *wdd,
unsigned long action, void *data)
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
mdelay(1000);
return 0;
}
/* access_cs0 shows if cs0 is accessible, hence the inverted bit */
static ssize_t access_cs0_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct aspeed_wdt *wdt = dev_get_drvdata(dev);
u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
return sysfs_emit(buf, "%u\n",
!(status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY));
}
static ssize_t access_cs0_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
struct aspeed_wdt *wdt = dev_get_drvdata(dev);
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val)
writel(WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION,
wdt->base + WDT_CLEAR_TIMEOUT_STATUS);
return size;
}
/*
* This attribute exists only if the system has booted from the alternate
* flash with 'alt-boot' option.
*
* When booted from the alternate flash, the 'access_cs0' sysfs node provides:
* ast2400: a way to get access to the primary SPI flash chip at CS0
* after booting from the alternate chip at CS1.
* ast2500: a way to restore the normal address mapping from
* (CS0->CS1, CS1->CS0) to (CS0->CS0, CS1->CS1).
*
* Clearing the boot code selection and timeout counter also resets the
* chip select line mapping to its initial state. When the SoC is in normal
* mapping state (i.e. booted from CS0), clearing those bits does nothing for
* both versions of the SoC. For alternate boot mode (booted from CS1 due to
* wdt2 expiration) the behavior differs as described above.
*
* This option can be used with wdt2 (watchdog1) only.
*/
static DEVICE_ATTR_RW(access_cs0);
static struct attribute *bswitch_attrs[] = {
&dev_attr_access_cs0.attr,
NULL
};
ATTRIBUTE_GROUPS(bswitch);
static const struct watchdog_ops aspeed_wdt_ops = {
.start = aspeed_wdt_start,
.stop = aspeed_wdt_stop,
.ping = aspeed_wdt_ping,
.set_timeout = aspeed_wdt_set_timeout,
.set_pretimeout = aspeed_wdt_set_pretimeout,
.restart = aspeed_wdt_restart,
.owner = THIS_MODULE,
};
static const struct watchdog_info aspeed_wdt_info = {
.options = WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE
| WDIOF_SETTIMEOUT,
.identity = KBUILD_MODNAME,
};
static const struct watchdog_info aspeed_wdt_pretimeout_info = {
.options = WDIOF_KEEPALIVEPING
| WDIOF_PRETIMEOUT
| WDIOF_MAGICCLOSE
| WDIOF_SETTIMEOUT,
.identity = KBUILD_MODNAME,
};
static irqreturn_t aspeed_wdt_irq(int irq, void *arg)
{
struct watchdog_device *wdd = arg;
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
if (status & WDT_TIMEOUT_STATUS_IRQ)
watchdog_notify_pretimeout(wdd);
return IRQ_HANDLED;
}
static int aspeed_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *ofdid;
struct aspeed_wdt *wdt;
struct device_node *np;
const char *reset_type;
u32 duration;
u32 status;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
np = dev->of_node;
ofdid = of_match_node(aspeed_wdt_of_table, np);
if (!ofdid)
return -EINVAL;
wdt->cfg = ofdid->data;
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
wdt->wdd.info = &aspeed_wdt_info;
if (wdt->cfg->irq_mask) {
int irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
ret = devm_request_irq(dev, irq, aspeed_wdt_irq,
IRQF_SHARED, dev_name(dev),
wdt);
if (ret)
return ret;
wdt->wdd.info = &aspeed_wdt_pretimeout_info;
}
}
wdt->wdd.ops = &aspeed_wdt_ops;
wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
wdt->wdd.parent = dev;
wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
watchdog_init_timeout(&wdt->wdd, 0, dev);
watchdog_set_nowayout(&wdt->wdd, nowayout);
/*
* On clock rates:
* - ast2400 wdt can run at PCLK, or 1MHz
* - ast2500 only runs at 1MHz, hard coding bit 4 to 1
* - ast2600 always runs at 1MHz
*
* Set the ast2400 to run at 1MHz as it simplifies the driver.
*/
if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
wdt->ctrl = WDT_CTRL_1MHZ_CLK;
/*
* Control reset on a per-device basis to ensure the
* host is not affected by a BMC reboot
*/
ret = of_property_read_string(np, "aspeed,reset-type", &reset_type);
if (ret) {
wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
} else {
if (!strcmp(reset_type, "cpu"))
wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "soc"))
wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "system"))
wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
WDT_CTRL_RESET_SYSTEM;
else if (strcmp(reset_type, "none"))
return -EINVAL;
}
if (of_property_read_bool(np, "aspeed,external-signal"))
wdt->ctrl |= WDT_CTRL_WDT_EXT;
if (of_property_read_bool(np, "aspeed,alt-boot"))
wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
/*
* The watchdog is running, but invoke aspeed_wdt_start() to
* write wdt->ctrl to WDT_CTRL to ensure the watchdog's
* configuration conforms to the driver's expectations.
* Primarily, ensure we're using the 1MHz clock source.
*/
aspeed_wdt_start(&wdt->wdd);
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
(of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
reg &= wdt->cfg->ext_pulse_width_mask;
if (of_property_read_bool(np, "aspeed,ext-active-high"))
reg |= WDT_ACTIVE_HIGH_MAGIC;
else
reg |= WDT_ACTIVE_LOW_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
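/*
 * The magic byte only took effect for the write above; mask it off
 * again so the drive-mode update below carries just the pulse-width
 * bits plus its own magic value.
 */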
reg &= wdt->cfg->ext_pulse_width_mask;
if (of_property_read_bool(np, "aspeed,ext-push-pull"))
reg |= WDT_PUSH_PULL_MAGIC;
else
reg |= WDT_OPEN_DRAIN_MAGIC;
writel(reg, wdt->base + WDT_RESET_WIDTH);
}
if (!of_property_read_u32(np, "aspeed,ext-pulse-duration", &duration)) {
u32 max_duration = wdt->cfg->ext_pulse_width_mask + 1;
if (duration == 0 || duration > max_duration) {
dev_err(dev, "Invalid pulse duration: %uus\n",
duration);
duration = max(1U, min(max_duration, duration));
dev_info(dev, "Pulse duration set to %uus\n",
duration);
}
/*
* The watchdog is always configured with a 1MHz source, so
* there is no need to scale the microsecond value. However we
* need to offset it - from the datasheet:
*
* "This register decides the asserting duration of wdt_ext and
* wdt_rstarm signal. The default value is 0xFF. It means the
* default asserting duration of wdt_ext and wdt_rstarm is
* 256us."
*
* This implies a value of 0 gives a 1us pulse.
*/
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
status = readl(wdt->base + WDT_TIMEOUT_STATUS);
if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
wdt->wdd.bootstatus = WDIOF_CARDRESET;
if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
of_device_is_compatible(np, "aspeed,ast2500-wdt"))
wdt->wdd.groups = bswitch_groups;
}
dev_set_drvdata(dev, wdt);
return devm_watchdog_register_device(dev, &wdt->wdd);
}
static struct platform_driver aspeed_watchdog_driver = {
.probe = aspeed_wdt_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_wdt_of_table,
},
};
static int __init aspeed_wdt_init(void)
{
return platform_driver_register(&aspeed_watchdog_driver);
}
arch_initcall(aspeed_wdt_init);
static void __exit aspeed_wdt_exit(void)
{
platform_driver_unregister(&aspeed_watchdog_driver);
}
module_exit(aspeed_wdt_exit);
MODULE_DESCRIPTION("Aspeed Watchdog Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/aspeed_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Atom E6xx Watchdog driver
*
* Copyright (C) 2011 Alexander Stein
* <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/spinlock.h>
#define DRIVER_NAME "ie6xx_wdt"
#define PV1 0x00
#define PV2 0x04
#define RR0 0x0c
#define RR1 0x0d
#define WDT_RELOAD 0x01
#define WDT_TOUT 0x02
#define WDTCR 0x10
#define WDT_PRE_SEL 0x04
#define WDT_RESET_SEL 0x08
#define WDT_RESET_EN 0x10
#define WDT_TOUT_EN 0x20
#define DCR 0x14
#define WDTLR 0x18
#define WDT_LOCK 0x01
#define WDT_ENABLE 0x02
#define WDT_TOUT_CNF 0x03
#define MIN_TIME 1
#define MAX_TIME (10 * 60) /* 10 minutes */
#define DEFAULT_TIME 60
static unsigned int timeout = DEFAULT_TIME;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
"Default Watchdog timer setting ("
__MODULE_STRING(DEFAULT_TIME) "s)."
"The range is from 1 to 600");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static u8 resetmode = 0x10;
module_param(resetmode, byte, 0);
MODULE_PARM_DESC(resetmode,
"Resetmode bits: 0x08 warm reset (cold reset otherwise), "
"0x10 reset enable, 0x20 disable toggle GPIO[4] (default=0x10)");
static struct {
unsigned short sch_wdtba;
spinlock_t unlock_sequence;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
} ie6xx_wdt_data;
/*
* Unlocking is required before writing the preload and reload registers;
* ie6xx_wdt_data.unlock_sequence must be held so the two-write unlock
* sequence cannot be interleaved with other register accesses
*/
static void ie6xx_wdt_unlock_registers(void)
{
outb(0x80, ie6xx_wdt_data.sch_wdtba + RR0);
outb(0x86, ie6xx_wdt_data.sch_wdtba + RR0);
}
static int ie6xx_wdt_ping(struct watchdog_device *wdd)
{
spin_lock(&ie6xx_wdt_data.unlock_sequence);
ie6xx_wdt_unlock_registers();
outb(WDT_RELOAD, ie6xx_wdt_data.sch_wdtba + RR1);
spin_unlock(&ie6xx_wdt_data.unlock_sequence);
return 0;
}
static int ie6xx_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
u32 preload;
u64 clock;
u8 wdtcr;
/* Watchdog clock is PCI Clock (33MHz) */
clock = 33000000;
/* and the preload value is loaded into [34:15] of the down counter */
preload = (t * clock) >> 15;
/*
* Manual states preload must be one less.
* Does not wrap as t is at least 1
*/
preload -= 1;
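/* For example, t = 60 s: (60 * 33000000) >> 15 = 60424, so preload = 60423 */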
spin_lock(&ie6xx_wdt_data.unlock_sequence);
/* Set ResetMode & Enable prescaler for range 10ms to 10 min */
wdtcr = resetmode & 0x38;
outb(wdtcr, ie6xx_wdt_data.sch_wdtba + WDTCR);
ie6xx_wdt_unlock_registers();
outl(0, ie6xx_wdt_data.sch_wdtba + PV1);
ie6xx_wdt_unlock_registers();
outl(preload, ie6xx_wdt_data.sch_wdtba + PV2);
ie6xx_wdt_unlock_registers();
outb(WDT_RELOAD | WDT_TOUT, ie6xx_wdt_data.sch_wdtba + RR1);
spin_unlock(&ie6xx_wdt_data.unlock_sequence);
wdd->timeout = t;
return 0;
}
static int ie6xx_wdt_start(struct watchdog_device *wdd)
{
ie6xx_wdt_set_timeout(wdd, wdd->timeout);
/* Enable the watchdog timer */
spin_lock(&ie6xx_wdt_data.unlock_sequence);
outb(WDT_ENABLE, ie6xx_wdt_data.sch_wdtba + WDTLR);
spin_unlock(&ie6xx_wdt_data.unlock_sequence);
return 0;
}
static int ie6xx_wdt_stop(struct watchdog_device *wdd)
{
if (inb(ie6xx_wdt_data.sch_wdtba + WDTLR) & WDT_LOCK)
return -1;
/* Disable the watchdog timer */
spin_lock(&ie6xx_wdt_data.unlock_sequence);
outb(0, ie6xx_wdt_data.sch_wdtba + WDTLR);
spin_unlock(&ie6xx_wdt_data.unlock_sequence);
return 0;
}
static const struct watchdog_info ie6xx_wdt_info = {
.identity = "Intel Atom E6xx Watchdog",
.options = WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
};
static const struct watchdog_ops ie6xx_wdt_ops = {
.owner = THIS_MODULE,
.start = ie6xx_wdt_start,
.stop = ie6xx_wdt_stop,
.ping = ie6xx_wdt_ping,
.set_timeout = ie6xx_wdt_set_timeout,
};
static struct watchdog_device ie6xx_wdt_dev = {
.info = &ie6xx_wdt_info,
.ops = &ie6xx_wdt_ops,
.min_timeout = MIN_TIME,
.max_timeout = MAX_TIME,
};
#ifdef CONFIG_DEBUG_FS
static int ie6xx_wdt_show(struct seq_file *s, void *unused)
{
seq_printf(s, "PV1 = 0x%08x\n",
inl(ie6xx_wdt_data.sch_wdtba + PV1));
seq_printf(s, "PV2 = 0x%08x\n",
inl(ie6xx_wdt_data.sch_wdtba + PV2));
seq_printf(s, "RR = 0x%08x\n",
inw(ie6xx_wdt_data.sch_wdtba + RR0));
seq_printf(s, "WDTCR = 0x%08x\n",
inw(ie6xx_wdt_data.sch_wdtba + WDTCR));
seq_printf(s, "DCR = 0x%08x\n",
inl(ie6xx_wdt_data.sch_wdtba + DCR));
seq_printf(s, "WDTLR = 0x%08x\n",
inw(ie6xx_wdt_data.sch_wdtba + WDTLR));
seq_printf(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ie6xx_wdt);
static void ie6xx_wdt_debugfs_init(void)
{
/* /sys/kernel/debug/ie6xx_wdt */
ie6xx_wdt_data.debugfs = debugfs_create_file("ie6xx_wdt",
S_IFREG | S_IRUGO, NULL, NULL, &ie6xx_wdt_fops);
}
static void ie6xx_wdt_debugfs_exit(void)
{
debugfs_remove(ie6xx_wdt_data.debugfs);
}
#else
static void ie6xx_wdt_debugfs_init(void)
{
}
static void ie6xx_wdt_debugfs_exit(void)
{
}
#endif
static int ie6xx_wdt_probe(struct platform_device *pdev)
{
struct resource *res;
u8 wdtlr;
int ret;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -ENODEV;
if (!request_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "Watchdog region 0x%llx already in use!\n",
(u64)res->start);
return -EBUSY;
}
ie6xx_wdt_data.sch_wdtba = res->start;
dev_dbg(&pdev->dev, "WDT = 0x%X\n", ie6xx_wdt_data.sch_wdtba);
ie6xx_wdt_dev.timeout = timeout;
watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout);
ie6xx_wdt_dev.parent = &pdev->dev;
spin_lock_init(&ie6xx_wdt_data.unlock_sequence);
wdtlr = inb(ie6xx_wdt_data.sch_wdtba + WDTLR);
if (wdtlr & WDT_LOCK)
dev_warn(&pdev->dev,
"Watchdog Timer is Locked (Reg=0x%x)\n", wdtlr);
ie6xx_wdt_debugfs_init();
ret = watchdog_register_device(&ie6xx_wdt_dev);
if (ret)
goto misc_register_error;
return 0;
misc_register_error:
ie6xx_wdt_debugfs_exit();
release_region(res->start, resource_size(res));
ie6xx_wdt_data.sch_wdtba = 0;
return ret;
}
static void ie6xx_wdt_remove(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
ie6xx_wdt_stop(NULL);
watchdog_unregister_device(&ie6xx_wdt_dev);
ie6xx_wdt_debugfs_exit();
release_region(res->start, resource_size(res));
ie6xx_wdt_data.sch_wdtba = 0;
}
static struct platform_driver ie6xx_wdt_driver = {
.probe = ie6xx_wdt_probe,
.remove_new = ie6xx_wdt_remove,
.driver = {
.name = DRIVER_NAME,
},
};
static int __init ie6xx_wdt_init(void)
{
/* Check boot parameters to verify that their initial values */
/* are in range. */
if ((timeout < MIN_TIME) ||
(timeout > MAX_TIME)) {
pr_err("Watchdog timer: value of timeout %d (dec) "
"is out of range from %d to %d (dec)\n",
timeout, MIN_TIME, MAX_TIME);
return -EINVAL;
}
return platform_driver_register(&ie6xx_wdt_driver);
}
static void __exit ie6xx_wdt_exit(void)
{
platform_driver_unregister(&ie6xx_wdt_driver);
}
late_initcall(ie6xx_wdt_init);
module_exit(ie6xx_wdt_exit);
MODULE_AUTHOR("Alexander Stein <[email protected]>");
MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/watchdog/ie6xx_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
*
* Author: Wolfram Sang <[email protected]>
*
* Copyright (C) 2011-12 Wolfram Sang, Pengutronix
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#define WDOG_TICK_RATE 1000 /* 1 kHz clock */
#define STMP3XXX_DEFAULT_TIMEOUT 19
#define STMP3XXX_MAX_TIMEOUT (UINT_MAX / WDOG_TICK_RATE)
static unsigned int heartbeat = STMP3XXX_DEFAULT_TIMEOUT;
module_param(heartbeat, uint, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
__MODULE_STRING(STMP3XXX_MAX_TIMEOUT) ", default "
__MODULE_STRING(STMP3XXX_DEFAULT_TIMEOUT));
static int wdt_start(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE);
return 0;
}
static int wdt_stop(struct watchdog_device *wdd)
{
struct device *dev = watchdog_get_drvdata(wdd);
struct stmp3xxx_wdt_pdata *pdata = dev_get_platdata(dev);
pdata->wdt_set_timeout(dev->parent, 0);
return 0;
}
static int wdt_set_timeout(struct watchdog_device *wdd, unsigned new_timeout)
{
wdd->timeout = new_timeout;
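/* Writing the new period re-arms the hardware immediately */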
return wdt_start(wdd);
}
static const struct watchdog_info stmp3xxx_wdt_ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "STMP3XXX RTC Watchdog",
};
static const struct watchdog_ops stmp3xxx_wdt_ops = {
.owner = THIS_MODULE,
.start = wdt_start,
.stop = wdt_stop,
.set_timeout = wdt_set_timeout,
};
static struct watchdog_device stmp3xxx_wdd = {
.info = &stmp3xxx_wdt_ident,
.ops = &stmp3xxx_wdt_ops,
.min_timeout = 1,
.max_timeout = STMP3XXX_MAX_TIMEOUT,
.status = WATCHDOG_NOWAYOUT_INIT_STATUS,
};
static int wdt_notify_sys(struct notifier_block *nb, unsigned long code,
void *unused)
{
switch (code) {
case SYS_DOWN: /* keep enabled, system might crash while going down */
break;
case SYS_HALT: /* allow the system to actually halt */
case SYS_POWER_OFF:
wdt_stop(&stmp3xxx_wdd);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static int stmp3xxx_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
watchdog_set_drvdata(&stmp3xxx_wdd, dev);
stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
stmp3xxx_wdd.parent = dev;
ret = devm_watchdog_register_device(dev, &stmp3xxx_wdd);
if (ret < 0)
return ret;
if (register_reboot_notifier(&wdt_notifier))
dev_warn(dev, "cannot register reboot notifier\n");
dev_info(dev, "initialized watchdog with heartbeat %ds\n",
stmp3xxx_wdd.timeout);
return 0;
}
static void stmp3xxx_wdt_remove(struct platform_device *pdev)
{
unregister_reboot_notifier(&wdt_notifier);
}
static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdd = &stmp3xxx_wdd;
if (watchdog_active(wdd))
return wdt_stop(wdd);
return 0;
}
static int __maybe_unused stmp3xxx_wdt_resume(struct device *dev)
{
struct watchdog_device *wdd = &stmp3xxx_wdd;
if (watchdog_active(wdd))
return wdt_start(wdd);
return 0;
}
static SIMPLE_DEV_PM_OPS(stmp3xxx_wdt_pm_ops,
stmp3xxx_wdt_suspend, stmp3xxx_wdt_resume);
static struct platform_driver stmp3xxx_wdt_driver = {
.driver = {
.name = "stmp3xxx_rtc_wdt",
.pm = &stmp3xxx_wdt_pm_ops,
},
.probe = stmp3xxx_wdt_probe,
.remove_new = stmp3xxx_wdt_remove,
};
module_platform_driver(stmp3xxx_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfram Sang <[email protected]>");
| linux-master | drivers/watchdog/stmp3xxx_rtc_wdt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Nuvoton Technology corporation.
// Copyright (c) 2018 IBM Corp.
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
#define NPCM_WTCR 0x1C
#define NPCM_WTCLK (BIT(10) | BIT(11)) /* Clock divider */
#define NPCM_WTE BIT(7) /* Enable */
#define NPCM_WTIE BIT(6) /* Enable irq */
#define NPCM_WTIS (BIT(4) | BIT(5)) /* Interval selection */
#define NPCM_WTIF BIT(3) /* Interrupt flag*/
#define NPCM_WTRF BIT(2) /* Reset flag */
#define NPCM_WTRE BIT(1) /* Reset enable */
#define NPCM_WTR BIT(0) /* Reset counter */
/*
* Watchdog timeouts
*
* 170 msec: WTCLK=01 WTIS=00 VAL= 0x400
* 670 msec: WTCLK=01 WTIS=01 VAL= 0x410
* 1360 msec: WTCLK=10 WTIS=00 VAL= 0x800
* 2700 msec: WTCLK=01 WTIS=10 VAL= 0x420
* 5360 msec: WTCLK=10 WTIS=01 VAL= 0x810
* 10700 msec: WTCLK=01 WTIS=11 VAL= 0x430
* 21600 msec: WTCLK=10 WTIS=10 VAL= 0x820
* 43000 msec: WTCLK=11 WTIS=00 VAL= 0xC00
* 85600 msec: WTCLK=10 WTIS=11 VAL= 0x830
* 172000 msec: WTCLK=11 WTIS=01 VAL= 0xC10
* 687000 msec: WTCLK=11 WTIS=10 VAL= 0xC20
* 2750000 msec: WTCLK=11 WTIS=11 VAL= 0xC30
*/
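/*
 * In the values above, WTCLK occupies bits 11:10 and WTIS bits 5:4 of
 * WTCR, so e.g. 0x810 encodes WTCLK=10 with WTIS=01 (the 5360 ms row).
 */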
struct npcm_wdt {
struct watchdog_device wdd;
void __iomem *reg;
struct clk *clk;
};
static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd)
{
return container_of(wdd, struct npcm_wdt, wdd);
}
static int npcm_wdt_ping(struct watchdog_device *wdd)
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
u32 val;
val = readl(wdt->reg);
writel(val | NPCM_WTR, wdt->reg);
return 0;
}
static int npcm_wdt_start(struct watchdog_device *wdd)
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
u32 val;
if (wdt->clk)
clk_prepare_enable(wdt->clk);
if (wdd->timeout < 2)
val = 0x800;
else if (wdd->timeout < 3)
val = 0x420;
else if (wdd->timeout < 6)
val = 0x810;
else if (wdd->timeout < 11)
val = 0x430;
else if (wdd->timeout < 22)
val = 0x820;
else if (wdd->timeout < 44)
val = 0xC00;
else if (wdd->timeout < 87)
val = 0x830;
else if (wdd->timeout < 173)
val = 0xC10;
else if (wdd->timeout < 688)
val = 0xC20;
else
val = 0xC30;
val |= NPCM_WTRE | NPCM_WTE | NPCM_WTR | NPCM_WTIE;
writel(val, wdt->reg);
return 0;
}
static int npcm_wdt_stop(struct watchdog_device *wdd)
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
writel(0, wdt->reg);
if (wdt->clk)
clk_disable_unprepare(wdt->clk);
return 0;
}
static int npcm_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
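/* Quantize the requested timeout to the supported hardware
 * intervals (see the timeout table above) */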
if (timeout < 2)
wdd->timeout = 1;
else if (timeout < 3)
wdd->timeout = 2;
else if (timeout < 6)
wdd->timeout = 5;
else if (timeout < 11)
wdd->timeout = 10;
else if (timeout < 22)
wdd->timeout = 21;
else if (timeout < 44)
wdd->timeout = 43;
else if (timeout < 87)
wdd->timeout = 86;
else if (timeout < 173)
wdd->timeout = 172;
else if (timeout < 688)
wdd->timeout = 687;
else
wdd->timeout = 2750;
if (watchdog_active(wdd))
npcm_wdt_start(wdd);
return 0;
}
static irqreturn_t npcm_wdt_interrupt(int irq, void *data)
{
struct npcm_wdt *wdt = data;
watchdog_notify_pretimeout(&wdt->wdd);
return IRQ_HANDLED;
}
static int npcm_wdt_restart(struct watchdog_device *wdd,
unsigned long action, void *data)
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
/* For reset, we start the WDT clock and leave it running. */
if (wdt->clk)
clk_prepare_enable(wdt->clk);
writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
udelay(1000);
return 0;
}
static bool npcm_is_running(struct watchdog_device *wdd)
{
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
return readl(wdt->reg) & NPCM_WTE;
}
static const struct watchdog_info npcm_wdt_info = {
.identity = KBUILD_MODNAME,
.options = WDIOF_SETTIMEOUT
| WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops npcm_wdt_ops = {
.owner = THIS_MODULE,
.start = npcm_wdt_start,
.stop = npcm_wdt_stop,
.ping = npcm_wdt_ping,
.set_timeout = npcm_wdt_set_timeout,
.restart = npcm_wdt_restart,
};
static int npcm_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct npcm_wdt *wdt;
int irq;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->reg))
return PTR_ERR(wdt->reg);
wdt->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(wdt->clk))
return PTR_ERR(wdt->clk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
wdt->wdd.info = &npcm_wdt_info;
wdt->wdd.ops = &npcm_wdt_ops;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 2750;
wdt->wdd.parent = dev;
wdt->wdd.timeout = 86;
watchdog_init_timeout(&wdt->wdd, 0, dev);
/* Ensure the timeout can be represented by the hardware */
npcm_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
if (npcm_is_running(&wdt->wdd)) {
/* Restart with the default or device-tree specified timeout */
npcm_wdt_start(&wdt->wdd);
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0, "watchdog",
wdt);
if (ret)
return ret;
ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret)
return ret;
dev_info(dev, "NPCM watchdog driver enabled\n");
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id npcm_wdt_match[] = {
{.compatible = "nuvoton,wpcm450-wdt"},
{.compatible = "nuvoton,npcm750-wdt"},
{},
};
MODULE_DEVICE_TABLE(of, npcm_wdt_match);
#endif
static struct platform_driver npcm_wdt_driver = {
.probe = npcm_wdt_probe,
.driver = {
.name = "npcm-wdt",
.of_match_table = of_match_ptr(npcm_wdt_match),
},
};
module_platform_driver(npcm_wdt_driver);
MODULE_AUTHOR("Joel Stanley");
MODULE_DESCRIPTION("Watchdog driver for NPCM");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/npcm_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer.
*
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*
* This driver was based on: drivers/watchdog/ixp4xx_wdt.c
* Author: Deepak Saxena <[email protected]>
* Copyright 2004 (c) MontaVista, Software, Inc.
*
* which again was based on sa1100 driver,
* Copyright (C) 2000 Oleg Drokin <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "ath79-wdt"
#define WDT_TIMEOUT 15 /* seconds */
#define WDOG_REG_CTRL 0x00
#define WDOG_REG_TIMER 0x04
#define WDOG_CTRL_LAST_RESET BIT(31)
#define WDOG_CTRL_ACTION_MASK 3
#define WDOG_CTRL_ACTION_NONE 0 /* no action */
#define WDOG_CTRL_ACTION_GPI 1 /* general purpose interrupt */
#define WDOG_CTRL_ACTION_NMI 2 /* NMI */
#define WDOG_CTRL_ACTION_FCR 3 /* full chip reset */
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int timeout = WDT_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
"(default=" __MODULE_STRING(WDT_TIMEOUT) "s)");
static unsigned long wdt_flags;
#define WDT_FLAGS_BUSY 0
#define WDT_FLAGS_EXPECT_CLOSE 1
static struct clk *wdt_clk;
static unsigned long wdt_freq;
static int boot_status;
static int max_timeout;
static void __iomem *wdt_base;
static inline void ath79_wdt_wr(unsigned reg, u32 val)
{
iowrite32(val, wdt_base + reg);
}
static inline u32 ath79_wdt_rr(unsigned reg)
{
return ioread32(wdt_base + reg);
}
static inline void ath79_wdt_keepalive(void)
{
ath79_wdt_wr(WDOG_REG_TIMER, wdt_freq * timeout);
/* flush write */
ath79_wdt_rr(WDOG_REG_TIMER);
}
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
/*
* Updating the TIMER register requires a few microseconds
* on the AR934x SoCs at least. Use a small delay to ensure
* that the TIMER register is updated within the hardware
* before enabling the watchdog.
*/
udelay(2);
ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
/* flush write */
ath79_wdt_rr(WDOG_REG_CTRL);
}
static inline void ath79_wdt_disable(void)
{
ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_NONE);
/* flush write */
ath79_wdt_rr(WDOG_REG_CTRL);
}
static int ath79_wdt_set_timeout(int val)
{
if (val < 1 || val > max_timeout)
return -EINVAL;
timeout = val;
ath79_wdt_keepalive();
return 0;
}
static int ath79_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags))
return -EBUSY;
clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
ath79_wdt_enable();
return stream_open(inode, file);
}
static int ath79_wdt_release(struct inode *inode, struct file *file)
{
if (test_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags))
ath79_wdt_disable();
else {
pr_crit("device closed unexpectedly, watchdog timer will not stop!\n");
ath79_wdt_keepalive();
}
clear_bit(WDT_FLAGS_BUSY, &wdt_flags);
clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
return 0;
}
static ssize_t ath79_wdt_write(struct file *file, const char *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
set_bit(WDT_FLAGS_EXPECT_CLOSE,
&wdt_flags);
}
}
ath79_wdt_keepalive();
}
return len;
}
static const struct watchdog_info ath79_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
.firmware_version = 0,
.identity = "ATH79 watchdog",
};
static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int err;
int t;
switch (cmd) {
case WDIOC_GETSUPPORT:
err = copy_to_user(argp, &ath79_wdt_info,
sizeof(ath79_wdt_info)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
err = put_user(0, p);
break;
case WDIOC_GETBOOTSTATUS:
err = put_user(boot_status, p);
break;
case WDIOC_KEEPALIVE:
ath79_wdt_keepalive();
err = 0;
break;
case WDIOC_SETTIMEOUT:
err = get_user(t, p);
if (err)
break;
err = ath79_wdt_set_timeout(t);
if (err)
break;
fallthrough;
case WDIOC_GETTIMEOUT:
err = put_user(timeout, p);
break;
default:
err = -ENOTTY;
break;
}
return err;
}
static const struct file_operations ath79_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = ath79_wdt_write,
.unlocked_ioctl = ath79_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = ath79_wdt_open,
.release = ath79_wdt_release,
};
static struct miscdevice ath79_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ath79_wdt_fops,
};
static int ath79_wdt_probe(struct platform_device *pdev)
{
u32 ctrl;
int err;
if (wdt_base)
return -EBUSY;
wdt_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
wdt_clk = devm_clk_get(&pdev->dev, "wdt");
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
err = clk_prepare_enable(wdt_clk);
if (err)
return err;
wdt_freq = clk_get_rate(wdt_clk);
if (!wdt_freq) {
err = -EINVAL;
goto err_clk_disable;
}
max_timeout = (0xfffffffful / wdt_freq);
if (timeout < 1 || timeout > max_timeout) {
timeout = max_timeout;
dev_info(&pdev->dev,
"timeout value must be 0 < timeout < %d, using %d\n",
max_timeout, timeout);
}
ctrl = ath79_wdt_rr(WDOG_REG_CTRL);
boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
err = misc_register(&ath79_wdt_miscdev);
if (err) {
dev_err(&pdev->dev,
"unable to register misc device, err=%d\n", err);
goto err_clk_disable;
}
return 0;
err_clk_disable:
clk_disable_unprepare(wdt_clk);
return err;
}
static void ath79_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ath79_wdt_miscdev);
clk_disable_unprepare(wdt_clk);
}
static void ath79_wdt_shutdown(struct platform_device *pdev)
{
ath79_wdt_disable();
}
#ifdef CONFIG_OF
static const struct of_device_id ath79_wdt_match[] = {
{ .compatible = "qca,ar7130-wdt" },
{},
};
MODULE_DEVICE_TABLE(of, ath79_wdt_match);
#endif
static struct platform_driver ath79_wdt_driver = {
.probe = ath79_wdt_probe,
.remove_new = ath79_wdt_remove,
.shutdown = ath79_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ath79_wdt_match),
},
};
module_platform_driver(ath79_wdt_driver);
MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
MODULE_AUTHOR("Gabor Juhos <[email protected]");
MODULE_AUTHOR("Imre Kaloz <[email protected]");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/watchdog/ath79_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer
*
* Copyright (C) 2011 Gabor Juhos <[email protected]>
* Copyright (C) 2013 John Crispin <[email protected]>
*
* This driver was based on: drivers/watchdog/softdog.c
*/
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
#define SYSC_RSTSTAT 0x38
#define WDT_RST_CAUSE BIT(1)
#define RALINK_WDT_TIMEOUT 30
#define RALINK_WDT_PRESCALE 65536
#define TIMER_REG_TMR1LOAD 0x00
#define TIMER_REG_TMR1CTL 0x08
#define TMRSTAT_TMR1RST BIT(5)
#define TMR1CTL_ENABLE BIT(7)
#define TMR1CTL_MODE_SHIFT 4
#define TMR1CTL_MODE_MASK 0x3
#define TMR1CTL_MODE_FREE_RUNNING 0x0
#define TMR1CTL_MODE_PERIODIC 0x1
#define TMR1CTL_MODE_TIMEOUT 0x2
#define TMR1CTL_MODE_WDT 0x3
#define TMR1CTL_PRESCALE_MASK 0xf
#define TMR1CTL_PRESCALE_65536 0xf
struct rt2880_wdt_data {
void __iomem *base;
unsigned long freq;
struct clk *clk;
struct reset_control *rst;
struct watchdog_device wdt;
};
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static inline void rt_wdt_w32(void __iomem *base, unsigned int reg, u32 val)
{
iowrite32(val, base + reg);
}
static inline u32 rt_wdt_r32(void __iomem *base, unsigned int reg)
{
return ioread32(base + reg);
}
static int rt288x_wdt_ping(struct watchdog_device *w)
{
struct rt2880_wdt_data *drvdata = watchdog_get_drvdata(w);
rt_wdt_w32(drvdata->base, TIMER_REG_TMR1LOAD, w->timeout * drvdata->freq);
return 0;
}
static int rt288x_wdt_start(struct watchdog_device *w)
{
struct rt2880_wdt_data *drvdata = watchdog_get_drvdata(w);
u32 t;
t = rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL);
t &= ~(TMR1CTL_MODE_MASK << TMR1CTL_MODE_SHIFT |
TMR1CTL_PRESCALE_MASK);
t |= (TMR1CTL_MODE_WDT << TMR1CTL_MODE_SHIFT |
TMR1CTL_PRESCALE_65536);
rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, t);
rt288x_wdt_ping(w);
t = rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL);
t |= TMR1CTL_ENABLE;
rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, t);
return 0;
}
static int rt288x_wdt_stop(struct watchdog_device *w)
{
struct rt2880_wdt_data *drvdata = watchdog_get_drvdata(w);
u32 t;
rt288x_wdt_ping(w);
t = rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL);
t &= ~TMR1CTL_ENABLE;
rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, t);
return 0;
}
static int rt288x_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
{
w->timeout = t;
rt288x_wdt_ping(w);
return 0;
}
static int rt288x_wdt_bootcause(void)
{
if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE)
return WDIOF_CARDRESET;
return 0;
}
static const struct watchdog_info rt288x_wdt_info = {
.identity = "Ralink Watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops rt288x_wdt_ops = {
.owner = THIS_MODULE,
.start = rt288x_wdt_start,
.stop = rt288x_wdt_stop,
.ping = rt288x_wdt_ping,
.set_timeout = rt288x_wdt_set_timeout,
};
static int rt288x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct watchdog_device *wdt;
struct rt2880_wdt_data *drvdata;
int ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
drvdata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(drvdata->clk))
return PTR_ERR(drvdata->clk);
drvdata->rst = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(drvdata->rst))
reset_control_deassert(drvdata->rst);
drvdata->freq = clk_get_rate(drvdata->clk) / RALINK_WDT_PRESCALE;
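/*
 * The watchdog counts at the system clock divided by the fixed 65536
 * prescaler; TMR1LOAD appears to be 16 bits wide, hence the 0xffff
 * bound on max_timeout below.
 */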
wdt = &drvdata->wdt;
wdt->info = &rt288x_wdt_info;
wdt->ops = &rt288x_wdt_ops;
wdt->min_timeout = 1;
wdt->max_timeout = (0xfffful / drvdata->freq);
wdt->parent = dev;
wdt->bootstatus = rt288x_wdt_bootcause();
watchdog_init_timeout(wdt, wdt->max_timeout, dev);
watchdog_set_nowayout(wdt, nowayout);
watchdog_set_drvdata(wdt, drvdata);
watchdog_stop_on_reboot(wdt);
ret = devm_watchdog_register_device(dev, &drvdata->wdt);
if (ret)
return ret;
dev_info(dev, "Initialized\n");
return 0;
}
static const struct of_device_id rt288x_wdt_match[] = {
{ .compatible = "ralink,rt2880-wdt" },
{},
};
MODULE_DEVICE_TABLE(of, rt288x_wdt_match);
static struct platform_driver rt288x_wdt_driver = {
.probe = rt288x_wdt_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = rt288x_wdt_match,
},
};
module_platform_driver(rt288x_wdt_driver);
MODULE_DESCRIPTION("MediaTek/Ralink RT288x/RT3xxx hardware watchdog driver");
MODULE_AUTHOR("Gabor Juhos <[email protected]");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/rt2880_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2015, Intel Corporation.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/completion.h>
#include <linux/watchdog.h>
#include <linux/uuid.h>
#include <linux/mei_cl_bus.h>
/*
* iAMT Watchdog Device
*/
#define INTEL_AMT_WATCHDOG_ID "iamt_wdt"
#define MEI_WDT_DEFAULT_TIMEOUT 120 /* seconds */
#define MEI_WDT_MIN_TIMEOUT 120 /* seconds */
#define MEI_WDT_MAX_TIMEOUT 65535 /* seconds */
/* Commands */
#define MEI_MANAGEMENT_CONTROL 0x02
/* MEI Management Control version number */
#define MEI_MC_VERSION_NUMBER 0x10
/* Sub Commands */
#define MEI_MC_START_WD_TIMER_REQ 0x13
#define MEI_MC_START_WD_TIMER_RES 0x83
#define MEI_WDT_STATUS_SUCCESS 0
#define MEI_WDT_WDSTATE_NOT_REQUIRED 0x1
#define MEI_MC_STOP_WD_TIMER_REQ 0x14
/**
* enum mei_wdt_state - internal watchdog state
*
* @MEI_WDT_PROBE: wd in probing stage
* @MEI_WDT_IDLE: wd is idle and not opened
* @MEI_WDT_START: wd was opened, start was called
* @MEI_WDT_RUNNING: wd is expecting keep alive pings
* @MEI_WDT_STOPPING: wd is stopping and will move to IDLE
* @MEI_WDT_NOT_REQUIRED: wd device is not required
*/
enum mei_wdt_state {
MEI_WDT_PROBE,
MEI_WDT_IDLE,
MEI_WDT_START,
MEI_WDT_RUNNING,
MEI_WDT_STOPPING,
MEI_WDT_NOT_REQUIRED,
};
static const char *mei_wdt_state_str(enum mei_wdt_state state)
{
switch (state) {
case MEI_WDT_PROBE:
return "PROBE";
case MEI_WDT_IDLE:
return "IDLE";
case MEI_WDT_START:
return "START";
case MEI_WDT_RUNNING:
return "RUNNING";
case MEI_WDT_STOPPING:
return "STOPPING";
case MEI_WDT_NOT_REQUIRED:
return "NOT_REQUIRED";
default:
return "unknown";
}
}
/**
* struct mei_wdt - mei watchdog driver
* @wdd: watchdog device
*
* @cldev: mei watchdog client device
* @state: watchdog internal state
* @resp_required: whether a ping requires a response
* @response: ping response completion
* @unregister: unregister worker
* @reg_lock: watchdog device registration lock
* @timeout: watchdog current timeout
*
* @dbgfs_dir: debugfs dir entry
*/
struct mei_wdt {
struct watchdog_device wdd;
struct mei_cl_device *cldev;
enum mei_wdt_state state;
bool resp_required;
struct completion response;
struct work_struct unregister;
struct mutex reg_lock;
u16 timeout;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
};
/**
* struct mei_mc_hdr - Management Control Command Header
*
* @command: Management Control (0x2)
* @bytecount: Number of bytes in the message beyond this byte
* @subcommand: Management Control Subcommand
* @versionnumber: Management Control Version (0x10)
*/
struct mei_mc_hdr {
u8 command;
u8 bytecount;
u8 subcommand;
u8 versionnumber;
};
/**
* struct mei_wdt_start_request - watchdog start/ping
*
* @hdr: Management Control Command Header
* @timeout: timeout value
* @reserved: reserved (legacy)
*/
struct mei_wdt_start_request {
struct mei_mc_hdr hdr;
u16 timeout;
u8 reserved[17];
} __packed;
/**
* struct mei_wdt_start_response - watchdog start/ping response
*
* @hdr: Management Control Command Header
* @status: operation status
* @wdstate: watchdog status bit mask
*/
struct mei_wdt_start_response {
struct mei_mc_hdr hdr;
u8 status;
u8 wdstate;
} __packed;
/**
* struct mei_wdt_stop_request - watchdog stop
*
* @hdr: Management Control Command Header
*/
struct mei_wdt_stop_request {
struct mei_mc_hdr hdr;
} __packed;
/**
* mei_wdt_ping - send wd start/ping command
*
* @wdt: mei watchdog device
*
* Return: 0 on success,
* negative errno code on failure
*/
static int mei_wdt_ping(struct mei_wdt *wdt)
{
struct mei_wdt_start_request req;
const size_t req_len = sizeof(req);
int ret;
memset(&req, 0, req_len);
req.hdr.command = MEI_MANAGEMENT_CONTROL;
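/* bytecount covers everything after the bytecount byte itself */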
req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ;
req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
req.timeout = wdt->timeout;
ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
if (ret < 0)
return ret;
return 0;
}
/**
* mei_wdt_stop - send wd stop command
*
* @wdt: mei watchdog device
*
* Return: 0 on success,
* negative errno code on failure
*/
static int mei_wdt_stop(struct mei_wdt *wdt)
{
struct mei_wdt_stop_request req;
const size_t req_len = sizeof(req);
int ret;
memset(&req, 0, req_len);
req.hdr.command = MEI_MANAGEMENT_CONTROL;
req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ;
req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
if (ret < 0)
return ret;
return 0;
}
/**
* mei_wdt_ops_start - wd start command from the watchdog core.
*
* @wdd: watchdog device
*
* Return: 0 on success or -ENODEV;
*/
static int mei_wdt_ops_start(struct watchdog_device *wdd)
{
struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
wdt->state = MEI_WDT_START;
wdd->timeout = wdt->timeout;
return 0;
}
/**
* mei_wdt_ops_stop - wd stop command from the watchdog core.
*
* @wdd: watchdog device
*
* Return: 0 if success, negative errno code for failure
*/
static int mei_wdt_ops_stop(struct watchdog_device *wdd)
{
struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
int ret;
if (wdt->state != MEI_WDT_RUNNING)
return 0;
wdt->state = MEI_WDT_STOPPING;
ret = mei_wdt_stop(wdt);
if (ret)
return ret;
wdt->state = MEI_WDT_IDLE;
return 0;
}
/**
* mei_wdt_ops_ping - wd ping command from the watchdog core.
*
* @wdd: watchdog device
*
* Return: 0 if success, negative errno code on failure
*/
static int mei_wdt_ops_ping(struct watchdog_device *wdd)
{
struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
int ret;
if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING)
return 0;
if (wdt->resp_required)
init_completion(&wdt->response);
wdt->state = MEI_WDT_RUNNING;
ret = mei_wdt_ping(wdt);
if (ret)
return ret;
if (wdt->resp_required)
ret = wait_for_completion_killable(&wdt->response);
return ret;
}
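/*
 * Editor's note on the flow above: when resp_required is set (MEI client
 * protocol version > 0x1, see mei_wdt_probe()), the ping blocks here until
 * mei_wdt_rx() validates the start response and completes &wdt->response,
 * or until the wait is killed.
 */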
/**
* mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core.
*
* @wdd: watchdog device
* @timeout: timeout value to set
*
* Return: 0 on success, negative errno code on failure
*/
static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
/* valid value is already checked by the caller */
wdt->timeout = timeout;
wdd->timeout = timeout;
return 0;
}
static const struct watchdog_ops wd_ops = {
.owner = THIS_MODULE,
.start = mei_wdt_ops_start,
.stop = mei_wdt_ops_stop,
.ping = mei_wdt_ops_ping,
.set_timeout = mei_wdt_ops_set_timeout,
};
/* not const as the firmware_version field needs to be retrieved at runtime */
static struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_ALARMONLY,
};
/**
* __mei_wdt_is_registered - check if wdt is registered
*
* @wdt: mei watchdog device
*
* Return: true if the wdt is registered with the watchdog subsystem
* Locking: should be called under wdt->reg_lock
*/
static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt)
{
return !!watchdog_get_drvdata(&wdt->wdd);
}
/**
* mei_wdt_unregister - unregister from the watchdog subsystem
*
* @wdt: mei watchdog device
*/
static void mei_wdt_unregister(struct mei_wdt *wdt)
{
mutex_lock(&wdt->reg_lock);
if (__mei_wdt_is_registered(wdt)) {
watchdog_unregister_device(&wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, NULL);
memset(&wdt->wdd, 0, sizeof(wdt->wdd));
}
mutex_unlock(&wdt->reg_lock);
}
/**
* mei_wdt_register - register with the watchdog subsystem
*
* @wdt: mei watchdog device
*
* Return: 0 on success, negative errno code on failure
*/
static int mei_wdt_register(struct mei_wdt *wdt)
{
struct device *dev;
int ret;
if (!wdt || !wdt->cldev)
return -EINVAL;
dev = &wdt->cldev->dev;
mutex_lock(&wdt->reg_lock);
if (__mei_wdt_is_registered(wdt)) {
ret = 0;
goto out;
}
wdt->wdd.info = &wd_info;
wdt->wdd.ops = &wd_ops;
wdt->wdd.parent = dev;
wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT;
wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT;
wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_stop_on_reboot(&wdt->wdd);
watchdog_stop_on_unregister(&wdt->wdd);
ret = watchdog_register_device(&wdt->wdd);
if (ret)
watchdog_set_drvdata(&wdt->wdd, NULL);
wdt->state = MEI_WDT_IDLE;
out:
mutex_unlock(&wdt->reg_lock);
return ret;
}
static void mei_wdt_unregister_work(struct work_struct *work)
{
struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);
mei_wdt_unregister(wdt);
}
/**
* mei_wdt_rx - callback for data receive
*
* @cldev: bus device
*/
static void mei_wdt_rx(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
struct mei_wdt_start_response res;
const size_t res_len = sizeof(res);
int ret;
ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len);
if (ret < 0) {
dev_err(&cldev->dev, "failure in recv %d\n", ret);
return;
}
/* An empty response can be sent on stop */
if (ret == 0)
return;
if (ret < sizeof(struct mei_mc_hdr)) {
dev_err(&cldev->dev, "recv small data %d\n", ret);
return;
}
if (res.hdr.command != MEI_MANAGEMENT_CONTROL ||
res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) {
dev_err(&cldev->dev, "wrong command received\n");
return;
}
if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) {
dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n",
res.hdr.subcommand,
mei_wdt_state_str(wdt->state),
wdt->state);
return;
}
/* Run the unregistration in a worker: it may only run after
 * ping completion, otherwise the flow would deadlock on the
 * watchdog core mutex.
 */
if (wdt->state == MEI_WDT_RUNNING) {
if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
wdt->state = MEI_WDT_NOT_REQUIRED;
schedule_work(&wdt->unregister);
}
goto out;
}
if (wdt->state == MEI_WDT_PROBE) {
if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
wdt->state = MEI_WDT_NOT_REQUIRED;
} else {
/* stop the watchdog and register watchdog device */
mei_wdt_stop(wdt);
mei_wdt_register(wdt);
}
return;
}
dev_warn(&cldev->dev, "not in correct state %s[%d]\n",
mei_wdt_state_str(wdt->state), wdt->state);
out:
if (!completion_done(&wdt->response))
complete(&wdt->response);
}
/**
* mei_wdt_notif - callback for event notification
*
* @cldev: bus device
*/
static void mei_wdt_notif(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
if (wdt->state != MEI_WDT_NOT_REQUIRED)
return;
mei_wdt_register(wdt);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mei_wdt *wdt = file->private_data;
const size_t bufsz = 32;
char buf[32];
ssize_t pos;
mutex_lock(&wdt->reg_lock);
pos = scnprintf(buf, bufsz, "%s\n",
__mei_wdt_is_registered(wdt) ? "activated" : "deactivated");
mutex_unlock(&wdt->reg_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
}
static const struct file_operations dbgfs_fops_activation = {
.open = simple_open,
.read = mei_dbgfs_read_activation,
.llseek = generic_file_llseek,
};
static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mei_wdt *wdt = file->private_data;
char buf[32];
ssize_t pos;
pos = scnprintf(buf, sizeof(buf), "state: %s\n",
mei_wdt_state_str(wdt->state));
return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
}
static const struct file_operations dbgfs_fops_state = {
.open = simple_open,
.read = mei_dbgfs_read_state,
.llseek = generic_file_llseek,
};
static void dbgfs_unregister(struct mei_wdt *wdt)
{
debugfs_remove_recursive(wdt->dbgfs_dir);
wdt->dbgfs_dir = NULL;
}
static void dbgfs_register(struct mei_wdt *wdt)
{
struct dentry *dir;
dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
wdt->dbgfs_dir = dir;
debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state);
debugfs_create_file("activation", S_IRUSR, dir, wdt,
&dbgfs_fops_activation);
}
#else
static inline void dbgfs_unregister(struct mei_wdt *wdt) {}
static inline void dbgfs_register(struct mei_wdt *wdt) {}
#endif /* CONFIG_DEBUG_FS */
static int mei_wdt_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct mei_wdt *wdt;
int ret;
wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT;
wdt->state = MEI_WDT_PROBE;
wdt->cldev = cldev;
wdt->resp_required = mei_cldev_ver(cldev) > 0x1;
mutex_init(&wdt->reg_lock);
init_completion(&wdt->response);
INIT_WORK(&wdt->unregister, mei_wdt_unregister_work);
mei_cldev_set_drvdata(cldev, wdt);
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(&cldev->dev, "Could not enable cl device\n");
goto err_out;
}
ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx);
if (ret) {
dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret);
goto err_disable;
}
ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif);
/* on legacy devices notification is not supported
*/
if (ret && ret != -EOPNOTSUPP) {
dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret);
goto err_disable;
}
wd_info.firmware_version = mei_cldev_ver(cldev);
if (wdt->resp_required)
ret = mei_wdt_ping(wdt);
else
ret = mei_wdt_register(wdt);
if (ret)
goto err_disable;
dbgfs_register(wdt);
return 0;
err_disable:
mei_cldev_disable(cldev);
err_out:
kfree(wdt);
return ret;
}
static void mei_wdt_remove(struct mei_cl_device *cldev)
{
struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
/* Unblock a caller waiting for a response in case of a fw-initiated or unexpected reset */
if (!completion_done(&wdt->response))
complete(&wdt->response);
cancel_work_sync(&wdt->unregister);
mei_wdt_unregister(wdt);
mei_cldev_disable(cldev);
dbgfs_unregister(wdt);
kfree(wdt);
}
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
static const struct mei_cl_device_id mei_wdt_tbl[] = {
{ .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY },
/* required last entry */
{ }
};
MODULE_DEVICE_TABLE(mei, mei_wdt_tbl);
static struct mei_cl_driver mei_wdt_driver = {
.id_table = mei_wdt_tbl,
.name = KBUILD_MODNAME,
.probe = mei_wdt_probe,
.remove = mei_wdt_remove,
};
module_mei_cl_driver(mei_wdt_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
| linux-master | drivers/watchdog/mei_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/watchdog/orion_wdt.c
*
* Watchdog driver for Orion/Kirkwood processors
*
* Author: Sylver Bruneau <[email protected]>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
/* RSTOUT mask register physical address for Orion5x, Kirkwood and Dove */
#define ORION_RSTOUT_MASK_OFFSET 0x20108
/* Internal registers can be configured at any 1 MiB aligned address */
#define INTERNAL_REGS_MASK ~(SZ_1M - 1)
/*
* Watchdog timer block registers.
*/
#define TIMER_CTRL 0x0000
#define TIMER1_FIXED_ENABLE_BIT BIT(12)
#define WDT_AXP_FIXED_ENABLE_BIT BIT(10)
#define TIMER1_ENABLE_BIT BIT(2)
#define TIMER_A370_STATUS 0x0004
#define WDT_A370_EXPIRED BIT(31)
#define TIMER1_STATUS_BIT BIT(8)
#define TIMER1_VAL_OFF 0x001c
#define WDT_MAX_CYCLE_COUNT 0xffffffff
#define WDT_A370_RATIO_MASK(v) ((v) << 16)
#define WDT_A370_RATIO_SHIFT 5
#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat; /* module parameter (seconds) */
struct orion_watchdog;
struct orion_watchdog_data {
int wdt_counter_offset;
int wdt_enable_bit;
int rstout_enable_bit;
int rstout_mask_bit;
int (*clock_init)(struct platform_device *,
struct orion_watchdog *);
int (*enabled)(struct orion_watchdog *);
int (*start)(struct watchdog_device *);
int (*stop)(struct watchdog_device *);
};
struct orion_watchdog {
struct watchdog_device wdt;
void __iomem *reg;
void __iomem *rstout;
void __iomem *rstout_mask;
unsigned long clk_rate;
struct clk *clk;
const struct orion_watchdog_data *data;
};
static int orion_wdt_clock_init(struct platform_device *pdev,
struct orion_watchdog *dev)
{
int ret;
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
ret = clk_prepare_enable(dev->clk);
if (ret) {
clk_put(dev->clk);
return ret;
}
dev->clk_rate = clk_get_rate(dev->clk);
return 0;
}
static int armada370_wdt_clock_init(struct platform_device *pdev,
struct orion_watchdog *dev)
{
int ret;
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
ret = clk_prepare_enable(dev->clk);
if (ret) {
clk_put(dev->clk);
return ret;
}
/* Setup watchdog input clock */
atomic_io_modify(dev->reg + TIMER_CTRL,
WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT),
WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT));
dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
return 0;
}
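/*
 * Editor's note, worked example with a hypothetical input clock: the ratio
 * is 1 << WDT_A370_RATIO_SHIFT = 32, so a 600 MHz timer clock would yield
 * clk_rate = 600000000 / 32 = 18750000 Hz and a maximum timeout of
 * WDT_MAX_CYCLE_COUNT / 18750000, roughly 229 seconds.
 */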
static int armada375_wdt_clock_init(struct platform_device *pdev,
struct orion_watchdog *dev)
{
int ret;
dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
if (!IS_ERR(dev->clk)) {
ret = clk_prepare_enable(dev->clk);
if (ret) {
clk_put(dev->clk);
return ret;
}
atomic_io_modify(dev->reg + TIMER_CTRL,
WDT_AXP_FIXED_ENABLE_BIT,
WDT_AXP_FIXED_ENABLE_BIT);
dev->clk_rate = clk_get_rate(dev->clk);
return 0;
}
/* Mandatory fallback for proper devicetree backward compatibility */
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
ret = clk_prepare_enable(dev->clk);
if (ret) {
clk_put(dev->clk);
return ret;
}
atomic_io_modify(dev->reg + TIMER_CTRL,
WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT),
WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT));
dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
return 0;
}
static int armadaxp_wdt_clock_init(struct platform_device *pdev,
struct orion_watchdog *dev)
{
int ret;
u32 val;
dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
ret = clk_prepare_enable(dev->clk);
if (ret) {
clk_put(dev->clk);
return ret;
}
/* Fix the wdt and timer1 clock frequency to 25MHz */
val = WDT_AXP_FIXED_ENABLE_BIT | TIMER1_FIXED_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, val, val);
dev->clk_rate = clk_get_rate(dev->clk);
return 0;
}
static int orion_wdt_ping(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* Reload watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
dev->reg + TIMER1_VAL_OFF);
return 0;
}
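/*
 * Editor's note, worked example: on the Armada XP path the input clock is
 * fixed at 25 MHz (see armadaxp_wdt_clock_init()), so a 10 s timeout
 * reloads the counter with 25000000 * 10 = 250000000, well below
 * WDT_MAX_CYCLE_COUNT (0xffffffff, about a 171 s ceiling at that rate).
 */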
static int armada375_start(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
u32 reg;
/* Set watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
dev->reg + TIMER1_VAL_OFF);
/* Clear the watchdog expiration bit */
atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
/* Enable watchdog timer */
reg = dev->data->wdt_enable_bit;
if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
reg |= TIMER1_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
/* Enable reset on watchdog */
reg = readl(dev->rstout);
reg |= dev->data->rstout_enable_bit;
writel(reg, dev->rstout);
atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit, 0);
return 0;
}
static int armada370_start(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
u32 reg;
/* Set watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
/* Clear the watchdog expiration bit */
atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
/* Enable watchdog timer */
reg = dev->data->wdt_enable_bit;
if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
reg |= TIMER1_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
/* Enable reset on watchdog */
reg = readl(dev->rstout);
reg |= dev->data->rstout_enable_bit;
writel(reg, dev->rstout);
return 0;
}
static int orion_start(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* Set watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
/* Enable watchdog timer */
atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
dev->data->wdt_enable_bit);
/* Enable reset on watchdog */
atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
dev->data->rstout_enable_bit);
return 0;
}
static int orion_wdt_start(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* There are some per-SoC quirks to handle */
return dev->data->start(wdt_dev);
}
static int orion_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
/* Disable reset on watchdog */
atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0);
/* Disable watchdog timer */
atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
return 0;
}
static int armada375_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
u32 reg, mask;
/* Disable reset on watchdog */
atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit,
dev->data->rstout_mask_bit);
reg = readl(dev->rstout);
reg &= ~dev->data->rstout_enable_bit;
writel(reg, dev->rstout);
/* Disable watchdog timer */
mask = dev->data->wdt_enable_bit;
if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
mask |= TIMER1_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
return 0;
}
static int armada370_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
u32 reg, mask;
/* Disable reset on watchdog */
reg = readl(dev->rstout);
reg &= ~dev->data->rstout_enable_bit;
writel(reg, dev->rstout);
/* Disable watchdog timer */
mask = dev->data->wdt_enable_bit;
if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
mask |= TIMER1_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
return 0;
}
static int orion_wdt_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
return dev->data->stop(wdt_dev);
}
static int orion_enabled(struct orion_watchdog *dev)
{
bool enabled, running;
enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
return enabled && running;
}
static int armada375_enabled(struct orion_watchdog *dev)
{
bool masked, enabled, running;
masked = readl(dev->rstout_mask) & dev->data->rstout_mask_bit;
enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
return !masked && enabled && running;
}
static int orion_wdt_enabled(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
return dev->data->enabled(dev);
}
static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
}
static struct watchdog_info orion_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Orion Watchdog",
};
static const struct watchdog_ops orion_wdt_ops = {
.owner = THIS_MODULE,
.start = orion_wdt_start,
.stop = orion_wdt_stop,
.ping = orion_wdt_ping,
.get_timeleft = orion_wdt_get_timeleft,
};
static irqreturn_t orion_wdt_irq(int irq, void *devid)
{
panic("Watchdog Timeout");
return IRQ_HANDLED;
}
static irqreturn_t orion_wdt_pre_irq(int irq, void *devid)
{
struct orion_watchdog *dev = devid;
atomic_io_modify(dev->reg + TIMER_A370_STATUS,
TIMER1_STATUS_BIT, 0);
watchdog_notify_pretimeout(&dev->wdt);
return IRQ_HANDLED;
}
/*
* The original devicetree binding for this driver specified only
* one memory resource, so in order to keep DT backwards compatibility
* we try to fall back to a hardcoded register address if the resource
* is missing from the devicetree.
*/
static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev,
phys_addr_t internal_regs)
{
struct resource *res;
phys_addr_t rstout;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res)
return devm_ioremap(&pdev->dev, res->start,
resource_size(res));
rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET;
WARN(1, FW_BUG "falling back to hardcoded RSTOUT reg %pa\n", &rstout);
return devm_ioremap(&pdev->dev, rstout, 0x4);
}
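/*
 * Editor's note, worked example with a hypothetical base address: if the
 * timer block resource starts at 0xf1020300, INTERNAL_REGS_MASK strips the
 * low 20 bits to give internal_regs = 0xf1000000, and the legacy fallback
 * maps 0xf1000000 + ORION_RSTOUT_MASK_OFFSET = 0xf1020108.
 */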
static const struct orion_watchdog_data orion_data = {
.rstout_enable_bit = BIT(1),
.wdt_enable_bit = BIT(4),
.wdt_counter_offset = 0x24,
.clock_init = orion_wdt_clock_init,
.enabled = orion_enabled,
.start = orion_start,
.stop = orion_stop,
};
static const struct orion_watchdog_data armada370_data = {
.rstout_enable_bit = BIT(8),
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armada370_wdt_clock_init,
.enabled = orion_enabled,
.start = armada370_start,
.stop = armada370_stop,
};
static const struct orion_watchdog_data armadaxp_data = {
.rstout_enable_bit = BIT(8),
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armadaxp_wdt_clock_init,
.enabled = orion_enabled,
.start = armada370_start,
.stop = armada370_stop,
};
static const struct orion_watchdog_data armada375_data = {
.rstout_enable_bit = BIT(8),
.rstout_mask_bit = BIT(10),
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armada375_wdt_clock_init,
.enabled = armada375_enabled,
.start = armada375_start,
.stop = armada375_stop,
};
static const struct orion_watchdog_data armada380_data = {
.rstout_enable_bit = BIT(8),
.rstout_mask_bit = BIT(10),
.wdt_enable_bit = BIT(8),
.wdt_counter_offset = 0x34,
.clock_init = armadaxp_wdt_clock_init,
.enabled = armada375_enabled,
.start = armada375_start,
.stop = armada375_stop,
};
static const struct of_device_id orion_wdt_of_match_table[] = {
{
.compatible = "marvell,orion-wdt",
.data = &orion_data,
},
{
.compatible = "marvell,armada-370-wdt",
.data = &armada370_data,
},
{
.compatible = "marvell,armada-xp-wdt",
.data = &armadaxp_data,
},
{
.compatible = "marvell,armada-375-wdt",
.data = &armada375_data,
},
{
.compatible = "marvell,armada-380-wdt",
.data = &armada380_data,
},
{},
};
MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
static int orion_wdt_get_regs(struct platform_device *pdev,
struct orion_watchdog *dev)
{
struct device_node *node = pdev->dev.of_node;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!dev->reg)
return -ENOMEM;
/* Each supported compatible has some RSTOUT register quirk */
if (of_device_is_compatible(node, "marvell,orion-wdt")) {
dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
INTERNAL_REGS_MASK);
if (!dev->rstout)
return -ENODEV;
} else if (of_device_is_compatible(node, "marvell,armada-370-wdt") ||
of_device_is_compatible(node, "marvell,armada-xp-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
} else if (of_device_is_compatible(node, "marvell,armada-375-wdt") ||
of_device_is_compatible(node, "marvell,armada-380-wdt")) {
/* Dedicated RSTOUT register, can be requested. */
dev->rstout = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dev->rstout))
return PTR_ERR(dev->rstout);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res)
return -ENODEV;
dev->rstout_mask = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!dev->rstout_mask)
return -ENOMEM;
} else {
return -ENODEV;
}
return 0;
}
static int orion_wdt_probe(struct platform_device *pdev)
{
struct orion_watchdog *dev;
const struct of_device_id *match;
unsigned int wdt_max_duration; /* (seconds) */
int ret, irq;
dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog),
GFP_KERNEL);
if (!dev)
return -ENOMEM;
match = of_match_device(orion_wdt_of_match_table, &pdev->dev);
if (!match)
/* Default legacy match */
match = &orion_wdt_of_match_table[0];
dev->wdt.info = &orion_wdt_info;
dev->wdt.ops = &orion_wdt_ops;
dev->wdt.min_timeout = 1;
dev->data = match->data;
ret = orion_wdt_get_regs(pdev, dev);
if (ret)
return ret;
ret = dev->data->clock_init(pdev, dev);
if (ret) {
dev_err(&pdev->dev, "cannot initialize clock\n");
return ret;
}
wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate;
dev->wdt.timeout = wdt_max_duration;
dev->wdt.max_timeout = wdt_max_duration;
dev->wdt.parent = &pdev->dev;
watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
platform_set_drvdata(pdev, &dev->wdt);
watchdog_set_drvdata(&dev->wdt, dev);
/*
* Let's make sure the watchdog is fully stopped, unless it's
* explicitly enabled. This may be the case if the module was
* removed and re-inserted, or if the bootloader explicitly
* set a running watchdog before booting the kernel.
*/
if (!orion_wdt_enabled(&dev->wdt))
orion_wdt_stop(&dev->wdt);
else
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
/* Request the IRQ only after the watchdog is disabled */
irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
/*
* Not all supported platforms specify an interrupt for the
* watchdog, so let's make it optional.
*/
ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0,
pdev->name, dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto disable_clk;
}
}
/* Optional 2nd interrupt for pretimeout */
irq = platform_get_irq_optional(pdev, 1);
if (irq > 0) {
orion_wdt_info.options |= WDIOF_PRETIMEOUT;
ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
0, pdev->name, dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto disable_clk;
}
}
watchdog_set_nowayout(&dev->wdt, nowayout);
ret = watchdog_register_device(&dev->wdt);
if (ret)
goto disable_clk;
pr_info("Initial timeout %d sec%s\n",
dev->wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
disable_clk:
clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
return ret;
}
static void orion_wdt_remove(struct platform_device *pdev)
{
struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
watchdog_unregister_device(wdt_dev);
clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
}
static void orion_wdt_shutdown(struct platform_device *pdev)
{
struct watchdog_device *wdt_dev = platform_get_drvdata(pdev);
orion_wdt_stop(wdt_dev);
}
static struct platform_driver orion_wdt_driver = {
.probe = orion_wdt_probe,
.remove_new = orion_wdt_remove,
.shutdown = orion_wdt_shutdown,
.driver = {
.name = "orion_wdt",
.of_match_table = orion_wdt_of_match_table,
},
};
module_platform_driver(orion_wdt_driver);
MODULE_AUTHOR("Sylver Bruneau <[email protected]>");
MODULE_DESCRIPTION("Orion Processor Watchdog");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:orion_wdt");
| linux-master | drivers/watchdog/orion_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the wm831x PMICs
*
* Copyright (C) 2009 Wolfson Microelectronics
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct wm831x_wdt_drvdata {
struct watchdog_device wdt;
struct wm831x *wm831x;
struct mutex lock;
int update_state;
};
/* We can't use the sub-second values here but they're included
* for completeness. */
static struct {
unsigned int time; /* Seconds */
u16 val; /* WDOG_TO value */
} wm831x_wdt_cfgs[] = {
{ 1, 2 },
{ 2, 3 },
{ 4, 4 },
{ 8, 5 },
{ 16, 6 },
{ 32, 7 },
{ 33, 7 }, /* Actually 32.768s so include both, others round down */
};
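/*
 * Editor's note, example lookup: wm831x_wdt_set_timeout(wdd, 8) matches the
 * { 8, 5 } entry above, so WDOG_TO is programmed to 5; a timeout missing
 * from the table (e.g. 3 seconds) is rejected with -EINVAL, not rounded.
 */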
static int wm831x_wdt_start(struct watchdog_device *wdt_dev)
{
struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
struct wm831x *wm831x = driver_data->wm831x;
int ret;
mutex_lock(&driver_data->lock);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_ENA, WM831X_WDOG_ENA);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
mutex_unlock(&driver_data->lock);
return ret;
}
static int wm831x_wdt_stop(struct watchdog_device *wdt_dev)
{
struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
struct wm831x *wm831x = driver_data->wm831x;
int ret;
mutex_lock(&driver_data->lock);
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_ENA, 0);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
mutex_unlock(&driver_data->lock);
return ret;
}
static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
{
struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
struct wm831x *wm831x = driver_data->wm831x;
int ret;
u16 reg;
mutex_lock(&driver_data->lock);
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (!(reg & WM831X_WDOG_RST_SRC)) {
dev_err(wm831x->dev, "Hardware watchdog update unsupported\n");
ret = -EINVAL;
goto out;
}
reg |= WM831X_WDOG_RESET;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
out:
mutex_unlock(&driver_data->lock);
return ret;
}
static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int timeout)
{
struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
struct wm831x *wm831x = driver_data->wm831x;
int ret, i;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
if (wm831x_wdt_cfgs[i].time == timeout)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
return -EINVAL;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
WM831X_WDOG_TO_MASK,
wm831x_wdt_cfgs[i].val);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
ret);
}
wdt_dev->timeout = timeout;
return ret;
}
static const struct watchdog_info wm831x_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "WM831x Watchdog",
};
static const struct watchdog_ops wm831x_wdt_ops = {
.owner = THIS_MODULE,
.start = wm831x_wdt_start,
.stop = wm831x_wdt_stop,
.ping = wm831x_wdt_ping,
.set_timeout = wm831x_wdt_set_timeout,
};
static int wm831x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct wm831x *wm831x = dev_get_drvdata(dev->parent);
struct wm831x_pdata *chip_pdata = dev_get_platdata(dev->parent);
struct wm831x_watchdog_pdata *pdata;
struct wm831x_wdt_drvdata *driver_data;
struct watchdog_device *wm831x_wdt;
int reg, ret, i;
ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read watchdog status: %d\n",
ret);
return ret;
}
reg = ret;
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
mutex_init(&driver_data->lock);
driver_data->wm831x = wm831x;
wm831x_wdt = &driver_data->wdt;
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
wm831x_wdt->parent = dev;
watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
reg &= WM831X_WDOG_TO_MASK;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
if (wm831x_wdt_cfgs[i].val == reg)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
dev_warn(wm831x->dev,
"Unknown watchdog timeout: %x\n", reg);
else
wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;
/* Apply any configuration */
if (chip_pdata)
pdata = chip_pdata->watchdog;
else
pdata = NULL;
if (pdata) {
reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK |
WM831X_WDOG_RST_SRC);
reg |= pdata->primary << WM831X_WDOG_PRIMACT_SHIFT;
reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT;
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
wm831x_reg_lock(wm831x);
} else {
dev_err(wm831x->dev,
"Failed to unlock security key: %d\n", ret);
return ret;
}
}
return devm_watchdog_register_device(dev, &driver_data->wdt);
}
static struct platform_driver wm831x_wdt_driver = {
.probe = wm831x_wdt_probe,
.driver = {
.name = "wm831x-watchdog",
},
};
module_platform_driver(wm831x_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-watchdog");
| linux-master | drivers/watchdog/wm831x_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/char/watchdog/max63xx_wdt.c
*
* Driver for max63{69,70,71,72,73,74} watchdog timers
*
* Copyright (C) 2009 Marc Zyngier <[email protected]>
*
* This driver assumes the watchdog pins are memory mapped (as it is
* the case for the Arcom Zeus). Should it be connected over GPIOs or
* another interface, some abstraction will have to be introduced.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/property.h>
#define DEFAULT_HEARTBEAT 60
#define MAX_HEARTBEAT 60
static unsigned int heartbeat = DEFAULT_HEARTBEAT;
static bool nowayout = WATCHDOG_NOWAYOUT;
/*
* Memory mapping: a single byte; the three lowest bits select the
* timeout (WDSET) and bit 3 is toggled to ping the watchdog (WDI).
*/
#define MAX6369_WDSET (7 << 0)
#define MAX6369_WDI (1 << 3)
#define MAX6369_WDSET_DISABLED 3
static int nodelay;
struct max63xx_wdt {
struct watchdog_device wdd;
const struct max63xx_timeout *timeout;
/* memory mapping */
void __iomem *base;
spinlock_t lock;
/* WDI and WSET bits write access routines */
void (*ping)(struct max63xx_wdt *wdt);
void (*set)(struct max63xx_wdt *wdt, u8 set);
};
/*
* The timeout values used are actually the absolute minimum the chip
* offers. Typical values on my board are slightly over twice as long
* (10s setting ends up with a 25s timeout), and can be up to 3 times
* the nominal setting (according to the datasheet). So please take
* these values with a grain of salt. Same goes for the initial delay
* "feature". Only max6373/74 have a few settings without this initial
* delay (selected with the "nodelay" parameter).
*
* I also decided to remove from the tables any timeout smaller than a
* second, as it looked completely overkill...
*/
/* Timeouts in seconds */
struct max63xx_timeout {
const u8 wdset;
const u8 tdelay;
const u8 twd;
};
static const struct max63xx_timeout max6369_table[] = {
{ 5, 1, 1 },
{ 6, 10, 10 },
{ 7, 60, 60 },
{ },
};
static const struct max63xx_timeout max6371_table[] = {
{ 6, 60, 3 },
{ 7, 60, 60 },
{ },
};
static const struct max63xx_timeout max6373_table[] = {
{ 2, 60, 1 },
{ 5, 0, 1 },
{ 1, 3, 3 },
{ 7, 60, 10 },
{ 6, 0, 10 },
{ },
};
static const struct max63xx_timeout *
max63xx_select_timeout(const struct max63xx_timeout *table, int value)
{
while (table->twd) {
if (value <= table->twd) {
if (nodelay && table->tdelay == 0)
return table;
if (!nodelay)
return table;
}
table++;
}
return NULL;
}
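/*
 * Editor's note, example selection: from max6373_table with heartbeat = 10,
 * the first entry with twd >= 10 is { 7, 60, 10 }; with nodelay = 0 it is
 * returned (60 s initial delay), while nodelay = 1 skips it and returns the
 * delay-free { 6, 0, 10 } entry instead.
 */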
static int max63xx_wdt_ping(struct watchdog_device *wdd)
{
struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
wdt->ping(wdt);
return 0;
}
static int max63xx_wdt_start(struct watchdog_device *wdd)
{
struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
wdt->set(wdt, wdt->timeout->wdset);
/* check for an edge-triggered startup */
if (wdt->timeout->tdelay == 0)
wdt->ping(wdt);
return 0;
}
static int max63xx_wdt_stop(struct watchdog_device *wdd)
{
struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
wdt->set(wdt, MAX6369_WDSET_DISABLED);
return 0;
}
static const struct watchdog_ops max63xx_wdt_ops = {
.owner = THIS_MODULE,
.start = max63xx_wdt_start,
.stop = max63xx_wdt_stop,
.ping = max63xx_wdt_ping,
};
static const struct watchdog_info max63xx_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "max63xx Watchdog",
};
static void max63xx_mmap_ping(struct max63xx_wdt *wdt)
{
u8 val;
spin_lock(&wdt->lock);
val = __raw_readb(wdt->base);
__raw_writeb(val | MAX6369_WDI, wdt->base);
__raw_writeb(val & ~MAX6369_WDI, wdt->base);
spin_unlock(&wdt->lock);
}
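/*
 * Editor's note, example register values: if the byte currently reads 0x05
 * (WDSET = 5, WDI low), the two writes above produce 0x0d then 0x05, i.e. a
 * clean pulse on WDI without disturbing the WDSET bits.
 */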
static void max63xx_mmap_set(struct max63xx_wdt *wdt, u8 set)
{
u8 val;
spin_lock(&wdt->lock);
val = __raw_readb(wdt->base);
val &= ~MAX6369_WDSET;
val |= set & MAX6369_WDSET;
__raw_writeb(val, wdt->base);
spin_unlock(&wdt->lock);
}
static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
{
wdt->base = devm_platform_ioremap_resource(p, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
spin_lock_init(&wdt->lock);
wdt->ping = max63xx_mmap_ping;
wdt->set = max63xx_mmap_set;
return 0;
}
static int max63xx_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct max63xx_wdt *wdt;
const struct max63xx_timeout *table;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
/* Attempt to use fwnode first */
table = device_get_match_data(dev);
if (!table)
table = (struct max63xx_timeout *)pdev->id_entry->driver_data;
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
wdt->timeout = max63xx_select_timeout(table, heartbeat);
if (!wdt->timeout) {
dev_err(dev, "unable to satisfy %ds heartbeat request\n",
heartbeat);
return -EINVAL;
}
err = max63xx_mmap_init(pdev, wdt);
if (err)
return err;
platform_set_drvdata(pdev, &wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, wdt);
wdt->wdd.parent = dev;
wdt->wdd.timeout = wdt->timeout->twd;
wdt->wdd.info = &max63xx_wdt_info;
wdt->wdd.ops = &max63xx_wdt_ops;
watchdog_set_nowayout(&wdt->wdd, nowayout);
err = devm_watchdog_register_device(dev, &wdt->wdd);
if (err)
return err;
dev_info(dev, "using %ds heartbeat with %ds initial delay\n",
wdt->timeout->twd, wdt->timeout->tdelay);
return 0;
}
static const struct platform_device_id max63xx_id_table[] = {
{ "max6369_wdt", (kernel_ulong_t)max6369_table, },
{ "max6370_wdt", (kernel_ulong_t)max6369_table, },
{ "max6371_wdt", (kernel_ulong_t)max6371_table, },
{ "max6372_wdt", (kernel_ulong_t)max6371_table, },
{ "max6373_wdt", (kernel_ulong_t)max6373_table, },
{ "max6374_wdt", (kernel_ulong_t)max6373_table, },
{ },
};
MODULE_DEVICE_TABLE(platform, max63xx_id_table);
static const struct of_device_id max63xx_dt_id_table[] = {
{ .compatible = "maxim,max6369", .data = max6369_table, },
{ .compatible = "maxim,max6370", .data = max6369_table, },
{ .compatible = "maxim,max6371", .data = max6371_table, },
{ .compatible = "maxim,max6372", .data = max6371_table, },
{ .compatible = "maxim,max6373", .data = max6373_table, },
{ .compatible = "maxim,max6374", .data = max6373_table, },
{ }
};
MODULE_DEVICE_TABLE(of, max63xx_dt_id_table);
static struct platform_driver max63xx_wdt_driver = {
.probe = max63xx_wdt_probe,
.id_table = max63xx_id_table,
.driver = {
.name = "max63xx_wdt",
.of_match_table = max63xx_dt_id_table,
},
};
module_platform_driver(max63xx_wdt_driver);
MODULE_AUTHOR("Marc Zyngier <[email protected]>");
MODULE_DESCRIPTION("max63xx Watchdog Driver");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
"Watchdog heartbeat period in seconds from 1 to "
__MODULE_STRING(MAX_HEARTBEAT) ", default "
__MODULE_STRING(DEFAULT_HEARTBEAT));
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
module_param(nodelay, int, 0);
MODULE_PARM_DESC(nodelay,
"Force selection of a timeout setting without initial delay "
"(max6373/74 only, default=0)");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/max63xx_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2016 Mentor Graphics
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include "watchdog_pretimeout.h"
/**
* pretimeout_panic - Panic on watchdog pretimeout event
* @wdd: watchdog_device
*
* Panic if the watchdog has not been fed by the pretimeout event.
*/
static void pretimeout_panic(struct watchdog_device *wdd)
{
panic("watchdog pretimeout event\n");
}
static struct watchdog_governor watchdog_gov_panic = {
.name = "panic",
.pretimeout = pretimeout_panic,
};
static int __init watchdog_gov_panic_register(void)
{
return watchdog_register_governor(&watchdog_gov_panic);
}
static void __exit watchdog_gov_panic_unregister(void)
{
watchdog_unregister_governor(&watchdog_gov_panic);
}
module_init(watchdog_gov_panic_register);
module_exit(watchdog_gov_panic_unregister);
MODULE_AUTHOR("Vladimir Zapolskiy <[email protected]>");
MODULE_DESCRIPTION("Panic watchdog pretimeout governor");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/pretimeout_panic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2016 Mentor Graphics
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/watchdog.h>
#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
/* Default watchdog pretimeout governor */
static struct watchdog_governor *default_gov;
/* The spinlock protects default_gov, wdd->gov and pretimeout_list */
static DEFINE_SPINLOCK(pretimeout_lock);
/* List of watchdog devices, which can generate a pretimeout event */
static LIST_HEAD(pretimeout_list);
struct watchdog_pretimeout {
struct watchdog_device *wdd;
struct list_head entry;
};
/* The mutex protects governor list and serializes external interfaces */
static DEFINE_MUTEX(governor_lock);
/* List of the registered watchdog pretimeout governors */
static LIST_HEAD(governor_list);
struct governor_priv {
struct watchdog_governor *gov;
struct list_head entry;
};
static struct governor_priv *find_governor_by_name(const char *gov_name)
{
struct governor_priv *priv;
list_for_each_entry(priv, &governor_list, entry)
if (sysfs_streq(gov_name, priv->gov->name))
return priv;
return NULL;
}
int watchdog_pretimeout_available_governors_get(char *buf)
{
struct governor_priv *priv;
int count = 0;
mutex_lock(&governor_lock);
list_for_each_entry(priv, &governor_list, entry)
count += sysfs_emit_at(buf, count, "%s\n", priv->gov->name);
mutex_unlock(&governor_lock);
return count;
}
int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf)
{
int count = 0;
spin_lock_irq(&pretimeout_lock);
if (wdd->gov)
count = sysfs_emit(buf, "%s\n", wdd->gov->name);
spin_unlock_irq(&pretimeout_lock);
return count;
}
int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
const char *buf)
{
struct governor_priv *priv;
mutex_lock(&governor_lock);
priv = find_governor_by_name(buf);
if (!priv) {
mutex_unlock(&governor_lock);
return -EINVAL;
}
spin_lock_irq(&pretimeout_lock);
wdd->gov = priv->gov;
spin_unlock_irq(&pretimeout_lock);
mutex_unlock(&governor_lock);
return 0;
}
void watchdog_notify_pretimeout(struct watchdog_device *wdd)
{
unsigned long flags;
spin_lock_irqsave(&pretimeout_lock, flags);
if (!wdd->gov) {
spin_unlock_irqrestore(&pretimeout_lock, flags);
return;
}
wdd->gov->pretimeout(wdd);
spin_unlock_irqrestore(&pretimeout_lock, flags);
}
EXPORT_SYMBOL_GPL(watchdog_notify_pretimeout);
int watchdog_register_governor(struct watchdog_governor *gov)
{
struct watchdog_pretimeout *p;
struct governor_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_lock(&governor_lock);
if (find_governor_by_name(gov->name)) {
mutex_unlock(&governor_lock);
kfree(priv);
return -EBUSY;
}
priv->gov = gov;
list_add(&priv->entry, &governor_list);
if (!strncmp(gov->name, WATCHDOG_PRETIMEOUT_DEFAULT_GOV,
WATCHDOG_GOV_NAME_MAXLEN)) {
spin_lock_irq(&pretimeout_lock);
default_gov = gov;
list_for_each_entry(p, &pretimeout_list, entry)
if (!p->wdd->gov)
p->wdd->gov = default_gov;
spin_unlock_irq(&pretimeout_lock);
}
mutex_unlock(&governor_lock);
return 0;
}
EXPORT_SYMBOL(watchdog_register_governor);
void watchdog_unregister_governor(struct watchdog_governor *gov)
{
struct watchdog_pretimeout *p;
struct governor_priv *priv, *t;
mutex_lock(&governor_lock);
list_for_each_entry_safe(priv, t, &governor_list, entry) {
if (priv->gov == gov) {
list_del(&priv->entry);
kfree(priv);
break;
}
}
spin_lock_irq(&pretimeout_lock);
list_for_each_entry(p, &pretimeout_list, entry)
if (p->wdd->gov == gov)
p->wdd->gov = default_gov;
spin_unlock_irq(&pretimeout_lock);
mutex_unlock(&governor_lock);
}
EXPORT_SYMBOL(watchdog_unregister_governor);
int watchdog_register_pretimeout(struct watchdog_device *wdd)
{
struct watchdog_pretimeout *p;
if (!watchdog_have_pretimeout(wdd))
return 0;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
spin_lock_irq(&pretimeout_lock);
list_add(&p->entry, &pretimeout_list);
p->wdd = wdd;
wdd->gov = default_gov;
spin_unlock_irq(&pretimeout_lock);
return 0;
}
void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
{
struct watchdog_pretimeout *p, *t;
if (!watchdog_have_pretimeout(wdd))
return;
spin_lock_irq(&pretimeout_lock);
wdd->gov = NULL;
list_for_each_entry_safe(p, t, &pretimeout_list, entry) {
if (p->wdd == wdd) {
list_del(&p->entry);
kfree(p);
break;
}
}
spin_unlock_irq(&pretimeout_lock);
}
| linux-master | drivers/watchdog/watchdog_pretimeout.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Atmel AT91SAM9x processors.
*
* Copyright (C) 2008 Renaud CERRATO [email protected]
*
*/
/*
* The Watchdog Timer Mode Register can only be written once. If the
* timeout needs to be set from Linux, make sure that neither the
* bootstrap nor the bootloader writes to this register.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
#define wdt_read(wdt, field) \
readl_relaxed((wdt)->base + (field))
#define wdt_write(wtd, field, val) \
writel_relaxed((val), (wdt)->base + (field))
/* The AT91SAM9 watchdog runs a 12-bit counter at 256 Hz;
 * use these macros to convert a watchdog
 * value to/from jiffies or seconds.
 */
*/
#define ticks_to_hz_rounddown(t) ((((t) + 1) * HZ) >> 8)
#define ticks_to_hz_roundup(t) (((((t) + 1) * HZ) + 255) >> 8)
#define ticks_to_secs(t) (((t) + 1) >> 8)
#define secs_to_ticks(s) ((s) ? (((s) << 8) - 1) : 0)
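/*
 * Editor's note, worked example: at 256 Hz a full 12-bit counter gives
 * secs_to_ticks(16) = (16 << 8) - 1 = 4095 = WDT_COUNTER_MAX_TICKS, and
 * ticks_to_secs(4095) = (4095 + 1) >> 8 = 16, so the conversion round-trips
 * exactly at whole-second boundaries.
 */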
#define WDT_MR_RESET 0x3FFF2FFF
/* Watchdog max counter value in ticks */
#define WDT_COUNTER_MAX_TICKS 0xFFF
/* Watchdog max delta/value in secs */
#define WDT_COUNTER_MAX_SECS ticks_to_secs(WDT_COUNTER_MAX_TICKS)
/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2
/* Timer heartbeat (500ms) */
#define WDT_TIMEOUT (HZ/2)
/* User land timeout */
#define WDT_HEARTBEAT 15
static int heartbeat;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
"(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
struct at91wdt {
struct watchdog_device wdd;
void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
struct timer_list timer; /* The timer that pings the watchdog */
u32 mr;
u32 mr_mask;
unsigned long heartbeat; /* WDT heartbeat in jiffies */
bool nowayout;
unsigned int irq;
struct clk *sclk;
};
/* ......................................................................... */
static irqreturn_t wdt_interrupt(int irq, void *dev_id)
{
struct at91wdt *wdt = (struct at91wdt *)dev_id;
if (wdt_read(wdt, AT91_WDT_SR)) {
pr_crit("at91sam9 WDT software reset\n");
emergency_restart();
pr_crit("Reboot didn't ?????\n");
}
return IRQ_HANDLED;
}
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
static inline void at91_wdt_reset(struct at91wdt *wdt)
{
wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
* Timer tick
*/
static void at91_ping(struct timer_list *t)
{
struct at91wdt *wdt = from_timer(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
} else {
pr_crit("I will reset your machine !\n");
}
}
static int at91_wdt_start(struct watchdog_device *wdd)
{
struct at91wdt *wdt = to_wdt(wdd);
/* calculate when the next userspace timeout will be */
wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
return 0;
}
static int at91_wdt_stop(struct watchdog_device *wdd)
{
/* The watchdog timer hardware cannot be stopped... */
return 0;
}
static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
{
wdd->timeout = new_timeout;
return at91_wdt_start(wdd);
}
static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
{
u32 tmp;
u32 delta;
u32 value;
int err;
u32 mask = wdt->mr_mask;
unsigned long min_heartbeat = 1;
unsigned long max_heartbeat;
struct device *dev = &pdev->dev;
tmp = wdt_read(wdt, AT91_WDT_MR);
if ((tmp & mask) != (wdt->mr & mask)) {
if (tmp == WDT_MR_RESET) {
wdt_write(wdt, AT91_WDT_MR, wdt->mr);
tmp = wdt_read(wdt, AT91_WDT_MR);
}
}
if (tmp & AT91_WDT_WDDIS) {
if (wdt->mr & AT91_WDT_WDDIS)
return 0;
dev_err(dev, "watchdog is disabled\n");
return -EINVAL;
}
value = tmp & AT91_WDT_WDV;
delta = (tmp & AT91_WDT_WDD) >> 16;
if (delta < value)
min_heartbeat = ticks_to_hz_roundup(value - delta);
max_heartbeat = ticks_to_hz_rounddown(value);
if (!max_heartbeat) {
dev_err(dev,
"heartbeat is too small for the system to handle it correctly\n");
return -EINVAL;
}
/*
* Try to reset the watchdog counter 4 or 2 times more often than
* actually requested, to avoid spurious watchdog reset.
* If this is not possible because of the min_heartbeat value, reset
* it at the min_heartbeat period.
*/
if ((max_heartbeat / 4) >= min_heartbeat)
wdt->heartbeat = max_heartbeat / 4;
else if ((max_heartbeat / 2) >= min_heartbeat)
wdt->heartbeat = max_heartbeat / 2;
else
wdt->heartbeat = min_heartbeat;
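/*
 * Editor's note, worked example assuming the driver's default mr
 * (WDV = 2 * 256 = 512 ticks, WDD at its maximum): max_heartbeat is
 * roughly 2 * HZ, min_heartbeat stays at 1 jiffy, so the timer pings
 * every max_heartbeat / 4, i.e. about HZ / 2, matching WDT_TIMEOUT.
 */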
if (max_heartbeat < min_heartbeat + 4)
dev_warn(dev,
"min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
pdev->name, wdt);
if (err)
return err;
}
if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
dev_warn(dev,
"watchdog already configured differently (mr = %x expecting %x)\n",
tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
timer_setup(&wdt->timer, at91_ping, 0);
/*
* Use min_heartbeat the first time to avoid spurious watchdog reset:
* we don't know for how long the watchdog counter is running, and
* - resetting it right now might trigger a watchdog fault reset
* - waiting for heartbeat time might lead to a watchdog timeout
* reset
*/
mod_timer(&wdt->timer, jiffies + min_heartbeat);
/* Try to set timeout from device tree first */
if (watchdog_init_timeout(&wdt->wdd, 0, dev))
watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
err = watchdog_register_device(&wdt->wdd);
if (err)
goto out_stop_timer;
wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
return 0;
out_stop_timer:
del_timer(&wdt->timer);
return err;
}
/* ......................................................................... */
static const struct watchdog_info at91_wdt_info = {
.identity = DRV_NAME,
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
static const struct watchdog_ops at91_wdt_ops = {
.owner = THIS_MODULE,
.start = at91_wdt_start,
.stop = at91_wdt_stop,
.set_timeout = at91_wdt_set_timeout,
};
#if defined(CONFIG_OF)
static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
{
u32 min = 0;
u32 max = WDT_COUNTER_MAX_SECS;
const char *tmp;
/* Get the interrupts property */
wdt->irq = irq_of_parse_and_map(np, 0);
if (!wdt->irq)
dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
&max)) {
if (!max || max > WDT_COUNTER_MAX_SECS)
max = WDT_COUNTER_MAX_SECS;
if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
0, &min)) {
if (min >= max)
min = max - 1;
}
}
min = secs_to_ticks(min);
max = secs_to_ticks(max);
wdt->mr_mask = 0x3FFFFFFF;
wdt->mr = 0;
if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
!strcmp(tmp, "software")) {
wdt->mr |= AT91_WDT_WDFIEN;
wdt->mr_mask &= ~AT91_WDT_WDRPROC;
} else {
wdt->mr |= AT91_WDT_WDRSTEN;
}
if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
!strcmp(tmp, "proc"))
wdt->mr |= AT91_WDT_WDRPROC;
if (of_property_read_bool(np, "atmel,disable")) {
wdt->mr |= AT91_WDT_WDDIS;
wdt->mr_mask &= AT91_WDT_WDDIS;
}
if (of_property_read_bool(np, "atmel,idle-halt"))
wdt->mr |= AT91_WDT_WDIDLEHLT;
if (of_property_read_bool(np, "atmel,dbg-halt"))
wdt->mr |= AT91_WDT_WDDBGHLT;
wdt->mr |= max | ((max - min) << 16);
return 0;
}
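/*
 * Editor's note, worked example with hypothetical DT values: with
 * atmel,max-heartbeat-sec = 16 and no min-heartbeat-sec, max becomes
 * secs_to_ticks(16) = 0xfff and min stays 0, so the final statement above
 * programs WDV = 0xfff and WDD = 0xfff, allowing a reload at any time.
 */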
#else
static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
{
return 0;
}
#endif
static int __init at91wdt_probe(struct platform_device *pdev)
{
int err;
struct at91wdt *wdt;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
wdt->mr_mask = 0x3FFFFFFF;
wdt->nowayout = nowayout;
wdt->wdd.parent = &pdev->dev;
wdt->wdd.info = &at91_wdt_info;
wdt->wdd.ops = &at91_wdt_ops;
wdt->wdd.timeout = WDT_HEARTBEAT;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0xFFFF;
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
wdt->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt->sclk))
return PTR_ERR(wdt->sclk);
err = clk_prepare_enable(wdt->sclk);
if (err) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return err;
}
if (pdev->dev.of_node) {
err = of_at91wdt_init(pdev->dev.of_node, wdt);
if (err)
goto err_clk;
}
err = at91_wdt_init(pdev, wdt);
if (err)
goto err_clk;
platform_set_drvdata(pdev, wdt);
pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
wdt->wdd.timeout, wdt->nowayout);
return 0;
err_clk:
clk_disable_unprepare(wdt->sclk);
return err;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
{
struct at91wdt *wdt = platform_get_drvdata(pdev);
watchdog_unregister_device(&wdt->wdd);
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer(&wdt->timer);
clk_disable_unprepare(wdt->sclk);
return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id at91_wdt_dt_ids[] = {
{ .compatible = "atmel,at91sam9260-wdt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_wdt_dt_ids);
#endif
static struct platform_driver at91wdt_driver = {
.remove = __exit_p(at91wdt_remove),
.driver = {
.name = "at91_wdt",
.of_match_table = of_match_ptr(at91_wdt_dt_ids),
},
};
module_platform_driver_probe(at91wdt_driver, at91wdt_probe);
MODULE_AUTHOR("Renaud CERRATO <[email protected]>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91SAM9x processors");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/at91sam9_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog device driver for DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mfd/da9062/registers.h>
#include <linux/mfd/da9062/core.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/of.h>
static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define DA9062_TWDSCALE_DISABLE 0
#define DA9062_TWDSCALE_MIN 1
#define DA9062_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1)
#define DA9062_WDT_MIN_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MIN]
#define DA9062_WDT_MAX_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MAX]
#define DA9062_WDG_DEFAULT_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MAX-1]
#define DA9062_RESET_PROTECTION_MS 300
struct da9062_watchdog {
struct da9062 *hw;
struct watchdog_device wdtdev;
bool use_sw_pm;
};
static unsigned int da9062_wdt_read_timeout(struct da9062_watchdog *wdt)
{
unsigned int val;
regmap_read(wdt->hw->regmap, DA9062AA_CONTROL_D, &val);
return wdt_timeout[val & DA9062AA_TWDSCALE_MASK];
}
static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
{
unsigned int i;
for (i = DA9062_TWDSCALE_MIN; i <= DA9062_TWDSCALE_MAX; i++) {
if (wdt_timeout[i] >= secs)
return i;
}
return DA9062_TWDSCALE_MAX;
}
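/*
 * Editor's note, example: a requested timeout of 10 s selects the first
 * entry with wdt_timeout[i] >= 10, i.e. i = 4 (16 s); the caller then
 * reports the effective 16 s back through wdd->timeout.
 */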
static int da9062_reset_watchdog_timer(struct da9062_watchdog *wdt)
{
return regmap_update_bits(wdt->hw->regmap, DA9062AA_CONTROL_F,
DA9062AA_WATCHDOG_MASK,
DA9062AA_WATCHDOG_MASK);
}
static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
unsigned int regval)
{
struct da9062 *chip = wdt->hw;
regmap_update_bits(chip->regmap,
DA9062AA_CONTROL_D,
DA9062AA_TWDSCALE_MASK,
DA9062_TWDSCALE_DISABLE);
usleep_range(150, 300);
return regmap_update_bits(chip->regmap,
DA9062AA_CONTROL_D,
DA9062AA_TWDSCALE_MASK,
regval);
}
static int da9062_wdt_start(struct watchdog_device *wdd)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
unsigned int selector;
int ret;
selector = da9062_wdt_timeout_to_sel(wdt->wdtdev.timeout);
ret = da9062_wdt_update_timeout_register(wdt, selector);
if (ret)
dev_err(wdt->hw->dev, "Watchdog failed to start (err = %d)\n",
ret);
return ret;
}
static int da9062_wdt_stop(struct watchdog_device *wdd)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
int ret;
ret = regmap_update_bits(wdt->hw->regmap,
DA9062AA_CONTROL_D,
DA9062AA_TWDSCALE_MASK,
DA9062_TWDSCALE_DISABLE);
if (ret)
dev_err(wdt->hw->dev, "Watchdog failed to stop (err = %d)\n",
ret);
return ret;
}
static int da9062_wdt_ping(struct watchdog_device *wdd)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
int ret;
/*
* Prevent pings from occurring late in system poweroff/reboot sequence
* and possibly locking out restart handler from accessing i2c bus.
*/
if (system_state > SYSTEM_RUNNING)
return 0;
ret = da9062_reset_watchdog_timer(wdt);
if (ret)
dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
ret);
return ret;
}
static int da9062_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
unsigned int selector;
int ret;
selector = da9062_wdt_timeout_to_sel(timeout);
ret = da9062_wdt_update_timeout_register(wdt, selector);
if (ret)
dev_err(wdt->hw->dev, "Failed to set watchdog timeout (err = %d)\n",
ret);
else
wdd->timeout = wdt_timeout[selector];
return ret;
}
static int da9062_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
struct i2c_client *client = to_i2c_client(wdt->hw->dev);
union i2c_smbus_data msg;
int ret;
/*
* Don't use regmap because it is not atomic safe. Additionally, use
* unlocked flavor of i2c_smbus_xfer to avoid scenario where i2c bus
* might be previously locked by some process unable to release the
* lock due to interrupts already being disabled at this late stage.
*/
msg.byte = DA9062AA_SHUTDOWN_MASK;
ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, DA9062AA_CONTROL_F,
I2C_SMBUS_BYTE_DATA, &msg);
if (ret < 0)
dev_alert(wdt->hw->dev, "Failed to shutdown (err = %d)\n",
ret);
/* wait for reset to assert... */
mdelay(500);
return ret;
}
static const struct watchdog_info da9062_watchdog_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "DA9062 WDT",
};
static const struct watchdog_ops da9062_watchdog_ops = {
.owner = THIS_MODULE,
.start = da9062_wdt_start,
.stop = da9062_wdt_stop,
.ping = da9062_wdt_ping,
.set_timeout = da9062_wdt_set_timeout,
.restart = da9062_wdt_restart,
};
static const struct of_device_id da9062_compatible_id_table[] = {
{ .compatible = "dlg,da9062-watchdog", },
{ },
};
MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
static int da9062_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
unsigned int timeout;
struct da9062 *chip;
struct da9062_watchdog *wdt;
chip = dev_get_drvdata(dev->parent);
if (!chip)
return -EINVAL;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->use_sw_pm = device_property_present(dev, "dlg,use-sw-pm");
wdt->hw = chip;
wdt->wdtdev.info = &da9062_watchdog_info;
wdt->wdtdev.ops = &da9062_watchdog_ops;
wdt->wdtdev.min_timeout = DA9062_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT;
wdt->wdtdev.min_hw_heartbeat_ms = DA9062_RESET_PROTECTION_MS;
wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
wdt->wdtdev.parent = dev;
watchdog_set_restart_priority(&wdt->wdtdev, 128);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
dev_set_drvdata(dev, &wdt->wdtdev);
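/*
 * A non-zero scaler means the watchdog was left running, e.g. by the
 * bootloader; adopt its timeout and report WDOG_HW_RUNNING so the
 * core keeps pinging until userspace opens the device.
 */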
timeout = da9062_wdt_read_timeout(wdt);
if (timeout)
wdt->wdtdev.timeout = timeout;
/* Set timeout from DT value if available */
watchdog_init_timeout(&wdt->wdtdev, 0, dev);
if (timeout) {
da9062_wdt_set_timeout(&wdt->wdtdev, wdt->wdtdev.timeout);
set_bit(WDOG_HW_RUNNING, &wdt->wdtdev.status);
}
return devm_watchdog_register_device(dev, &wdt->wdtdev);
}
static int __maybe_unused da9062_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
if (!wdt->use_sw_pm)
return 0;
if (watchdog_active(wdd))
return da9062_wdt_stop(wdd);
return 0;
}
static int __maybe_unused da9062_wdt_resume(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
if (!wdt->use_sw_pm)
return 0;
if (watchdog_active(wdd))
return da9062_wdt_start(wdd);
return 0;
}
static SIMPLE_DEV_PM_OPS(da9062_wdt_pm_ops,
da9062_wdt_suspend, da9062_wdt_resume);
static struct platform_driver da9062_wdt_driver = {
.probe = da9062_wdt_probe,
.driver = {
.name = "da9062-watchdog",
.pm = &da9062_wdt_pm_ops,
.of_match_table = da9062_compatible_id_table,
},
};
module_platform_driver(da9062_wdt_driver);
MODULE_AUTHOR("S Twiss <[email protected]>");
MODULE_DESCRIPTION("WDT device driver for Dialog DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9062-watchdog");
| linux-master | drivers/watchdog/da9062_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/char/watchdog/scx200_wdt.c
National Semiconductor SCx200 Watchdog support
Copyright (c) 2001,2002 Christer Weinigel <[email protected]>
Some code taken from:
National Semiconductor PC87307/PC97307 (ala SC1200) WDT driver
(c) Copyright 2002 Zwane Mwaikambo <[email protected]>
The author(s) of this software shall not be held liable for damages
of any nature resulting due to the use of this software. This
software is provided AS-IS with no warranties. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/scx200.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#define DEBUG
MODULE_AUTHOR("Christer Weinigel <[email protected]>");
MODULE_DESCRIPTION("NatSemi SCx200 Watchdog Driver");
MODULE_LICENSE("GPL");
static int margin = 60; /* in seconds */
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static u16 wdto_restart;
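/*
 * Set to the arbitrary sentinel 42 once a 'V' has been written,
 * signalling a clean "magic close" so release may disable the timer.
 */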
static char expect_close;
static unsigned long open_lock;
static DEFINE_SPINLOCK(scx_lock);
/* Bits of the WDCNFG register */
#define W_ENABLE 0x00fa /* Enable watchdog */
#define W_DISABLE 0x0000 /* Disable watchdog */
/* The scaling factor for the timer, this depends on the value of W_ENABLE */
#define W_SCALE (32768/1024)
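/*
 * Worked example: with W_ENABLE the timer ticks at 32768/1024 = 32 Hz,
 * so the default 60 second margin loads wdto_restart = 60 * 32 = 1920.
 */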
static void scx200_wdt_ping(void)
{
spin_lock(&scx_lock);
outw(wdto_restart, scx200_cb_base + SCx200_WDT_WDTO);
spin_unlock(&scx_lock);
}
static void scx200_wdt_update_margin(void)
{
pr_info("timer margin %d seconds\n", margin);
wdto_restart = margin * W_SCALE;
}
static void scx200_wdt_enable(void)
{
pr_debug("enabling watchdog timer, wdto_restart = %d\n", wdto_restart);
spin_lock(&scx_lock);
outw(0, scx200_cb_base + SCx200_WDT_WDTO);
outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS);
outw(W_ENABLE, scx200_cb_base + SCx200_WDT_WDCNFG);
spin_unlock(&scx_lock);
scx200_wdt_ping();
}
static void scx200_wdt_disable(void)
{
pr_debug("disabling watchdog timer\n");
spin_lock(&scx_lock);
outw(0, scx200_cb_base + SCx200_WDT_WDTO);
outb(SCx200_WDT_WDSTS_WDOVF, scx200_cb_base + SCx200_WDT_WDSTS);
outw(W_DISABLE, scx200_cb_base + SCx200_WDT_WDCNFG);
spin_unlock(&scx_lock);
}
static int scx200_wdt_open(struct inode *inode, struct file *file)
{
/* only allow one at a time */
if (test_and_set_bit(0, &open_lock))
return -EBUSY;
scx200_wdt_enable();
return stream_open(inode, file);
}
static int scx200_wdt_release(struct inode *inode, struct file *file)
{
if (expect_close != 42)
pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n");
else if (!nowayout)
scx200_wdt_disable();
expect_close = 0;
clear_bit(0, &open_lock);
return 0;
}
static int scx200_wdt_notify_sys(struct notifier_block *this,
unsigned long code, void *unused)
{
if (code == SYS_HALT || code == SYS_POWER_OFF)
if (!nowayout)
scx200_wdt_disable();
return NOTIFY_DONE;
}
static struct notifier_block scx200_wdt_notifier = {
.notifier_call = scx200_wdt_notify_sys,
};
static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* check for a magic close character */
if (len) {
size_t i;
scx200_wdt_ping();
expect_close = 0;
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
return len;
}
return 0;
}
static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.identity = "NatSemi SCx200 Watchdog",
.firmware_version = 1,
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
int new_margin;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
if (put_user(0, p))
return -EFAULT;
return 0;
case WDIOC_KEEPALIVE:
scx200_wdt_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, p))
return -EFAULT;
if (new_margin < 1)
return -EINVAL;
margin = new_margin;
scx200_wdt_update_margin();
scx200_wdt_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
if (put_user(margin, p))
return -EFAULT;
return 0;
default:
return -ENOTTY;
}
}
static const struct file_operations scx200_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = scx200_wdt_write,
.unlocked_ioctl = scx200_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = scx200_wdt_open,
.release = scx200_wdt_release,
};
static struct miscdevice scx200_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &scx200_wdt_fops,
};
static int __init scx200_wdt_init(void)
{
int r;
pr_debug("NatSemi SCx200 Watchdog Driver\n");
/* check that we have found the configuration block */
if (!scx200_cb_present())
return -ENODEV;
if (!request_region(scx200_cb_base + SCx200_WDT_OFFSET,
SCx200_WDT_SIZE,
"NatSemi SCx200 Watchdog")) {
pr_warn("watchdog I/O region busy\n");
return -EBUSY;
}
scx200_wdt_update_margin();
scx200_wdt_disable();
r = register_reboot_notifier(&scx200_wdt_notifier);
if (r) {
pr_err("unable to register reboot notifier\n");
release_region(scx200_cb_base + SCx200_WDT_OFFSET,
SCx200_WDT_SIZE);
return r;
}
r = misc_register(&scx200_wdt_miscdev);
if (r) {
unregister_reboot_notifier(&scx200_wdt_notifier);
release_region(scx200_cb_base + SCx200_WDT_OFFSET,
SCx200_WDT_SIZE);
return r;
}
return 0;
}
static void __exit scx200_wdt_cleanup(void)
{
misc_deregister(&scx200_wdt_miscdev);
unregister_reboot_notifier(&scx200_wdt_notifier);
release_region(scx200_cb_base + SCx200_WDT_OFFSET,
SCx200_WDT_SIZE);
}
module_init(scx200_wdt_init);
module_exit(scx200_wdt_cleanup);
| linux-master | drivers/watchdog/scx200_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Watchdog timer for machines with the CS5535/CS5536 companion chip
*
* Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
* Copyright (C) 2009 Andres Salomon <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/cs5535.h>
#define GEODEWDT_HZ 500
#define GEODEWDT_SCALE 6
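/*
 * The MFGPT comparator is a 16-bit register, so at GEODEWDT_HZ ticks
 * per second the longest interval is 65535 / 500, i.e. ~131 seconds.
 */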
#define GEODEWDT_MAX_SECONDS 131
#define WDT_FLAGS_OPEN 1
#define WDT_FLAGS_ORPHAN 2
#define DRV_NAME "geodewdt"
#define WATCHDOG_NAME "Geode GX/LX WDT"
#define WATCHDOG_TIMEOUT 60
static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<= timeout <=131, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static struct platform_device *geodewdt_platform_device;
static unsigned long wdt_flags;
static struct cs5535_mfgpt_timer *wdt_timer;
static int safe_close;
static void geodewdt_ping(void)
{
/* Stop the counter */
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
/* Reset the counter */
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
/* Enable the counter */
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
}
static void geodewdt_disable(void)
{
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
}
static int geodewdt_set_heartbeat(int val)
{
if (val < 1 || val > GEODEWDT_MAX_SECONDS)
return -EINVAL;
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
timeout = val;
return 0;
}
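/*
 * If the device is closed without the magic character, the module
 * reference is kept and WDT_FLAGS_ORPHAN is set; a later reopen then
 * reuses that reference instead of taking a new one.
 */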
static int geodewdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
return -EBUSY;
if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
__module_get(THIS_MODULE);
geodewdt_ping();
return stream_open(inode, file);
}
static int geodewdt_release(struct inode *inode, struct file *file)
{
if (safe_close) {
geodewdt_disable();
module_put(THIS_MODULE);
} else {
pr_crit("Unexpected close - watchdog is not stopping\n");
geodewdt_ping();
set_bit(WDT_FLAGS_ORPHAN, &wdt_flags);
}
clear_bit(WDT_FLAGS_OPEN, &wdt_flags);
safe_close = 0;
return 0;
}
static ssize_t geodewdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
safe_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
safe_close = 1;
}
}
geodewdt_ping();
}
return len;
}
static long geodewdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int interval;
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = WATCHDOG_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int options, ret = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
geodewdt_disable();
ret = 0;
}
if (options & WDIOS_ENABLECARD) {
geodewdt_ping();
ret = 0;
}
return ret;
}
case WDIOC_KEEPALIVE:
geodewdt_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(interval, p))
return -EFAULT;
if (geodewdt_set_heartbeat(interval))
return -EINVAL;
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
static const struct file_operations geodewdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = geodewdt_write,
.unlocked_ioctl = geodewdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = geodewdt_open,
.release = geodewdt_release,
};
static struct miscdevice geodewdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &geodewdt_fops,
};
static int __init geodewdt_probe(struct platform_device *dev)
{
int ret;
wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
if (!wdt_timer) {
pr_err("No timers were available\n");
return -ENODEV;
}
/* Set up the timer */
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
GEODEWDT_SCALE | (3 << 8));
/* Set up comparator 2 to reset when the event fires */
cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
/* Set up the initial timeout */
cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
timeout * GEODEWDT_HZ);
ret = misc_register(&geodewdt_miscdev);
return ret;
}
static void geodewdt_remove(struct platform_device *dev)
{
misc_deregister(&geodewdt_miscdev);
}
static void geodewdt_shutdown(struct platform_device *dev)
{
geodewdt_disable();
}
static struct platform_driver geodewdt_driver = {
.remove_new = geodewdt_remove,
.shutdown = geodewdt_shutdown,
.driver = {
.name = DRV_NAME,
},
};
static int __init geodewdt_init(void)
{
int ret;
geodewdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
if (IS_ERR(geodewdt_platform_device))
return PTR_ERR(geodewdt_platform_device);
ret = platform_driver_probe(&geodewdt_driver, geodewdt_probe);
if (ret)
goto err;
return 0;
err:
platform_device_unregister(geodewdt_platform_device);
return ret;
}
static void __exit geodewdt_exit(void)
{
platform_device_unregister(geodewdt_platform_device);
platform_driver_unregister(&geodewdt_driver);
}
module_init(geodewdt_init);
module_exit(geodewdt_exit);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/geodewdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* omap_wdt.c
*
* Watchdog driver for the TI OMAP 16xx & 24xx/34xx 32KHz (non-secure) watchdog
*
* Author: MontaVista Software, Inc.
* <[email protected]> or <[email protected]>
*
* 2003 (c) MontaVista Software, Inc.
*
* History:
*
* 20030527: George G. Davis <[email protected]>
* Initially based on linux-2.4.19-rmk7-pxa1/drivers/char/sa1100_wdt.c
* (c) Copyright 2000 Oleg Drokin <[email protected]>
* Based on SoftDog driver by Alan Cox <[email protected]>
*
* Copyright (c) 2004 Texas Instruments.
* 1. Modified to support OMAP1610 32-KHz watchdog timer
* 2. Ported to 2.6 kernel
*
* Copyright (c) 2005 David Brownell
* Use the driver model and standard identifiers; handle bigger timeouts.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/omap-wd-timer.h>
#include "omap_wdt.h"
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned timer_margin;
module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
#define to_omap_wdt_dev(_wdog) container_of(_wdog, struct omap_wdt_dev, wdog)
static bool early_enable;
module_param(early_enable, bool, 0);
MODULE_PARM_DESC(early_enable,
"Watchdog is started on module insertion (default=0)");
struct omap_wdt_dev {
struct watchdog_device wdog;
void __iomem *base; /* physical */
struct device *dev;
bool omap_wdt_users;
int wdt_trgr_pattern;
struct mutex lock; /* to avoid races with PM */
};
static void omap_wdt_reload(struct omap_wdt_dev *wdev)
{
void __iomem *base = wdev->base;
/* wait for posted write to complete */
while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern;
writel_relaxed(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
/* wait for posted write to complete */
while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
/* WCRR is now reloaded from WLDR */
}
static void omap_wdt_enable(struct omap_wdt_dev *wdev)
{
void __iomem *base = wdev->base;
/* Sequence to enable the watchdog */
writel_relaxed(0xBBBB, base + OMAP_WATCHDOG_SPR);
while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10)
cpu_relax();
writel_relaxed(0x4444, base + OMAP_WATCHDOG_SPR);
while ((readl_relaxed(base + OMAP_WATCHDOG_WPS)) & 0x10)
cpu_relax();
}
static void omap_wdt_disable(struct omap_wdt_dev *wdev)
{
void __iomem *base = wdev->base;
/* sequence required to disable watchdog */
writel_relaxed(0xAAAA, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10)
cpu_relax();
writel_relaxed(0x5555, base + OMAP_WATCHDOG_SPR); /* TIMER_MODE */
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x10)
cpu_relax();
}
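/*
 * The counter runs up from the load value to overflow at 32768 Hz;
 * GET_WLDR_VAL() (from omap_wdt.h) converts a timeout in seconds into
 * the corresponding load register value.
 */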
static void omap_wdt_set_timer(struct omap_wdt_dev *wdev,
unsigned int timeout)
{
u32 pre_margin = GET_WLDR_VAL(timeout);
void __iomem *base = wdev->base;
/* just count up at 32 KHz */
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
writel_relaxed(pre_margin, base + OMAP_WATCHDOG_LDR);
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
}
static int omap_wdt_start(struct watchdog_device *wdog)
{
struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
void __iomem *base = wdev->base;
mutex_lock(&wdev->lock);
wdev->omap_wdt_users = true;
pm_runtime_get_sync(wdev->dev);
/*
* Make sure the watchdog is disabled. This is unfortunately required
* because writing to various registers with the watchdog running has no
* effect.
*/
omap_wdt_disable(wdev);
/* initialize prescaler */
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
writel_relaxed((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL);
while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
omap_wdt_set_timer(wdev, wdog->timeout);
omap_wdt_reload(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_stop(struct watchdog_device *wdog)
{
struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
mutex_lock(&wdev->lock);
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
wdev->omap_wdt_users = false;
mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_ping(struct watchdog_device *wdog)
{
struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
mutex_lock(&wdev->lock);
omap_wdt_reload(wdev);
mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int timeout)
{
struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
mutex_lock(&wdev->lock);
omap_wdt_disable(wdev);
omap_wdt_set_timer(wdev, timeout);
omap_wdt_enable(wdev);
omap_wdt_reload(wdev);
wdog->timeout = timeout;
mutex_unlock(&wdev->lock);
return 0;
}
static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
{
struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
void __iomem *base = wdev->base;
u32 value;
value = readl_relaxed(base + OMAP_WATCHDOG_CRR);
return GET_WCCR_SECS(value);
}
static const struct watchdog_info omap_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "OMAP Watchdog",
};
static const struct watchdog_ops omap_wdt_ops = {
.owner = THIS_MODULE,
.start = omap_wdt_start,
.stop = omap_wdt_stop,
.ping = omap_wdt_ping,
.set_timeout = omap_wdt_set_timeout,
.get_timeleft = omap_wdt_get_timeleft,
};
static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_wdt_dev *wdev;
int ret;
wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
wdev->omap_wdt_users = false;
wdev->dev = &pdev->dev;
wdev->wdt_trgr_pattern = 0x1234;
mutex_init(&wdev->lock);
/* reserve static register mappings */
wdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdev->base))
return PTR_ERR(wdev->base);
wdev->wdog.info = &omap_wdt_info;
wdev->wdog.ops = &omap_wdt_ops;
wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
wdev->wdog.parent = &pdev->dev;
watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev);
watchdog_set_nowayout(&wdev->wdog, nowayout);
platform_set_drvdata(pdev, wdev);
pm_runtime_enable(wdev->dev);
pm_runtime_get_sync(wdev->dev);
if (pdata && pdata->read_reset_sources) {
u32 rs = pdata->read_reset_sources();
if (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT))
wdev->wdog.bootstatus = WDIOF_CARDRESET;
}
if (early_enable) {
omap_wdt_start(&wdev->wdog);
set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
} else {
omap_wdt_disable(wdev);
}
ret = watchdog_register_device(&wdev->wdog);
if (ret) {
pm_runtime_put(wdev->dev);
pm_runtime_disable(wdev->dev);
return ret;
}
pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
wdev->wdog.timeout);
pm_runtime_put(wdev->dev);
return 0;
}
static void omap_wdt_shutdown(struct platform_device *pdev)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
mutex_unlock(&wdev->lock);
}
static void omap_wdt_remove(struct platform_device *pdev)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
pm_runtime_disable(wdev->dev);
watchdog_unregister_device(&wdev->wdog);
}
/* REVISIT ... not clear this is the best way to handle system suspend; and
* it's very inappropriate for selective device suspend (e.g. suspending this
* through sysfs rather than by stopping the watchdog daemon). Also, this
* may not play well enough with NOWAYOUT...
*/
static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_resume(struct platform_device *pdev)
{
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_reload(wdev);
}
mutex_unlock(&wdev->lock);
return 0;
}
static const struct of_device_id omap_wdt_of_match[] = {
{ .compatible = "ti,omap3-wdt", },
{},
};
MODULE_DEVICE_TABLE(of, omap_wdt_of_match);
static struct platform_driver omap_wdt_driver = {
.probe = omap_wdt_probe,
.remove_new = omap_wdt_remove,
.shutdown = omap_wdt_shutdown,
.suspend = pm_ptr(omap_wdt_suspend),
.resume = pm_ptr(omap_wdt_resume),
.driver = {
.name = "omap_wdt",
.of_match_table = omap_wdt_of_match,
},
};
module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_wdt");
| linux-master | drivers/watchdog/omap_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Yang Ling <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
/* Loongson 1 Watchdog Register Definitions */
#define WDT_EN 0x0
#define WDT_TIMER 0x4
#define WDT_SET 0x8
#define DEFAULT_HEARTBEAT 30
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
static unsigned int heartbeat;
module_param(heartbeat, uint, 0444);
struct ls1x_wdt_drvdata {
void __iomem *base;
struct clk *clk;
unsigned long clk_rate;
struct watchdog_device wdt;
};
static int ls1x_wdt_ping(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
writel(0x1, drvdata->base + WDT_SET);
return 0;
}
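/*
 * The count register is 32 bits wide, so the programmed interval is
 * clamped to max_hw_heartbeat_ms; for longer timeouts the watchdog
 * core keeps the hardware alive by pinging it automatically.
 */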
static int ls1x_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int timeout)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
unsigned int max_hw_heartbeat = wdt_dev->max_hw_heartbeat_ms / 1000;
unsigned int counts;
wdt_dev->timeout = timeout;
counts = drvdata->clk_rate * min(timeout, max_hw_heartbeat);
writel(counts, drvdata->base + WDT_TIMER);
return 0;
}
static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
writel(0x1, drvdata->base + WDT_EN);
return 0;
}
static int ls1x_wdt_stop(struct watchdog_device *wdt_dev)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
writel(0x0, drvdata->base + WDT_EN);
return 0;
}
static int ls1x_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
writel(0x1, drvdata->base + WDT_EN);
writel(0x1, drvdata->base + WDT_TIMER);
writel(0x1, drvdata->base + WDT_SET);
return 0;
}
static const struct watchdog_info ls1x_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Loongson1 Watchdog",
};
static const struct watchdog_ops ls1x_wdt_ops = {
.owner = THIS_MODULE,
.start = ls1x_wdt_start,
.stop = ls1x_wdt_stop,
.ping = ls1x_wdt_ping,
.set_timeout = ls1x_wdt_set_timeout,
.restart = ls1x_wdt_restart,
};
static int ls1x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ls1x_wdt_drvdata *drvdata;
struct watchdog_device *ls1x_wdt;
unsigned long clk_rate;
int err;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
drvdata->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(drvdata->clk))
return PTR_ERR(drvdata->clk);
clk_rate = clk_get_rate(drvdata->clk);
if (!clk_rate)
return -EINVAL;
drvdata->clk_rate = clk_rate;
ls1x_wdt = &drvdata->wdt;
ls1x_wdt->info = &ls1x_wdt_info;
ls1x_wdt->ops = &ls1x_wdt_ops;
ls1x_wdt->timeout = DEFAULT_HEARTBEAT;
ls1x_wdt->min_timeout = 1;
ls1x_wdt->max_hw_heartbeat_ms = U32_MAX / clk_rate * 1000;
ls1x_wdt->parent = dev;
watchdog_init_timeout(ls1x_wdt, heartbeat, dev);
watchdog_set_nowayout(ls1x_wdt, nowayout);
watchdog_set_drvdata(ls1x_wdt, drvdata);
err = devm_watchdog_register_device(dev, &drvdata->wdt);
if (err)
return err;
platform_set_drvdata(pdev, drvdata);
dev_info(dev, "Loongson1 Watchdog driver registered\n");
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id ls1x_wdt_dt_ids[] = {
{ .compatible = "loongson,ls1b-wdt", },
{ .compatible = "loongson,ls1c-wdt", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_wdt_dt_ids);
#endif
static struct platform_driver ls1x_wdt_driver = {
.probe = ls1x_wdt_probe,
.driver = {
.name = "ls1x-wdt",
.of_match_table = of_match_ptr(ls1x_wdt_dt_ids),
},
};
module_platform_driver(ls1x_wdt_driver);
MODULE_AUTHOR("Yang Ling <[email protected]>");
MODULE_DESCRIPTION("Loongson1 Watchdog Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/loongson1_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* w83627hf/thf WDT driver
*
* (c) Copyright 2013 Guenter Roeck
* converted to watchdog infrastructure
*
* (c) Copyright 2007 Vlad Drukker <[email protected]>
* added support for W83627THF.
*
* (c) Copyright 2003,2007 Pádraig Brady <[email protected]>
*
* Based on advantechwdt.c which is based on wdt.c.
* Original copyright messages:
*
* (c) Copyright 2000-2001 Marek Michalkiewicz <[email protected]>
*
* (c) Copyright 1996 Alan Cox <[email protected]>,
* All Rights Reserved.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/dmi.h>
#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
static int wdt_io;
static int cr_wdt_timeout; /* WDT timeout register */
static int cr_wdt_control; /* WDT control register */
static int cr_wdt_csr; /* WDT control & status register */
static int wdt_cfg_enter = 0x87;/* key to unlock configuration space */
static int wdt_cfg_leave = 0xAA;/* key to lock configuration space */
enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
nct6795, nct6796, nct6102, nct6116 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1 <= timeout <= 255, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int early_disable;
module_param(early_disable, int, 0);
MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
/*
* Kernel methods.
*/
#define WDT_EFER (wdt_io+0) /* Extended Function Enable Registers */
#define WDT_EFIR (wdt_io+0) /* Extended Function Index Register
(same as EFER) */
#define WDT_EFDR (WDT_EFIR+1) /* Extended Function Data Register */
#define W83627HF_LD_WDT 0x08
#define W83627HF_ID 0x52
#define W83627S_ID 0x59
#define W83697HF_ID 0x60
#define W83697UG_ID 0x68
#define W83637HF_ID 0x70
#define W83627THF_ID 0x82
#define W83687THF_ID 0x85
#define W83627EHF_ID 0x88
#define W83627DHG_ID 0xa0
#define W83627UHG_ID 0xa2
#define W83667HG_ID 0xa5
#define W83627DHG_P_ID 0xb0
#define W83667HG_B_ID 0xb3
#define NCT6775_ID 0xb4
#define NCT6776_ID 0xc3
#define NCT6102_ID 0xc4
#define NCT6116_ID 0xd2
#define NCT6779_ID 0xc5
#define NCT6791_ID 0xc8
#define NCT6792_ID 0xc9
#define NCT6793_ID 0xd1
#define NCT6795_ID 0xd3
#define NCT6796_ID 0xd4 /* also NCT9697D, NCT9698D */
#define W83627HF_WDT_TIMEOUT 0xf6
#define W83697HF_WDT_TIMEOUT 0xf4
#define NCT6102D_WDT_TIMEOUT 0xf1
#define W83627HF_WDT_CONTROL 0xf5
#define W83697HF_WDT_CONTROL 0xf3
#define NCT6102D_WDT_CONTROL 0xf0
#define W836X7HF_WDT_CSR 0xf7
#define NCT6102D_WDT_CSR 0xf2
#define WDT_CSR_STATUS 0x10
#define WDT_CSR_KBD 0x40
#define WDT_CSR_MOUSE 0x80
static void superio_outb(int reg, int val)
{
outb(reg, WDT_EFER);
outb(val, WDT_EFDR);
}
static inline int superio_inb(int reg)
{
outb(reg, WDT_EFER);
return inb(WDT_EFDR);
}
static int superio_enter(void)
{
if (!request_muxed_region(wdt_io, 2, WATCHDOG_NAME))
return -EBUSY;
outb_p(wdt_cfg_enter, WDT_EFER); /* Enter extended function mode */
outb_p(wdt_cfg_enter, WDT_EFER); /* Again according to manual */
return 0;
}
static void superio_select(int ld)
{
superio_outb(0x07, ld);
}
static void superio_exit(void)
{
outb_p(wdt_cfg_leave, WDT_EFER); /* Leave extended function mode */
release_region(wdt_io, 2);
}
static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
{
int ret;
unsigned char t;
ret = superio_enter();
if (ret)
return ret;
superio_select(W83627HF_LD_WDT);
/* set CR30 bit 0 to activate GPIO2 */
t = superio_inb(0x30);
if (!(t & 0x01))
superio_outb(0x30, t | 0x01);
switch (chip) {
case w83627hf:
case w83627s:
t = superio_inb(0x2B) & ~0x10;
superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
break;
case w83697hf:
/* Set pin 119 to WDTO# mode (= CR29, WDT0) */
t = superio_inb(0x29) & ~0x60;
t |= 0x20;
superio_outb(0x29, t);
break;
case w83697ug:
/* Set pin 118 to WDTO# mode */
t = superio_inb(0x2b) & ~0x04;
superio_outb(0x2b, t);
break;
case w83627thf:
t = (superio_inb(0x2B) & ~0x08) | 0x04;
superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
break;
case w83627dhg:
case w83627dhg_p:
t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
t = superio_inb(cr_wdt_control);
t |= 0x02; /* enable the WDTO# output low pulse
* to the KBRST# pin */
superio_outb(cr_wdt_control, t);
break;
case w83637hf:
break;
case w83687thf:
t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
superio_outb(0x2C, t);
break;
case w83627ehf:
case w83627uhg:
case w83667hg:
case w83667hg_b:
case nct6775:
case nct6776:
case nct6779:
case nct6791:
case nct6792:
case nct6793:
case nct6795:
case nct6796:
case nct6102:
case nct6116:
/*
* These chips have a fixed WDTO# output pin (W83627UHG),
* or support more than one WDTO# output pin.
* Don't touch its configuration, and hope the BIOS
* does the right thing.
*/
t = superio_inb(cr_wdt_control);
t |= 0x02; /* enable the WDTO# output low pulse
* to the KBRST# pin */
superio_outb(cr_wdt_control, t);
break;
default:
break;
}
t = superio_inb(cr_wdt_timeout);
if (t != 0) {
if (early_disable) {
pr_warn("Stopping previously enabled watchdog until userland kicks in\n");
superio_outb(cr_wdt_timeout, 0);
} else {
pr_info("Watchdog already running. Resetting timeout to %d sec\n",
wdog->timeout);
superio_outb(cr_wdt_timeout, wdog->timeout);
}
}
/* set second mode & disable keyboard turning off watchdog */
t = superio_inb(cr_wdt_control) & ~0x0C;
superio_outb(cr_wdt_control, t);
t = superio_inb(cr_wdt_csr);
if (t & WDT_CSR_STATUS)
wdog->bootstatus |= WDIOF_CARDRESET;
/* reset status, disable keyboard & mouse turning off watchdog */
t &= ~(WDT_CSR_STATUS | WDT_CSR_KBD | WDT_CSR_MOUSE);
superio_outb(cr_wdt_csr, t);
superio_exit();
return 0;
}
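/*
 * The timeout register holds the remaining time in seconds: writing a
 * non-zero value (re)arms the countdown while writing zero disables
 * it, which is how wdt_start() and wdt_stop() below are implemented.
 */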
static int wdt_set_time(unsigned int timeout)
{
int ret;
ret = superio_enter();
if (ret)
return ret;
superio_select(W83627HF_LD_WDT);
superio_outb(cr_wdt_timeout, timeout);
superio_exit();
return 0;
}
static int wdt_start(struct watchdog_device *wdog)
{
return wdt_set_time(wdog->timeout);
}
static int wdt_stop(struct watchdog_device *wdog)
{
return wdt_set_time(0);
}
static int wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout)
{
wdog->timeout = timeout;
return 0;
}
static unsigned int wdt_get_time(struct watchdog_device *wdog)
{
unsigned int timeleft;
int ret;
ret = superio_enter();
if (ret)
return 0;
superio_select(W83627HF_LD_WDT);
timeleft = superio_inb(cr_wdt_timeout);
superio_exit();
return timeleft;
}
/*
* Kernel Interfaces
*/
static const struct watchdog_info wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "W83627HF Watchdog",
};
static const struct watchdog_ops wdt_ops = {
.owner = THIS_MODULE,
.start = wdt_start,
.stop = wdt_stop,
.set_timeout = wdt_set_timeout,
.get_timeleft = wdt_get_time,
};
static struct watchdog_device wdt_dev = {
.info = &wdt_info,
.ops = &wdt_ops,
.timeout = WATCHDOG_TIMEOUT,
.min_timeout = 1,
.max_timeout = 255,
};
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static int wdt_find(int addr)
{
u8 val;
int ret;
cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
cr_wdt_control = W83627HF_WDT_CONTROL;
cr_wdt_csr = W836X7HF_WDT_CSR;
ret = superio_enter();
if (ret)
return ret;
superio_select(W83627HF_LD_WDT);
val = superio_inb(0x20);
switch (val) {
case W83627HF_ID:
ret = w83627hf;
break;
case W83627S_ID:
ret = w83627s;
break;
case W83697HF_ID:
ret = w83697hf;
cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
cr_wdt_control = W83697HF_WDT_CONTROL;
break;
case W83697UG_ID:
ret = w83697ug;
cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
cr_wdt_control = W83697HF_WDT_CONTROL;
break;
case W83637HF_ID:
ret = w83637hf;
break;
case W83627THF_ID:
ret = w83627thf;
break;
case W83687THF_ID:
ret = w83687thf;
break;
case W83627EHF_ID:
ret = w83627ehf;
break;
case W83627DHG_ID:
ret = w83627dhg;
break;
case W83627DHG_P_ID:
ret = w83627dhg_p;
break;
case W83627UHG_ID:
ret = w83627uhg;
break;
case W83667HG_ID:
ret = w83667hg;
break;
case W83667HG_B_ID:
ret = w83667hg_b;
break;
case NCT6775_ID:
ret = nct6775;
break;
case NCT6776_ID:
ret = nct6776;
break;
case NCT6779_ID:
ret = nct6779;
break;
case NCT6791_ID:
ret = nct6791;
break;
case NCT6792_ID:
ret = nct6792;
break;
case NCT6793_ID:
ret = nct6793;
break;
case NCT6795_ID:
ret = nct6795;
break;
case NCT6796_ID:
ret = nct6796;
break;
case NCT6102_ID:
ret = nct6102;
cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
case NCT6116_ID:
ret = nct6116;
cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
case 0xff:
ret = -ENODEV;
break;
default:
ret = -ENODEV;
pr_err("Unsupported chip ID: 0x%02x\n", val);
break;
}
superio_exit();
return ret;
}
/*
* On some systems, the NCT6791D comes with a companion chip and the
* watchdog function is in this companion chip. We must use a different
* unlocking sequence to access the companion chip.
*/
static int __init wdt_use_alt_key(const struct dmi_system_id *d)
{
wdt_cfg_enter = 0x88;
wdt_cfg_leave = 0xBB;
return 0;
}
static const struct dmi_system_id wdt_dmi_table[] __initconst = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "INVES"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CTS"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "INVES"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SHARKBAY"),
},
.callback = wdt_use_alt_key,
},
{}
};
static int __init wdt_init(void)
{
int ret;
int chip;
static const char * const chip_name[] = {
"W83627HF",
"W83627S",
"W83697HF",
"W83697UG",
"W83637HF",
"W83627THF",
"W83687THF",
"W83627EHF",
"W83627DHG",
"W83627UHG",
"W83667HG",
"W83667DHG-P",
"W83667HG-B",
"NCT6775",
"NCT6776",
"NCT6779",
"NCT6791",
"NCT6792",
"NCT6793",
"NCT6795",
"NCT6796",
"NCT6102",
"NCT6116",
};
/* Apply system-specific quirks */
dmi_check_system(wdt_dmi_table);
wdt_io = 0x2e;
chip = wdt_find(0x2e);
if (chip < 0) {
wdt_io = 0x4e;
chip = wdt_find(0x4e);
if (chip < 0)
return chip;
}
pr_info("WDT driver for %s Super I/O chip initialising\n",
chip_name[chip]);
watchdog_init_timeout(&wdt_dev, timeout, NULL);
watchdog_set_nowayout(&wdt_dev, nowayout);
watchdog_stop_on_reboot(&wdt_dev);
ret = w83627hf_init(&wdt_dev, chip);
if (ret) {
pr_err("failed to initialize watchdog (err=%d)\n", ret);
return ret;
}
ret = watchdog_register_device(&wdt_dev);
if (ret)
return ret;
pr_info("initialized. timeout=%d sec (nowayout=%d)\n",
wdt_dev.timeout, nowayout);
return ret;
}
static void __exit wdt_exit(void)
{
watchdog_unregister_device(&wdt_dev);
}
module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pádraig Brady <[email protected]>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
| linux-master | drivers/watchdog/w83627hf_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Marvell Armada 37xx SoCs
*
* Author: Marek Behún <[email protected]>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/watchdog.h>
/*
* There are four counters that can be used for watchdog on Armada 37xx.
* The addresses for counter control registers are register base plus ID*0x10,
* where ID is 0, 1, 2 or 3.
*
* In this driver we use IDs 0 and 1. Counter ID 1 is used as watchdog counter,
* while counter ID 0 is used to implement pinging the watchdog: counter ID 1 is
* set to restart counting from initial value on counter ID 0 end count event.
* Pinging is done by forcing immediate end count event on counter ID 0.
* If only one counter was used, pinging would have to be implemented by
* disabling and enabling the counter, leaving the system in a vulnerable state
* for a (really) short period of time.
*
* Counters ID 2 and 3 are enabled by default even before U-Boot loads,
* therefore this driver does not provide a way to use them, e.g. by setting a
* property in device tree.
*/
#define CNTR_ID_RETRIGGER 0
#define CNTR_ID_WDOG 1
/* relative to cpu_misc */
#define WDT_TIMER_SELECT 0x64
#define WDT_TIMER_SELECT_MASK 0xf
#define WDT_TIMER_SELECT_VAL BIT(CNTR_ID_WDOG)
/* relative to reg */
#define CNTR_CTRL(id) ((id) * 0x10)
#define CNTR_CTRL_ENABLE 0x0001
#define CNTR_CTRL_ACTIVE 0x0002
#define CNTR_CTRL_MODE_MASK 0x000c
#define CNTR_CTRL_MODE_ONESHOT 0x0000
#define CNTR_CTRL_MODE_HWSIG 0x000c
#define CNTR_CTRL_TRIG_SRC_MASK 0x00f0
#define CNTR_CTRL_TRIG_SRC_PREV_CNTR 0x0050
#define CNTR_CTRL_PRESCALE_MASK 0xff00
#define CNTR_CTRL_PRESCALE_MIN 2
#define CNTR_CTRL_PRESCALE_SHIFT 8
#define CNTR_COUNT_LOW(id) (CNTR_CTRL(id) + 0x4)
#define CNTR_COUNT_HIGH(id) (CNTR_CTRL(id) + 0x8)
#define WATCHDOG_TIMEOUT 120
static unsigned int timeout;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct armada_37xx_watchdog {
struct watchdog_device wdt;
struct regmap *cpu_misc;
void __iomem *reg;
u64 timeout; /* in clock ticks */
unsigned long clk_rate;
struct clk *clk;
};
static u64 get_counter_value(struct armada_37xx_watchdog *dev, int id)
{
u64 val;
/*
* when low is read, high is latched into flip-flops so that it can be
* read consistently without using software debouncing
*/
val = readl(dev->reg + CNTR_COUNT_LOW(id));
val |= ((u64)readl(dev->reg + CNTR_COUNT_HIGH(id))) << 32;
return val;
}
static void set_counter_value(struct armada_37xx_watchdog *dev, int id, u64 val)
{
writel(val & 0xffffffff, dev->reg + CNTR_COUNT_LOW(id));
writel(val >> 32, dev->reg + CNTR_COUNT_HIGH(id));
}
static void counter_enable(struct armada_37xx_watchdog *dev, int id)
{
u32 reg;
reg = readl(dev->reg + CNTR_CTRL(id));
reg |= CNTR_CTRL_ENABLE;
writel(reg, dev->reg + CNTR_CTRL(id));
}
static void counter_disable(struct armada_37xx_watchdog *dev, int id)
{
u32 reg;
reg = readl(dev->reg + CNTR_CTRL(id));
reg &= ~CNTR_CTRL_ENABLE;
writel(reg, dev->reg + CNTR_CTRL(id));
}
static void init_counter(struct armada_37xx_watchdog *dev, int id, u32 mode,
u32 trig_src)
{
u32 reg;
reg = readl(dev->reg + CNTR_CTRL(id));
reg &= ~(CNTR_CTRL_MODE_MASK | CNTR_CTRL_PRESCALE_MASK |
CNTR_CTRL_TRIG_SRC_MASK);
/* set mode */
reg |= mode & CNTR_CTRL_MODE_MASK;
/* set prescaler to the min value */
reg |= CNTR_CTRL_PRESCALE_MIN << CNTR_CTRL_PRESCALE_SHIFT;
/* set trigger source */
reg |= trig_src & CNTR_CTRL_TRIG_SRC_MASK;
writel(reg, dev->reg + CNTR_CTRL(id));
}
static int armada_37xx_wdt_ping(struct watchdog_device *wdt)
{
struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
/* counter 1 is retriggered by forcing end count on counter 0 */
counter_disable(dev, CNTR_ID_RETRIGGER);
counter_enable(dev, CNTR_ID_RETRIGGER);
return 0;
}
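/*
 * Inverse of the timeout calculation: counter ticks multiplied by the
 * prescaler and divided by the clock rate gives the seconds left.
 */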
static unsigned int armada_37xx_wdt_get_timeleft(struct watchdog_device *wdt)
{
struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
u64 res;
res = get_counter_value(dev, CNTR_ID_WDOG) * CNTR_CTRL_PRESCALE_MIN;
do_div(res, dev->clk_rate);
return res;
}
static int armada_37xx_wdt_set_timeout(struct watchdog_device *wdt,
unsigned int timeout)
{
struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
wdt->timeout = timeout;
/*
* Compute the timeout in clock rate. We use smallest possible
* prescaler, which divides the clock rate by 2
* (CNTR_CTRL_PRESCALE_MIN).
*/
dev->timeout = (u64)dev->clk_rate * timeout;
do_div(dev->timeout, CNTR_CTRL_PRESCALE_MIN);
set_counter_value(dev, CNTR_ID_WDOG, dev->timeout);
return 0;
}
static bool armada_37xx_wdt_is_running(struct armada_37xx_watchdog *dev)
{
u32 reg;
regmap_read(dev->cpu_misc, WDT_TIMER_SELECT, &reg);
if ((reg & WDT_TIMER_SELECT_MASK) != WDT_TIMER_SELECT_VAL)
return false;
reg = readl(dev->reg + CNTR_CTRL(CNTR_ID_WDOG));
return !!(reg & CNTR_CTRL_ACTIVE);
}
static int armada_37xx_wdt_start(struct watchdog_device *wdt)
{
struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
/* select counter 1 as watchdog counter */
regmap_write(dev->cpu_misc, WDT_TIMER_SELECT, WDT_TIMER_SELECT_VAL);
/* init counter 0 as retrigger counter for counter 1 */
init_counter(dev, CNTR_ID_RETRIGGER, CNTR_CTRL_MODE_ONESHOT, 0);
set_counter_value(dev, CNTR_ID_RETRIGGER, 0);
/* init counter 1 to be retriggerable by counter 0 end count */
init_counter(dev, CNTR_ID_WDOG, CNTR_CTRL_MODE_HWSIG,
CNTR_CTRL_TRIG_SRC_PREV_CNTR);
set_counter_value(dev, CNTR_ID_WDOG, dev->timeout);
/* enable counter 1 */
counter_enable(dev, CNTR_ID_WDOG);
/* start counter 1 by forcing immediate end count on counter 0 */
counter_enable(dev, CNTR_ID_RETRIGGER);
return 0;
}
static int armada_37xx_wdt_stop(struct watchdog_device *wdt)
{
struct armada_37xx_watchdog *dev = watchdog_get_drvdata(wdt);
counter_disable(dev, CNTR_ID_WDOG);
counter_disable(dev, CNTR_ID_RETRIGGER);
regmap_write(dev->cpu_misc, WDT_TIMER_SELECT, 0);
return 0;
}
static const struct watchdog_info armada_37xx_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Armada 37xx Watchdog",
};
static const struct watchdog_ops armada_37xx_wdt_ops = {
.owner = THIS_MODULE,
.start = armada_37xx_wdt_start,
.stop = armada_37xx_wdt_stop,
.ping = armada_37xx_wdt_ping,
.set_timeout = armada_37xx_wdt_set_timeout,
.get_timeleft = armada_37xx_wdt_get_timeleft,
};
static int armada_37xx_wdt_probe(struct platform_device *pdev)
{
struct armada_37xx_watchdog *dev;
struct resource *res;
struct regmap *regmap;
int ret;
dev = devm_kzalloc(&pdev->dev, sizeof(struct armada_37xx_watchdog),
GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->wdt.info = &armada_37xx_wdt_info;
dev->wdt.ops = &armada_37xx_wdt_ops;
regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
if (IS_ERR(regmap))
return PTR_ERR(regmap);
dev->cpu_misc = regmap;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!dev->reg)
return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
dev->clk_rate = clk_get_rate(dev->clk);
if (!dev->clk_rate)
return -EINVAL;
/*
* Since the timeout in seconds is given as 32 bit unsigned int, and
* the counters hold 64 bit values, even after multiplication by clock
* rate the counter can hold a timeout of UINT_MAX seconds.
*/
dev->wdt.min_timeout = 1;
dev->wdt.max_timeout = UINT_MAX;
dev->wdt.parent = &pdev->dev;
/* default value, possibly override by module parameter or dtb */
dev->wdt.timeout = WATCHDOG_TIMEOUT;
watchdog_init_timeout(&dev->wdt, timeout, &pdev->dev);
platform_set_drvdata(pdev, &dev->wdt);
watchdog_set_drvdata(&dev->wdt, dev);
armada_37xx_wdt_set_timeout(&dev->wdt, dev->wdt.timeout);
if (armada_37xx_wdt_is_running(dev))
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
watchdog_set_nowayout(&dev->wdt, nowayout);
watchdog_stop_on_reboot(&dev->wdt);
ret = devm_watchdog_register_device(&pdev->dev, &dev->wdt);
if (ret)
return ret;
dev_info(&pdev->dev, "Initial timeout %d sec%s\n",
dev->wdt.timeout, nowayout ? ", nowayout" : "");
return 0;
}
static int __maybe_unused armada_37xx_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdt = dev_get_drvdata(dev);
return armada_37xx_wdt_stop(wdt);
}
static int __maybe_unused armada_37xx_wdt_resume(struct device *dev)
{
struct watchdog_device *wdt = dev_get_drvdata(dev);
if (watchdog_active(wdt))
return armada_37xx_wdt_start(wdt);
return 0;
}
static const struct dev_pm_ops armada_37xx_wdt_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(armada_37xx_wdt_suspend,
armada_37xx_wdt_resume)
};
#ifdef CONFIG_OF
static const struct of_device_id armada_37xx_wdt_match[] = {
{ .compatible = "marvell,armada-3700-wdt", },
{},
};
MODULE_DEVICE_TABLE(of, armada_37xx_wdt_match);
#endif
static struct platform_driver armada_37xx_wdt_driver = {
.probe = armada_37xx_wdt_probe,
.driver = {
.name = "armada_37xx_wdt",
.of_match_table = of_match_ptr(armada_37xx_wdt_match),
.pm = &armada_37xx_wdt_dev_pm_ops,
},
};
module_platform_driver(armada_37xx_wdt_driver);
MODULE_AUTHOR("Marek Behun <[email protected]>");
MODULE_DESCRIPTION("Armada 37xx CPU Watchdog");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:armada_37xx_wdt");
| linux-master | drivers/watchdog/armada_37xx_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* exar_wdt.c - Driver for the watchdog present in some
* Exar/MaxLinear UART chips like the XR28V38x.
*
* (c) Copyright 2022 D. Müller <[email protected]>.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
#define DRV_NAME "exar_wdt"
static const unsigned short sio_config_ports[] = { 0x2e, 0x4e };
static const unsigned char sio_enter_keys[] = { 0x67, 0x77, 0x87, 0xA0 };
#define EXAR_EXIT_KEY 0xAA
#define EXAR_LDN 0x07
#define EXAR_DID 0x20
#define EXAR_VID 0x23
#define EXAR_WDT 0x26
#define EXAR_ACT 0x30
#define EXAR_RTBASE 0x60
#define EXAR_WDT_LDEV 0x08
#define EXAR_VEN_ID 0x13A8
#define EXAR_DEV_382 0x0382
#define EXAR_DEV_384 0x0384
/* WDT runtime registers */
#define WDT_CTRL 0x00
#define WDT_VAL 0x01
#define WDT_UNITS_10MS 0x0 /* the 10 millisec unit of the HW is not used */
#define WDT_UNITS_SEC 0x2
#define WDT_UNITS_MIN 0x4
/* default WDT control for WDTOUT signal active / rearm by read */
#define EXAR_WDT_DEF_CONF 0
struct wdt_pdev_node {
struct list_head list;
struct platform_device *pdev;
const char name[16];
};
struct wdt_priv {
/* the lock for WDT io operations */
spinlock_t io_lock;
struct resource wdt_res;
struct watchdog_device wdt_dev;
unsigned short did;
unsigned short config_port;
unsigned char enter_key;
unsigned char unit;
unsigned char timeout;
};
#define WATCHDOG_TIMEOUT 60
static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<=timeout<=15300, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int exar_sio_enter(const unsigned short config_port,
const unsigned char key)
{
if (!request_muxed_region(config_port, 2, DRV_NAME))
return -EBUSY;
/* write the ENTER-KEY twice */
outb(key, config_port);
outb(key, config_port);
return 0;
}
static void exar_sio_exit(const unsigned short config_port)
{
outb(EXAR_EXIT_KEY, config_port);
release_region(config_port, 2);
}
static unsigned char exar_sio_read(const unsigned short config_port,
const unsigned char reg)
{
outb(reg, config_port);
return inb(config_port + 1);
}
static void exar_sio_write(const unsigned short config_port,
const unsigned char reg, const unsigned char val)
{
outb(reg, config_port);
outb(val, config_port + 1);
}
static unsigned short exar_sio_read16(const unsigned short config_port,
const unsigned char reg)
{
unsigned char msb, lsb;
msb = exar_sio_read(config_port, reg);
lsb = exar_sio_read(config_port, reg + 1);
return (msb << 8) | lsb;
}
static void exar_sio_select_wdt(const unsigned short config_port)
{
exar_sio_write(config_port, EXAR_LDN, EXAR_WDT_LDEV);
}
static void exar_wdt_arm(const struct wdt_priv *priv)
{
unsigned short rt_base = priv->wdt_res.start;
/* write timeout value twice to arm watchdog */
outb(priv->timeout, rt_base + WDT_VAL);
outb(priv->timeout, rt_base + WDT_VAL);
}
static void exar_wdt_disarm(const struct wdt_priv *priv)
{
unsigned short rt_base = priv->wdt_res.start;
/*
* use two accesses with different values to make sure
* that a combination of a previous single access and
* the ones below with the same value are not falsely
* interpreted as "arm watchdog"
*/
outb(0xFF, rt_base + WDT_VAL);
outb(0, rt_base + WDT_VAL);
}
static int exar_wdt_start(struct watchdog_device *wdog)
{
struct wdt_priv *priv = watchdog_get_drvdata(wdog);
unsigned short rt_base = priv->wdt_res.start;
spin_lock(&priv->io_lock);
exar_wdt_disarm(priv);
outb(priv->unit, rt_base + WDT_CTRL);
exar_wdt_arm(priv);
spin_unlock(&priv->io_lock);
return 0;
}
static int exar_wdt_stop(struct watchdog_device *wdog)
{
struct wdt_priv *priv = watchdog_get_drvdata(wdog);
spin_lock(&priv->io_lock);
exar_wdt_disarm(priv);
spin_unlock(&priv->io_lock);
return 0;
}
static int exar_wdt_keepalive(struct watchdog_device *wdog)
{
struct wdt_priv *priv = watchdog_get_drvdata(wdog);
unsigned short rt_base = priv->wdt_res.start;
spin_lock(&priv->io_lock);
/* reading the WDT_VAL reg will feed the watchdog */
inb(rt_base + WDT_VAL);
spin_unlock(&priv->io_lock);
return 0;
}
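/*
 * Timeouts above 255 seconds are programmed with minute granularity,
 * so the effective value stored back in wdog->timeout is rounded up
 * to a whole minute.
 */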
static int exar_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
{
struct wdt_priv *priv = watchdog_get_drvdata(wdog);
bool unit_min = false;
/*
* if the new timeout is greater than 255 seconds, switch the unit
* to minutes and round the timeout up to the next whole minute
*/
if (t > 255) {
unit_min = true;
t = DIV_ROUND_UP(t, 60);
}
/* save for later use in exar_wdt_start() */
priv->unit = unit_min ? WDT_UNITS_MIN : WDT_UNITS_SEC;
priv->timeout = t;
wdog->timeout = unit_min ? t * 60 : t;
if (watchdog_hw_running(wdog))
exar_wdt_start(wdog);
return 0;
}
static const struct watchdog_info exar_wdt_info = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.identity = "Exar/MaxLinear XR28V38x Watchdog",
};
static const struct watchdog_ops exar_wdt_ops = {
.owner = THIS_MODULE,
.start = exar_wdt_start,
.stop = exar_wdt_stop,
.ping = exar_wdt_keepalive,
.set_timeout = exar_wdt_set_timeout,
};
static int exar_wdt_config(struct watchdog_device *wdog,
const unsigned char conf)
{
struct wdt_priv *priv = watchdog_get_drvdata(wdog);
int ret;
ret = exar_sio_enter(priv->config_port, priv->enter_key);
if (ret)
return ret;
exar_sio_select_wdt(priv->config_port);
exar_sio_write(priv->config_port, EXAR_WDT, conf);
exar_sio_exit(priv->config_port);
return 0;
}
static int __init exar_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct wdt_priv *priv = dev->platform_data;
struct watchdog_device *wdt_dev = &priv->wdt_dev;
struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -ENXIO;
spin_lock_init(&priv->io_lock);
wdt_dev->info = &exar_wdt_info;
wdt_dev->ops = &exar_wdt_ops;
wdt_dev->min_timeout = 1;
wdt_dev->max_timeout = 255 * 60;
watchdog_init_timeout(wdt_dev, timeout, NULL);
watchdog_set_nowayout(wdt_dev, nowayout);
watchdog_stop_on_reboot(wdt_dev);
watchdog_stop_on_unregister(wdt_dev);
watchdog_set_drvdata(wdt_dev, priv);
ret = exar_wdt_config(wdt_dev, EXAR_WDT_DEF_CONF);
if (ret)
return ret;
exar_wdt_set_timeout(wdt_dev, timeout);
/* Make sure that the watchdog is not running */
exar_wdt_stop(wdt_dev);
ret = devm_watchdog_register_device(dev, wdt_dev);
if (ret)
return ret;
dev_info(dev, "XR28V%X WDT initialized. timeout=%d sec (nowayout=%d)\n",
priv->did, timeout, nowayout);
return 0;
}
static unsigned short __init exar_detect(const unsigned short config_port,
const unsigned char key,
unsigned short *rt_base)
{
int ret;
unsigned short base = 0;
unsigned short vid, did;
ret = exar_sio_enter(config_port, key);
if (ret)
return 0;
vid = exar_sio_read16(config_port, EXAR_VID);
did = exar_sio_read16(config_port, EXAR_DID);
/* check for the vendor and device IDs we currently know about */
if (vid == EXAR_VEN_ID &&
(did == EXAR_DEV_382 ||
did == EXAR_DEV_384)) {
exar_sio_select_wdt(config_port);
/* is device active? */
if (exar_sio_read(config_port, EXAR_ACT) == 0x01)
base = exar_sio_read16(config_port, EXAR_RTBASE);
}
exar_sio_exit(config_port);
if (base) {
		pr_debug("Found an XR28V%X WDT (conf: 0x%x / rt: 0x%04x)\n",
did, config_port, base);
*rt_base = base;
return did;
}
return 0;
}
static struct platform_driver exar_wdt_driver = {
.driver = {
.name = DRV_NAME,
},
};
static LIST_HEAD(pdev_list);
static int __init exar_wdt_register(struct wdt_priv *priv, const int idx)
{
struct wdt_pdev_node *n;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
INIT_LIST_HEAD(&n->list);
scnprintf((char *)n->name, sizeof(n->name), DRV_NAME ".%d", idx);
priv->wdt_res.name = n->name;
n->pdev = platform_device_register_resndata(NULL, DRV_NAME, idx,
&priv->wdt_res, 1,
priv, sizeof(*priv));
if (IS_ERR(n->pdev)) {
int err = PTR_ERR(n->pdev);
kfree(n);
return err;
}
list_add_tail(&n->list, &pdev_list);
return 0;
}
static void exar_wdt_unregister(void)
{
struct wdt_pdev_node *n, *t;
list_for_each_entry_safe(n, t, &pdev_list, list) {
platform_device_unregister(n->pdev);
list_del(&n->list);
kfree(n);
}
}
static int __init exar_wdt_init(void)
{
int ret, i, j, idx = 0;
/* search for active Exar watchdogs on all possible locations */
for (i = 0; i < ARRAY_SIZE(sio_config_ports); i++) {
for (j = 0; j < ARRAY_SIZE(sio_enter_keys); j++) {
unsigned short did, rt_base = 0;
did = exar_detect(sio_config_ports[i],
sio_enter_keys[j],
&rt_base);
if (did) {
struct wdt_priv priv = {
.wdt_res = DEFINE_RES_IO(rt_base, 2),
.did = did,
.config_port = sio_config_ports[i],
.enter_key = sio_enter_keys[j],
};
ret = exar_wdt_register(&priv, idx);
if (!ret)
idx++;
}
}
}
if (!idx)
return -ENODEV;
ret = platform_driver_probe(&exar_wdt_driver, exar_wdt_probe);
if (ret)
exar_wdt_unregister();
return ret;
}
static void __exit exar_wdt_exit(void)
{
exar_wdt_unregister();
platform_driver_unregister(&exar_wdt_driver);
}
module_init(exar_wdt_init);
module_exit(exar_wdt_exit);
MODULE_AUTHOR("David Müller <[email protected]>");
MODULE_DESCRIPTION("Exar/MaxLinear Watchdog Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/exar_wdt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) STMicroelectronics 2018
// Author: Pascal Paillet <[email protected]> for STMicroelectronics.
#include <linux/kernel.h>
#include <linux/mfd/stpmic1.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
/* WATCHDOG CONTROL REGISTER bit */
#define WDT_START BIT(0)
#define WDT_PING BIT(1)
#define WDT_START_MASK BIT(0)
#define WDT_PING_MASK BIT(1)
#define WDT_STOP 0
#define PMIC_WDT_MIN_TIMEOUT 1
#define PMIC_WDT_MAX_TIMEOUT 256
#define PMIC_WDT_DEFAULT_TIMEOUT 30
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct stpmic1_wdt {
struct stpmic1 *pmic;
struct watchdog_device wdtdev;
};
static int pmic_wdt_start(struct watchdog_device *wdd)
{
struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd);
return regmap_update_bits(wdt->pmic->regmap,
WCHDG_CR, WDT_START_MASK, WDT_START);
}
static int pmic_wdt_stop(struct watchdog_device *wdd)
{
struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd);
return regmap_update_bits(wdt->pmic->regmap,
WCHDG_CR, WDT_START_MASK, WDT_STOP);
}
static int pmic_wdt_ping(struct watchdog_device *wdd)
{
struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd);
return regmap_update_bits(wdt->pmic->regmap,
WCHDG_CR, WDT_PING_MASK, WDT_PING);
}
static int pmic_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct stpmic1_wdt *wdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout;
/* timeout is equal to register value + 1 */
return regmap_write(wdt->pmic->regmap, WCHDG_TIMER_CR, timeout - 1);
}
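/*
 * Worked example (illustrative): a 30 s timeout writes 29 to
 * WCHDG_TIMER_CR, since the hardware counts the register value plus
 * one second.
 */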
static const struct watchdog_info pmic_watchdog_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "STPMIC1 PMIC Watchdog",
};
static const struct watchdog_ops pmic_watchdog_ops = {
.owner = THIS_MODULE,
.start = pmic_wdt_start,
.stop = pmic_wdt_stop,
.ping = pmic_wdt_ping,
.set_timeout = pmic_wdt_set_timeout,
};
static int pmic_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
struct stpmic1 *pmic;
struct stpmic1_wdt *wdt;
if (!dev->parent)
return -EINVAL;
pmic = dev_get_drvdata(dev->parent);
if (!pmic)
return -EINVAL;
wdt = devm_kzalloc(dev, sizeof(struct stpmic1_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->pmic = pmic;
wdt->wdtdev.info = &pmic_watchdog_info;
wdt->wdtdev.ops = &pmic_watchdog_ops;
wdt->wdtdev.min_timeout = PMIC_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = PMIC_WDT_MAX_TIMEOUT;
wdt->wdtdev.parent = dev;
wdt->wdtdev.timeout = PMIC_WDT_DEFAULT_TIMEOUT;
watchdog_init_timeout(&wdt->wdtdev, 0, dev);
watchdog_set_nowayout(&wdt->wdtdev, nowayout);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret)
return ret;
dev_dbg(wdt->pmic->dev, "PMIC Watchdog driver probed\n");
return 0;
}
static const struct of_device_id of_pmic_wdt_match[] = {
{ .compatible = "st,stpmic1-wdt" },
{ },
};
MODULE_DEVICE_TABLE(of, of_pmic_wdt_match);
static struct platform_driver stpmic1_wdt_driver = {
.probe = pmic_wdt_probe,
.driver = {
.name = "stpmic1-wdt",
.of_match_table = of_pmic_wdt_match,
},
};
module_platform_driver(stpmic1_wdt_driver);
MODULE_DESCRIPTION("Watchdog driver for STPMIC1 device");
MODULE_AUTHOR("Pascal Paillet <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/stpmic1_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
* Copyright (C) 2006 by Hans Edgington <[email protected]> *
* Copyright (C) 2007-2009 Hans de Goede <[email protected]> *
* Copyright (C) 2010 Giel van Schijndel <[email protected]> *
* *
***************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#define DRVNAME "f71808e_wdt"
#define SIO_F71808FG_LD_WDT 0x07 /* Watchdog timer logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
#define SIO_REG_DEVREV 0x22 /* Device revision */
#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
#define SIO_REG_CLOCK_SEL 0x26 /* Clock select */
#define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */
#define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */
#define SIO_REG_TSI_LEVEL_SEL 0x28 /* TSI Level select */
#define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */
#define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */
#define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */
#define SIO_F81866_REG_GPIO1 0x2c /* F81866 GPIO1 Enable Register */
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
#define SIO_F71808_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71868_ID 0x1106 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F81803_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
#define SIO_F81966_ID 0x1502 /* F81804 chipset ID, same for f81966 */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
#define F71808FG_REG_WD_TIME 0xf6
#define F71808FG_FLAG_WDOUT_EN 7
#define F71808FG_FLAG_WDTMOUT_STS 6
#define F71808FG_FLAG_WD_EN 5
#define F71808FG_FLAG_WD_PULSE 4
#define F71808FG_FLAG_WD_UNIT 3
#define F81865_REG_WDO_CONF 0xfa
#define F81865_FLAG_WDOUT_EN 0
/* Default values */
#define WATCHDOG_TIMEOUT 60 /* 1 minute default timeout */
#define WATCHDOG_MAX_TIMEOUT (60 * 255)
#define WATCHDOG_PULSE_WIDTH 125 /* 125 ms, default pulse width for
watchdog signal */
#define WATCHDOG_F71862FG_PIN 63 /* default watchdog reset output
pin number 63 */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
static int timeout = WATCHDOG_TIMEOUT; /* default timeout in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<= timeout <="
__MODULE_STRING(WATCHDOG_MAX_TIMEOUT) " (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH;
module_param(pulse_width, uint, 0);
MODULE_PARM_DESC(pulse_width,
"Watchdog signal pulse width. 0(=level), 1, 25, 30, 125, 150, 5000 or 6000 ms"
" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
module_param(f71862fg_pin, uint, 0);
MODULE_PARM_DESC(f71862fg_pin,
"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static unsigned int start_withtimeout;
module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
f81803, f81865, f81866, f81966};
static const char * const fintek_wdt_names[] = {
"f71808fg",
"f71858fg",
"f71862fg",
"f71868",
"f71869",
"f71882fg",
"f71889fg",
"f81803",
"f81865",
"f81866",
"f81966"
};
/* Super-I/O Function prototypes */
static inline int superio_inb(int base, int reg);
static inline int superio_inw(int base, int reg);
static inline void superio_outb(int base, int reg, u8 val);
static inline void superio_set_bit(int base, int reg, int bit);
static inline void superio_clear_bit(int base, int reg, int bit);
static inline int superio_enter(int base);
static inline void superio_select(int base, int ld);
static inline void superio_exit(int base);
struct fintek_wdt {
struct watchdog_device wdd;
unsigned short sioaddr;
enum chips type;
struct watchdog_info ident;
u8 timer_val; /* content for the wd_time register */
char minutes_mode;
u8 pulse_val; /* pulse width flag */
char pulse_mode; /* enable pulse output mode? */
};
struct fintek_wdt_pdata {
enum chips type;
};
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
{
outb(reg, base);
return inb(base + 1);
}
static int superio_inw(int base, int reg)
{
int val;
val = superio_inb(base, reg) << 8;
val |= superio_inb(base, reg + 1);
return val;
}
static inline void superio_outb(int base, int reg, u8 val)
{
outb(reg, base);
outb(val, base + 1);
}
static inline void superio_set_bit(int base, int reg, int bit)
{
unsigned long val = superio_inb(base, reg);
__set_bit(bit, &val);
superio_outb(base, reg, val);
}
static inline void superio_clear_bit(int base, int reg, int bit)
{
unsigned long val = superio_inb(base, reg);
__clear_bit(bit, &val);
superio_outb(base, reg, val);
}
static inline int superio_enter(int base)
{
/* Don't step on other drivers' I/O space by accident */
if (!request_muxed_region(base, 2, DRVNAME)) {
pr_err("I/O address 0x%04x already in use\n", (int)base);
return -EBUSY;
}
/* according to the datasheet the key must be sent twice! */
outb(SIO_UNLOCK_KEY, base);
outb(SIO_UNLOCK_KEY, base);
return 0;
}
static inline void superio_select(int base, int ld)
{
outb(SIO_REG_LDSEL, base);
outb(ld, base + 1);
}
static inline void superio_exit(int base)
{
outb(SIO_LOCK_KEY, base);
release_region(base, 2);
}
static int fintek_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
{
struct fintek_wdt *wd = watchdog_get_drvdata(wdd);
if (timeout > 0xff) {
wd->timer_val = DIV_ROUND_UP(timeout, 60);
wd->minutes_mode = true;
timeout = wd->timer_val * 60;
} else {
wd->timer_val = timeout;
wd->minutes_mode = false;
}
wdd->timeout = timeout;
return 0;
}
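/*
 * Worked example (illustrative): timeout = 300 exceeds 0xff, so the
 * timer switches to minutes mode with timer_val = DIV_ROUND_UP(300, 60)
 * = 5, giving an effective timeout of 5 * 60 = 300 s; a request of
 * 90 s rounds up to 2 minutes (120 s).
 */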
static int fintek_wdt_set_pulse_width(struct fintek_wdt *wd, unsigned int pw)
{
unsigned int t1 = 25, t2 = 125, t3 = 5000;
if (wd->type == f71868) {
t1 = 30;
t2 = 150;
t3 = 6000;
}
if (pw <= 1) {
wd->pulse_val = 0;
} else if (pw <= t1) {
wd->pulse_val = 1;
} else if (pw <= t2) {
wd->pulse_val = 2;
} else if (pw <= t3) {
wd->pulse_val = 3;
} else {
pr_err("pulse width out of range\n");
return -EINVAL;
}
wd->pulse_mode = pw;
return 0;
}
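/*
 * Worked example (illustrative): on chips other than the f71868,
 * pw = 125 ms falls into the (25, 125] bucket and selects
 * pulse_val = 2, while pw = 0 or 1 selects level mode (pulse_val = 0).
 * The f71868 uses the wider 30/150/6000 ms thresholds instead.
 */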
static int fintek_wdt_keepalive(struct watchdog_device *wdd)
{
struct fintek_wdt *wd = watchdog_get_drvdata(wdd);
int err;
err = superio_enter(wd->sioaddr);
if (err)
return err;
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
if (wd->minutes_mode)
/* select minutes for timer units */
superio_set_bit(wd->sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_UNIT);
else
/* select seconds for timer units */
superio_clear_bit(wd->sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_UNIT);
/* Set timer value */
superio_outb(wd->sioaddr, F71808FG_REG_WD_TIME,
wd->timer_val);
superio_exit(wd->sioaddr);
return 0;
}
static int fintek_wdt_start(struct watchdog_device *wdd)
{
struct fintek_wdt *wd = watchdog_get_drvdata(wdd);
int err;
u8 tmp;
/* Make sure we don't die as soon as the watchdog is enabled below */
err = fintek_wdt_keepalive(wdd);
if (err)
return err;
err = superio_enter(wd->sioaddr);
if (err)
return err;
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
/* Watchdog pin configuration */
switch (wd->type) {
case f71808fg:
/* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
superio_clear_bit(wd->sioaddr, SIO_REG_MFUNCT2, 3);
superio_clear_bit(wd->sioaddr, SIO_REG_MFUNCT3, 3);
break;
case f71862fg:
if (f71862fg_pin == 63) {
/* SPI must be disabled first to use this pin! */
superio_clear_bit(wd->sioaddr, SIO_REG_ROM_ADDR_SEL, 6);
superio_set_bit(wd->sioaddr, SIO_REG_MFUNCT3, 4);
} else if (f71862fg_pin == 56) {
superio_set_bit(wd->sioaddr, SIO_REG_MFUNCT1, 1);
}
break;
case f71868:
case f71869:
/* GPIO14 --> WDTRST# */
superio_clear_bit(wd->sioaddr, SIO_REG_MFUNCT1, 4);
break;
case f71882fg:
/* Set pin 56 to WDTRST# */
superio_set_bit(wd->sioaddr, SIO_REG_MFUNCT1, 1);
break;
case f71889fg:
/* set pin 40 to WDTRST# */
superio_outb(wd->sioaddr, SIO_REG_MFUNCT3,
superio_inb(wd->sioaddr, SIO_REG_MFUNCT3) & 0xcf);
break;
case f81803:
/* Enable TSI Level register bank */
superio_clear_bit(wd->sioaddr, SIO_REG_CLOCK_SEL, 3);
/* Set pin 27 to WDTRST# */
superio_outb(wd->sioaddr, SIO_REG_TSI_LEVEL_SEL, 0x5f &
superio_inb(wd->sioaddr, SIO_REG_TSI_LEVEL_SEL));
break;
case f81865:
/* Set pin 70 to WDTRST# */
superio_clear_bit(wd->sioaddr, SIO_REG_MFUNCT3, 5);
break;
case f81866:
case f81966:
/*
* GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
* BIT5: 0 -> WDTRST#
* 1 -> GPIO15
*/
tmp = superio_inb(wd->sioaddr, SIO_F81866_REG_PORT_SEL);
tmp &= ~(BIT(3) | BIT(0));
tmp |= BIT(2);
superio_outb(wd->sioaddr, SIO_F81866_REG_PORT_SEL, tmp);
superio_clear_bit(wd->sioaddr, SIO_F81866_REG_GPIO1, 5);
break;
default:
/*
* 'default' label to shut up the compiler and catch
* programmer errors
*/
err = -ENODEV;
goto exit_superio;
}
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(wd->sioaddr, SIO_REG_ENABLE, 0);
if (wd->type == f81865 || wd->type == f81866 || wd->type == f81966)
superio_set_bit(wd->sioaddr, F81865_REG_WDO_CONF,
F81865_FLAG_WDOUT_EN);
else
superio_set_bit(wd->sioaddr, F71808FG_REG_WDO_CONF,
F71808FG_FLAG_WDOUT_EN);
superio_set_bit(wd->sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_EN);
if (wd->pulse_mode) {
/* Select "pulse" output mode with given duration */
u8 wdt_conf = superio_inb(wd->sioaddr,
F71808FG_REG_WDT_CONF);
/* Set WD_PSWIDTH bits (1:0) */
wdt_conf = (wdt_conf & 0xfc) | (wd->pulse_val & 0x03);
/* Set WD_PULSE to "pulse" mode */
wdt_conf |= BIT(F71808FG_FLAG_WD_PULSE);
superio_outb(wd->sioaddr, F71808FG_REG_WDT_CONF,
wdt_conf);
} else {
/* Select "level" output mode */
superio_clear_bit(wd->sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_PULSE);
}
exit_superio:
superio_exit(wd->sioaddr);
return err;
}
static int fintek_wdt_stop(struct watchdog_device *wdd)
{
struct fintek_wdt *wd = watchdog_get_drvdata(wdd);
int err;
err = superio_enter(wd->sioaddr);
if (err)
return err;
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
superio_clear_bit(wd->sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_EN);
superio_exit(wd->sioaddr);
return 0;
}
static bool fintek_wdt_is_running(struct fintek_wdt *wd, u8 wdt_conf)
{
return (superio_inb(wd->sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (wdt_conf & BIT(F71808FG_FLAG_WD_EN));
}
static const struct watchdog_ops fintek_wdt_ops = {
.owner = THIS_MODULE,
.start = fintek_wdt_start,
.stop = fintek_wdt_stop,
.ping = fintek_wdt_keepalive,
.set_timeout = fintek_wdt_set_timeout,
};
static int fintek_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fintek_wdt_pdata *pdata;
struct watchdog_device *wdd;
struct fintek_wdt *wd;
int wdt_conf, err = 0;
struct resource *res;
int sioaddr;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -ENXIO;
sioaddr = res->start;
wd = devm_kzalloc(dev, sizeof(*wd), GFP_KERNEL);
if (!wd)
return -ENOMEM;
pdata = dev->platform_data;
wd->type = pdata->type;
wd->sioaddr = sioaddr;
wd->ident.options = WDIOF_SETTIMEOUT
| WDIOF_MAGICCLOSE
| WDIOF_KEEPALIVEPING
| WDIOF_CARDRESET;
snprintf(wd->ident.identity,
sizeof(wd->ident.identity), "%s watchdog",
fintek_wdt_names[wd->type]);
err = superio_enter(sioaddr);
if (err)
return err;
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
/*
* We don't want WDTMOUT_STS to stick around till regular reboot.
* Write 1 to the bit to clear it to zero.
*/
superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
wdd = &wd->wdd;
if (fintek_wdt_is_running(wd, wdt_conf))
set_bit(WDOG_HW_RUNNING, &wdd->status);
superio_exit(sioaddr);
wdd->parent = dev;
wdd->info = &wd->ident;
wdd->ops = &fintek_wdt_ops;
wdd->min_timeout = 1;
wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;
watchdog_set_drvdata(wdd, wd);
watchdog_set_nowayout(wdd, nowayout);
watchdog_stop_on_unregister(wdd);
watchdog_stop_on_reboot(wdd);
watchdog_init_timeout(wdd, start_withtimeout ?: timeout, NULL);
if (wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS))
wdd->bootstatus = WDIOF_CARDRESET;
/*
* WATCHDOG_HANDLE_BOOT_ENABLED can result in keepalive being directly
* called without a set_timeout before, so it needs to be done here
* unconditionally.
*/
fintek_wdt_set_timeout(wdd, wdd->timeout);
fintek_wdt_set_pulse_width(wd, pulse_width);
if (start_withtimeout) {
err = fintek_wdt_start(wdd);
if (err) {
dev_err(dev, "cannot start watchdog timer\n");
return err;
}
set_bit(WDOG_HW_RUNNING, &wdd->status);
dev_info(dev, "watchdog started with initial timeout of %u sec\n",
start_withtimeout);
}
return devm_watchdog_register_device(dev, wdd);
}
static int __init fintek_wdt_find(int sioaddr)
{
enum chips type;
u16 devid;
int err = superio_enter(sioaddr);
if (err)
return err;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID) {
pr_debug("Not a Fintek device\n");
err = -ENODEV;
goto exit;
}
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
case SIO_F71808_ID:
type = f71808fg;
break;
case SIO_F71862_ID:
type = f71862fg;
break;
case SIO_F71868_ID:
type = f71868;
break;
case SIO_F71869_ID:
case SIO_F71869A_ID:
type = f71869;
break;
case SIO_F71882_ID:
type = f71882fg;
break;
case SIO_F71889_ID:
type = f71889fg;
break;
case SIO_F71858_ID:
/* Confirmed (by datasheet) not to have a watchdog. */
err = -ENODEV;
goto exit;
case SIO_F81803_ID:
type = f81803;
break;
case SIO_F81865_ID:
type = f81865;
break;
case SIO_F81866_ID:
type = f81866;
break;
case SIO_F81966_ID:
type = f81966;
break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
err = -ENODEV;
goto exit;
}
pr_info("Found %s watchdog chip, revision %d\n",
fintek_wdt_names[type],
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
return err ? err : type;
}
static struct platform_driver fintek_wdt_driver = {
.probe = fintek_wdt_probe,
.driver = {
.name = DRVNAME,
},
};
static struct platform_device *fintek_wdt_pdev;
static int __init fintek_wdt_init(void)
{
static const unsigned short addrs[] = { 0x2e, 0x4e };
struct fintek_wdt_pdata pdata;
struct resource wdt_res = {};
int ret;
int i;
if (f71862fg_pin != 63 && f71862fg_pin != 56) {
pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(addrs); i++) {
ret = fintek_wdt_find(addrs[i]);
if (ret >= 0)
break;
}
if (i == ARRAY_SIZE(addrs))
return ret;
pdata.type = ret;
ret = platform_driver_register(&fintek_wdt_driver);
if (ret)
return ret;
wdt_res.name = "superio port";
wdt_res.flags = IORESOURCE_IO;
wdt_res.start = addrs[i];
wdt_res.end = addrs[i] + 1;
fintek_wdt_pdev = platform_device_register_resndata(NULL, DRVNAME, -1,
&wdt_res, 1,
&pdata, sizeof(pdata));
if (IS_ERR(fintek_wdt_pdev)) {
platform_driver_unregister(&fintek_wdt_driver);
return PTR_ERR(fintek_wdt_pdev);
}
return 0;
}
static void __exit fintek_wdt_exit(void)
{
platform_device_unregister(fintek_wdt_pdev);
platform_driver_unregister(&fintek_wdt_driver);
}
MODULE_DESCRIPTION("F71808E Watchdog Driver");
MODULE_AUTHOR("Giel van Schijndel <[email protected]>");
MODULE_LICENSE("GPL");
module_init(fintek_wdt_init);
module_exit(fintek_wdt_exit);
| linux-master | drivers/watchdog/f71808e_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Atmel SAMA5D4 Watchdog Timer
*
* Copyright (C) 2015-2019 Microchip Technology Inc. and its subsidiaries
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
#include "at91sam9_wdt.h"
/* minimum and maximum watchdog timeout, in seconds */
#define MIN_WDT_TIMEOUT 1
#define MAX_WDT_TIMEOUT 16
#define WDT_DEFAULT_TIMEOUT MAX_WDT_TIMEOUT
#define WDT_SEC2TICKS(s) ((s) ? (((s) << 8) - 1) : 0)
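/*
 * Worked example (illustrative): WDT_SEC2TICKS(16) = (16 << 8) - 1 =
 * 4095, i.e. s * 256 - 1 ticks, matching a watchdog counter that is
 * assumed to tick at 256 Hz (the 32.768 kHz slow clock divided by 128).
 */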
struct sama5d4_wdt {
struct watchdog_device wdd;
void __iomem *reg_base;
u32 mr;
u32 ir;
unsigned long last_ping;
bool need_irq;
bool sam9x60_support;
};
static int wdt_timeout;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_timeout, int, 0);
MODULE_PARM_DESC(wdt_timeout,
"Watchdog timeout in seconds. (default = "
__MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS))
#define wdt_read(wdt, field) \
readl_relaxed((wdt)->reg_base + (field))
/* 4 slow clock periods are 4/32768 s = 122.07 µs */
#define WDT_DELAY usecs_to_jiffies(123)
static void wdt_write(struct sama5d4_wdt *wdt, u32 field, u32 val)
{
/*
* WDT_CR and WDT_MR must not be modified within three slow clock
* periods following a restart of the watchdog performed by a write
* access in WDT_CR.
*/
while (time_before(jiffies, wdt->last_ping + WDT_DELAY))
usleep_range(30, 125);
writel_relaxed(val, wdt->reg_base + field);
wdt->last_ping = jiffies;
}
static void wdt_write_nosleep(struct sama5d4_wdt *wdt, u32 field, u32 val)
{
if (time_before(jiffies, wdt->last_ping + WDT_DELAY))
udelay(123);
writel_relaxed(val, wdt->reg_base + field);
wdt->last_ping = jiffies;
}
static int sama5d4_wdt_start(struct watchdog_device *wdd)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
if (wdt->sam9x60_support) {
writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IER);
wdt->mr &= ~AT91_SAM9X60_WDDIS;
} else {
wdt->mr &= ~AT91_WDT_WDDIS;
}
wdt_write(wdt, AT91_WDT_MR, wdt->mr);
return 0;
}
static int sama5d4_wdt_stop(struct watchdog_device *wdd)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
if (wdt->sam9x60_support) {
writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IDR);
wdt->mr |= AT91_SAM9X60_WDDIS;
} else {
wdt->mr |= AT91_WDT_WDDIS;
}
wdt_write(wdt, AT91_WDT_MR, wdt->mr);
return 0;
}
static int sama5d4_wdt_ping(struct watchdog_device *wdd)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
return 0;
}
static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
u32 value = WDT_SEC2TICKS(timeout);
if (wdt->sam9x60_support) {
wdt_write(wdt, AT91_SAM9X60_WLR,
AT91_SAM9X60_SET_COUNTER(value));
wdd->timeout = timeout;
return 0;
}
wdt->mr &= ~AT91_WDT_WDV;
wdt->mr |= AT91_WDT_SET_WDV(value);
/*
* WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When
* setting the WDDIS bit, and while it is set, the fields WDV and WDD
* must not be modified.
* If the watchdog is enabled, then the timeout can be updated. Else,
* wait that the user enables it.
*/
if (wdt_enabled)
wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS);
wdd->timeout = timeout;
return 0;
}
static const struct watchdog_info sama5d4_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "Atmel SAMA5D4 Watchdog",
};
static const struct watchdog_ops sama5d4_wdt_ops = {
.owner = THIS_MODULE,
.start = sama5d4_wdt_start,
.stop = sama5d4_wdt_stop,
.ping = sama5d4_wdt_ping,
.set_timeout = sama5d4_wdt_set_timeout,
};
static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
{
struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
u32 reg;
if (wdt->sam9x60_support)
reg = wdt_read(wdt, AT91_SAM9X60_ISR);
else
reg = wdt_read(wdt, AT91_WDT_SR);
if (reg) {
pr_crit("Atmel Watchdog Software Reset\n");
emergency_restart();
pr_crit("Reboot didn't succeed\n");
}
return IRQ_HANDLED;
}
static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
{
const char *tmp;
if (wdt->sam9x60_support)
wdt->mr = AT91_SAM9X60_WDDIS;
else
wdt->mr = AT91_WDT_WDDIS;
if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
!strcmp(tmp, "software"))
wdt->need_irq = true;
if (of_property_read_bool(np, "atmel,idle-halt"))
wdt->mr |= AT91_WDT_WDIDLEHLT;
if (of_property_read_bool(np, "atmel,dbg-halt"))
wdt->mr |= AT91_WDT_WDDBGHLT;
return 0;
}
static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
{
u32 reg, val;
val = WDT_SEC2TICKS(WDT_DEFAULT_TIMEOUT);
/*
* When booting and resuming, the bootloader may have changed the
* watchdog configuration.
* If the watchdog is already running, we can safely update it.
* Else, we have to disable it properly.
*/
if (!wdt_enabled) {
reg = wdt_read(wdt, AT91_WDT_MR);
if (wdt->sam9x60_support && (!(reg & AT91_SAM9X60_WDDIS)))
wdt_write_nosleep(wdt, AT91_WDT_MR,
reg | AT91_SAM9X60_WDDIS);
else if (!wdt->sam9x60_support &&
(!(reg & AT91_WDT_WDDIS)))
wdt_write_nosleep(wdt, AT91_WDT_MR,
reg | AT91_WDT_WDDIS);
}
if (wdt->sam9x60_support) {
if (wdt->need_irq)
wdt->ir = AT91_SAM9X60_PERINT;
else
wdt->mr |= AT91_SAM9X60_PERIODRST;
wdt_write(wdt, AT91_SAM9X60_IER, wdt->ir);
wdt_write(wdt, AT91_SAM9X60_WLR, AT91_SAM9X60_SET_COUNTER(val));
} else {
wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
wdt->mr |= AT91_WDT_SET_WDV(val);
if (wdt->need_irq)
wdt->mr |= AT91_WDT_WDFIEN;
else
wdt->mr |= AT91_WDT_WDRSTEN;
}
wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
return 0;
}
static int sama5d4_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct sama5d4_wdt *wdt;
void __iomem *regs;
u32 irq = 0;
u32 reg;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdd = &wdt->wdd;
wdd->timeout = WDT_DEFAULT_TIMEOUT;
wdd->info = &sama5d4_wdt_info;
wdd->ops = &sama5d4_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
wdt->last_ping = jiffies;
if (of_device_is_compatible(dev->of_node, "microchip,sam9x60-wdt") ||
of_device_is_compatible(dev->of_node, "microchip,sama7g5-wdt"))
wdt->sam9x60_support = true;
watchdog_set_drvdata(wdd, wdt);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
wdt->reg_base = regs;
ret = of_sama5d4_wdt_init(dev->of_node, wdt);
if (ret)
return ret;
if (wdt->need_irq) {
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_warn(dev, "failed to get IRQ from DT\n");
wdt->need_irq = false;
}
}
if (wdt->need_irq) {
ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler,
IRQF_SHARED | IRQF_IRQPOLL |
IRQF_NO_SUSPEND, pdev->name, pdev);
if (ret) {
dev_err(dev, "cannot register interrupt handler\n");
return ret;
}
}
watchdog_init_timeout(wdd, wdt_timeout, dev);
reg = wdt_read(wdt, AT91_WDT_MR);
if (!(reg & AT91_WDT_WDDIS)) {
wdt->mr &= ~AT91_WDT_WDDIS;
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
ret = sama5d4_wdt_init(wdt);
if (ret)
return ret;
watchdog_set_nowayout(wdd, nowayout);
watchdog_stop_on_unregister(wdd);
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
platform_set_drvdata(pdev, wdt);
dev_info(dev, "initialized (timeout = %d sec, nowayout = %d)\n",
wdd->timeout, nowayout);
return 0;
}
static const struct of_device_id sama5d4_wdt_of_match[] = {
{
.compatible = "atmel,sama5d4-wdt",
},
{
.compatible = "microchip,sam9x60-wdt",
},
{
.compatible = "microchip,sama7g5-wdt",
},
{ }
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
static int sama5d4_wdt_suspend_late(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
if (watchdog_active(&wdt->wdd))
sama5d4_wdt_stop(&wdt->wdd);
return 0;
}
static int sama5d4_wdt_resume_early(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
/*
* FIXME: writing MR also pings the watchdog which may not be desired.
* This should only be done when the registers are lost on suspend but
* there is no way to get this information right now.
*/
sama5d4_wdt_init(wdt);
if (watchdog_active(&wdt->wdd))
sama5d4_wdt_start(&wdt->wdd);
return 0;
}
static const struct dev_pm_ops sama5d4_wdt_pm_ops = {
LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
sama5d4_wdt_resume_early)
};
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
.driver = {
.name = "sama5d4_wdt",
.pm = pm_sleep_ptr(&sama5d4_wdt_pm_ops),
.of_match_table = sama5d4_wdt_of_match,
}
};
module_platform_driver(sama5d4_wdt_driver);
MODULE_AUTHOR("Atmel Corporation");
MODULE_DESCRIPTION("Atmel SAMA5D4 Watchdog Timer driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/sama5d4_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NANO7240 SBC Watchdog device driver
*
* Based on w83877f.c by Scott Jennings,
*
* (c) Copyright 2007 Gilles GIGAN <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#define SBC7240_ENABLE_PORT 0x443
#define SBC7240_DISABLE_PORT 0x043
#define SBC7240_SET_TIMEOUT_PORT SBC7240_ENABLE_PORT
#define SBC7240_MAGIC_CHAR 'V'
#define SBC7240_TIMEOUT 30
#define SBC7240_MAX_TIMEOUT 255
static int timeout = SBC7240_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<="
__MODULE_STRING(SBC7240_MAX_TIMEOUT) ", default="
__MODULE_STRING(SBC7240_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#define SBC7240_OPEN_STATUS_BIT 0
#define SBC7240_ENABLED_STATUS_BIT 1
#define SBC7240_EXPECT_CLOSE_STATUS_BIT 2
static unsigned long wdt_status;
/*
* Utility routines
*/
static void wdt_disable(void)
{
/* disable the watchdog */
if (test_and_clear_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_DISABLE_PORT);
pr_info("Watchdog timer is now disabled\n");
}
}
static void wdt_enable(void)
{
/* enable the watchdog */
if (!test_and_set_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_ENABLE_PORT);
pr_info("Watchdog timer is now enabled\n");
}
}
static int wdt_set_timeout(int t)
{
if (t < 1 || t > SBC7240_MAX_TIMEOUT) {
pr_err("timeout value must be 1<=x<=%d\n", SBC7240_MAX_TIMEOUT);
return -1;
}
/* set the timeout */
outb_p((unsigned)t, SBC7240_SET_TIMEOUT_PORT);
timeout = t;
pr_info("timeout set to %d seconds\n", t);
return 0;
}
/* Whack the dog */
static inline void wdt_keepalive(void)
{
if (test_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status))
inb_p(SBC7240_ENABLE_PORT);
}
/*
* /dev/watchdog handling
*/
static ssize_t fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
size_t i;
char c;
if (count) {
if (!nowayout) {
clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
/* is there a magic char ? */
for (i = 0; i != count; i++) {
if (get_user(c, buf + i))
return -EFAULT;
if (c == SBC7240_MAGIC_CHAR) {
set_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
break;
}
}
}
wdt_keepalive();
}
return count;
}
static int fop_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status))
return -EBUSY;
wdt_enable();
return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
{
if (test_and_clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT, &wdt_status)
|| !nowayout) {
wdt_disable();
} else {
pr_crit("Unexpected close, not stopping watchdog!\n");
wdt_keepalive();
}
clear_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status);
return 0;
}
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING|
WDIOF_SETTIMEOUT|
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SBC7240",
};
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user((void __user *)arg, &ident, sizeof(ident))
? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, (int __user *)arg);
case WDIOC_SETOPTIONS:
{
int options;
int retval = -EINVAL;
if (get_user(options, (int __user *)arg))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
wdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
wdt_enable();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_timeout;
if (get_user(new_timeout, (int __user *)arg))
return -EFAULT;
if (wdt_set_timeout(new_timeout))
return -EINVAL;
}
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
return -ENOTTY;
}
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_disable();
return NOTIFY_DONE;
}
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static void __exit sbc7240_wdt_unload(void)
{
pr_info("Removing watchdog\n");
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(SBC7240_ENABLE_PORT, 1);
}
static int __init sbc7240_wdt_init(void)
{
int rc = -EBUSY;
if (!request_region(SBC7240_ENABLE_PORT, 1, "SBC7240 WDT")) {
pr_err("I/O address 0x%04x already in use\n",
SBC7240_ENABLE_PORT);
rc = -EIO;
goto err_out;
}
	/*
	 * The I/O port 0x043 used to disable the watchdog is already
	 * claimed by the system timer, so we can't request_region() it.
	 */
if (timeout < 1 || timeout > SBC7240_MAX_TIMEOUT) {
timeout = SBC7240_TIMEOUT;
pr_info("timeout value must be 1<=x<=%d, using %d\n",
SBC7240_MAX_TIMEOUT, timeout);
}
wdt_set_timeout(timeout);
wdt_disable();
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
pr_err("cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot_notifier;
}
pr_info("Watchdog driver for SBC7240 initialised (nowayout=%d)\n",
nowayout);
return 0;
err_out_reboot_notifier:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
release_region(SBC7240_ENABLE_PORT, 1);
err_out:
return rc;
}
module_init(sbc7240_wdt_init);
module_exit(sbc7240_wdt_unload);
MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
" computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sbc7240_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Industrial Computer Source PCI-WDT500/501 driver
*
* (c) Copyright 1996-1997 Alan Cox <[email protected]>,
* All Rights Reserved.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <[email protected]>
*
* Release 0.10.
*
* Fixes
* Dave Gregorich : Modularisation and minor bugs
* Alan Cox : Added the watchdog ioctl() stuff
* Alan Cox : Fixed the reboot problem (as noted by
* Matt Crocker).
* Alan Cox : Added wdt= boot option
* Alan Cox : Cleaned up copy/user stuff
* Tim Hockin : Added insmod parameters, comment cleanup
* Parameterized timeout
* JP Nollmann : Added support for PCI wdt501p
* Alan Cox : Split ISA and PCI cards into two drivers
* Jeff Garzik : PCI cleanups
* Tigran Aivazian : Restructured wdtpci_init_one() to handle
* failures
* Joel Becker : Added WDIOC_GET/SETTIMEOUT
* Zwane Mwaikambo : Magic char closing, locking changes,
* cleanups
* Matt Domsch : nowayout module option
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#define WDT_IS_PCI
#include "wd501p.h"
/* We can only use 1 card due to the /dev/watchdog restriction */
static int dev_count;
static unsigned long open_lock;
static DEFINE_SPINLOCK(wdtpci_lock);
static char expect_close;
static resource_size_t io;
static int irq;
/* Default timeout */
#define WD_TIMO 60 /* Default heartbeat = 60 seconds */
static int heartbeat = WD_TIMO;
static int wd_heartbeat;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
"Watchdog heartbeat in seconds. (0<heartbeat<65536, default="
__MODULE_STRING(WD_TIMO) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* Support for the Fan Tachometer on the PCI-WDT501 */
static int tachometer;
module_param(tachometer, int, 0);
MODULE_PARM_DESC(tachometer,
"PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
static int type = 500;
module_param(type, int, 0);
MODULE_PARM_DESC(type,
"PCI-WDT501 Card type (500 or 501 , default=500)");
/*
* Programming support
*/
static void wdtpci_ctr_mode(int ctr, int mode)
{
ctr <<= 6;
ctr |= 0x30;
ctr |= (mode << 1);
outb(ctr, WDT_CR);
udelay(8);
}
static void wdtpci_ctr_load(int ctr, int val)
{
outb(val & 0xFF, WDT_COUNT0 + ctr);
udelay(8);
outb(val >> 8, WDT_COUNT0 + ctr);
udelay(8);
}
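/*
 * Worked example (illustrative): programming counter 1 for mode 2
 * builds the control word (1 << 6) | 0x30 | (2 << 1) = 0x74, i.e.
 * "counter 1, read/write LSB then MSB, mode 2, binary"; the 16-bit
 * count is then loaded LSB first by wdtpci_ctr_load().
 */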
/**
* wdtpci_start:
*
* Start the watchdog driver.
*/
static int wdtpci_start(void)
{
unsigned long flags;
spin_lock_irqsave(&wdtpci_lock, flags);
/*
* "pet" the watchdog, as Access says.
* This resets the clock outputs.
*/
inb(WDT_DC); /* Disable watchdog */
udelay(8);
wdtpci_ctr_mode(2, 0); /* Program CTR2 for Mode 0:
Pulse on Terminal Count */
outb(0, WDT_DC); /* Enable watchdog */
udelay(8);
inb(WDT_DC); /* Disable watchdog */
udelay(8);
outb(0, WDT_CLOCK); /* 2.0833MHz clock */
udelay(8);
inb(WDT_BUZZER); /* disable */
udelay(8);
inb(WDT_OPTONOTRST); /* disable */
udelay(8);
inb(WDT_OPTORST); /* disable */
udelay(8);
inb(WDT_PROGOUT); /* disable */
udelay(8);
wdtpci_ctr_mode(0, 3); /* Program CTR0 for Mode 3:
Square Wave Generator */
wdtpci_ctr_mode(1, 2); /* Program CTR1 for Mode 2:
Rate Generator */
wdtpci_ctr_mode(2, 1); /* Program CTR2 for Mode 1:
Retriggerable One-Shot */
wdtpci_ctr_load(0, 20833); /* count at 100Hz */
wdtpci_ctr_load(1, wd_heartbeat);/* Heartbeat */
/* DO NOT LOAD CTR2 on PCI card! -- JPN */
outb(0, WDT_DC); /* Enable watchdog */
udelay(8);
spin_unlock_irqrestore(&wdtpci_lock, flags);
return 0;
}
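/*
 * Illustrative check of the cascade above: the 2.0833 MHz input
 * divided by the CTR0 count of 20833 gives 2083300 / 20833 = 100 Hz,
 * so CTR1 decrements its count once every 10 ms.
 */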
/**
* wdtpci_stop:
*
* Stop the watchdog driver.
*/
static int wdtpci_stop(void)
{
unsigned long flags;
/* Turn the card off */
spin_lock_irqsave(&wdtpci_lock, flags);
inb(WDT_DC); /* Disable watchdog */
udelay(8);
wdtpci_ctr_load(2, 0); /* 0 length reset pulses now */
spin_unlock_irqrestore(&wdtpci_lock, flags);
return 0;
}
/**
* wdtpci_ping:
*
* Reload counter one with the watchdog heartbeat. We don't bother
* reloading the cascade counter.
*/
static int wdtpci_ping(void)
{
unsigned long flags;
spin_lock_irqsave(&wdtpci_lock, flags);
/* Write a watchdog value */
inb(WDT_DC); /* Disable watchdog */
udelay(8);
wdtpci_ctr_mode(1, 2); /* Re-Program CTR1 for Mode 2:
Rate Generator */
wdtpci_ctr_load(1, wd_heartbeat);/* Heartbeat */
outb(0, WDT_DC); /* Enable watchdog */
udelay(8);
spin_unlock_irqrestore(&wdtpci_lock, flags);
return 0;
}
/**
* wdtpci_set_heartbeat:
* @t: the new heartbeat value that needs to be set.
*
* Set a new heartbeat value for the watchdog device. If the heartbeat
* value is incorrect we keep the old value and return -EINVAL.
* If successful we return 0.
*/
static int wdtpci_set_heartbeat(int t)
{
/* Arbitrary, can't find the card's limits */
if (t < 1 || t > 65535)
return -EINVAL;
heartbeat = t;
wd_heartbeat = t * 100;
return 0;
}
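/*
 * Worked example (illustrative): with the 100 Hz cascade from CTR0,
 * wd_heartbeat = t * 100 counts one tick per 10 ms, so t = 60 loads
 * 6000 ticks, i.e. a 60 second heartbeat.
 */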
/**
* wdtpci_get_status:
* @status: the new status.
*
* Extract the status information from a WDT watchdog device. There are
* several board variants so we have to know which bits are valid. Some
* bits default to one and some to zero in order to be maximally painful.
*
* we then map the bits onto the status ioctl flags.
*/
static int wdtpci_get_status(int *status)
{
unsigned char new_status;
unsigned long flags;
spin_lock_irqsave(&wdtpci_lock, flags);
new_status = inb(WDT_SR);
spin_unlock_irqrestore(&wdtpci_lock, flags);
*status = 0;
if (new_status & WDC_SR_ISOI0)
*status |= WDIOF_EXTERN1;
if (new_status & WDC_SR_ISII1)
*status |= WDIOF_EXTERN2;
if (type == 501) {
if (!(new_status & WDC_SR_TGOOD))
*status |= WDIOF_OVERHEAT;
if (!(new_status & WDC_SR_PSUOVER))
*status |= WDIOF_POWEROVER;
if (!(new_status & WDC_SR_PSUUNDR))
*status |= WDIOF_POWERUNDER;
if (tachometer) {
if (!(new_status & WDC_SR_FANGOOD))
*status |= WDIOF_FANFAULT;
}
}
return 0;
}
/**
* wdtpci_get_temperature:
*
* Reports the temperature in degrees Fahrenheit. The API is in
* farenheit. It was designed by an imperial measurement luddite.
*/
static int wdtpci_get_temperature(int *temperature)
{
unsigned short c;
unsigned long flags;
spin_lock_irqsave(&wdtpci_lock, flags);
c = inb(WDT_RT);
udelay(8);
spin_unlock_irqrestore(&wdtpci_lock, flags);
*temperature = (c * 11 / 15) + 7;
return 0;
}
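/*
 * Worked example (illustrative): a raw reading of c = 75 maps to
 * (75 * 11 / 15) + 7 = 55 + 7 = 62 degrees Fahrenheit.
 */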
/**
* wdtpci_interrupt:
* @irq: Interrupt number
* @dev_id: Unused as we don't allow multiple devices.
*
* Handle an interrupt from the board. These are raised when the status
* map changes in what the board considers an interesting way. That means
* a failure condition occurring.
*/
static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
{
/*
* Read the status register see what is up and
* then printk it.
*/
unsigned char status;
spin_lock(&wdtpci_lock);
status = inb(WDT_SR);
udelay(8);
pr_crit("status %d\n", status);
if (type == 501) {
if (!(status & WDC_SR_TGOOD)) {
pr_crit("Overheat alarm (%d)\n", inb(WDT_RT));
udelay(8);
}
if (!(status & WDC_SR_PSUOVER))
pr_crit("PSU over voltage\n");
if (!(status & WDC_SR_PSUUNDR))
pr_crit("PSU under voltage\n");
if (tachometer) {
if (!(status & WDC_SR_FANGOOD))
pr_crit("Possible fan fault\n");
}
}
if (!(status & WDC_SR_WCCR)) {
#ifdef SOFTWARE_REBOOT
#ifdef ONLY_TESTING
pr_crit("Would Reboot\n");
#else
pr_crit("Initiating system reboot\n");
emergency_restart();
#endif
#else
pr_crit("Reset in 5ms\n");
#endif
}
spin_unlock(&wdtpci_lock);
return IRQ_HANDLED;
}
/**
* wdtpci_write:
* @file: file handle to the watchdog
 * @buf: buffer to write (unused as data does not matter here)
* @count: count of bytes
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
 * write of data will do, as we don't define content meaning.
*/
static ssize_t wdtpci_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
wdtpci_ping();
}
return count;
}
/**
* wdtpci_ioctl:
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
*
* The watchdog API defines a common set of functions for all watchdogs
* according to their available features. We only actually usefully support
* querying capabilities and current status.
*/
static long wdtpci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int new_heartbeat;
int status;
struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT|
WDIOF_MAGICCLOSE|
WDIOF_KEEPALIVEPING,
.firmware_version = 1,
.identity = "PCI-WDT500/501",
};
/* Add options according to the card we have */
ident.options |= (WDIOF_EXTERN1|WDIOF_EXTERN2);
if (type == 501) {
ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|
WDIOF_POWEROVER);
if (tachometer)
ident.options |= WDIOF_FANFAULT;
}
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
wdtpci_get_status(&status);
return put_user(status, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
wdtpci_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, p))
return -EFAULT;
if (wdtpci_set_heartbeat(new_heartbeat))
return -EINVAL;
wdtpci_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
return -ENOTTY;
}
}
/**
* wdtpci_open:
* @inode: inode of device
* @file: file handle to device
*
* The watchdog device has been opened. The watchdog device is single
* open and on opening we load the counters. Counter zero is a 100Hz
 * cascade into counter 1, which downcounts to reboot. When counter 1
 * triggers, counter 2 downcounts the length of the reset pulse, which
 * is set to be as long as possible.
*/
static int wdtpci_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &open_lock))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
/*
* Activate
*/
wdtpci_start();
return stream_open(inode, file);
}
/**
* wdtpci_release:
* @inode: inode to board
* @file: file handle to board
*
* The watchdog has a configurable API. There is a religious dispute
* between people who want their watchdog to be able to shut down and
* those who want to be sure if the watchdog manager dies the machine
* reboots. In the former case we disable the counters, in the latter
* case you have to open it again very soon.
*/
static int wdtpci_release(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
wdtpci_stop();
} else {
pr_crit("Unexpected close, not stopping timer!\n");
wdtpci_ping();
}
expect_close = 0;
clear_bit(0, &open_lock);
return 0;
}
/**
* wdtpci_temp_read:
* @file: file handle to the watchdog board
* @buf: buffer to write 1 byte into
* @count: length of buffer
* @ptr: offset (no seek allowed)
*
* Read reports the temperature in degrees Fahrenheit. The API is in
* fahrenheit. It was designed by an imperial measurement luddite.
*/
static ssize_t wdtpci_temp_read(struct file *file, char __user *buf,
size_t count, loff_t *ptr)
{
int temperature;
if (wdtpci_get_temperature(&temperature))
return -EFAULT;
if (copy_to_user(buf, &temperature, 1))
return -EFAULT;
return 1;
}
/**
* wdtpci_temp_open:
* @inode: inode of device
* @file: file handle to device
*
* The temperature device has been opened.
*/
static int wdtpci_temp_open(struct inode *inode, struct file *file)
{
return stream_open(inode, file);
}
/**
* wdtpci_temp_release:
* @inode: inode to board
* @file: file handle to board
*
* The temperature device has been closed.
*/
static int wdtpci_temp_release(struct inode *inode, struct file *file)
{
return 0;
}
/**
* wdtpci_notify_sys:
* @this: our notifier block
* @code: the event being reported
* @unused: unused
*
* Our notifier is called on system shutdowns. We want to turn the card
* off at reboot otherwise the machine will reboot again during memory
* test or worse yet during the following fsck. This would suck, in fact
* trust me - if it happens it does suck.
*/
static int wdtpci_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdtpci_stop();
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations wdtpci_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = wdtpci_write,
.unlocked_ioctl = wdtpci_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = wdtpci_open,
.release = wdtpci_release,
};
static struct miscdevice wdtpci_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdtpci_fops,
};
static const struct file_operations wdtpci_temp_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = wdtpci_temp_read,
.open = wdtpci_temp_open,
.release = wdtpci_temp_release,
};
static struct miscdevice temp_miscdev = {
.minor = TEMP_MINOR,
.name = "temperature",
.fops = &wdtpci_temp_fops,
};
/*
* The WDT card needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wdtpci_notifier = {
.notifier_call = wdtpci_notify_sys,
};
static int wdtpci_init_one(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret = -EIO;
dev_count++;
if (dev_count > 1) {
pr_err("This driver only supports one device\n");
return -ENODEV;
}
if (type != 500 && type != 501) {
pr_err("unknown card type '%d'\n", type);
return -ENODEV;
}
if (pci_enable_device(dev)) {
pr_err("Not possible to enable PCI Device\n");
return -ENODEV;
}
if (pci_resource_start(dev, 2) == 0x0000) {
pr_err("No I/O-Address for card detected\n");
ret = -ENODEV;
goto out_pci;
}
if (pci_request_region(dev, 2, "wdt_pci")) {
pr_err("I/O address 0x%llx already in use\n",
(unsigned long long)pci_resource_start(dev, 2));
goto out_pci;
}
irq = dev->irq;
io = pci_resource_start(dev, 2);
if (request_irq(irq, wdtpci_interrupt, IRQF_SHARED,
"wdt_pci", &wdtpci_miscdev)) {
pr_err("IRQ %d is not free\n", irq);
goto out_reg;
}
pr_info("PCI-WDT500/501 (PCI-WDG-CSM) driver 0.10 at 0x%llx (Interrupt %d)\n",
(unsigned long long)io, irq);
/* Check that the heartbeat value is within its range;
if not reset to the default */
if (wdtpci_set_heartbeat(heartbeat)) {
wdtpci_set_heartbeat(WD_TIMO);
pr_info("heartbeat value must be 0 < heartbeat < 65536, using %d\n",
WD_TIMO);
}
ret = register_reboot_notifier(&wdtpci_notifier);
if (ret) {
pr_err("cannot register reboot notifier (err=%d)\n", ret);
goto out_irq;
}
if (type == 501) {
ret = misc_register(&temp_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
TEMP_MINOR, ret);
goto out_rbt;
}
}
ret = misc_register(&wdtpci_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto out_misc;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
if (type == 501)
pr_info("Fan Tachometer is %s\n",
tachometer ? "Enabled" : "Disabled");
ret = 0;
out:
return ret;
out_misc:
if (type == 501)
misc_deregister(&temp_miscdev);
out_rbt:
unregister_reboot_notifier(&wdtpci_notifier);
out_irq:
free_irq(irq, &wdtpci_miscdev);
out_reg:
pci_release_region(dev, 2);
out_pci:
pci_disable_device(dev);
goto out;
}
static void wdtpci_remove_one(struct pci_dev *pdev)
{
/* here we assume only one device will ever have
* been picked up and registered by probe function */
misc_deregister(&wdtpci_miscdev);
if (type == 501)
misc_deregister(&temp_miscdev);
unregister_reboot_notifier(&wdtpci_notifier);
free_irq(irq, &wdtpci_miscdev);
pci_release_region(pdev, 2);
pci_disable_device(pdev);
dev_count--;
}
static const struct pci_device_id wdtpci_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, wdtpci_pci_tbl);
static struct pci_driver wdtpci_driver = {
.name = "wdt_pci",
.id_table = wdtpci_pci_tbl,
.probe = wdtpci_init_one,
.remove = wdtpci_remove_one,
};
module_pci_driver(wdtpci_driver);
MODULE_AUTHOR("JP Nollmann, Alan Cox");
MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/wdt_pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox watchdog driver
*
* Copyright (C) 2019 Mellanox Technologies
* Copyright (C) 2019 Michael Shych <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#define MLXREG_WDT_CLOCK_SCALE 1000
#define MLXREG_WDT_MAX_TIMEOUT_TYPE1 32
#define MLXREG_WDT_MAX_TIMEOUT_TYPE2 255
#define MLXREG_WDT_MAX_TIMEOUT_TYPE3 65535
#define MLXREG_WDT_MIN_TIMEOUT 1
#define MLXREG_WDT_OPTIONS_BASE (WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | \
WDIOF_SETTIMEOUT)
/**
* struct mlxreg_wdt - wd private data:
*
* @wdd: watchdog device;
 * @pdata: data received from platform driver;
 * @regmap: register map of parent device;
 * @action_idx: index for direct access to action register;
 * @timeout_idx: index for direct access to TO register;
 * @tleft_idx: index for direct access to time left register;
 * @ping_idx: index for direct access to ping register;
 * @reset_idx: index for direct access to reset cause register;
 * @regmap_val_sz: size of the regmap value in bytes;
 * @wdt_type: watchdog HW type;
*/
struct mlxreg_wdt {
struct watchdog_device wdd;
struct mlxreg_core_platform_data *pdata;
void *regmap;
int action_idx;
int timeout_idx;
int tleft_idx;
int ping_idx;
int reset_idx;
int regmap_val_sz;
enum mlxreg_wdt_type wdt_type;
};
static void mlxreg_wdt_check_card_reset(struct mlxreg_wdt *wdt)
{
struct mlxreg_core_data *reg_data;
u32 regval;
int rc;
if (wdt->reset_idx == -EINVAL)
return;
if (!(wdt->wdd.info->options & WDIOF_CARDRESET))
return;
reg_data = &wdt->pdata->data[wdt->reset_idx];
rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
if (!rc) {
if (regval & ~reg_data->mask) {
wdt->wdd.bootstatus = WDIOF_CARDRESET;
dev_info(wdt->wdd.parent,
"watchdog previously reset the CPU\n");
}
}
}
static int mlxreg_wdt_start(struct watchdog_device *wdd)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->action_idx];
return regmap_update_bits(wdt->regmap, reg_data->reg, ~reg_data->mask,
BIT(reg_data->bit));
}
static int mlxreg_wdt_stop(struct watchdog_device *wdd)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->action_idx];
return regmap_update_bits(wdt->regmap, reg_data->reg, ~reg_data->mask,
~BIT(reg_data->bit));
}
static int mlxreg_wdt_ping(struct watchdog_device *wdd)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->ping_idx];
return regmap_write_bits(wdt->regmap, reg_data->reg, ~reg_data->mask,
BIT(reg_data->bit));
}
static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->timeout_idx];
u32 regval, set_time, hw_timeout;
int rc;
switch (wdt->wdt_type) {
case MLX_WDT_TYPE1:
rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
if (rc)
return rc;
hw_timeout = order_base_2(timeout * MLXREG_WDT_CLOCK_SCALE);
regval = (regval & reg_data->mask) | hw_timeout;
/* Round down to the closest actual number of seconds. */
set_time = BIT(hw_timeout) / MLXREG_WDT_CLOCK_SCALE;
rc = regmap_write(wdt->regmap, reg_data->reg, regval);
break;
case MLX_WDT_TYPE2:
set_time = timeout;
rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
break;
case MLX_WDT_TYPE3:
/* WD_TYPE3 has a 2-byte set-time register */
set_time = timeout;
if (wdt->regmap_val_sz == 1) {
regval = timeout & 0xff;
rc = regmap_write(wdt->regmap, reg_data->reg, regval);
if (!rc) {
regval = (timeout & 0xff00) >> 8;
rc = regmap_write(wdt->regmap,
reg_data->reg + 1, regval);
}
} else {
rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
}
break;
default:
return -EINVAL;
}
wdd->timeout = set_time;
if (!rc) {
/*
* Restart watchdog with new timeout period
* if watchdog is already started.
*/
if (watchdog_active(wdd)) {
rc = mlxreg_wdt_stop(wdd);
if (!rc)
rc = mlxreg_wdt_start(wdd);
}
}
return rc;
}
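/*
* Worked example for the MLX_WDT_TYPE1 branch above: a requested
* timeout of 10 s yields hw_timeout = order_base_2(10 * 1000) = 14
* (since 2^13 = 8192 < 10000 <= 16384 = 2^14), so the effective
* timeout written back to wdd->timeout is BIT(14) / 1000 = 16 s;
* the integer division is where the rounding down happens.
*/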
static unsigned int mlxreg_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->tleft_idx];
u32 regval, msb, lsb;
int rc;
if (wdt->wdt_type == MLX_WDT_TYPE2) {
rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
} else {
/* WD_TYPE3 has a 2-byte timeleft register */
if (wdt->regmap_val_sz == 1) {
rc = regmap_read(wdt->regmap, reg_data->reg, &lsb);
if (!rc) {
rc = regmap_read(wdt->regmap,
reg_data->reg + 1, &msb);
regval = (msb & 0xff) << 8 | (lsb & 0xff);
}
} else {
rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
}
}
/* Return 0 time left in case the register read failed. */
return rc == 0 ? regval : 0;
}
static const struct watchdog_ops mlxreg_wdt_ops_type1 = {
.start = mlxreg_wdt_start,
.stop = mlxreg_wdt_stop,
.ping = mlxreg_wdt_ping,
.set_timeout = mlxreg_wdt_set_timeout,
.owner = THIS_MODULE,
};
static const struct watchdog_ops mlxreg_wdt_ops_type2 = {
.start = mlxreg_wdt_start,
.stop = mlxreg_wdt_stop,
.ping = mlxreg_wdt_ping,
.set_timeout = mlxreg_wdt_set_timeout,
.get_timeleft = mlxreg_wdt_get_timeleft,
.owner = THIS_MODULE,
};
static const struct watchdog_info mlxreg_wdt_main_info = {
.options = MLXREG_WDT_OPTIONS_BASE
| WDIOF_CARDRESET,
.identity = "mlx-wdt-main",
};
static const struct watchdog_info mlxreg_wdt_aux_info = {
.options = MLXREG_WDT_OPTIONS_BASE
| WDIOF_ALARMONLY,
.identity = "mlx-wdt-aux",
};
static void mlxreg_wdt_config(struct mlxreg_wdt *wdt,
struct mlxreg_core_platform_data *pdata)
{
struct mlxreg_core_data *data = pdata->data;
int i;
wdt->reset_idx = -EINVAL;
for (i = 0; i < pdata->counter; i++, data++) {
if (strnstr(data->label, "action", sizeof(data->label)))
wdt->action_idx = i;
else if (strnstr(data->label, "timeout", sizeof(data->label)))
wdt->timeout_idx = i;
else if (strnstr(data->label, "timeleft", sizeof(data->label)))
wdt->tleft_idx = i;
else if (strnstr(data->label, "ping", sizeof(data->label)))
wdt->ping_idx = i;
else if (strnstr(data->label, "reset", sizeof(data->label)))
wdt->reset_idx = i;
}
wdt->pdata = pdata;
if (strnstr(pdata->identity, mlxreg_wdt_main_info.identity,
sizeof(mlxreg_wdt_main_info.identity)))
wdt->wdd.info = &mlxreg_wdt_main_info;
else
wdt->wdd.info = &mlxreg_wdt_aux_info;
wdt->wdt_type = pdata->version;
switch (wdt->wdt_type) {
case MLX_WDT_TYPE1:
wdt->wdd.ops = &mlxreg_wdt_ops_type1;
wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE1;
break;
case MLX_WDT_TYPE2:
wdt->wdd.ops = &mlxreg_wdt_ops_type2;
wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
break;
case MLX_WDT_TYPE3:
wdt->wdd.ops = &mlxreg_wdt_ops_type2;
wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE3;
break;
default:
break;
}
wdt->wdd.min_timeout = MLXREG_WDT_MIN_TIMEOUT;
}
static int mlxreg_wdt_init_timeout(struct mlxreg_wdt *wdt,
struct mlxreg_core_platform_data *pdata)
{
u32 timeout;
timeout = pdata->data[wdt->timeout_idx].health_cntr;
return mlxreg_wdt_set_timeout(&wdt->wdd, timeout);
}
static int mlxreg_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mlxreg_core_platform_data *pdata;
struct mlxreg_wdt *wdt;
int rc;
pdata = dev_get_platdata(dev);
if (!pdata) {
dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->wdd.parent = dev;
wdt->regmap = pdata->regmap;
rc = regmap_get_val_bytes(wdt->regmap);
if (rc < 0)
return -EINVAL;
wdt->regmap_val_sz = rc;
mlxreg_wdt_config(wdt, pdata);
if ((pdata->features & MLXREG_CORE_WD_FEATURE_NOWAYOUT))
watchdog_set_nowayout(&wdt->wdd, WATCHDOG_NOWAYOUT);
watchdog_stop_on_reboot(&wdt->wdd);
watchdog_stop_on_unregister(&wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, wdt);
rc = mlxreg_wdt_init_timeout(wdt, pdata);
if (rc)
goto register_error;
if ((pdata->features & MLXREG_CORE_WD_FEATURE_START_AT_BOOT)) {
rc = mlxreg_wdt_start(&wdt->wdd);
if (rc)
goto register_error;
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
mlxreg_wdt_check_card_reset(wdt);
rc = devm_watchdog_register_device(dev, &wdt->wdd);
register_error:
if (rc)
dev_err(dev, "Cannot register watchdog device (err=%d)\n", rc);
return rc;
}
static struct platform_driver mlxreg_wdt_driver = {
.probe = mlxreg_wdt_probe,
.driver = {
.name = "mlx-wdt",
},
};
module_platform_driver(mlxreg_wdt_driver);
MODULE_AUTHOR("Michael Shych <[email protected]>");
MODULE_DESCRIPTION("Mellanox watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mlx-wdt");
| linux-master | drivers/watchdog/mlx_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Berkshire PCI-PC Watchdog Card Driver
*
* (c) Copyright 2003-2007 Wim Van Sebroeck <[email protected]>.
*
* Based on source code of the following authors:
* Ken Hollis <[email protected]>,
* Lindsay Harris <[email protected]>,
* Alan Cox <[email protected]>,
* Matt Domsch <[email protected]>,
* Rob Radez <[email protected]>
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*/
/*
* A bells and whistles driver is available from:
* http://www.kernel.org/pub/linux/kernel/people/wim/pcwd/pcwd_pci/
*
* More info available at
* http://www.berkprod.com/ or http://www.pcwatchdog.com/
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
#include <linux/pci.h> /* For pci functions */
#include <linux/ioport.h> /* For io-port access */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
/* Module and version information */
#define WATCHDOG_VERSION "1.03"
#define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog"
#define WATCHDOG_NAME "pcwd_pci"
#define DRIVER_VERSION WATCHDOG_DRIVER_NAME " driver, v" WATCHDOG_VERSION
/* Stuff for the PCI ID's */
#ifndef PCI_VENDOR_ID_QUICKLOGIC
#define PCI_VENDOR_ID_QUICKLOGIC 0x11e3
#endif
#ifndef PCI_DEVICE_ID_WATCHDOG_PCIPCWD
#define PCI_DEVICE_ID_WATCHDOG_PCIPCWD 0x5030
#endif
/*
* These are the defines that describe the control status bits for the
* PCI-PC Watchdog card.
*/
/* Port 1 : Control Status #1 */
#define WD_PCI_WTRP 0x01 /* Watchdog Trip status */
#define WD_PCI_HRBT 0x02 /* Watchdog Heartbeat */
#define WD_PCI_TTRP 0x04 /* Temperature Trip status */
#define WD_PCI_RL2A 0x08 /* Relay 2 Active */
#define WD_PCI_RL1A 0x10 /* Relay 1 Active */
#define WD_PCI_R2DS 0x40 /* Relay 2 Disable Temperature-trip /
reset */
#define WD_PCI_RLY2 0x80 /* Activate Relay 2 on the board */
/* Port 2 : Control Status #2 */
#define WD_PCI_WDIS 0x10 /* Watchdog Disable */
#define WD_PCI_ENTP 0x20 /* Enable Temperature Trip Reset */
#define WD_PCI_WRSP 0x40 /* Watchdog wrote response */
#define WD_PCI_PCMD 0x80 /* PC has sent command */
/* according to the documentation, the max. time to process a command for
* the pci watchdog card is 100 ms, so we give it 150 ms to do its job */
#define PCI_COMMAND_TIMEOUT 150
/* Watchdog's internal commands */
#define CMD_GET_STATUS 0x04
#define CMD_GET_FIRMWARE_VERSION 0x08
#define CMD_READ_WATCHDOG_TIMEOUT 0x18
#define CMD_WRITE_WATCHDOG_TIMEOUT 0x19
#define CMD_GET_CLEAR_RESET_COUNT 0x84
/* Watchdog's Dip Switch heartbeat values */
static const int heartbeat_tbl[] = {
5, /* OFF-OFF-OFF = 5 Sec */
10, /* OFF-OFF-ON = 10 Sec */
30, /* OFF-ON-OFF = 30 Sec */
60, /* OFF-ON-ON = 1 Min */
300, /* ON-OFF-OFF = 5 Min */
600, /* ON-OFF-ON = 10 Min */
1800, /* ON-ON-OFF = 30 Min */
3600, /* ON-ON-ON = 1 hour */
};
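/*
* Illustrative sketch, not from the original driver: the low three
* dip-switch bits index the table above, which is exactly what the
* probe routine below does when the "heartbeat" module parameter is
* left at its default of 0.
*/
#if 0
int option_switches = pcipcwd_get_option_switches();
int hb = heartbeat_tbl[option_switches & 0x07]; /* e.g. 0x3 -> 60 sec */
#endif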
/* We can only use 1 card due to the /dev/watchdog restriction */
static int cards_found;
/* internal variables */
static int temp_panic;
static unsigned long is_active;
static char expect_release;
/* this is private data for each PCI-PC watchdog card */
static struct {
/* Whether or not the card has a temperature device */
int supports_temp;
/* The card's boot status */
int boot_status;
/* The card's I/O address */
unsigned long io_addr;
/* the lock for io operations */
spinlock_t io_lock;
/* the PCI-device */
struct pci_dev *pdev;
} pcipcwd_private;
/* module parameters */
#define QUIET 0 /* Default */
#define VERBOSE 1 /* Verbose */
#define DEBUG 2 /* print fancy stuff too */
static int debug = QUIET;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)");
#define WATCHDOG_HEARTBEAT 0 /* default heartbeat =
delay-time from dip-switches */
static int heartbeat = WATCHDOG_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
"(0<heartbeat<65536 or 0=delay-time from dip-switches, default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Internal functions
*/
static int send_command(int cmd, int *msb, int *lsb)
{
int got_response, count;
if (debug >= DEBUG)
pr_debug("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x\n",
cmd, *msb, *lsb);
spin_lock(&pcipcwd_private.io_lock);
/* If a command requires data it should be written first.
* Data for commands with 8 bits of data should be written to port 4.
* Commands with 16 bits of data, should be written as LSB to port 4
* and MSB to port 5.
* After the required data has been written then write the command to
* port 6. */
outb_p(*lsb, pcipcwd_private.io_addr + 4);
outb_p(*msb, pcipcwd_private.io_addr + 5);
outb_p(cmd, pcipcwd_private.io_addr + 6);
/* wait till the pci card processed the command, signaled by
* the WRSP bit in port 2 and give it a max. timeout of
* PCI_COMMAND_TIMEOUT to process */
got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP;
for (count = 0; (count < PCI_COMMAND_TIMEOUT) && (!got_response);
count++) {
mdelay(1);
got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP;
}
if (debug >= DEBUG) {
if (got_response) {
pr_debug("time to process command was: %d ms\n",
count);
} else {
pr_debug("card did not respond on command!\n");
}
}
if (got_response) {
/* read back response */
*lsb = inb_p(pcipcwd_private.io_addr + 4);
*msb = inb_p(pcipcwd_private.io_addr + 5);
/* clear WRSP bit */
inb_p(pcipcwd_private.io_addr + 6);
if (debug >= DEBUG)
pr_debug("received following data for cmd=0x%02x: msb=0x%02x lsb=0x%02x\n",
cmd, *msb, *lsb);
}
spin_unlock(&pcipcwd_private.io_lock);
return got_response;
}
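/*
* Minimal usage sketch for send_command(), mirroring what
* pcipcwd_show_card_info() below does: inputs go in via *msb/*lsb, the
* card's answer comes back in the same variables, and the return value
* is non-zero only if the card responded within PCI_COMMAND_TIMEOUT ms.
*/
#if 0
int fw_major = 0, fw_minor = 0;

if (send_command(CMD_GET_FIRMWARE_VERSION, &fw_major, &fw_minor))
	pr_info("firmware %u.%02u\n", fw_major, fw_minor);
#endif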
static inline void pcipcwd_check_temperature_support(void)
{
if (inb_p(pcipcwd_private.io_addr) != 0xF0)
pcipcwd_private.supports_temp = 1;
}
static int pcipcwd_get_option_switches(void)
{
int option_switches;
option_switches = inb_p(pcipcwd_private.io_addr + 3);
return option_switches;
}
static void pcipcwd_show_card_info(void)
{
int got_fw_rev, fw_rev_major, fw_rev_minor;
char fw_ver_str[20]; /* The card's firmware version */
int option_switches;
got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major,
&fw_rev_minor);
if (got_fw_rev)
sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor);
else
sprintf(fw_ver_str, "<card no answer>");
/* Get switch settings */
option_switches = pcipcwd_get_option_switches();
pr_info("Found card at port 0x%04x (Firmware: %s) %s temp option\n",
(int) pcipcwd_private.io_addr, fw_ver_str,
(pcipcwd_private.supports_temp ? "with" : "without"));
pr_info("Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n",
option_switches,
((option_switches & 0x10) ? "ON" : "OFF"),
((option_switches & 0x08) ? "ON" : "OFF"));
if (pcipcwd_private.boot_status & WDIOF_CARDRESET)
pr_info("Previous reset was caused by the Watchdog card\n");
if (pcipcwd_private.boot_status & WDIOF_OVERHEAT)
pr_info("Card sensed a CPU Overheat\n");
if (pcipcwd_private.boot_status == 0)
pr_info("No previous trip detected - Cold boot or reset\n");
}
static int pcipcwd_start(void)
{
int stat_reg;
spin_lock(&pcipcwd_private.io_lock);
outb_p(0x00, pcipcwd_private.io_addr + 3);
udelay(1000);
stat_reg = inb_p(pcipcwd_private.io_addr + 2);
spin_unlock(&pcipcwd_private.io_lock);
if (stat_reg & WD_PCI_WDIS) {
pr_err("Card timer not enabled\n");
return -1;
}
if (debug >= VERBOSE)
pr_debug("Watchdog started\n");
return 0;
}
static int pcipcwd_stop(void)
{
int stat_reg;
spin_lock(&pcipcwd_private.io_lock);
outb_p(0xA5, pcipcwd_private.io_addr + 3);
udelay(1000);
outb_p(0xA5, pcipcwd_private.io_addr + 3);
udelay(1000);
stat_reg = inb_p(pcipcwd_private.io_addr + 2);
spin_unlock(&pcipcwd_private.io_lock);
if (!(stat_reg & WD_PCI_WDIS)) {
pr_err("Card did not acknowledge disable attempt\n");
return -1;
}
if (debug >= VERBOSE)
pr_debug("Watchdog stopped\n");
return 0;
}
static int pcipcwd_keepalive(void)
{
/* Re-trigger watchdog by writing to port 0 */
spin_lock(&pcipcwd_private.io_lock);
outb_p(0x42, pcipcwd_private.io_addr); /* send out any data */
spin_unlock(&pcipcwd_private.io_lock);
if (debug >= DEBUG)
pr_debug("Watchdog keepalive signal send\n");
return 0;
}
static int pcipcwd_set_heartbeat(int t)
{
int t_msb = t / 256;
int t_lsb = t % 256;
if ((t < 0x0001) || (t > 0xFFFF))
return -EINVAL;
/* Write new heartbeat to watchdog */
send_command(CMD_WRITE_WATCHDOG_TIMEOUT, &t_msb, &t_lsb);
heartbeat = t;
if (debug >= VERBOSE)
pr_debug("New heartbeat: %d\n", heartbeat);
return 0;
}
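/*
* Worked example for the 16-bit split above: a requested heartbeat of
* t = 300 seconds is sent as t_msb = 300 / 256 = 1 and
* t_lsb = 300 % 256 = 44, i.e. 0x012c.
*/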
static int pcipcwd_get_status(int *status)
{
int control_status;
*status = 0;
control_status = inb_p(pcipcwd_private.io_addr + 1);
if (control_status & WD_PCI_WTRP)
*status |= WDIOF_CARDRESET;
if (control_status & WD_PCI_TTRP) {
*status |= WDIOF_OVERHEAT;
if (temp_panic)
panic(KBUILD_MODNAME ": Temperature overheat trip!\n");
}
if (debug >= DEBUG)
pr_debug("Control Status #1: 0x%02x\n", control_status);
return 0;
}
static int pcipcwd_clear_status(void)
{
int control_status;
int msb;
int reset_counter;
if (debug >= VERBOSE)
pr_info("clearing watchdog trip status & LED\n");
control_status = inb_p(pcipcwd_private.io_addr + 1);
if (debug >= DEBUG) {
pr_debug("status was: 0x%02x\n", control_status);
pr_debug("sending: 0x%02x\n",
(control_status & WD_PCI_R2DS) | WD_PCI_WTRP);
}
/* clear trip status & LED and keep mode of relay 2 */
outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP,
pcipcwd_private.io_addr + 1);
/* clear reset counter */
msb = 0;
reset_counter = 0xff;
send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter);
if (debug >= DEBUG) {
pr_debug("reset count was: 0x%02x\n", reset_counter);
}
return 0;
}
static int pcipcwd_get_temperature(int *temperature)
{
*temperature = 0;
if (!pcipcwd_private.supports_temp)
return -ENODEV;
spin_lock(&pcipcwd_private.io_lock);
*temperature = inb_p(pcipcwd_private.io_addr);
spin_unlock(&pcipcwd_private.io_lock);
/*
* Convert Celsius to Fahrenheit, since this was
* the decided 'standard' for this return value.
*/
*temperature = (*temperature * 9 / 5) + 32;
if (debug >= DEBUG) {
pr_debug("temperature is: %d F\n", *temperature);
}
return 0;
}
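/*
* Worked example for the conversion above: a raw reading of 25 degrees
* Celsius is reported as 25 * 9 / 5 + 32 = 77 degrees Fahrenheit.
*/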
static int pcipcwd_get_timeleft(int *time_left)
{
int msb;
int lsb;
/* Read the time that's left before rebooting */
/* Note: if the board is not yet armed then we will read 0xFFFF */
send_command(CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb);
*time_left = (msb << 8) + lsb;
if (debug >= VERBOSE)
pr_debug("Time left before next reboot: %d\n", *time_left);
return 0;
}
/*
* /dev/watchdog handling
*/
static ssize_t pcipcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
size_t i;
/* note: just in case someone wrote the magic character
* five months ago... */
expect_release = 0;
/* scan to see whether or not we got the
* magic character */
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_release = 42;
}
}
/* someone wrote to us, we should reload the timer */
pcipcwd_keepalive();
}
return len;
}
static long pcipcwd_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_OVERHEAT |
WDIOF_CARDRESET |
WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = WATCHDOG_DRIVER_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
{
int status;
pcipcwd_get_status(&status);
return put_user(status, p);
}
case WDIOC_GETBOOTSTATUS:
return put_user(pcipcwd_private.boot_status, p);
case WDIOC_GETTEMP:
{
int temperature;
if (pcipcwd_get_temperature(&temperature))
return -EFAULT;
return put_user(temperature, p);
}
case WDIOC_SETOPTIONS:
{
int new_options, retval = -EINVAL;
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
if (pcipcwd_stop())
return -EIO;
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
if (pcipcwd_start())
return -EIO;
retval = 0;
}
if (new_options & WDIOS_TEMPPANIC) {
temp_panic = 1;
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
pcipcwd_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_heartbeat;
if (get_user(new_heartbeat, p))
return -EFAULT;
if (pcipcwd_set_heartbeat(new_heartbeat))
return -EINVAL;
pcipcwd_keepalive();
}
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
case WDIOC_GETTIMELEFT:
{
int time_left;
if (pcipcwd_get_timeleft(&time_left))
return -EFAULT;
return put_user(time_left, p);
}
default:
return -ENOTTY;
}
}
static int pcipcwd_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &is_active)) {
if (debug >= VERBOSE)
pr_err("Attempt to open already opened device\n");
return -EBUSY;
}
/* Activate */
pcipcwd_start();
pcipcwd_keepalive();
return stream_open(inode, file);
}
static int pcipcwd_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
*/
if (expect_release == 42) {
pcipcwd_stop();
} else {
pr_crit("Unexpected close, not stopping watchdog!\n");
pcipcwd_keepalive();
}
expect_release = 0;
clear_bit(0, &is_active);
return 0;
}
/*
* /dev/temperature handling
*/
static ssize_t pcipcwd_temp_read(struct file *file, char __user *data,
size_t len, loff_t *ppos)
{
int temperature;
if (pcipcwd_get_temperature(&temperature))
return -EFAULT;
if (copy_to_user(data, &temperature, 1))
return -EFAULT;
return 1;
}
static int pcipcwd_temp_open(struct inode *inode, struct file *file)
{
if (!pcipcwd_private.supports_temp)
return -ENODEV;
return stream_open(inode, file);
}
static int pcipcwd_temp_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* Notify system
*/
static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
pcipcwd_stop(); /* Turn the WDT off */
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations pcipcwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = pcipcwd_write,
.unlocked_ioctl = pcipcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = pcipcwd_open,
.release = pcipcwd_release,
};
static struct miscdevice pcipcwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &pcipcwd_fops,
};
static const struct file_operations pcipcwd_temp_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = pcipcwd_temp_read,
.open = pcipcwd_temp_open,
.release = pcipcwd_temp_release,
};
static struct miscdevice pcipcwd_temp_miscdev = {
.minor = TEMP_MINOR,
.name = "temperature",
.fops = &pcipcwd_temp_fops,
};
static struct notifier_block pcipcwd_notifier = {
.notifier_call = pcipcwd_notify_sys,
};
/*
* Init & exit routines
*/
static int pcipcwd_card_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret = -EIO;
cards_found++;
if (cards_found == 1)
pr_info("%s\n", DRIVER_VERSION);
if (cards_found > 1) {
pr_err("This driver only supports 1 device\n");
return -ENODEV;
}
if (pci_enable_device(pdev)) {
pr_err("Not possible to enable PCI Device\n");
return -ENODEV;
}
if (pci_resource_start(pdev, 0) == 0x0000) {
pr_err("No I/O-Address for card detected\n");
ret = -ENODEV;
goto err_out_disable_device;
}
spin_lock_init(&pcipcwd_private.io_lock);
pcipcwd_private.pdev = pdev;
pcipcwd_private.io_addr = pci_resource_start(pdev, 0);
if (pci_request_regions(pdev, WATCHDOG_NAME)) {
pr_err("I/O address 0x%04x already in use\n",
(int) pcipcwd_private.io_addr);
ret = -EIO;
goto err_out_disable_device;
}
/* get the boot_status */
pcipcwd_get_status(&pcipcwd_private.boot_status);
/* clear the "card caused reboot" flag */
pcipcwd_clear_status();
/* disable card */
pcipcwd_stop();
/* Check whether or not the card supports the temperature device */
pcipcwd_check_temperature_support();
/* Show info about the card itself */
pcipcwd_show_card_info();
/* If heartbeat = 0 then we use the heartbeat from the dip-switches */
if (heartbeat == 0)
heartbeat =
heartbeat_tbl[(pcipcwd_get_option_switches() & 0x07)];
/* Check that the heartbeat value is within its range;
* if not, reset to the default */
if (pcipcwd_set_heartbeat(heartbeat)) {
pcipcwd_set_heartbeat(WATCHDOG_HEARTBEAT);
pr_info("heartbeat value must be 0<heartbeat<65536, using %d\n",
WATCHDOG_HEARTBEAT);
}
ret = register_reboot_notifier(&pcipcwd_notifier);
if (ret != 0) {
pr_err("cannot register reboot notifier (err=%d)\n", ret);
goto err_out_release_region;
}
if (pcipcwd_private.supports_temp) {
ret = misc_register(&pcipcwd_temp_miscdev);
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
TEMP_MINOR, ret);
goto err_out_unregister_reboot;
}
}
ret = misc_register(&pcipcwd_miscdev);
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto err_out_misc_deregister;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
err_out_misc_deregister:
if (pcipcwd_private.supports_temp)
misc_deregister(&pcipcwd_temp_miscdev);
err_out_unregister_reboot:
unregister_reboot_notifier(&pcipcwd_notifier);
err_out_release_region:
pci_release_regions(pdev);
err_out_disable_device:
pci_disable_device(pdev);
return ret;
}
static void pcipcwd_card_exit(struct pci_dev *pdev)
{
/* Stop the timer before we leave */
if (!nowayout)
pcipcwd_stop();
/* Deregister */
misc_deregister(&pcipcwd_miscdev);
if (pcipcwd_private.supports_temp)
misc_deregister(&pcipcwd_temp_miscdev);
unregister_reboot_notifier(&pcipcwd_notifier);
pci_release_regions(pdev);
pci_disable_device(pdev);
cards_found--;
}
static const struct pci_device_id pcipcwd_pci_tbl[] = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl);
static struct pci_driver pcipcwd_driver = {
.name = WATCHDOG_NAME,
.id_table = pcipcwd_pci_tbl,
.probe = pcipcwd_card_init,
.remove = pcipcwd_card_exit,
};
module_pci_driver(pcipcwd_driver);
MODULE_AUTHOR("Wim Van Sebroeck <[email protected]>");
MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/pcwd_pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for Intel Keem Bay non-secure watchdog.
*
* Copyright (C) 2020 Intel Corporation
*/
#include <linux/arm-smccc.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
/* Non-secure watchdog register offsets */
#define TIM_WATCHDOG 0x0
#define TIM_WATCHDOG_INT_THRES 0x4
#define TIM_WDOG_EN 0x8
#define TIM_SAFE 0xc
#define WDT_TH_INT_MASK BIT(8)
#define WDT_TO_INT_MASK BIT(9)
#define WDT_INT_CLEAR_SMC 0x8200ff18
#define WDT_UNLOCK 0xf1d0dead
#define WDT_DISABLE 0x0
#define WDT_ENABLE 0x1
#define WDT_LOAD_MAX U32_MAX
#define WDT_LOAD_MIN 1
#define WDT_TIMEOUT 5
#define WDT_PRETIMEOUT 4
static unsigned int timeout = WDT_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout period in seconds (default = "
__MODULE_STRING(WDT_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = "
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct keembay_wdt {
struct watchdog_device wdd;
struct clk *clk;
unsigned int rate;
int to_irq;
int th_irq;
void __iomem *base;
};
static inline u32 keembay_wdt_readl(struct keembay_wdt *wdt, u32 offset)
{
return readl(wdt->base + offset);
}
static inline void keembay_wdt_writel(struct keembay_wdt *wdt, u32 offset, u32 val)
{
writel(WDT_UNLOCK, wdt->base + TIM_SAFE);
writel(val, wdt->base + offset);
}
static void keembay_wdt_set_timeout_reg(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
keembay_wdt_writel(wdt, TIM_WATCHDOG, wdog->timeout * wdt->rate);
}
static void keembay_wdt_set_pretimeout_reg(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
u32 th_val = 0;
if (wdog->pretimeout)
th_val = wdog->timeout - wdog->pretimeout;
keembay_wdt_writel(wdt, TIM_WATCHDOG_INT_THRES, th_val * wdt->rate);
}
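/*
* Worked example for the threshold write above, using the driver
* defaults: with timeout = 5 s and pretimeout = 4 s the threshold
* register is loaded with (5 - 4) * rate ticks; a pretimeout of 0
* simply writes 0.
*/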
static int keembay_wdt_start(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
keembay_wdt_writel(wdt, TIM_WDOG_EN, WDT_ENABLE);
return 0;
}
static int keembay_wdt_stop(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
keembay_wdt_writel(wdt, TIM_WDOG_EN, WDT_DISABLE);
return 0;
}
static int keembay_wdt_ping(struct watchdog_device *wdog)
{
keembay_wdt_set_timeout_reg(wdog);
return 0;
}
static int keembay_wdt_set_timeout(struct watchdog_device *wdog, u32 t)
{
wdog->timeout = t;
keembay_wdt_set_timeout_reg(wdog);
keembay_wdt_set_pretimeout_reg(wdog);
return 0;
}
static int keembay_wdt_set_pretimeout(struct watchdog_device *wdog, u32 t)
{
if (t > wdog->timeout)
return -EINVAL;
wdog->pretimeout = t;
keembay_wdt_set_pretimeout_reg(wdog);
return 0;
}
static unsigned int keembay_wdt_get_timeleft(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
return keembay_wdt_readl(wdt, TIM_WATCHDOG) / wdt->rate;
}
/*
* SMC call is used to clear the interrupt bits, because the TIM_GEN_CONFIG
* register is in the secure bank.
*/
static irqreturn_t keembay_wdt_to_isr(int irq, void *dev_id)
{
struct keembay_wdt *wdt = dev_id;
struct arm_smccc_res res;
arm_smccc_smc(WDT_INT_CLEAR_SMC, WDT_TO_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
dev_crit(wdt->wdd.parent, "Intel Keem Bay non-secure wdt timeout.\n");
emergency_restart();
return IRQ_HANDLED;
}
static irqreturn_t keembay_wdt_th_isr(int irq, void *dev_id)
{
struct keembay_wdt *wdt = dev_id;
struct arm_smccc_res res;
keembay_wdt_set_pretimeout(&wdt->wdd, 0x0);
arm_smccc_smc(WDT_INT_CLEAR_SMC, WDT_TH_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
dev_crit(wdt->wdd.parent, "Intel Keem Bay non-secure wdt pre-timeout.\n");
watchdog_notify_pretimeout(&wdt->wdd);
return IRQ_HANDLED;
}
static const struct watchdog_info keembay_wdt_info = {
.identity = "Intel Keem Bay Watchdog Timer",
.options = WDIOF_SETTIMEOUT |
WDIOF_PRETIMEOUT |
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
};
static const struct watchdog_ops keembay_wdt_ops = {
.owner = THIS_MODULE,
.start = keembay_wdt_start,
.stop = keembay_wdt_stop,
.ping = keembay_wdt_ping,
.set_timeout = keembay_wdt_set_timeout,
.set_pretimeout = keembay_wdt_set_pretimeout,
.get_timeleft = keembay_wdt_get_timeleft,
};
static int keembay_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keembay_wdt *wdt;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
wdt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
/* we do not need to enable the clock as it is enabled by default */
wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk))
return dev_err_probe(dev, PTR_ERR(wdt->clk), "Failed to get clock\n");
wdt->rate = clk_get_rate(wdt->clk);
if (!wdt->rate)
return dev_err_probe(dev, -EINVAL, "Failed to get clock rate\n");
wdt->th_irq = platform_get_irq_byname(pdev, "threshold");
if (wdt->th_irq < 0)
return dev_err_probe(dev, wdt->th_irq, "Failed to get IRQ for threshold\n");
ret = devm_request_irq(dev, wdt->th_irq, keembay_wdt_th_isr, 0,
"keembay-wdt", wdt);
if (ret)
return dev_err_probe(dev, ret, "Failed to request IRQ for threshold\n");
wdt->to_irq = platform_get_irq_byname(pdev, "timeout");
if (wdt->to_irq < 0)
return dev_err_probe(dev, wdt->to_irq, "Failed to get IRQ for timeout\n");
ret = devm_request_irq(dev, wdt->to_irq, keembay_wdt_to_isr, 0,
"keembay-wdt", wdt);
if (ret)
return dev_err_probe(dev, ret, "Failed to request IRQ for timeout\n");
wdt->wdd.parent = dev;
wdt->wdd.info = &keembay_wdt_info;
wdt->wdd.ops = &keembay_wdt_ops;
wdt->wdd.min_timeout = WDT_LOAD_MIN;
wdt->wdd.max_timeout = WDT_LOAD_MAX / wdt->rate;
wdt->wdd.timeout = WDT_TIMEOUT;
wdt->wdd.pretimeout = WDT_PRETIMEOUT;
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_set_nowayout(&wdt->wdd, nowayout);
watchdog_init_timeout(&wdt->wdd, timeout, dev);
keembay_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
keembay_wdt_set_pretimeout(&wdt->wdd, wdt->wdd.pretimeout);
ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret)
return dev_err_probe(dev, ret, "Failed to register watchdog device.\n");
platform_set_drvdata(pdev, wdt);
dev_info(dev, "Initial timeout %d sec%s.\n",
wdt->wdd.timeout, nowayout ? ", nowayout" : "");
return 0;
}
static int __maybe_unused keembay_wdt_suspend(struct device *dev)
{
struct keembay_wdt *wdt = dev_get_drvdata(dev);
if (watchdog_active(&wdt->wdd))
return keembay_wdt_stop(&wdt->wdd);
return 0;
}
static int __maybe_unused keembay_wdt_resume(struct device *dev)
{
struct keembay_wdt *wdt = dev_get_drvdata(dev);
if (watchdog_active(&wdt->wdd))
return keembay_wdt_start(&wdt->wdd);
return 0;
}
static SIMPLE_DEV_PM_OPS(keembay_wdt_pm_ops, keembay_wdt_suspend,
keembay_wdt_resume);
static const struct of_device_id keembay_wdt_match[] = {
{ .compatible = "intel,keembay-wdt" },
{ }
};
MODULE_DEVICE_TABLE(of, keembay_wdt_match);
static struct platform_driver keembay_wdt_driver = {
.probe = keembay_wdt_probe,
.driver = {
.name = "keembay_wdt",
.of_match_table = keembay_wdt_match,
.pm = &keembay_wdt_pm_ops,
},
};
module_platform_driver(keembay_wdt_driver);
MODULE_DESCRIPTION("Intel Keem Bay SoC watchdog driver");
MODULE_AUTHOR("Wan Ahmad Zainie <[email protected]");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/keembay_wdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Watchdog driver for Ricoh RN5T618 PMIC
*
* Copyright (C) 2014 Beniamino Galvani <[email protected]>
*/
#include <linux/device.h>
#include <linux/mfd/rn5t618.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#define DRIVER_NAME "rn5t618-wdt"
static bool nowayout = WATCHDOG_NOWAYOUT;
static unsigned int timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct rn5t618_wdt {
struct watchdog_device wdt_dev;
struct rn5t618 *rn5t618;
};
/*
* This array encodes the values of WDOGTIM field for the supported
* watchdog expiration times. If the watchdog is not accessed before
* the timer expiration, the PMU generates an interrupt and if the CPU
* doesn't clear it within one second the system is restarted.
*/
static const struct {
u8 reg_val;
unsigned int time;
} rn5t618_wdt_map[] = {
{ 0, 1 },
{ 1, 8 },
{ 2, 32 },
{ 3, 128 },
};
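/*
* Worked example for how rn5t618_wdt_set_timeout() below walks this
* table: the loop stops at the first entry whose time + 1 >= t, so a
* request of t = 10 selects { 2, 32 } (a 32 s timeout) while t = 9
* selects { 1, 8 } (8 s); the "+ 1" presumably tolerates a request one
* second above a supported value, given the one-second interrupt grace
* period described above. Requests above 129 fail with -EINVAL.
*/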
static int rn5t618_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int t)
{
struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
int ret, i;
for (i = 0; i < ARRAY_SIZE(rn5t618_wdt_map); i++) {
if (rn5t618_wdt_map[i].time + 1 >= t)
break;
}
if (i == ARRAY_SIZE(rn5t618_wdt_map))
return -EINVAL;
ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
RN5T618_WATCHDOG_WDOGTIM_M,
rn5t618_wdt_map[i].reg_val);
if (!ret)
wdt_dev->timeout = rn5t618_wdt_map[i].time;
return ret;
}
static int rn5t618_wdt_start(struct watchdog_device *wdt_dev)
{
struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
int ret;
ret = rn5t618_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
if (ret)
return ret;
/* enable repower-on */
ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_REPCNT,
RN5T618_REPCNT_REPWRON,
RN5T618_REPCNT_REPWRON);
if (ret)
return ret;
/* enable watchdog */
ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
RN5T618_WATCHDOG_WDOGEN,
RN5T618_WATCHDOG_WDOGEN);
if (ret)
return ret;
/* enable watchdog interrupt */
return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIREN,
RN5T618_PWRIRQ_IR_WDOG,
RN5T618_PWRIRQ_IR_WDOG);
}
static int rn5t618_wdt_stop(struct watchdog_device *wdt_dev)
{
struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
RN5T618_WATCHDOG_WDOGEN, 0);
}
static int rn5t618_wdt_ping(struct watchdog_device *wdt_dev)
{
struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned int val;
int ret;
/* The counter is restarted after a R/W access to watchdog register */
ret = regmap_read(wdt->rn5t618->regmap, RN5T618_WATCHDOG, &val);
if (ret)
return ret;
ret = regmap_write(wdt->rn5t618->regmap, RN5T618_WATCHDOG, val);
if (ret)
return ret;
/* Clear pending watchdog interrupt */
return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIRQ,
RN5T618_PWRIRQ_IR_WDOG, 0);
}
static const struct watchdog_info rn5t618_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
.identity = DRIVER_NAME,
};
static const struct watchdog_ops rn5t618_wdt_ops = {
.owner = THIS_MODULE,
.start = rn5t618_wdt_start,
.stop = rn5t618_wdt_stop,
.ping = rn5t618_wdt_ping,
.set_timeout = rn5t618_wdt_set_timeout,
};
static int rn5t618_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rn5t618 *rn5t618 = dev_get_drvdata(dev->parent);
struct rn5t618_wdt *wdt;
int min_timeout, max_timeout;
int ret;
unsigned int val;
wdt = devm_kzalloc(dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
min_timeout = rn5t618_wdt_map[0].time;
max_timeout = rn5t618_wdt_map[ARRAY_SIZE(rn5t618_wdt_map) - 1].time;
wdt->rn5t618 = rn5t618;
wdt->wdt_dev.info = &rn5t618_wdt_info;
wdt->wdt_dev.ops = &rn5t618_wdt_ops;
wdt->wdt_dev.min_timeout = min_timeout;
wdt->wdt_dev.max_timeout = max_timeout;
wdt->wdt_dev.timeout = max_timeout;
wdt->wdt_dev.parent = dev;
/* Read out previous power-off factor */
ret = regmap_read(wdt->rn5t618->regmap, RN5T618_POFFHIS, &val);
if (ret)
return ret;
if (val & RN5T618_POFFHIS_VINDET)
wdt->wdt_dev.bootstatus = WDIOF_POWERUNDER;
else if (val & RN5T618_POFFHIS_WDG)
wdt->wdt_dev.bootstatus = WDIOF_CARDRESET;
watchdog_set_drvdata(&wdt->wdt_dev, wdt);
watchdog_init_timeout(&wdt->wdt_dev, timeout, dev);
watchdog_set_nowayout(&wdt->wdt_dev, nowayout);
platform_set_drvdata(pdev, wdt);
return devm_watchdog_register_device(dev, &wdt->wdt_dev);
}
static struct platform_driver rn5t618_wdt_driver = {
.probe = rn5t618_wdt_probe,
.driver = {
.name = DRIVER_NAME,
},
};
module_platform_driver(rn5t618_wdt_driver);
MODULE_ALIAS("platform:rn5t618-wdt");
MODULE_AUTHOR("Beniamino Galvani <[email protected]>");
MODULE_DESCRIPTION("RN5T618 watchdog driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/watchdog/rn5t618_wdt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* sch311x_wdt.c - Driver for the SCH311x Super-I/O chips
* integrated watchdog.
*
* (c) Copyright 2008 Wim Van Sebroeck <[email protected]>.
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Includes */
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/... */
#include <linux/miscdevice.h> /* For struct miscdevice */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/ioport.h> /* For io-port access */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
/* Module and version information */
#define DRV_NAME "sch311x_wdt"
/* Runtime registers */
#define GP60 0x47
#define WDT_TIME_OUT 0x65
#define WDT_VAL 0x66
#define WDT_CFG 0x67
#define WDT_CTRL 0x68
/* internal variables */
static unsigned long sch311x_wdt_is_open;
static char sch311x_wdt_expect_close;
static struct platform_device *sch311x_wdt_pdev;
static int sch311x_ioports[] = { 0x2e, 0x4e, 0x162e, 0x164e, 0x00 };
static struct { /* The devices private data */
/* the Runtime Register base address */
unsigned short runtime_reg;
/* The card's boot status */
int boot_status;
/* the lock for io operations */
spinlock_t io_lock;
} sch311x_wdt_data;
/* Module load parameters */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<= timeout <=15300, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ".");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Super-IO functions
*/
static inline void sch311x_sio_enter(int sio_config_port)
{
outb(0x55, sio_config_port);
}
static inline void sch311x_sio_exit(int sio_config_port)
{
outb(0xaa, sio_config_port);
}
static inline int sch311x_sio_inb(int sio_config_port, int reg)
{
outb(reg, sio_config_port);
return inb(sio_config_port + 1);
}
static inline void sch311x_sio_outb(int sio_config_port, int reg, int val)
{
outb(reg, sio_config_port);
outb(val, sio_config_port + 1);
}
/*
* Watchdog Operations
*/
static void sch311x_wdt_set_timeout(int t)
{
unsigned char timeout_unit = 0x80;
/* When the new timeout is bigger than 255 seconds, we will use minutes */
if (t > 255) {
timeout_unit = 0;
t /= 60;
}
/* -- Watchdog Timeout --
* Bit 0-6 (Reserved)
* Bit 7 WDT Time-out Value Units Select
* (0 = Minutes, 1 = Seconds)
*/
outb(timeout_unit, sch311x_wdt_data.runtime_reg + WDT_TIME_OUT);
/* -- Watchdog Timer Time-out Value --
* Bit 0-7 Binary coded units (0=Disabled, 1..255)
*/
outb(t, sch311x_wdt_data.runtime_reg + WDT_VAL);
}
static void sch311x_wdt_start(void)
{
unsigned char t;
spin_lock(&sch311x_wdt_data.io_lock);
/* set watchdog's timeout */
sch311x_wdt_set_timeout(timeout);
/* enable the watchdog */
/* -- General Purpose I/O Bit 6.0 --
* Bit 0, In/Out: 0 = Output, 1 = Input
* Bit 1, Polarity: 0 = No Invert, 1 = Invert
* Bit 2-3, Function select: 00 = GPI/O, 01 = LED1, 11 = WDT,
* 10 = Either Edge Triggered Intr.4
* Bit 4-6 (Reserved)
* Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain
*/
t = inb(sch311x_wdt_data.runtime_reg + GP60);
outb((t & ~0x0d) | 0x0c, sch311x_wdt_data.runtime_reg + GP60);
spin_unlock(&sch311x_wdt_data.io_lock);
}
static void sch311x_wdt_stop(void)
{
unsigned char t;
spin_lock(&sch311x_wdt_data.io_lock);
/* stop the watchdog */
t = inb(sch311x_wdt_data.runtime_reg + GP60);
outb((t & ~0x0d) | 0x01, sch311x_wdt_data.runtime_reg + GP60);
/* disable timeout by setting it to 0 */
sch311x_wdt_set_timeout(0);
spin_unlock(&sch311x_wdt_data.io_lock);
}
static void sch311x_wdt_keepalive(void)
{
spin_lock(&sch311x_wdt_data.io_lock);
sch311x_wdt_set_timeout(timeout);
spin_unlock(&sch311x_wdt_data.io_lock);
}
static int sch311x_wdt_set_heartbeat(int t)
{
if (t < 1 || t > (255*60))
return -EINVAL;
/* When the new timeout is bigger than 255 seconds,
* we will round up to minutes (with a max of 255) */
if (t > 255)
t = (((t - 1) / 60) + 1) * 60;
timeout = t;
return 0;
}
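/*
* Worked example for the rounding above: a requested heartbeat of
* t = 301 seconds becomes (((301 - 1) / 60) + 1) * 60 = 360 seconds,
* which sch311x_wdt_set_timeout() then programs as 360 / 60 = 6 in
* minute units, since 360 > 255.
*/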
static void sch311x_wdt_get_status(int *status)
{
unsigned char new_status;
*status = 0;
spin_lock(&sch311x_wdt_data.io_lock);
/* -- Watchdog timer control --
* Bit 0 Status Bit: 0 = Timer counting, 1 = Timeout occurred
* Bit 1 Reserved
* Bit 2 Force Timeout: 1 = Forces WD timeout event (self-cleaning)
* Bit 3 P20 Force Timeout enabled:
* 0 = P20 activity does not generate the WD timeout event
* 1 = P20 Allows rising edge of P20, from the keyboard
* controller, to force the WD timeout event.
* Bit 4-7 Reserved
*/
new_status = inb(sch311x_wdt_data.runtime_reg + WDT_CTRL);
if (new_status & 0x01)
*status |= WDIOF_CARDRESET;
spin_unlock(&sch311x_wdt_data.io_lock);
}
/*
* /dev/watchdog handling
*/
static ssize_t sch311x_wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
sch311x_wdt_expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
sch311x_wdt_expect_close = 42;
}
}
sch311x_wdt_keepalive();
}
return count;
}
static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int status;
int new_timeout;
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = DRV_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
{
sch311x_wdt_get_status(&status);
return put_user(status, p);
}
case WDIOC_GETBOOTSTATUS:
return put_user(sch311x_wdt_data.boot_status, p);
case WDIOC_SETOPTIONS:
{
int options, retval = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
sch311x_wdt_stop();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
sch311x_wdt_start();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
sch311x_wdt_keepalive();
break;
case WDIOC_SETTIMEOUT:
if (get_user(new_timeout, p))
return -EFAULT;
if (sch311x_wdt_set_heartbeat(new_timeout))
return -EINVAL;
sch311x_wdt_keepalive();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
static int sch311x_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &sch311x_wdt_is_open))
return -EBUSY;
/*
* Activate
*/
sch311x_wdt_start();
return stream_open(inode, file);
}
static int sch311x_wdt_close(struct inode *inode, struct file *file)
{
if (sch311x_wdt_expect_close == 42) {
sch311x_wdt_stop();
} else {
pr_crit("Unexpected close, not stopping watchdog!\n");
sch311x_wdt_keepalive();
}
clear_bit(0, &sch311x_wdt_is_open);
sch311x_wdt_expect_close = 0;
return 0;
}
/*
* Kernel Interfaces
*/
static const struct file_operations sch311x_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = sch311x_wdt_write,
.unlocked_ioctl = sch311x_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = sch311x_wdt_open,
.release = sch311x_wdt_close,
};
static struct miscdevice sch311x_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &sch311x_wdt_fops,
};
/*
* Init & exit routines
*/
static int sch311x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int err;
spin_lock_init(&sch311x_wdt_data.io_lock);
if (!request_region(sch311x_wdt_data.runtime_reg + GP60, 1, DRV_NAME)) {
dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
sch311x_wdt_data.runtime_reg + GP60,
sch311x_wdt_data.runtime_reg + GP60);
err = -EBUSY;
goto exit;
}
if (!request_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4,
DRV_NAME)) {
dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
sch311x_wdt_data.runtime_reg + WDT_TIME_OUT,
sch311x_wdt_data.runtime_reg + WDT_CTRL);
err = -EBUSY;
goto exit_release_region;
}
/* Make sure that the watchdog is not running */
sch311x_wdt_stop();
/* Disable keyboard and mouse interaction and interrupt */
/* -- Watchdog timer configuration --
* Bit 0 Reserved
* Bit 1 Keyboard enable: 0* = No Reset, 1 = Reset WDT upon KBD Intr.
* Bit 2 Mouse enable: 0* = No Reset, 1 = Reset WDT upon Mouse Intr
* Bit 3 Reserved
* Bit 4-7 WDT Interrupt Mapping: (0000* = Disabled,
* 0001=IRQ1, 0010=(Invalid), 0011=IRQ3 to 1111=IRQ15)
*/
outb(0, sch311x_wdt_data.runtime_reg + WDT_CFG);
/* Check that the heartbeat value is within its range;
* if not, reset to the default */
if (sch311x_wdt_set_heartbeat(timeout)) {
sch311x_wdt_set_heartbeat(WATCHDOG_TIMEOUT);
dev_info(dev, "timeout value must be 1<=x<=15300, using %d\n",
timeout);
}
/* Get status at boot */
sch311x_wdt_get_status(&sch311x_wdt_data.boot_status);
sch311x_wdt_miscdev.parent = dev;
err = misc_register(&sch311x_wdt_miscdev);
if (err != 0) {
dev_err(dev, "cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, err);
goto exit_release_region2;
}
dev_info(dev,
"SMSC SCH311x WDT initialized. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
return 0;
exit_release_region2:
release_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4);
exit_release_region:
release_region(sch311x_wdt_data.runtime_reg + GP60, 1);
sch311x_wdt_data.runtime_reg = 0;
exit:
return err;
}
static void sch311x_wdt_remove(struct platform_device *pdev)
{
/* Stop the timer before we leave */
if (!nowayout)
sch311x_wdt_stop();
/* Deregister */
misc_deregister(&sch311x_wdt_miscdev);
release_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4);
release_region(sch311x_wdt_data.runtime_reg + GP60, 1);
sch311x_wdt_data.runtime_reg = 0;
}
static void sch311x_wdt_shutdown(struct platform_device *dev)
{
/* Turn the WDT off if we have a soft shutdown */
sch311x_wdt_stop();
}
static struct platform_driver sch311x_wdt_driver = {
.probe = sch311x_wdt_probe,
.remove_new = sch311x_wdt_remove,
.shutdown = sch311x_wdt_shutdown,
.driver = {
.name = DRV_NAME,
},
};
static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
{
int err = 0, reg;
unsigned short base_addr;
unsigned char dev_id;
sch311x_sio_enter(sio_config_port);
/* Check device ID. We currently know about:
* SCH3112 (0x7c), SCH3114 (0x7d), and SCH3116 (0x7f). */
reg = force_id ? force_id : sch311x_sio_inb(sio_config_port, 0x20);
if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
err = -ENODEV;
goto exit;
}
dev_id = reg == 0x7c ? 2 : reg == 0x7d ? 4 : 6;
/* Select logical device A (runtime registers) */
sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
/* Check if Logical Device Register is currently active */
if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
pr_info("Seems that LDN 0x0a is not active...\n");
/* Get the base address of the runtime registers */
base_addr = (sch311x_sio_inb(sio_config_port, 0x60) << 8) |
sch311x_sio_inb(sio_config_port, 0x61);
if (!base_addr) {
pr_err("Base address not set\n");
err = -ENODEV;
goto exit;
}
*addr = base_addr;
pr_info("Found an SMSC SCH311%d chip at 0x%04x\n", dev_id, base_addr);
exit:
sch311x_sio_exit(sio_config_port);
return err;
}
static int __init sch311x_wdt_init(void)
{
int err, i, found = 0;
unsigned short addr = 0;
for (i = 0; !found && sch311x_ioports[i]; i++)
if (sch311x_detect(sch311x_ioports[i], &addr) == 0)
found++;
if (!found)
return -ENODEV;
sch311x_wdt_data.runtime_reg = addr;
err = platform_driver_register(&sch311x_wdt_driver);
if (err)
return err;
sch311x_wdt_pdev = platform_device_register_simple(DRV_NAME, addr,
NULL, 0);
if (IS_ERR(sch311x_wdt_pdev)) {
err = PTR_ERR(sch311x_wdt_pdev);
goto unreg_platform_driver;
}
return 0;
unreg_platform_driver:
platform_driver_unregister(&sch311x_wdt_driver);
return err;
}
static void __exit sch311x_wdt_exit(void)
{
platform_device_unregister(sch311x_wdt_pdev);
platform_driver_unregister(&sch311x_wdt_driver);
}
module_init(sch311x_wdt_init);
module_exit(sch311x_wdt_exit);
MODULE_AUTHOR("Wim Van Sebroeck <[email protected]>");
MODULE_DESCRIPTION("SMSC SCH311x WatchDog Timer Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sch311x_wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 International Business Machines, Inc.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/watchdog.h>
#define DRV_NAME "pseries-wdt"
/*
* H_WATCHDOG Input
*
* R4: "flags":
*
* Bits 48-55: "operation"
*/
#define PSERIES_WDTF_OP_START 0x100UL /* start timer */
#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */
#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */
/*
* Bits 56-63: "timeoutAction" (for "Start Watchdog" only)
*/
#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */
#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */
#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */
/*
* H_WATCHDOG Output
*
* R3: Return code
*
* H_SUCCESS The operation completed.
*
* H_BUSY The hypervisor is too busy; retry the operation.
*
* H_PARAMETER The given "flags" are somehow invalid. Either the
* "operation" or "timeoutAction" is invalid, or a
* reserved bit is set.
*
* H_P2 The given "watchdogNumber" is zero or exceeds the
* supported maximum value.
*
* H_P3 The given "timeoutInMs" is below the supported
* minimum value.
*
* H_NOOP The given "watchdogNumber" is already stopped.
*
* H_HARDWARE The operation failed for ineffable reasons.
*
* H_FUNCTION The H_WATCHDOG hypercall is not supported by this
* hypervisor.
*
* R4:
*
* - For the "Query Watchdog Capabilities" operation, a 64-bit
* structure:
*/
#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff)
#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff)
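/*
* Decoding sketch for the capability doubleword above: a returned
* value of cap = 0x03e8000100000000, for example, would give
* PSERIES_WDTQ_MIN_TIMEOUT(cap) = 0x03e8 (a 1000 ms minimum timeout)
* and PSERIES_WDTQ_MAX_NUMBER(cap) = 0x0001 (one supported timer).
*/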
static const unsigned long pseries_wdt_action[] = {
[0] = PSERIES_WDTF_ACTION_HARD_POWEROFF,
[1] = PSERIES_WDTF_ACTION_HARD_RESTART,
[2] = PSERIES_WDTF_ACTION_DUMP_RESTART,
};
#define WATCHDOG_ACTION 1
static unsigned int action = WATCHDOG_ACTION;
module_param(action, uint, 0444);
MODULE_PARM_DESC(action, "Action taken when watchdog expires (default="
__MODULE_STRING(WATCHDOG_ACTION) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#define WATCHDOG_TIMEOUT 60
static unsigned int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, uint, 0444);
MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
struct pseries_wdt {
struct watchdog_device wd;
unsigned long action;
unsigned long num; /* Watchdog numbers are 1-based */
};
static int pseries_wdt_start(struct watchdog_device *wdd)
{
struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
struct device *dev = wdd->parent;
unsigned long flags, msecs;
long rc;
flags = pw->action | PSERIES_WDTF_OP_START;
msecs = wdd->timeout * MSEC_PER_SEC;
rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs);
if (rc != H_SUCCESS) {
dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu",
rc, pw->num);
return -EIO;
}
return 0;
}
static int pseries_wdt_stop(struct watchdog_device *wdd)
{
struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
struct device *dev = wdd->parent;
long rc;
rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num);
if (rc != H_SUCCESS && rc != H_NOOP) {
dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu",
rc, pw->num);
return -EIO;
}
return 0;
}
static struct watchdog_info pseries_wdt_info = {
.identity = DRV_NAME,
.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT
| WDIOF_PRETIMEOUT,
};
static const struct watchdog_ops pseries_wdt_ops = {
.owner = THIS_MODULE,
.start = pseries_wdt_start,
.stop = pseries_wdt_stop,
};
static int pseries_wdt_probe(struct platform_device *pdev)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 };
struct pseries_wdt *pw;
unsigned long cap;
long msecs, rc;
int err;
rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY);
if (rc == H_FUNCTION)
return -ENODEV;
if (rc != H_SUCCESS)
return -EIO;
cap = ret[0];
pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL);
if (!pw)
return -ENOMEM;
/*
* Assume watchdogNumber 1 for now. If we ever support
* multiple timers we will need to devise a way to choose a
* distinct watchdogNumber for each platform device at device
* registration time.
*/
pw->num = 1;
if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num)
return -ENODEV;
if (action >= ARRAY_SIZE(pseries_wdt_action))
return -EINVAL;
pw->action = pseries_wdt_action[action];
pw->wd.parent = &pdev->dev;
pw->wd.info = &pseries_wdt_info;
pw->wd.ops = &pseries_wdt_ops;
msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap);
pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC);
pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */
pw->wd.timeout = timeout;
if (watchdog_init_timeout(&pw->wd, 0, NULL))
return -EINVAL;
watchdog_set_nowayout(&pw->wd, nowayout);
watchdog_stop_on_reboot(&pw->wd);
watchdog_stop_on_unregister(&pw->wd);
watchdog_set_drvdata(&pw->wd, pw);
err = devm_watchdog_register_device(&pdev->dev, &pw->wd);
if (err)
return err;
platform_set_drvdata(pdev, &pw->wd);
return 0;
}
static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
struct watchdog_device *wd = platform_get_drvdata(pdev);
if (watchdog_active(wd))
return pseries_wdt_stop(wd);
return 0;
}
static int pseries_wdt_resume(struct platform_device *pdev)
{
struct watchdog_device *wd = platform_get_drvdata(pdev);
if (watchdog_active(wd))
return pseries_wdt_start(wd);
return 0;
}
static const struct platform_device_id pseries_wdt_id[] = {
{ .name = "pseries-wdt" },
{}
};
MODULE_DEVICE_TABLE(platform, pseries_wdt_id);
static struct platform_driver pseries_wdt_driver = {
.driver = {
.name = DRV_NAME,
},
.id_table = pseries_wdt_id,
.probe = pseries_wdt_probe,
.resume = pseries_wdt_resume,
.suspend = pseries_wdt_suspend,
};
module_platform_driver(pseries_wdt_driver);
MODULE_AUTHOR("Alexey Kardashevskiy");
MODULE_AUTHOR("Scott Cheloha");
MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/pseries-wdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 60xx Single Board Computer Watchdog Timer driver for Linux 2.2.x
*
* Based on acquirewdt.c by Alan Cox.
*
* The author does NOT admit liability nor provide warranty for
* any of this software. This material is provided "AS-IS" in
* the hope that it may be useful for others.
*
* (c) Copyright 2000 Jakob Oestergaard <[email protected]>
*
* 12/4 - 2000 [Initial revision]
* 25/4 - 2000 Added /dev/watchdog support
* 09/5 - 2001 [[email protected]] fixed fop_write to "return 1"
* on success
* 12/4 - 2002 [[email protected]] eliminate fop_read
* fix possible wdt_is_open race
* add CONFIG_WATCHDOG_NOWAYOUT support
* remove lock_kernel/unlock_kernel pairs
* added KERN_* to printk's
* got rid of extraneous comments
* changed watchdog_info to correctly reflect what
* the driver offers
* added WDIOC_GETSTATUS, WDIOC_GETBOOTSTATUS,
* WDIOC_SETTIMEOUT, WDIOC_GETTIMEOUT, and
* WDIOC_SETOPTIONS ioctls
* 09/8 - 2003 [[email protected]] cleanup of trailing spaces
* use module_param
* made timeout (the emulated heartbeat) a
* module_param
* made the keepalive ping an internal subroutine
* made wdt_stop and wdt_start module params
* added extra printk's for startup problems
* added MODULE_AUTHOR and MODULE_DESCRIPTION info
*
* This WDT driver is different from the other Linux WDT
* drivers in the following ways:
* *) The driver will ping the watchdog by itself, because this
* particular WDT has a very short timeout (one second) and it
* would be insane to count on any userspace daemon always
* getting scheduled within that time frame.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#define OUR_NAME "sbc60xxwdt"
#define PFX OUR_NAME ": "
/*
* You must set these - The driver cannot probe for the settings
*/
static int wdt_stop = 0x45;
module_param(wdt_stop, int, 0);
MODULE_PARM_DESC(wdt_stop, "SBC60xx WDT 'stop' io port (default 0x45)");
static int wdt_start = 0x443;
module_param(wdt_start, int, 0);
MODULE_PARM_DESC(wdt_start, "SBC60xx WDT 'start' io port (default 0x443)");
/*
* The 60xx board can use watchdog timeout values from one second
* to several minutes. The default is one second, so if we reset
* the watchdog every ~250ms we should be safe.
*/
#define WDT_INTERVAL (HZ/4+1)
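/*
 * Worked example (illustrative, assuming HZ=250): WDT_INTERVAL is
 * 250/4 + 1 = 63 jiffies, so the timer re-arms roughly every 252 ms,
 * comfortably inside the board's one-second hardware timeout.
 */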
/*
* We must not require too good response from the userspace daemon.
* Here we require the userspace daemon to send us a heartbeat
* char to /dev/watchdog every 30 seconds.
* If the daemon pulses us every 25 seconds, we can still afford
* a 5 second scheduling delay on the (high priority) daemon. That
* should be sufficient for a box under any load.
*/
#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
static int timeout = WATCHDOG_TIMEOUT; /* in seconds, multiplied by HZ to
get jiffies to wait for a ping */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. (1<=timeout<=3600, default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
/*
* Whack the dog
*/
static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the timeout window
* (next_heartbeat) we agree to ping the WDT
*/
if (time_before(jiffies, next_heartbeat)) {
/* Ping the WDT by reading from wdt_start */
inb_p(wdt_start);
/* Re-set the timer interval */
mod_timer(&timer, jiffies + WDT_INTERVAL);
} else
pr_warn("Heartbeat lost! Will not ping the watchdog\n");
}
/*
* Utility routines
*/
static void wdt_startup(void)
{
next_heartbeat = jiffies + (timeout * HZ);
/* Start the timer */
mod_timer(&timer, jiffies + WDT_INTERVAL);
pr_info("Watchdog timer is now enabled\n");
}
static void wdt_turnoff(void)
{
/* Stop the timer */
del_timer_sync(&timer);
inb_p(wdt_stop);
pr_info("Watchdog timer is now disabled...\n");
}
static void wdt_keepalive(void)
{
/* user land ping */
next_heartbeat = jiffies + (timeout * HZ);
}
/*
* /dev/watchdog handling
*/
static ssize_t fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
size_t ofs;
/* note: just in case someone wrote the
magic character five months ago... */
wdt_expect_close = 0;
/* scan to see whether or not we got the
magic character */
for (ofs = 0; ofs != count; ofs++) {
char c;
if (get_user(c, buf + ofs))
return -EFAULT;
if (c == 'V')
wdt_expect_close = 42;
}
}
/* Well, anyhow someone wrote to us, we should
return that favour */
wdt_keepalive();
}
return count;
}
static int fop_open(struct inode *inode, struct file *file)
{
/* Just in case we're already talking to someone... */
if (test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
/* Good, fire up the show */
wdt_startup();
return stream_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
{
if (wdt_expect_close == 42)
wdt_turnoff();
else {
del_timer(&timer);
pr_crit("device file closed unexpectedly. Will not stop the WDT!\n");
}
clear_bit(0, &wdt_is_open);
wdt_expect_close = 0;
return 0;
}
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SBC60xx",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int new_options, retval = -EINVAL;
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
wdt_turnoff();
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
wdt_startup();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_timeout;
if (get_user(new_timeout, p))
return -EFAULT;
/* arbitrary upper limit */
if (new_timeout < 1 || new_timeout > 3600)
return -EINVAL;
timeout = new_timeout;
wdt_keepalive();
}
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_turnoff();
return NOTIFY_DONE;
}
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static void __exit sbc60xxwdt_unload(void)
{
wdt_turnoff();
/* Deregister */
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
if ((wdt_stop != 0x45) && (wdt_stop != wdt_start))
release_region(wdt_stop, 1);
release_region(wdt_start, 1);
}
static int __init sbc60xxwdt_init(void)
{
int rc = -EBUSY;
if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */
timeout = WATCHDOG_TIMEOUT;
pr_info("timeout value must be 1 <= x <= 3600, using %d\n",
timeout);
}
if (!request_region(wdt_start, 1, "SBC 60XX WDT")) {
pr_err("I/O address 0x%04x already in use\n", wdt_start);
rc = -EIO;
goto err_out;
}
/* We cannot reserve 0x45 - the kernel already has! */
if (wdt_stop != 0x45 && wdt_stop != wdt_start) {
if (!request_region(wdt_stop, 1, "SBC 60XX WDT")) {
pr_err("I/O address 0x%04x already in use\n", wdt_stop);
rc = -EIO;
goto err_out_region1;
}
}
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
pr_err("cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region2;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot;
}
pr_info("WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
return 0;
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out_region2:
if (wdt_stop != 0x45 && wdt_stop != wdt_start)
release_region(wdt_stop, 1);
err_out_region1:
release_region(wdt_start, 1);
err_out:
return rc;
}
module_init(sbc60xxwdt_init);
module_exit(sbc60xxwdt_unload);
MODULE_AUTHOR("Jakob Oestergaard <[email protected]>");
MODULE_DESCRIPTION("60xx Single Board Computer Watchdog Timer driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/sbc60xxwdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Watchdog Timer Driver
* for ITE IT87xx Environment Control - Low Pin Count Input / Output
*
* (c) Copyright 2007 Oliver Schuster <[email protected]>
*
* Based on softdog.c by Alan Cox,
* 83977f_wdt.c by Jose Goncalves,
* it87.c by Chris Gauthron, Jean Delvare
*
* Data-sheets: Publicly available at the ITE website
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
* IT8607, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686,
* IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728,
* IT8772, IT8783 and IT8784.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#define WATCHDOG_NAME "IT87 WDT"
/* Defaults for Module Parameter */
#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
#define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT
/* IO Ports */
#define REG 0x2e
#define VAL 0x2f
/* Logical device Numbers LDN */
#define GPIO 0x07
/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
#define CHIPREV 0x22
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
#define IT8607_ID 0x8607
#define IT8620_ID 0x8620
#define IT8622_ID 0x8622
#define IT8625_ID 0x8625
#define IT8628_ID 0x8628
#define IT8655_ID 0x8655
#define IT8665_ID 0x8665
#define IT8686_ID 0x8686
#define IT8702_ID 0x8702
#define IT8705_ID 0x8705
#define IT8712_ID 0x8712
#define IT8716_ID 0x8716
#define IT8718_ID 0x8718
#define IT8720_ID 0x8720
#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet wrongly suggests 0x8716 */

#define IT8728_ID 0x8728
#define IT8772_ID 0x8772
#define IT8783_ID 0x8783
#define IT8784_ID 0x8784
#define IT8786_ID 0x8786
/* GPIO Configuration Registers LDN=0x07 */
#define WDTCTRL 0x71
#define WDTCFG 0x72
#define WDTVALLSB 0x73
#define WDTVALMSB 0x74
/* GPIO Bits WDTCFG */
#define WDT_TOV1 0x80
#define WDT_KRST 0x40
#define WDT_TOVE 0x20
#define WDT_PWROK 0x10 /* not in it8721 */
#define WDT_INT_MASK 0x0f
static unsigned int max_units, chip_type;
static unsigned int timeout = DEFAULT_TIMEOUT;
static int testmode = DEFAULT_TESTMODE;
static bool nowayout = DEFAULT_NOWAYOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, default="
__MODULE_STRING(DEFAULT_TIMEOUT));
module_param(testmode, int, 0);
MODULE_PARM_DESC(testmode, "Watchdog test mode (1 = no reboot), default="
__MODULE_STRING(DEFAULT_TESTMODE));
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started, default="
__MODULE_STRING(WATCHDOG_NOWAYOUT));
/* Superio Chip */
static inline int superio_enter(void)
{
/*
* Try to reserve REG and REG + 1 for exclusive access.
*/
if (!request_muxed_region(REG, 2, WATCHDOG_NAME))
return -EBUSY;
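/*
 * Write ITE's "Enter MB PnP Mode" key sequence (0x87, 0x01, 0x55, 0x55)
 * to the address port; the chip ignores configuration accesses until it
 * has seen this sequence (per the publicly available ITE data sheets).
 */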
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
release_region(REG, 2);
}
static inline void superio_select(int ldn)
{
outb(LDNREG, REG);
outb(ldn, VAL);
}
static inline int superio_inb(int reg)
{
outb(reg, REG);
return inb(VAL);
}
static inline void superio_outb(int val, int reg)
{
outb(reg, REG);
outb(val, VAL);
}
static inline int superio_inw(int reg)
{
int val;
outb(reg++, REG);
val = inb(VAL) << 8;
outb(reg, REG);
val |= inb(VAL);
return val;
}
/* Internal function, should be called after superio_select(GPIO) */
static void _wdt_update_timeout(unsigned int t)
{
unsigned char cfg = WDT_KRST;
if (testmode)
cfg = 0;
if (t <= max_units)
cfg |= WDT_TOV1;
else
t /= 60;
if (chip_type != IT8721_ID)
cfg |= WDT_PWROK;
superio_outb(cfg, WDTCFG);
superio_outb(t, WDTVALLSB);
if (max_units > 255)
superio_outb(t >> 8, WDTVALMSB);
}
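/*
 * Worked example (illustrative): on an IT8702 (max_units = 255) a
 * requested timeout of 300 s exceeds max_units, so WDT_TOV1 stays clear
 * (minute resolution) and t becomes 300 / 60 = 5 minutes; a 60 s timeout
 * fits and is programmed directly with second resolution.
 */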
static int wdt_update_timeout(unsigned int t)
{
int ret;
ret = superio_enter();
if (ret)
return ret;
superio_select(GPIO);
_wdt_update_timeout(t);
superio_exit();
return 0;
}
static int wdt_round_time(int t)
{
t += 59;
t -= t % 60;
return t;
}
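/*
 * For example, wdt_round_time(61) computes 61 + 59 = 120, subtracts
 * 120 % 60 = 0 and returns 120, while wdt_round_time(60) stays at 60:
 * values are rounded up to the next full minute so the
 * minute-granularity hardware never undershoots the request.
 */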
/* watchdog timer handling */
static int wdt_start(struct watchdog_device *wdd)
{
return wdt_update_timeout(wdd->timeout);
}
static int wdt_stop(struct watchdog_device *wdd)
{
return wdt_update_timeout(0);
}
/**
 * wdt_set_timeout - set a new timeout value with watchdog ioctl
 * @wdd: watchdog device
 * @t: timeout value in seconds
 *
 * The hardware device has an 8 or 16 bit watchdog timer (depending on
 * the chip version) that can be configured to count seconds or minutes.
 *
 * Used within the WDIOC_SETTIMEOUT watchdog device ioctl.
 */
static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
int ret = 0;
if (t > max_units)
t = wdt_round_time(t);
wdd->timeout = t;
if (watchdog_hw_running(wdd))
ret = wdt_update_timeout(t);
return ret;
}
static const struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.firmware_version = 1,
.identity = WATCHDOG_NAME,
};
static const struct watchdog_ops wdt_ops = {
.owner = THIS_MODULE,
.start = wdt_start,
.stop = wdt_stop,
.set_timeout = wdt_set_timeout,
};
static struct watchdog_device wdt_dev = {
.info = &ident,
.ops = &wdt_ops,
.min_timeout = 1,
};
static int __init it87_wdt_init(void)
{
u8 chip_rev;
int rc;
rc = superio_enter();
if (rc)
return rc;
chip_type = superio_inw(CHIPID);
chip_rev = superio_inb(CHIPREV) & 0x0f;
superio_exit();
switch (chip_type) {
case IT8702_ID:
max_units = 255;
break;
case IT8712_ID:
max_units = (chip_rev < 8) ? 255 : 65535;
break;
case IT8716_ID:
case IT8726_ID:
max_units = 65535;
break;
case IT8607_ID:
case IT8620_ID:
case IT8622_ID:
case IT8625_ID:
case IT8628_ID:
case IT8655_ID:
case IT8665_ID:
case IT8686_ID:
case IT8718_ID:
case IT8720_ID:
case IT8721_ID:
case IT8728_ID:
case IT8772_ID:
case IT8783_ID:
case IT8784_ID:
case IT8786_ID:
max_units = 65535;
break;
case IT8705_ID:
pr_err("Unsupported Chip found, Chip %04x Revision %02x\n",
chip_type, chip_rev);
return -ENODEV;
case NO_DEV_ID:
pr_err("no device\n");
return -ENODEV;
default:
pr_err("Unknown Chip found, Chip %04x Revision %04x\n",
chip_type, chip_rev);
return -ENODEV;
}
rc = superio_enter();
if (rc)
return rc;
superio_select(GPIO);
superio_outb(WDT_TOV1, WDTCFG);
superio_outb(0x00, WDTCTRL);
superio_exit();
if (timeout < 1 || timeout > max_units * 60) {
timeout = DEFAULT_TIMEOUT;
pr_warn("Timeout value out of range, use default %d sec\n",
DEFAULT_TIMEOUT);
}
if (timeout > max_units)
timeout = wdt_round_time(timeout);
wdt_dev.timeout = timeout;
wdt_dev.max_timeout = max_units * 60;
watchdog_stop_on_reboot(&wdt_dev);
rc = watchdog_register_device(&wdt_dev);
if (rc) {
pr_err("Cannot register watchdog device (err=%d)\n", rc);
return rc;
}
pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
chip_type, chip_rev, timeout, nowayout, testmode);
return 0;
}
static void __exit it87_wdt_exit(void)
{
watchdog_unregister_device(&wdt_dev);
}
module_init(it87_wdt_init);
module_exit(it87_wdt_exit);
MODULE_AUTHOR("Oliver Schuster");
MODULE_DESCRIPTION("Hardware Watchdog Device Driver for IT87xx EC-LPC I/O");
MODULE_LICENSE("GPL");
| linux-master | drivers/watchdog/it87_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog timer driver for the WinSystems EBC-C384
* Copyright (C) 2016 William Breathitt Gray
*/
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#define MODULE_NAME "ebc-c384_wdt"
#define WATCHDOG_TIMEOUT 60
/*
* The timeout value in minutes must fit in a single byte when sent to the
* watchdog timer; the maximum timeout possible is 15300 (255 * 60) seconds.
*/
#define WATCHDOG_MAX_TIMEOUT 15300
#define BASE_ADDR 0x564
#define ADDR_EXTENT 5
#define CFG_ADDR (BASE_ADDR + 1)
#define PET_ADDR (BASE_ADDR + 2)
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static int ebc_c384_wdt_start(struct watchdog_device *wdev)
{
unsigned t = wdev->timeout;
/* resolution is in minutes for timeouts greater than 255 seconds */
if (t > 255)
t = DIV_ROUND_UP(t, 60);
outb(t, PET_ADDR);
return 0;
}
static int ebc_c384_wdt_stop(struct watchdog_device *wdev)
{
outb(0x00, PET_ADDR);
return 0;
}
static int ebc_c384_wdt_set_timeout(struct watchdog_device *wdev, unsigned t)
{
/* resolution is in minutes for timeouts greater than 255 seconds */
if (t > 255) {
/* round second resolution up to minute granularity */
wdev->timeout = roundup(t, 60);
/* set watchdog timer for minutes */
outb(0x00, CFG_ADDR);
} else {
wdev->timeout = t;
/* set watchdog timer for seconds */
outb(0x80, CFG_ADDR);
}
return 0;
}
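/*
 * Illustrative examples: a 200 s request is programmed with second
 * resolution (0x80 written to CFG_ADDR); a 301 s request switches the
 * timer to minute resolution (0x00 written to CFG_ADDR) and is rounded
 * up to 360 s before being stored in wdev->timeout.
 */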
static const struct watchdog_ops ebc_c384_wdt_ops = {
.start = ebc_c384_wdt_start,
.stop = ebc_c384_wdt_stop,
.set_timeout = ebc_c384_wdt_set_timeout
};
static const struct watchdog_info ebc_c384_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT,
.identity = MODULE_NAME
};
static int ebc_c384_wdt_probe(struct device *dev, unsigned int id)
{
struct watchdog_device *wdd;
if (!devm_request_region(dev, BASE_ADDR, ADDR_EXTENT, dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
BASE_ADDR, BASE_ADDR + ADDR_EXTENT);
return -EBUSY;
}
wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
if (!wdd)
return -ENOMEM;
wdd->info = &ebc_c384_wdt_info;
wdd->ops = &ebc_c384_wdt_ops;
wdd->timeout = WATCHDOG_TIMEOUT;
wdd->min_timeout = 1;
wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;
watchdog_set_nowayout(wdd, nowayout);
watchdog_init_timeout(wdd, timeout, dev);
return devm_watchdog_register_device(dev, wdd);
}
static struct isa_driver ebc_c384_wdt_driver = {
.probe = ebc_c384_wdt_probe,
.driver = {
.name = MODULE_NAME
},
};
static int __init ebc_c384_wdt_init(void)
{
if (!dmi_match(DMI_BOARD_NAME, "EBC-C384 SBC"))
return -ENODEV;
return isa_register_driver(&ebc_c384_wdt_driver, 1);
}
static void __exit ebc_c384_wdt_exit(void)
{
isa_unregister_driver(&ebc_c384_wdt_driver);
}
module_init(ebc_c384_wdt_init);
module_exit(ebc_c384_wdt_exit);
MODULE_AUTHOR("William Breathitt Gray <[email protected]>");
MODULE_DESCRIPTION("WinSystems EBC-C384 watchdog timer driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("isa:" MODULE_NAME);
| linux-master | drivers/watchdog/ebc-c384_wdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* virtio_pmem.c: Virtio pmem Driver
*
* Discovers persistent memory range information
* from host and registers the virtual pmem device
* with libnvdimm core.
*/
#include "virtio_pmem.h"
#include "nd.h"
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
{ 0 },
};
/* Initialize virt queue */
static int init_vq(struct virtio_pmem *vpmem)
{
/* single vq */
vpmem->req_vq = virtio_find_single_vq(vpmem->vdev,
virtio_pmem_host_ack, "flush_queue");
if (IS_ERR(vpmem->req_vq))
return PTR_ERR(vpmem->req_vq);
spin_lock_init(&vpmem->pmem_lock);
INIT_LIST_HEAD(&vpmem->req_list);
return 0;
}
static int virtio_pmem_probe(struct virtio_device *vdev)
{
struct nd_region_desc ndr_desc = {};
struct nd_region *nd_region;
struct virtio_pmem *vpmem;
struct resource res;
int err = 0;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem), GFP_KERNEL);
if (!vpmem) {
err = -ENOMEM;
goto out_err;
}
vpmem->vdev = vdev;
vdev->priv = vpmem;
err = init_vq(vpmem);
if (err) {
dev_err(&vdev->dev, "failed to initialize virtio pmem vq's\n");
goto out_err;
}
virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
start, &vpmem->start);
virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
size, &vpmem->size);
res.start = vpmem->start;
res.end = vpmem->start + vpmem->size - 1;
vpmem->nd_desc.provider_name = "virtio-pmem";
vpmem->nd_desc.module = THIS_MODULE;
vpmem->nvdimm_bus = nvdimm_bus_register(&vdev->dev,
&vpmem->nd_desc);
if (!vpmem->nvdimm_bus) {
dev_err(&vdev->dev, "failed to register device with nvdimm_bus\n");
err = -ENXIO;
goto out_vq;
}
dev_set_drvdata(&vdev->dev, vpmem->nvdimm_bus);
ndr_desc.res = &res;
ndr_desc.numa_node = memory_add_physaddr_to_nid(res.start);
ndr_desc.target_node = phys_to_target_node(res.start);
if (ndr_desc.target_node == NUMA_NO_NODE) {
ndr_desc.target_node = ndr_desc.numa_node;
dev_dbg(&vdev->dev, "changing target node from %d to %d",
NUMA_NO_NODE, ndr_desc.target_node);
}
ndr_desc.flush = async_pmem_flush;
ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
/*
* The NVDIMM region could be available before the
* virtio_device_ready() that is called by
* virtio_dev_probe(), so we set device ready here.
*/
virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
return 0;
out_nd:
virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
out_err:
return err;
}
static void virtio_pmem_remove(struct virtio_device *vdev)
{
struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
nvdimm_bus_unregister(nvdimm_bus);
vdev->config->del_vqs(vdev);
virtio_reset_device(vdev);
}
static struct virtio_driver virtio_pmem_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_pmem_probe,
.remove = virtio_pmem_remove,
};
module_virtio_driver(virtio_pmem_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio pmem driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/nvdimm/virtio_pmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"
static DEFINE_IDA(dimm_ida);
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
*/
int nvdimm_check_config_data(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
if (!nvdimm->cmd_mask ||
!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
if (test_bit(NDD_LABELING, &nvdimm->flags))
return -ENXIO;
else
return -ENOTTY;
}
return 0;
}
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
int rc;
if (!ndd)
return -EINVAL;
rc = nvdimm_check_config_data(ndd->dev);
if (rc)
dev_dbg(ndd->dev, "%ps: %s error: %d\n",
__builtin_return_address(0), __func__, rc);
return rc;
}
/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc;
int rc = validate_dimm(ndd);
int cmd_rc = 0;
if (rc)
return rc;
if (cmd->config_size)
return 0; /* already valid */
memset(cmd, 0, sizeof(*cmd));
nd_desc = nvdimm_bus->nd_desc;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
if (rc < 0)
return rc;
return cmd_rc;
}
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
size_t offset, size_t len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
size_t max_cmd_size, buf_offset;
if (rc)
return rc;
if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
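/*
 * Transfer the config area in max_xfer-sized chunks. For example
 * (illustrative), a 128 KiB config area with a 4 KiB max_xfer takes
 * 32 GET_CONFIG_DATA calls, each advancing buf_offset by
 * cmd->in_length.
 */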
for (buf_offset = 0; len;
len -= cmd->in_length, buf_offset += cmd->in_length) {
size_t cmd_size;
cmd->in_offset = offset + buf_offset;
cmd->in_length = min(max_cmd_size, len);
cmd_size = sizeof(*cmd) + cmd->in_length;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
/* out_buf should be valid, copy it into our output buffer */
memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
}
kvfree(cmd);
return rc;
}
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len)
{
size_t max_cmd_size, buf_offset;
struct nd_cmd_set_config_hdr *cmd;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
if (rc)
return rc;
if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
for (buf_offset = 0; len; len -= cmd->in_length,
buf_offset += cmd->in_length) {
size_t cmd_size;
cmd->in_offset = offset + buf_offset;
cmd->in_length = min(max_cmd_size, len);
memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);
/* status is output in the last 4-bytes of the command buffer */
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
}
kvfree(cmd);
return rc;
}
void nvdimm_set_labeling(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
set_bit(NDD_LABELING, &nvdimm->flags);
}
void nvdimm_set_locked(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
set_bit(NDD_LOCKED, &nvdimm->flags);
}
void nvdimm_clear_locked(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
clear_bit(NDD_LOCKED, &nvdimm->flags);
}
static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
ida_simple_remove(&dimm_ida, nvdimm->id);
kfree(nvdimm);
}
struct nvdimm *to_nvdimm(struct device *dev)
{
struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
WARN_ON(!is_nvdimm(dev));
return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
struct nvdimm *nvdimm = nd_mapping->nvdimm;
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);
void nvdimm_drvdata_release(struct kref *kref)
{
struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
struct device *dev = ndd->dev;
struct resource *res, *_r;
dev_dbg(dev, "trace\n");
nvdimm_bus_lock(dev);
for_each_dpa_resource_safe(ndd, res, _r)
nvdimm_free_dpa(ndd, res);
nvdimm_bus_unlock(dev);
kvfree(ndd->data);
kfree(ndd);
put_device(dev);
}
void get_ndd(struct nvdimm_drvdata *ndd)
{
kref_get(&ndd->kref);
}
void put_ndd(struct nvdimm_drvdata *ndd)
{
if (ndd)
kref_put(&ndd->kref, nvdimm_drvdata_release);
}
const char *nvdimm_name(struct nvdimm *nvdimm)
{
return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
if (nvdimm)
return nvdimm->provider_data;
return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);
static ssize_t commands_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
int cmd, len = 0;
if (!nvdimm->cmd_mask)
return sprintf(buf, "\n");
for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
len += sprintf(buf + len, "\n");
return len;
}
static DEVICE_ATTR_RO(commands);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
return sprintf(buf, "%s%s\n",
test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
/*
* The state may be in the process of changing, userspace should
* quiesce probing if it wants a static answer
*/
nvdimm_bus_lock(dev);
nvdimm_bus_unlock(dev);
return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
struct device *dev;
ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
dev = ndd->dev;
nvdimm_bus_lock(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
nfree = 0;
} else
nfree--;
rc = sprintf(buf, "%d\n", nfree);
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t available_slots_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t rc;
device_lock(dev);
rc = __available_slots_show(dev_get_drvdata(dev), buf);
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(available_slots);
static ssize_t security_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
/*
* For the test version we need to poll the "hardware" in order
* to get the updated status for unlock testing.
*/
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
return -ENOTTY;
}
static ssize_t frozen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);
static ssize_t security_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
ssize_t rc;
/*
* Require all userspace triggered security management to be
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = nvdimm_security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RW(security);
static struct attribute *nvdimm_attributes[] = {
&dev_attr_state.attr,
&dev_attr_flags.attr,
&dev_attr_commands.attr,
&dev_attr_available_slots.attr,
&dev_attr_security.attr,
&dev_attr_frozen.attr,
NULL,
};
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
return a->mode;
if (!nvdimm->sec.flags)
return 0;
if (a == &dev_attr_security.attr) {
/* Are there any state mutation ops (make writable)? */
if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
|| nvdimm->sec.ops->change_key
|| nvdimm->sec.ops->erase
|| nvdimm->sec.ops->overwrite)
return a->mode;
return 0444;
}
if (nvdimm->sec.ops->freeze)
return a->mode;
return 0;
}
static const struct attribute_group nvdimm_attribute_group = {
.attrs = nvdimm_attributes,
.is_visible = nvdimm_visible,
};
static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
enum nvdimm_fwa_result result;
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
nvdimm_bus_lock(dev);
result = nvdimm->fw_ops->activate_result(nvdimm);
nvdimm_bus_unlock(dev);
switch (result) {
case NVDIMM_FWA_RESULT_NONE:
return sprintf(buf, "none\n");
case NVDIMM_FWA_RESULT_SUCCESS:
return sprintf(buf, "success\n");
case NVDIMM_FWA_RESULT_FAIL:
return sprintf(buf, "fail\n");
case NVDIMM_FWA_RESULT_NOTSTAGED:
return sprintf(buf, "not_staged\n");
case NVDIMM_FWA_RESULT_NEEDRESET:
return sprintf(buf, "need_reset\n");
default:
return -ENXIO;
}
}
static DEVICE_ATTR_ADMIN_RO(result);
static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
enum nvdimm_fwa_state state;
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
nvdimm_bus_lock(dev);
state = nvdimm->fw_ops->activate_state(nvdimm);
nvdimm_bus_unlock(dev);
switch (state) {
case NVDIMM_FWA_IDLE:
return sprintf(buf, "idle\n");
case NVDIMM_FWA_BUSY:
return sprintf(buf, "busy\n");
case NVDIMM_FWA_ARMED:
return sprintf(buf, "armed\n");
default:
return -ENXIO;
}
}
static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
enum nvdimm_fwa_trigger arg;
int rc;
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
if (sysfs_streq(buf, "arm"))
arg = NVDIMM_FWA_ARM;
else if (sysfs_streq(buf, "disarm"))
arg = NVDIMM_FWA_DISARM;
else
return -EINVAL;
nvdimm_bus_lock(dev);
rc = nvdimm->fw_ops->arm(nvdimm, arg);
nvdimm_bus_unlock(dev);
if (rc < 0)
return rc;
return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);
static struct attribute *nvdimm_firmware_attributes[] = {
&dev_attr_activate.attr,
&dev_attr_result.attr,
NULL,
};
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
struct nvdimm *nvdimm = to_nvdimm(dev);
enum nvdimm_fwa_capability cap;
if (!nd_desc->fw_ops)
return 0;
if (!nvdimm->fw_ops)
return 0;
nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
return a->mode;
}
static const struct attribute_group nvdimm_firmware_attribute_group = {
.name = "firmware",
.attrs = nvdimm_firmware_attributes,
.is_visible = nvdimm_firmware_visible,
};
static const struct attribute_group *nvdimm_attribute_groups[] = {
&nd_device_attribute_group,
&nvdimm_attribute_group,
&nvdimm_firmware_attribute_group,
NULL,
};
static const struct device_type nvdimm_device_type = {
.name = "nvdimm",
.release = nvdimm_release,
.groups = nvdimm_attribute_groups,
};
bool is_nvdimm(const struct device *dev)
{
return dev->type == &nvdimm_device_type;
}
static struct lock_class_key nvdimm_key;
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id,
const struct nvdimm_security_ops *sec_ops,
const struct nvdimm_fw_ops *fw_ops)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
if (!nvdimm)
return NULL;
nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
if (nvdimm->id < 0) {
kfree(nvdimm);
return NULL;
}
nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
nvdimm->flush_wpq = flush_wpq;
atomic_set(&nvdimm->busy, 0);
dev = &nvdimm->dev;
dev_set_name(dev, "nmem%d", nvdimm->id);
dev->parent = &nvdimm_bus->dev;
dev->type = &nvdimm_device_type;
dev->devt = MKDEV(nvdimm_major, nvdimm->id);
dev->groups = groups;
nvdimm->sec.ops = sec_ops;
nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0;
INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
/*
* Security state must be initialized before device_add() for
* attribute visibility.
*/
/* get security state and extended (master) state */
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
device_initialize(dev);
lockdep_set_class(&dev->mutex, &nvdimm_key);
if (test_bit(NDD_REGISTER_SYNC, &flags))
nd_device_register_sync(dev);
else
nd_device_register(dev);
return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
void nvdimm_delete(struct nvdimm *nvdimm)
{
struct device *dev = &nvdimm->dev;
bool dev_put = false;
/* We are shutting down. Make state frozen artificially. */
nvdimm_bus_lock(dev);
set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
dev_put = true;
nvdimm_bus_unlock(dev);
cancel_delayed_work_sync(&nvdimm->dwork);
if (dev_put)
put_device(dev);
nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);
static void shutdown_security_notify(void *data)
{
struct nvdimm *nvdimm = data;
sysfs_put(nvdimm->sec.overwrite_state);
}
int nvdimm_security_setup_events(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
if (!nvdimm->sec.flags || !nvdimm->sec.ops
|| !nvdimm->sec.ops->overwrite)
return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
return -ENOMEM;
return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
int rc;
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
return -EOPNOTSUPP;
if (!nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
return -EBUSY;
}
rc = nvdimm->sec.ops->freeze(nvdimm);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
static unsigned long dpa_align(struct nd_region *nd_region)
{
struct device *dev = &nd_region->dev;
if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
"bus lock required for capacity provision\n"))
return 0;
if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
% nd_region->ndr_mappings,
"invalid region align %#lx mappings: %d\n",
nd_region->align, nd_region->ndr_mappings))
return 0;
return nd_region->align / nd_region->ndr_mappings;
}
/**
* nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
* contiguous unallocated dpa range.
* @nd_region: constrain available space check to this reference region
* @nd_mapping: container of dpa-resource-root + labels
*/
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nvdimm_bus *nvdimm_bus;
resource_size_t max = 0;
struct resource *res;
unsigned long align;
/* if a dimm is disabled the available capacity is zero */
if (!ndd)
return 0;
align = dpa_align(nd_region);
if (!align)
return 0;
nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
return 0;
for_each_dpa_resource(ndd, res) {
resource_size_t start, end;
if (strcmp(res->name, "pmem-reserve") != 0)
continue;
/* trim free space relative to current alignment setting */
start = ALIGN(res->start, align);
end = ALIGN_DOWN(res->end + 1, align) - 1;
if (end < start)
continue;
if (end - start + 1 > max)
max = end - start + 1;
}
release_free_pmem(nvdimm_bus, nd_mapping);
return max;
}
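/*
 * Illustrative trim example: with a 4 MiB per-DIMM alignment, a free
 * "pmem-reserve" span covering [5 MiB, 14 MiB) is trimmed to the
 * aligned window [8 MiB, 12 MiB), so it contributes 4 MiB to the
 * maximum contiguous range.
 */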
/**
* nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
* @nd_mapping: container of dpa-resource-root + labels
* @nd_region: constrain available space check to this reference region
*
* Validate that a PMEM label, if present, aligns with the start of an
* interleave set.
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_start, map_end, busy = 0;
struct resource *res;
unsigned long align;
if (!ndd)
return 0;
align = dpa_align(nd_region);
if (!align)
return 0;
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
for_each_dpa_resource(ndd, res) {
resource_size_t start, end;
start = ALIGN_DOWN(res->start, align);
end = ALIGN(res->end + 1, align) - 1;
if (start >= map_start && start < map_end) {
if (end > map_end) {
nd_dbg_dpa(nd_region, ndd, res,
"misaligned to iset\n");
return 0;
}
busy += end - start + 1;
} else if (end >= map_start && end <= map_end) {
busy += end - start + 1;
} else if (map_start > start && map_start < end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
}
}
if (busy < nd_mapping->size)
return ALIGN_DOWN(nd_mapping->size - busy, align);
return 0;
}
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
kfree(res->name);
__release_region(&ndd->dpa, res->start, resource_size(res));
}
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id, resource_size_t start,
resource_size_t n)
{
char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
struct resource *res;
if (!name)
return NULL;
WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
res = __request_region(&ndd->dpa, start, n, name, 0);
if (!res)
kfree(name);
return res;
}
/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id)
{
resource_size_t allocated = 0;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id->id) == 0)
allocated += resource_size(res);
return allocated;
}
static int count_dimms(struct device *dev, void *c)
{
int *count = c;
if (is_nvdimm(dev))
(*count)++;
return 0;
}
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
int count = 0;
/* Flush any possible dimm registration failures */
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
if (count != dimm_count)
return -ENXIO;
return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
void __exit nvdimm_devs_exit(void)
{
ida_destroy(&dimm_ida);
}
| linux-master | drivers/nvdimm/dimm_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Persistent Memory Driver
*
* Copyright (c) 2014-2015, Intel Corporation.
* Copyright (c) 2015, Christoph Hellwig <[email protected]>.
* Copyright (c) 2015, Boaz Harrosh <[email protected]>.
*/
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/kstrtox.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"
static struct device *to_dev(struct pmem_device *pmem)
{
/*
* nvdimm bus services need a 'dev' parameter, and we record the device
* at init in bb.dev.
*/
return pmem->bb.dev;
}
static struct nd_region *to_region(struct pmem_device *pmem)
{
return to_nd_region(to_dev(pmem)->parent);
}
static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
return pmem->phys_addr + offset;
}
static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
{
return (offset - pmem->data_offset) >> SECTOR_SHIFT;
}
static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
{
return (sector << SECTOR_SHIFT) + pmem->data_offset;
}
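/*
 * Example (illustrative): with a 2 MiB data_offset, sector 8 maps to
 * byte offset (8 << 9) + 2 MiB = 4096 + 2097152, and to_sect() inverts
 * that calculation exactly.
 */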
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
phys_addr_t phys = pmem_to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
if (is_vmalloc_addr(pmem->virt_addr))
return;
pfn_start = PHYS_PFN(phys);
pfn_end = pfn_start + PHYS_PFN(len);
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
struct page *page = pfn_to_page(pfn);
/*
* Note, no need to hold a get_dev_pagemap() reference
* here since we're in the driver I/O path and
* outstanding I/O requests pin the dev_pagemap.
*/
if (test_and_clear_pmem_poison(page))
clear_mce_nospec(pfn);
}
}
static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
if (blks == 0)
return;
badblocks_clear(&pmem->bb, sector, blks);
if (pmem->bb_state)
sysfs_notify_dirent(pmem->bb_state);
}
static long __pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
phys_addr_t phys = pmem_to_phys(pmem, offset);
long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
if (cleared > 0) {
pmem_mkpage_present(pmem, offset, cleared);
arch_invalidate_pmem(pmem->virt_addr + offset, len);
}
return cleared;
}
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
long cleared = __pmem_clear_poison(pmem, offset, len);
if (cleared < 0)
return BLK_STS_IOERR;
pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
if (cleared < len)
return BLK_STS_IOERR;
return BLK_STS_OK;
}
static void write_pmem(void *pmem_addr, struct page *page,
unsigned int off, unsigned int len)
{
unsigned int chunk;
void *mem;
while (len) {
mem = kmap_atomic(page);
chunk = min_t(unsigned int, len, PAGE_SIZE - off);
memcpy_flushcache(pmem_addr, mem + off, chunk);
kunmap_atomic(mem);
len -= chunk;
off = 0;
page++;
pmem_addr += chunk;
}
}
static blk_status_t read_pmem(struct page *page, unsigned int off,
void *pmem_addr, unsigned int len)
{
unsigned int chunk;
unsigned long rem;
void *mem;
while (len) {
mem = kmap_atomic(page);
chunk = min_t(unsigned int, len, PAGE_SIZE - off);
rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
kunmap_atomic(mem);
if (rem)
return BLK_STS_IOERR;
len -= chunk;
off = 0;
page++;
pmem_addr += chunk;
}
return BLK_STS_OK;
}
static blk_status_t pmem_do_read(struct pmem_device *pmem,
struct page *page, unsigned int page_off,
sector_t sector, unsigned int len)
{
blk_status_t rc;
phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
return BLK_STS_IOERR;
rc = read_pmem(page, page_off, pmem_addr, len);
flush_dcache_page(page);
return rc;
}
static blk_status_t pmem_do_write(struct pmem_device *pmem,
struct page *page, unsigned int page_off,
sector_t sector, unsigned int len)
{
phys_addr_t pmem_off = to_offset(pmem, sector);
void *pmem_addr = pmem->virt_addr + pmem_off;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);
if (rc != BLK_STS_OK)
return rc;
}
flush_dcache_page(page);
write_pmem(pmem_addr, page, page_off, len);
return BLK_STS_OK;
}
static void pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
bool do_acct;
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
struct nd_region *nd_region = to_region(pmem);
if (bio->bi_opf & REQ_PREFLUSH)
ret = nvdimm_flush(nd_region, bio);
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
if (op_is_write(bio_op(bio)))
rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
iter.bi_sector, bvec.bv_len);
else
rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
iter.bi_sector, bvec.bv_len);
if (rc) {
bio->bi_status = rc;
break;
}
}
if (do_acct)
bio_end_io_acct(bio, start);
if (bio->bi_opf & REQ_FUA)
ret = nvdimm_flush(nd_region, bio);
if (ret)
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
}
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
struct badblocks *bb = &pmem->bb;
sector_t first_bad;
int num_bad;
if (kaddr)
*kaddr = pmem->virt_addr + offset;
if (pfn)
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
if (bb->count &&
badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
long actual_nr;
if (mode != DAX_RECOVERY_WRITE)
return -EHWPOISON;
/*
* The recovery stride is set to the kernel page size because the
* underlying driver and firmware clear-poison functions don't
* appear to handle large chunks (such as 2MiB) reliably.
*/
actual_nr = PHYS_PFN(
PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
sector, nr_pages, first_bad, actual_nr);
if (actual_nr)
return actual_nr;
return 1;
}
/*
* If badblocks are present but not in the range, limit known good range
* to the requested range.
*/
if (bb->count)
return nr_pages;
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.submit_bio = pmem_submit_bio,
};
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
PFN_PHYS(pgoff) >> SECTOR_SHIFT,
PAGE_SIZE));
}
static long pmem_dax_direct_access(struct dax_device *dax_dev,
pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
void **kaddr, pfn_t *pfn)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}
/*
* The recovery write thread started out as a normal pwrite thread and
* when the filesystem was told about potential media error in the
* range, filesystem turns the normal pwrite to a dax_recovery_write.
*
* The recovery write consists of clearing media poison, clearing page
* HWPoison bit, reenable page-wide read-write permission, flush the
* caches and finally write. A competing pread thread will be held
* off during the recovery process since data read back might not be
* valid, and this is achieved by clearing the badblock records after
* the recovery write is complete. Competing recovery write threads
* are already serialized by writer lock held by dax_iomap_rw().
*/
static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
size_t olen, len, off;
phys_addr_t pmem_off;
struct device *dev = pmem->bb.dev;
long cleared;
off = offset_in_page(addr);
len = PFN_PHYS(PFN_UP(off + bytes));
if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
return _copy_from_iter_flushcache(addr, bytes, i);
/*
* A range that is not page-aligned cannot be recovered. This should
* not happen unless something else went wrong.
*/
if (off || !PAGE_ALIGNED(bytes)) {
dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
addr, bytes);
return 0;
}
pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
cleared = __pmem_clear_poison(pmem, pmem_off, len);
if (cleared > 0 && cleared < len) {
dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
cleared, len);
return 0;
}
if (cleared < 0) {
dev_dbg(dev, "poison clear failed: %ld\n", cleared);
return 0;
}
olen = _copy_from_iter_flushcache(addr, bytes, i);
pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);
return olen;
}
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
.zero_page_range = pmem_dax_zero_page_range,
.recovery_write = pmem_recovery_write,
};
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pmem_device *pmem = dev_to_disk(dev)->private_data;
return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}
static ssize_t write_cache_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct pmem_device *pmem = dev_to_disk(dev)->private_data;
bool write_cache;
int rc;
rc = kstrtobool(buf, &write_cache);
if (rc)
return rc;
dax_write_cache(pmem->dax_dev, write_cache);
return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
if (a == &dev_attr_write_cache.attr)
return 0;
#endif
return a->mode;
}
static struct attribute *dax_attributes[] = {
&dev_attr_write_cache.attr,
NULL,
};
static const struct attribute_group dax_attribute_group = {
.name = "dax",
.attrs = dax_attributes,
.is_visible = dax_visible,
};
static const struct attribute_group *pmem_attribute_groups[] = {
&dax_attribute_group,
NULL,
};
static void pmem_release_disk(void *__pmem)
{
struct pmem_device *pmem = __pmem;
dax_remove_host(pmem->disk);
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
put_disk(pmem->disk);
}
static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
struct pmem_device *pmem =
container_of(pgmap, struct pmem_device, pgmap);
u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
u64 len = nr_pages << PAGE_SHIFT;
return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
}
static const struct dev_pagemap_ops fsdax_pagemap_ops = {
.memory_failure = pmem_pagemap_memory_failure,
};
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
struct range bb_range;
struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev;
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct request_queue *q;
struct gendisk *disk;
void *addr;
int rc;
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
return -ENOMEM;
rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return rc;
/* while nsio_rw_bytes is active, parse a pfn info block if present */
if (is_nd_pfn(dev)) {
nd_pfn = to_nd_pfn(dev);
rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
if (rc)
return rc;
}
/* we're attaching a block device, disable raw namespace access */
devm_namespace_disable(dev, ndns);
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
fua = nvdimm_has_flush(nd_region);
if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
disk = blk_alloc_disk(nid);
if (!disk)
return -ENOMEM;
q = disk->queue;
pmem->disk = disk;
pmem->pgmap.owner = pmem;
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
pmem->pfn_pad = resource_size(res) -
range_len(&pmem->pgmap.range);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
bb_range.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
pmem->pgmap.range.start = res->start;
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
bb_range.start = res->start;
bb_range.end = res->end;
}
if (IS_ERR(addr)) {
rc = PTR_ERR(addr);
goto out;
}
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, fua);
blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
disk->fops = &pmem_fops;
disk->private_data = pmem;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
disk->bb = &pmem->bb;
dax_dev = alloc_dax(pmem, &pmem_dax_ops);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto out;
}
set_dax_nocache(dax_dev);
set_dax_nomc(dax_dev);
if (is_nvdimm_sync(nd_region))
set_dax_synchronous(dax_dev);
rc = dax_add_host(dax_dev, disk);
if (rc)
goto out_cleanup_dax;
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
rc = device_add_disk(dev, disk, pmem_attribute_groups);
if (rc)
goto out_remove_host;
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
nvdimm_check_and_set_ro(disk);
pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
"badblocks");
if (!pmem->bb_state)
dev_warn(dev, "'badblocks' notification disabled\n");
return 0;
out_remove_host:
dax_remove_host(pmem->disk);
out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
out:
put_disk(pmem->disk);
return rc;
}
static int nd_pmem_probe(struct device *dev)
{
int ret;
struct nd_namespace_common *ndns;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
if (is_nd_pfn(dev))
return pmem_attach_disk(dev, ndns);
ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (ret)
return ret;
ret = nd_btt_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
	/*
	 * We have two failure conditions here: there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
ret = nd_pfn_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
else if (ret == -EOPNOTSUPP)
return ret;
ret = nd_dax_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
else if (ret == -EOPNOTSUPP)
return ret;
/* probe complete, attach handles namespace enabling */
devm_namespace_disable(dev, ndns);
return pmem_attach_disk(dev, ndns);
}
static void nd_pmem_remove(struct device *dev)
{
struct pmem_device *pmem = dev_get_drvdata(dev);
if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
* Note, this assumes device_lock() context to not
* race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
}
nvdimm_flush(to_nd_region(dev->parent), NULL);
}
static void nd_pmem_shutdown(struct device *dev)
{
nvdimm_flush(to_nd_region(dev->parent), NULL);
}
static void pmem_revalidate_poison(struct device *dev)
{
struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
struct badblocks *bb;
struct range range;
struct kernfs_node *bb_state;
if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
ndns = nd_btt->ndns;
nd_region = to_nd_region(ndns->dev.parent);
nsio = to_nd_namespace_io(&ndns->dev);
bb = &nsio->bb;
bb_state = NULL;
} else {
struct pmem_device *pmem = dev_get_drvdata(dev);
nd_region = to_region(pmem);
bb = &pmem->bb;
bb_state = pmem->bb_state;
if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
ndns = nd_pfn->ndns;
offset = pmem->data_offset +
__le32_to_cpu(pfn_sb->start_pad);
end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
} else {
ndns = to_ndns(dev);
}
nsio = to_nd_namespace_io(&ndns->dev);
}
range.start = nsio->res.start + offset;
range.end = nsio->res.end - end_trunc;
nvdimm_badblocks_populate(nd_region, bb, &range);
if (bb_state)
sysfs_notify_dirent(bb_state);
}
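/*
 * Example of the range computed above, assuming a pfn-mode namespace
 * with res = [0x100000000, 0x17fffffff], data_offset = 2M and zero
 * start_pad/end_trunc: badblocks are repopulated for
 * [0x100200000, 0x17fffffff], i.e. only the user-visible capacity
 * behind the pfn metadata is rescanned.
 */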
static void pmem_revalidate_region(struct device *dev)
{
struct pmem_device *pmem;
if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
struct btt *btt = nd_btt->btt;
nvdimm_check_and_set_ro(btt->btt_disk);
return;
}
pmem = dev_get_drvdata(dev);
nvdimm_check_and_set_ro(pmem->disk);
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
switch (event) {
case NVDIMM_REVALIDATE_POISON:
pmem_revalidate_poison(dev);
break;
case NVDIMM_REVALIDATE_REGION:
pmem_revalidate_region(dev);
break;
default:
dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
break;
}
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
.probe = nd_pmem_probe,
.remove = nd_pmem_remove,
.notify = nd_pmem_notify,
.shutdown = nd_pmem_shutdown,
.drv = {
.name = "nd_pmem",
},
.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
module_nd_driver(nd_pmem_driver);
MODULE_AUTHOR("Ross Zwisler <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/nvdimm/pmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
static void nd_dax_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_dax *nd_dax = to_nd_dax(dev);
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
dev_dbg(dev, "trace\n");
nd_detach_ndns(dev, &nd_pfn->ndns);
ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
kfree(nd_pfn->uuid);
kfree(nd_dax);
}
struct nd_dax *to_nd_dax(struct device *dev)
{
struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
WARN_ON(!is_nd_dax(dev));
return nd_dax;
}
EXPORT_SYMBOL(to_nd_dax);
static const struct device_type nd_dax_device_type = {
.name = "nd_dax",
.release = nd_dax_release,
.groups = nd_pfn_attribute_groups,
};
bool is_nd_dax(const struct device *dev)
{
return dev ? dev->type == &nd_dax_device_type : false;
}
EXPORT_SYMBOL(is_nd_dax);
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
struct nd_dax *nd_dax;
struct device *dev;
nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
if (!nd_dax)
return NULL;
nd_pfn = &nd_dax->nd_pfn;
nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
if (nd_pfn->id < 0) {
kfree(nd_dax);
return NULL;
}
dev = &nd_pfn->dev;
dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
dev->type = &nd_dax_device_type;
dev->parent = &nd_region->dev;
return nd_dax;
}
struct device *nd_dax_create(struct nd_region *nd_region)
{
struct device *dev = NULL;
struct nd_dax *nd_dax;
if (!is_memory(&nd_region->dev))
return NULL;
nd_dax = nd_dax_alloc(nd_region);
if (nd_dax)
dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
nd_device_register(dev);
return dev;
}
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
struct nd_dax *nd_dax;
struct device *dax_dev;
struct nd_pfn *nd_pfn;
struct nd_pfn_sb *pfn_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
if (ndns->force_raw)
return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_DAX:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev);
nd_dax = nd_dax_alloc(nd_region);
nd_pfn = &nd_dax->nd_pfn;
dax_dev = nd_pfn_devinit(nd_pfn, ndns);
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
if (rc < 0) {
nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev);
} else
nd_device_register(dax_dev);
return rc;
}
EXPORT_SYMBOL(nd_dax_probe);
| linux-master | drivers/nvdimm/dax_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
struct nvdimm_bus *nvdimm_bus;
if (!ndns)
return;
nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
ndns->claim = NULL;
*_ndns = NULL;
put_device(&ndns->dev);
}
void nd_detach_ndns(struct device *dev,
struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
if (!ndns)
return;
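	/*
	 * Hold a local reference so the namespace device cannot go away
	 * while the claim is dropped under the bus lock.
	 */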
get_device(&ndns->dev);
nvdimm_bus_lock(&ndns->dev);
__nd_detach_ndns(dev, _ndns);
nvdimm_bus_unlock(&ndns->dev);
put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
if (attach->claim)
return false;
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
attach->claim = dev;
*_ndns = attach;
get_device(&attach->dev);
return true;
}
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns)
{
bool claimed;
nvdimm_bus_lock(&attach->dev);
claimed = __nd_attach_ndns(dev, attach, _ndns);
nvdimm_bus_unlock(&attach->dev);
return claimed;
}
static int namespace_match(struct device *dev, void *data)
{
char *name = data;
return strcmp(name, dev_name(dev)) == 0;
}
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct device *seed = NULL;
if (is_nd_btt(dev))
seed = nd_region->btt_seed;
else if (is_nd_pfn(dev))
seed = nd_region->pfn_seed;
else if (is_nd_dax(dev))
seed = nd_region->dax_seed;
if (seed == dev || ndns || dev->driver)
return false;
return true;
}
struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful to perform the correct device-to-nd_pfn
	 * conversion.
	 */
if (is_nd_pfn(dev))
return to_nd_pfn(dev);
if (is_nd_dax(dev)) {
struct nd_dax *nd_dax = to_nd_dax(dev);
return &nd_dax->nd_pfn;
}
WARN_ON(1);
return NULL;
}
static void nd_detach_and_reset(struct device *dev,
struct nd_namespace_common **_ndns)
{
/* detach the namespace and destroy / reset the device */
__nd_detach_ndns(dev, _ndns);
if (is_idle(dev, *_ndns)) {
nd_device_unregister(dev, ND_ASYNC);
} else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
nd_btt->lbasize = 0;
kfree(nd_btt->uuid);
nd_btt->uuid = NULL;
} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
kfree(nd_pfn->uuid);
nd_pfn->uuid = NULL;
nd_pfn->mode = PFN_MODE_NONE;
}
}
ssize_t nd_namespace_store(struct device *dev,
struct nd_namespace_common **_ndns, const char *buf,
size_t len)
{
struct nd_namespace_common *ndns;
struct device *found;
char *name;
if (dev->driver) {
dev_dbg(dev, "namespace already active\n");
return -EBUSY;
}
name = kstrndup(buf, len, GFP_KERNEL);
if (!name)
return -ENOMEM;
strim(name);
if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
/* pass */;
else {
len = -EINVAL;
goto out;
}
ndns = *_ndns;
if (strcmp(name, "") == 0) {
nd_detach_and_reset(dev, _ndns);
goto out;
} else if (ndns) {
dev_dbg(dev, "namespace already set to: %s\n",
dev_name(&ndns->dev));
len = -EBUSY;
goto out;
}
found = device_find_child(dev->parent, name, namespace_match);
if (!found) {
dev_dbg(dev, "'%s' not found under %s\n", name,
dev_name(dev->parent));
len = -ENODEV;
goto out;
}
ndns = to_ndns(found);
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
break;
case NVDIMM_CCLASS_BTT:
case NVDIMM_CCLASS_BTT2:
if (!is_nd_btt(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
case NVDIMM_CCLASS_PFN:
if (!is_nd_pfn(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
case NVDIMM_CCLASS_DAX:
if (!is_nd_dax(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
default:
len = -EBUSY;
goto out_attach;
break;
}
if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
dev_dbg(dev, "%s too small to host\n", name);
len = -ENXIO;
goto out_attach;
}
WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
if (!__nd_attach_ndns(dev, ndns, _ndns)) {
dev_dbg(dev, "%s already claimed\n",
dev_name(&ndns->dev));
len = -EBUSY;
}
out_attach:
put_device(&ndns->dev); /* from device_find_child */
out:
kfree(name);
return len;
}
/*
* nd_sb_checksum: compute checksum for a generic info block
*
* Returns a fletcher64 checksum of everything in the given info block
* except the last field (since that's where the checksum lives).
*/
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
u64 sum;
__le64 sum_save;
BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
sum_save = nd_gen_sb->checksum;
nd_gen_sb->checksum = 0;
sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
nd_gen_sb->checksum = sum_save;
return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
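/*
 * Illustrative usage sketch (not a driver entry point): a caller
 * validating a freshly read info block can compare in place, since
 * nd_sb_checksum() zeroes the checksum field before summing and
 * restores it afterwards:
 *
 *	if (le64_to_cpu(sb->checksum) != nd_sb_checksum(sb))
 *		return -ENODEV;
 */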
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *buf, size_t size, int rw,
unsigned long flags)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
sector_t sector = offset >> 9;
int rc = 0, ret = 0;
if (unlikely(!size))
return 0;
if (unlikely(offset + size > nsio->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
return -EFAULT;
}
if (rw == READ) {
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
return -EIO;
return 0;
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
&& !(flags & NVDIMM_IO_ATOMIC)) {
long cleared;
might_sleep();
cleared = nvdimm_clear_poison(&ndns->dev,
nsio->res.start + offset, size);
if (cleared < size)
rc = -EIO;
if (cleared > 0 && cleared / 512) {
cleared /= 512;
badblocks_clear(&nsio->bb, sector, cleared);
}
arch_invalidate_pmem(nsio->addr + offset, size);
} else
rc = -EIO;
}
memcpy_flushcache(nsio->addr + offset, buf, size);
ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
if (ret)
rc = ret;
return rc;
}
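/*
 * Worked example of the write path above, assuming a badblock at
 * sector 8: a write with offset = 4096 and size = 512 is 512-byte
 * aligned, so (absent NVDIMM_IO_ATOMIC) it clears 512 bytes of poison,
 * drops the record via badblocks_clear(&nsio->bb, 8, 1), invalidates
 * the stale cachelines, and only then does the memcpy_flushcache().
 * An unaligned write over the same sector fails with -EIO instead.
 */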
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
resource_size_t size)
{
struct nd_namespace_common *ndns = &nsio->common;
struct range range = {
.start = nsio->res.start,
.end = nsio->res.end,
};
nsio->size = size;
if (!devm_request_mem_region(dev, range.start, size,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
return -EBUSY;
}
ndns->rw_bytes = nsio_rw_bytes;
if (devm_init_badblocks(dev, &nsio->bb))
return -ENOMEM;
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
&range);
nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
struct resource *res = &nsio->res;
devm_memunmap(dev, nsio->addr);
devm_exit_badblocks(dev, &nsio->bb);
devm_release_mem_region(dev, res->start, nsio->size);
}
| linux-master | drivers/nvdimm/claim.c |
// SPDX-License-Identifier: GPL-2.0
/*
* virtio_pmem.c: Virtio pmem Driver
*
* Discovers persistent memory range information
* from host and provides a virtio based flushing
* interface.
*/
#include "virtio_pmem.h"
#include "nd.h"
/* The interrupt handler */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
struct virtio_pmem *vpmem = vq->vdev->priv;
struct virtio_pmem_request *req_data, *req_buf;
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&vpmem->pmem_lock, flags);
while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
req_data->done = true;
wake_up(&req_data->host_acked);
if (!list_empty(&vpmem->req_list)) {
req_buf = list_first_entry(&vpmem->req_list,
struct virtio_pmem_request, list);
req_buf->wq_buf_avail = true;
wake_up(&req_buf->wq_buf);
list_del(&req_buf->list);
}
}
spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);
/* The request submission function */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
struct virtio_device *vdev = nd_region->provider_data;
struct virtio_pmem *vpmem = vdev->priv;
struct virtio_pmem_request *req_data;
struct scatterlist *sgs[2], sg, ret;
unsigned long flags;
int err, err1;
might_sleep();
req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
if (!req_data)
return -ENOMEM;
req_data->done = false;
init_waitqueue_head(&req_data->host_acked);
init_waitqueue_head(&req_data->wq_buf);
INIT_LIST_HEAD(&req_data->list);
req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
sgs[0] = &sg;
sg_init_one(&ret, &req_data->resp.ret, sizeof(req_data->resp));
sgs[1] = &ret;
spin_lock_irqsave(&vpmem->pmem_lock, flags);
	/*
	 * If virtqueue_add_sgs returns -ENOSPC then the req_vq virtual
	 * queue does not have a free descriptor. We add the request
	 * to req_list and wait for host_ack to wake us up when free
	 * slots are available.
	 */
while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
GFP_ATOMIC)) == -ENOSPC) {
dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
req_data->wq_buf_avail = false;
list_add_tail(&req_data->list, &vpmem->req_list);
spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
/* A host response results in "host_ack" getting called */
wait_event(req_data->wq_buf, req_data->wq_buf_avail);
spin_lock_irqsave(&vpmem->pmem_lock, flags);
}
err1 = virtqueue_kick(vpmem->req_vq);
spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
	/*
	 * If virtqueue_add_sgs failed with an error other than -ENOSPC,
	 * we can't do anything about it.
	 */
if (err || !err1) {
dev_info(&vdev->dev, "failed to send command to virtio pmem device\n");
err = -EIO;
} else {
		/* A host response results in "host_ack" getting called */
wait_event(req_data->host_acked, req_data->done);
err = le32_to_cpu(req_data->resp.ret);
}
kfree(req_data);
return err;
};
/* The asynchronous flush callback function */
int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
{
	/*
	 * Create a child bio for the asynchronous flush and chain it with
	 * the parent bio; the child's sector of -1 marks it as a pure
	 * flush with no data. Otherwise, call the nd_region flush
	 * directly.
	 */
if (bio && bio->bi_iter.bi_sector != -1) {
struct bio *child = bio_alloc(bio->bi_bdev, 0,
REQ_OP_WRITE | REQ_PREFLUSH,
GFP_ATOMIC);
if (!child)
return -ENOMEM;
bio_clone_blkg_association(child, bio);
child->bi_iter.bi_sector = -1;
bio_chain(child, bio);
submit_bio(child);
return 0;
}
if (virtio_pmem_flush(nd_region))
return -EIO;
return 0;
};
EXPORT_SYMBOL_GPL(async_pmem_flush);
MODULE_LICENSE("GPL");
| linux-master | drivers/nvdimm/nd_virtio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
static int nd_region_probe(struct device *dev)
{
int err, rc;
static unsigned long once;
struct nd_region_data *ndrd;
struct nd_region *nd_region = to_nd_region(dev);
struct range range = {
.start = nd_region->ndr_start,
.end = nd_region->ndr_start + nd_region->ndr_size - 1,
};
if (nd_region->num_lanes > num_online_cpus()
&& nd_region->num_lanes < num_possible_cpus()
&& !test_and_set_bit(0, &once)) {
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
num_online_cpus(), nd_region->num_lanes,
num_possible_cpus());
dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
nd_region->num_lanes);
}
rc = nd_region_activate(nd_region);
if (rc)
return rc;
if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV;
nd_region->bb_state =
sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
if (!nd_region->bb_state)
dev_warn(dev, "'badblocks' notification disabled\n");
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
rc = nd_region_register_namespaces(nd_region, &err);
if (rc < 0)
return rc;
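	/* rc counts the namespaces registered so far, err the failures */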
ndrd = dev_get_drvdata(dev);
ndrd->ns_active = rc;
ndrd->ns_count = rc + err;
if (rc && err && rc == err)
return -ENODEV;
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
if (err == 0)
return 0;
/*
* Given multiple namespaces per region, we do not want to
* disable all the successfully registered peer namespaces upon
* a single registration failure. If userspace is missing a
 * namespace that it expects, it can disable/re-enable the region
* to retry discovery after correcting the failure.
* <regionX>/namespaces returns the current
* "<async-registered>/<total>" namespace count.
*/
dev_err(dev, "failed to register %d namespace%s, continuing...\n",
err, err == 1 ? "" : "s");
return 0;
}
static int child_unregister(struct device *dev, void *data)
{
nd_device_unregister(dev, ND_SYNC);
return 0;
}
static void nd_region_remove(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev);
device_for_each_child(dev, NULL, child_unregister);
/* flush attribute readers and disable */
nvdimm_bus_lock(dev);
nd_region->ns_seed = NULL;
nd_region->btt_seed = NULL;
nd_region->pfn_seed = NULL;
nd_region->dax_seed = NULL;
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
/*
* Note, this assumes device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
nd_region->bb_state = NULL;
/*
* Try to flush caches here since a disabled region may be subject to
* secure erase while disabled, and previous dirty data should not be
* written back to a new instance of the region. This only matters on
* bare metal where security commands are available, so silent failure
* here is ok.
*/
if (cpu_cache_has_invalidate_memregion())
cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}
static int child_notify(struct device *dev, void *data)
{
nd_device_notify(dev, *(enum nvdimm_event *) data);
return 0;
}
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
if (event == NVDIMM_REVALIDATE_POISON) {
struct nd_region *nd_region = to_nd_region(dev);
if (is_memory(&nd_region->dev)) {
struct range range = {
.start = nd_region->ndr_start,
.end = nd_region->ndr_start +
nd_region->ndr_size - 1,
};
nvdimm_badblocks_populate(nd_region,
&nd_region->bb, &range);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
}
}
device_for_each_child(dev, &event, child_notify);
}
static struct nd_device_driver nd_region_driver = {
.probe = nd_region_probe,
.remove = nd_region_remove,
.notify = nd_region_notify,
.drv = {
.name = "nd_region",
},
.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};
int __init nd_region_init(void)
{
return nd_driver_register(&nd_region_driver);
}
void nd_region_exit(void)
{
driver_unregister(&nd_region_driver.drv);
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
| linux-master | drivers/nvdimm/region.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "btt.h"
#include "nd.h"
static void nd_btt_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_btt *nd_btt = to_nd_btt(dev);
dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
kfree(nd_btt->uuid);
kfree(nd_btt);
}
struct nd_btt *to_nd_btt(struct device *dev)
{
struct nd_btt *nd_btt = container_of(dev, struct nd_btt, dev);
WARN_ON(!is_nd_btt(dev));
return nd_btt;
}
EXPORT_SYMBOL(to_nd_btt);
static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
return nd_size_select_show(nd_btt->lbasize, btt_lbasize_supported, buf);
}
static ssize_t sector_size_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t uuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
if (nd_btt->uuid)
return sprintf(buf, "%pUb\n", nd_btt->uuid);
return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
rc = sprintf(buf, "%s\n", nd_btt->ndns
? dev_name(&nd_btt->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RW(namespace);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver)
rc = sprintf(buf, "%llu\n", nd_btt->size);
else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(size);
static ssize_t log_zero_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Y\n");
}
static DEVICE_ATTR_RO(log_zero_flags);
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_size.attr,
&dev_attr_log_zero_flags.attr,
NULL,
};
static struct attribute_group nd_btt_attribute_group = {
.attrs = nd_btt_attributes,
};
static const struct attribute_group *nd_btt_attribute_groups[] = {
&nd_btt_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
};
static const struct device_type nd_btt_device_type = {
.name = "nd_btt",
.release = nd_btt_release,
.groups = nd_btt_attribute_groups,
};
bool is_nd_btt(struct device *dev)
{
return dev->type == &nd_btt_device_type;
}
EXPORT_SYMBOL(is_nd_btt);
static struct lock_class_key nvdimm_btt_key;
static struct device *__nd_btt_create(struct nd_region *nd_region,
unsigned long lbasize, uuid_t *uuid,
struct nd_namespace_common *ndns)
{
struct nd_btt *nd_btt;
struct device *dev;
nd_btt = kzalloc(sizeof(*nd_btt), GFP_KERNEL);
if (!nd_btt)
return NULL;
nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
if (nd_btt->id < 0)
goto out_nd_btt;
nd_btt->lbasize = lbasize;
if (uuid) {
uuid = kmemdup(uuid, 16, GFP_KERNEL);
if (!uuid)
goto out_put_id;
}
nd_btt->uuid = uuid;
dev = &nd_btt->dev;
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
dev->parent = &nd_region->dev;
dev->type = &nd_btt_device_type;
device_initialize(&nd_btt->dev);
lockdep_set_class(&nd_btt->dev.mutex, &nvdimm_btt_key);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
dev_name(ndns->claim));
put_device(dev);
return NULL;
}
return dev;
out_put_id:
ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
out_nd_btt:
kfree(nd_btt);
return NULL;
}
struct device *nd_btt_create(struct nd_region *nd_region)
{
struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL);
nd_device_register(dev);
return dev;
}
/**
* nd_btt_arena_is_valid - check if the metadata layout is valid
* @nd_btt: device with BTT geometry and backing device info
* @super: pointer to the arena's info block being tested
*
* Check consistency of the btt info block with itself by validating
 * the checksum, and with the parent namespace by comparing the
 * parent_uuid contained in the info block against the parent
 * namespace's uuid.
*
* Returns:
* false for an invalid info block, true for a valid one
*/
bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
{
const uuid_t *ns_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
uuid_t parent_uuid;
u64 checksum;
if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
return false;
import_uuid(&parent_uuid, super->parent_uuid);
if (!uuid_is_null(&parent_uuid))
if (!uuid_equal(&parent_uuid, ns_uuid))
return false;
checksum = le64_to_cpu(super->checksum);
super->checksum = 0;
if (checksum != nd_sb_checksum((struct nd_gen_sb *) super))
return false;
super->checksum = cpu_to_le64(checksum);
/* TODO: figure out action for this */
if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
dev_info(&nd_btt->dev, "Found arena with an error flag\n");
return true;
}
EXPORT_SYMBOL(nd_btt_arena_is_valid);
int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
struct btt_sb *btt_sb)
{
if (ndns->claim_class == NVDIMM_CCLASS_BTT2) {
/* Probe/setup for BTT v2.0 */
nd_btt->initial_offset = 0;
nd_btt->version_major = 2;
nd_btt->version_minor = 0;
if (nvdimm_read_bytes(ndns, 0, btt_sb, sizeof(*btt_sb), 0))
return -ENXIO;
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 2) ||
(le16_to_cpu(btt_sb->version_minor) != 0))
return -ENODEV;
} else {
/*
* Probe/setup for BTT v1.1 (NVDIMM_CCLASS_NONE or
* NVDIMM_CCLASS_BTT)
*/
nd_btt->initial_offset = SZ_4K;
nd_btt->version_major = 1;
nd_btt->version_minor = 1;
if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
return -ENXIO;
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 1) ||
(le16_to_cpu(btt_sb->version_minor) != 1))
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL(nd_btt_version);
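/*
 * Summary of the two on-media layouts probed above: a v2.0 BTT (claim
 * class NVDIMM_CCLASS_BTT2) keeps its info block at offset 0 of the
 * namespace, while a v1.1 BTT keeps it at a 4K offset, which is why
 * initial_offset differs between the two cases.
 */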
static int __nd_btt_probe(struct nd_btt *nd_btt,
struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
{
int rc;
if (!btt_sb || !ndns || !nd_btt)
return -ENODEV;
if (nvdimm_namespace_capacity(ndns) < SZ_16M)
return -ENXIO;
rc = nd_btt_version(nd_btt, ndns, btt_sb);
if (rc < 0)
return rc;
nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
nd_btt->uuid = kmemdup(&btt_sb->uuid, sizeof(uuid_t), GFP_KERNEL);
if (!nd_btt->uuid)
return -ENOMEM;
nd_device_register(&nd_btt->dev);
return 0;
}
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
struct device *btt_dev;
struct btt_sb *btt_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
if (ndns->force_raw)
return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_BTT:
case NVDIMM_CCLASS_BTT2:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev);
btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev);
if (!btt_dev)
return -ENOMEM;
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
dev_dbg(dev, "btt: %s\n", rc == 0 ? dev_name(btt_dev) : "<none>");
if (rc < 0) {
struct nd_btt *nd_btt = to_nd_btt(btt_dev);
nd_detach_ndns(btt_dev, &nd_btt->ndns);
put_device(btt_dev);
}
return rc;
}
EXPORT_SYMBOL(nd_btt_probe);
| linux-master | drivers/nvdimm/btt_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, Christoph Hellwig.
* Copyright (c) 2015, Intel Corporation.
*/
#include <linux/platform_device.h>
#include <linux/memory_hotplug.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/numa.h>
static int e820_pmem_remove(struct platform_device *pdev)
{
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
nvdimm_bus_unregister(nvdimm_bus);
return 0;
}
static int e820_register_one(struct resource *res, void *data)
{
struct nd_region_desc ndr_desc;
struct nvdimm_bus *nvdimm_bus = data;
int nid = phys_to_target_node(res->start);
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
ndr_desc.numa_node = numa_map_to_online_node(nid);
ndr_desc.target_node = nid;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
return -ENXIO;
return 0;
}
static int e820_pmem_probe(struct platform_device *pdev)
{
static struct nvdimm_bus_descriptor nd_desc;
struct device *dev = &pdev->dev;
struct nvdimm_bus *nvdimm_bus;
int rc = -ENXIO;
nd_desc.provider_name = "e820";
nd_desc.module = THIS_MODULE;
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
if (!nvdimm_bus)
goto err;
platform_set_drvdata(pdev, nvdimm_bus);
rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
IORESOURCE_MEM, 0, -1, nvdimm_bus, e820_register_one);
if (rc)
goto err;
return 0;
err:
nvdimm_bus_unregister(nvdimm_bus);
dev_err(dev, "failed to register legacy persistent memory ranges\n");
return rc;
}
static struct platform_driver e820_pmem_driver = {
.probe = e820_pmem_probe,
.remove = e820_pmem_remove,
.driver = {
.name = "e820_pmem",
},
};
module_platform_driver(e820_pmem_driver);
MODULE_ALIAS("platform:e820_pmem*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
| linux-master | drivers/nvdimm/e820.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*/
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"
void badrange_init(struct badrange *badrange)
{
INIT_LIST_HEAD(&badrange->list);
spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);
static void append_badrange_entry(struct badrange *badrange,
struct badrange_entry *bre, u64 addr, u64 length)
{
lockdep_assert_held(&badrange->lock);
bre->start = addr;
bre->length = length;
list_add_tail(&bre->list, &badrange->list);
}
static int alloc_and_append_badrange_entry(struct badrange *badrange,
u64 addr, u64 length, gfp_t flags)
{
struct badrange_entry *bre;
bre = kzalloc(sizeof(*bre), flags);
if (!bre)
return -ENOMEM;
append_badrange_entry(badrange, bre, addr, length);
return 0;
}
static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
struct badrange_entry *bre, *bre_new;
spin_unlock(&badrange->lock);
bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
spin_lock(&badrange->lock);
if (list_empty(&badrange->list)) {
if (!bre_new)
return -ENOMEM;
append_badrange_entry(badrange, bre_new, addr, length);
return 0;
}
	/*
	 * There is a chance this is a duplicate, so check for that first.
	 * This will be the common case, as ARS_STATUS returns all known
	 * errors in the SPA space and we can't query it per region.
	 */
list_for_each_entry(bre, &badrange->list, list)
if (bre->start == addr) {
/* If length has changed, update this list entry */
if (bre->length != length)
bre->length = length;
kfree(bre_new);
return 0;
}
/*
* If not a duplicate or a simple length update, add the entry as is,
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
if (!bre_new)
return -ENOMEM;
append_badrange_entry(badrange, bre_new, addr, length);
return 0;
}
int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
int rc;
spin_lock(&badrange->lock);
rc = add_badrange(badrange, addr, length);
spin_unlock(&badrange->lock);
return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
unsigned int len)
{
struct list_head *badrange_list = &badrange->list;
u64 clr_end = start + len - 1;
struct badrange_entry *bre, *next;
spin_lock(&badrange->lock);
/*
* [start, clr_end] is the badrange interval being cleared.
* [bre->start, bre_end] is the badrange_list entry we're comparing
* the above interval against. The badrange list entry may need
* to be modified (update either start or length), deleted, or
* split into two based on the overlap characteristics
*/
list_for_each_entry_safe(bre, next, badrange_list, list) {
u64 bre_end = bre->start + bre->length - 1;
/* Skip intervals with no intersection */
if (bre_end < start)
continue;
if (bre->start > clr_end)
continue;
/* Delete completely overlapped badrange entries */
if ((bre->start >= start) && (bre_end <= clr_end)) {
list_del(&bre->list);
kfree(bre);
continue;
}
/* Adjust start point of partially cleared entries */
if ((start <= bre->start) && (clr_end > bre->start)) {
bre->length -= clr_end - bre->start + 1;
bre->start = clr_end + 1;
continue;
}
/* Adjust bre->length for partial clearing at the tail end */
if ((bre->start < start) && (bre_end <= clr_end)) {
/* bre->start remains the same */
bre->length = start - bre->start;
continue;
}
/*
* If clearing in the middle of an entry, we split it into
* two by modifying the current entry to represent one half of
* the split, and adding a new entry for the second half.
*/
if ((bre->start < start) && (bre_end > clr_end)) {
u64 new_start = clr_end + 1;
u64 new_len = bre_end - new_start + 1;
/* Add new entry covering the right half */
alloc_and_append_badrange_entry(badrange, new_start,
new_len, GFP_NOWAIT);
/* Adjust this entry to cover the left half */
bre->length = start - bre->start;
continue;
}
}
spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);
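/*
 * Worked example of the split case above: clearing [0x2000, 0x2fff]
 * out of an existing entry covering [0x1000, 0x4fff] shrinks that
 * entry to [0x1000, 0x1fff] and appends a new entry for
 * [0x3000, 0x4fff].
 */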
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
(u64) s * 512, (u64) num * 512);
/* this isn't an error as the hardware will still throw an exception */
if (badblocks_set(bb, s, num, 1))
dev_info_once(bb->dev, "%s: failed for sector %llx\n",
__func__, (u64) s);
}
/**
* __add_badblock_range() - Convert a physical address range to bad sectors
* @bb: badblocks instance to populate
* @ns_offset: namespace offset where the error range begins (in bytes)
* @len: number of bytes of badrange to be added
*
* This assumes that the range provided with (ns_offset, len) is within
* the bounds of physical addresses for this namespace, i.e. lies in the
* interval [ns_start, ns_start + ns_size)
*/
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
const unsigned int sector_size = 512;
sector_t start_sector, end_sector;
u64 num_sectors;
u32 rem;
start_sector = div_u64(ns_offset, sector_size);
end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
if (rem)
end_sector++;
num_sectors = end_sector - start_sector;
if (unlikely(num_sectors > (u64)INT_MAX)) {
u64 remaining = num_sectors;
sector_t s = start_sector;
while (remaining) {
int done = min_t(u64, remaining, INT_MAX);
set_badblock(bb, s, done);
remaining -= done;
s += done;
}
} else
set_badblock(bb, start_sector, num_sectors);
}
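/*
 * Worked example of the sector math above: ns_offset = 1000 and
 * len = 1100 yield start_sector = 1 (1000 / 512); the byte end of 2100
 * is sector 4 remainder 52, so end_sector rounds up to 5 and sectors
 * 1 through 4 are marked bad.
 */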
static void badblocks_populate(struct badrange *badrange,
struct badblocks *bb, const struct range *range)
{
struct badrange_entry *bre;
if (list_empty(&badrange->list))
return;
list_for_each_entry(bre, &badrange->list, list) {
u64 bre_end = bre->start + bre->length - 1;
/* Discard intervals with no intersection */
if (bre_end < range->start)
continue;
if (bre->start > range->end)
continue;
/* Deal with any overlap after start of the namespace */
if (bre->start >= range->start) {
u64 start = bre->start;
u64 len;
if (bre_end <= range->end)
len = bre->length;
else
len = range->start + range_len(range)
- bre->start;
__add_badblock_range(bb, start - range->start, len);
continue;
}
/*
* Deal with overlap for badrange starting before
* the namespace.
*/
if (bre->start < range->start) {
u64 len;
if (bre_end < range->end)
len = bre->start + bre->length - range->start;
else
len = range_len(range);
__add_badblock_range(bb, 0, len);
}
}
}
/**
* nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
* @region: parent region of the range to interrogate
* @bb: badblocks instance to populate
* @res: resource range to consider
*
* The badrange list generated during bus initialization may contain
* multiple, possibly overlapping physical address ranges. Compare each
* of these ranges to the resource range currently being initialized,
* and add badblocks entries for all matching sub-ranges
*/
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct range *range)
{
struct nvdimm_bus *nvdimm_bus;
if (!is_memory(&nd_region->dev)) {
dev_WARN_ONCE(&nd_region->dev, 1,
"%s only valid for pmem regions\n", __func__);
return;
}
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nvdimm_bus_lock(&nvdimm_bus->dev);
badblocks_populate(&nvdimm_bus->badrange, bb, range);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
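/*
 * Example of the translation performed above: given a namespace range
 * starting at 0x100000 and a badrange entry at 0x100200 of length
 * 0x400, the entry starts inside the range, so badblocks receive the
 * namespace-relative span (0x200, 0x400), i.e. sectors 1 and 2.
 */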
| linux-master | drivers/nvdimm/badrange.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
kfree(nsio);
}
static void namespace_pmem_release(struct device *dev)
{
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
if (nspm->id >= 0)
ida_simple_remove(&nd_region->ns_ida, nspm->id);
kfree(nspm->alt_name);
kfree(nspm->uuid);
kfree(nspm);
}
static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);
static int is_uuid_busy(struct device *dev, void *data)
{
uuid_t *uuid1 = data, *uuid2 = NULL;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid2 = nspm->uuid;
} else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
uuid2 = nd_btt->uuid;
} else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
uuid2 = nd_pfn->uuid;
}
if (uuid2 && uuid_equal(uuid1, uuid2))
return -EBUSY;
return 0;
}
static int is_namespace_uuid_busy(struct device *dev, void *data)
{
if (is_nd_region(dev))
return device_for_each_child(dev, data, is_uuid_busy);
return 0;
}
/**
* nd_is_uuid_unique - verify that no other namespace has @uuid
* @dev: any device on a nvdimm_bus
* @uuid: uuid to check
*/
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (!nvdimm_bus)
return false;
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
if (device_for_each_child(&nvdimm_bus->dev, uuid,
is_namespace_uuid_busy) != 0)
return false;
return true;
}
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
return false;
if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
return false;
if (is_nd_pfn(dev) || is_nd_btt(dev))
return false;
if (ndns->force_raw)
return false;
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
IORESOURCE_SYSTEM_RAM,
IORES_DESC_NONE) == REGION_MIXED)
return false;
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
if (is_namespace_pmem(&ndns->dev)) {
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(&ndns->dev);
if (nspm->lbasize == 0 || nspm->lbasize == 512)
/* default */;
else if (nspm->lbasize == 4096)
return 4096;
else
dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
nspm->lbasize);
}
/*
* There is no namespace label (is_namespace_io()), or the label
* indicates the default sector size.
*/
return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name)
{
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
const char *suffix = NULL;
if (ndns->claim && is_nd_btt(ndns->claim))
suffix = "s";
if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
int nsidx = 0;
if (is_namespace_pmem(&ndns->dev)) {
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(&ndns->dev);
nsidx = nspm->id;
}
if (nsidx)
sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
suffix ? suffix : "");
else
sprintf(name, "pmem%d%s", nd_region->id,
suffix ? suffix : "");
} else {
return NULL;
}
return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const uuid_t *nd_dev_to_uuid(struct device *dev)
{
if (dev && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
}
return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
const size_t len)
{
char *input, *pos, *alt_name, **ns_altname;
ssize_t rc;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
ns_altname = &nspm->alt_name;
} else
return -ENXIO;
if (dev->driver || to_ndns(dev)->claim)
return -EBUSY;
input = kstrndup(buf, len, GFP_KERNEL);
if (!input)
return -ENOMEM;
pos = strim(input);
if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
rc = -EINVAL;
goto out;
}
alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
if (!alt_name) {
rc = -ENOMEM;
goto out;
}
kfree(*ns_altname);
*ns_altname = alt_name;
sprintf(*ns_altname, "%s", pos);
rc = len;
out:
kfree(input);
return rc;
}
static int nd_namespace_label_update(struct nd_region *nd_region,
struct device *dev)
{
dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
"namespace must be idle during label update\n");
if (dev->driver || to_ndns(dev)->claim)
return 0;
/*
* Only allow label writes that will result in a valid namespace
* or deletion of an existing namespace.
*/
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
resource_size_t size = resource_size(&nspm->nsio.res);
if (size == 0 && nspm->uuid)
/* delete allocation */;
else if (!nspm->uuid)
return 0;
return nd_pmem_namespace_label_update(nd_region, nspm, size);
} else
return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc < 0 ? rc : len;
}
static ssize_t alt_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
char *ns_altname;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
ns_altname = nspm->alt_name;
} else
return -ENXIO;
return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
static int scan_free(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
resource_size_t n)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
int rc = 0;
while (n) {
struct resource *res, *last;
last = NULL;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id->id) == 0)
last = res;
res = last;
if (!res)
return 0;
if (n >= resource_size(res)) {
n -= resource_size(res);
nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
nvdimm_free_dpa(ndd, res);
/* retry with last resource deleted */
continue;
}
rc = adjust_resource(res, res->start, resource_size(res) - n);
if (rc == 0)
res->flags |= DPA_RESOURCE_ADJUSTED;
nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
break;
}
return rc;
}
/**
* shrink_dpa_allocation - for each dimm in region free n bytes for label_id
* @nd_region: the set of dimms to reclaim @n bytes from
* @label_id: unique identifier for the namespace consuming this dpa range
* @n: number of bytes per-dimm to release
*
 * Assumes resources are ordered. Starting from the end, try to
 * adjust_resource() the allocation down by @n, but if @n is larger
 * than the allocation, delete it and find the 'new' last allocation in
 * the label set.
*/
static int shrink_dpa_allocation(struct nd_region *nd_region,
struct nd_label_id *label_id, resource_size_t n)
{
int i;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
int rc;
rc = scan_free(nd_region, nd_mapping, label_id, n);
if (rc)
return rc;
}
return 0;
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
struct nd_region *nd_region, struct nd_mapping *nd_mapping,
resource_size_t n)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
int rc = 0;
/* first resource allocation for this label-id or dimm */
res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
if (!res)
rc = -EBUSY;
nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
return rc ? n : 0;
}
/**
* space_valid() - validate free dpa space against constraints
* @nd_region: hosting region of the free space
* @ndd: dimm device data for debug
* @label_id: namespace id to allocate space
* @prev: potential allocation that precedes free space
* @next: allocation that follows the given free space range
* @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
* @valid: free space range to validate
*
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).
 * If reserving PMEM, any space is valid.
*/
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id, struct resource *prev,
struct resource *next, struct resource *exist,
resource_size_t n, struct resource *valid)
{
bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
unsigned long align;
align = nd_region->align / nd_region->ndr_mappings;
valid->start = ALIGN(valid->start, align);
valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;
if (valid->start >= valid->end)
goto invalid;
if (is_reserve)
return;
/* allocation needs to be contiguous, so this is all or nothing */
if (resource_size(valid) < n)
goto invalid;
/* we've got all the space we need and no existing allocation */
if (!exist)
return;
/* allocation needs to be contiguous with the existing namespace */
if (valid->start == exist->end + 1
|| valid->end == exist->start - 1)
return;
invalid:
/* truncate @valid size to 0 */
valid->end = valid->start - 1;
}
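/*
 * Example of the alignment trimming above: with nd_region->align of
 * 16M interleaved across 4 mappings, each mapping is carved at 4M
 * granularity, so a free span of [5M, 13M - 1] is trimmed to
 * [8M, 12M - 1] before its size is checked against @n.
 */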
enum alloc_loc {
ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
static resource_size_t scan_allocate(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
resource_size_t n)
{
resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res, *exist = NULL, valid;
const resource_size_t to_allocate = n;
int first;
for_each_dpa_resource(ndd, res)
if (strcmp(label_id->id, res->name) == 0)
exist = res;
valid.start = nd_mapping->start;
valid.end = mapping_end;
valid.name = "free space";
retry:
first = 0;
for_each_dpa_resource(ndd, res) {
struct resource *next = res->sibling, *new_res = NULL;
resource_size_t allocate, available = 0;
enum alloc_loc loc = ALLOC_ERR;
const char *action;
int rc = 0;
/* ignore resources outside this nd_mapping */
if (res->start > mapping_end)
continue;
if (res->end < nd_mapping->start)
continue;
/* space at the beginning of the mapping */
if (!first++ && res->start > nd_mapping->start) {
valid.start = nd_mapping->start;
valid.end = res->start - 1;
space_valid(nd_region, ndd, label_id, NULL, next, exist,
to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_BEFORE;
}
/* space between allocations */
if (!loc && next) {
valid.start = res->start + resource_size(res);
valid.end = min(mapping_end, next->start - 1);
space_valid(nd_region, ndd, label_id, res, next, exist,
to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_MID;
}
/* space at the end of the mapping */
if (!loc && !next) {
valid.start = res->start + resource_size(res);
valid.end = mapping_end;
space_valid(nd_region, ndd, label_id, res, next, exist,
to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_AFTER;
}
if (!loc || !available)
continue;
allocate = min(available, n);
switch (loc) {
case ALLOC_BEFORE:
if (strcmp(res->name, label_id->id) == 0) {
/* adjust current resource up */
rc = adjust_resource(res, res->start - allocate,
resource_size(res) + allocate);
action = "cur grow up";
} else
action = "allocate";
break;
case ALLOC_MID:
if (strcmp(next->name, label_id->id) == 0) {
/* adjust next resource up */
rc = adjust_resource(next, next->start
- allocate, resource_size(next)
+ allocate);
new_res = next;
action = "next grow up";
} else if (strcmp(res->name, label_id->id) == 0) {
action = "grow down";
} else
action = "allocate";
break;
case ALLOC_AFTER:
if (strcmp(res->name, label_id->id) == 0)
action = "grow down";
else
action = "allocate";
break;
default:
return n;
}
if (strcmp(action, "allocate") == 0) {
new_res = nvdimm_allocate_dpa(ndd, label_id,
valid.start, allocate);
if (!new_res)
rc = -EBUSY;
} else if (strcmp(action, "grow down") == 0) {
/* adjust current resource down */
rc = adjust_resource(res, res->start, resource_size(res)
+ allocate);
if (rc == 0)
res->flags |= DPA_RESOURCE_ADJUSTED;
}
if (!new_res)
new_res = res;
nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
action, loc, rc);
if (rc)
return n;
n -= allocate;
if (n) {
/*
* Retry scan with newly inserted resources.
* For example, if we did an ALLOC_BEFORE
* insertion there may also have been space
* available for an ALLOC_AFTER insertion, so we
* need to check this same resource again
*/
goto retry;
} else
return 0;
}
if (n == to_allocate)
return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
return n;
}
static int merge_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
if (strncmp("pmem", label_id->id, 4) == 0)
return 0;
retry:
for_each_dpa_resource(ndd, res) {
int rc;
struct resource *next = res->sibling;
resource_size_t end = res->start + resource_size(res);
if (!next || strcmp(res->name, label_id->id) != 0
|| strcmp(next->name, label_id->id) != 0
|| end != next->start)
continue;
end += resource_size(next);
nvdimm_free_dpa(ndd, next);
rc = adjust_resource(res, res->start, end - res->start);
nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
if (rc)
return rc;
res->flags |= DPA_RESOURCE_ADJUSTED;
goto retry;
}
return 0;
}
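/*
 * Temporarily claim all remaining free pmem capacity on @data's dimm
 * under the synthetic "pmem-reserve" label_id; release_free_pmem()
 * drops the reservation.
 */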
int __reserve_free_pmem(struct device *dev, void *data)
{
struct nvdimm *nvdimm = data;
struct nd_region *nd_region;
struct nd_label_id label_id;
int i;
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
if (nd_region->ndr_mappings == 0)
return 0;
memset(&label_id, 0, sizeof(label_id));
strcat(label_id.id, "pmem-reserve");
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
resource_size_t n, rem = 0;
if (nd_mapping->nvdimm != nvdimm)
continue;
n = nd_pmem_available_dpa(nd_region, nd_mapping);
if (n == 0)
return 0;
rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
dev_WARN_ONCE(&nd_region->dev, rem,
"pmem reserve underrun: %#llx of %#llx bytes\n",
(unsigned long long) n - rem,
(unsigned long long) n);
return rem ? -ENXIO : 0;
}
return 0;
}
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
struct nd_mapping *nd_mapping)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res, *_res;
for_each_dpa_resource_safe(ndd, res, _res)
if (strcmp(res->name, "pmem-reserve") == 0)
nvdimm_free_dpa(ndd, res);
}
/**
* grow_dpa_allocation - for each dimm allocate n bytes for @label_id
* @nd_region: the set of dimms to allocate @n more bytes from
* @label_id: unique identifier for the namespace consuming this dpa range
* @n: number of bytes per-dimm to add to the existing allocation
*
* Assumes resources are ordered. For BLK regions, first consume
* BLK-only available DPA free space, then consume PMEM-aliased DPA
* space starting at the highest DPA. For PMEM regions start
* allocations from the start of an interleave set and end at the first
* BLK allocation or the end of the interleave set, whichever comes
* first.
*/
static int grow_dpa_allocation(struct nd_region *nd_region,
struct nd_label_id *label_id, resource_size_t n)
{
int i;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
resource_size_t rem = n;
int rc;
rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
dev_WARN_ONCE(&nd_region->dev, rem,
"allocation underrun: %#llx of %#llx bytes\n",
(unsigned long long) n - rem,
(unsigned long long) n);
if (rem)
return -ENXIO;
rc = merge_dpa(nd_region, nd_mapping, label_id);
if (rc)
return rc;
}
return 0;
}
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
struct resource *res = &nspm->nsio.res;
resource_size_t offset = 0;
if (size && !nspm->uuid) {
WARN_ON_ONCE(1);
size = 0;
}
if (size && nspm->uuid) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_id label_id;
struct resource *res;
if (!ndd) {
size = 0;
goto out;
}
nd_label_gen_id(&label_id, nspm->uuid, 0);
/* calculate a spa offset from the dpa allocation offset */
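/* e.g. a 4K per-dimm offset in a 4-way interleave is a 16K offset in spa */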
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0) {
offset = (res->start - nd_mapping->start)
* nd_region->ndr_mappings;
goto out;
}
WARN_ON_ONCE(1);
size = 0;
}
out:
res->start = nd_region->ndr_start + offset;
res->end = res->start + size - 1;
}
static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
const char *where)
{
if (!uuid) {
dev_dbg(dev, "%s: uuid not set\n", where);
return true;
}
return false;
}
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
resource_size_t allocated = 0, available = 0;
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_mapping *nd_mapping;
struct nvdimm_drvdata *ndd;
struct nd_label_id label_id;
u32 flags = 0, remainder;
int rc, i, id = -1;
uuid_t *uuid = NULL;
if (dev->driver || ndns->claim)
return -EBUSY;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid = nspm->uuid;
id = nspm->id;
}
/*
* We need a uuid for the allocation-label and dimm(s) on which
* to store the label.
*/
if (uuid_not_set(uuid, dev, __func__))
return -ENXIO;
if (nd_region->ndr_mappings == 0) {
dev_dbg(dev, "not associated with dimm(s)\n");
return -ENXIO;
}
div_u64_rem(val, nd_region->align, &remainder);
if (remainder) {
dev_dbg(dev, "%llu is not %luK aligned\n", val,
nd_region->align / SZ_1K);
return -EINVAL;
}
nd_label_gen_id(&label_id, uuid, flags);
for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
ndd = to_ndd(nd_mapping);
/*
* All dimms in an interleave set need to be enabled
* for the size to be changed.
*/
if (!ndd)
return -ENXIO;
allocated += nvdimm_allocated_dpa(ndd, &label_id);
}
available = nd_region_allocatable_dpa(nd_region);
if (val > available + allocated)
return -ENOSPC;
if (val == allocated)
return 0;
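/* convert the region-wide request and allocation to per-dimm quantities */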
val = div_u64(val, nd_region->ndr_mappings);
allocated = div_u64(allocated, nd_region->ndr_mappings);
if (val < allocated)
rc = shrink_dpa_allocation(nd_region, &label_id,
allocated - val);
else
rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
if (rc)
return rc;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
}
/*
* Try to delete the namespace if we deleted all of its
* allocation, this is not the seed or 0th device for the
* region, and it is not actively claimed by a btt, pfn, or dax
* instance.
*/
if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
nd_device_unregister(dev, ND_ASYNC);
return rc;
}
static ssize_t size_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
unsigned long long val;
int rc;
rc = kstrtoull(buf, 0, &val);
if (rc)
return rc;
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
/* setting size zero == 'delete namespace' */
if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
kfree(nspm->uuid);
nspm->uuid = NULL;
}
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
struct device *dev = &ndns->dev;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return resource_size(&nspm->nsio.res);
} else if (is_namespace_io(dev)) {
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
return resource_size(&nsio->res);
} else
WARN_ONCE(1, "unknown namespace type\n");
return 0;
}
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
resource_size_t size;
nvdimm_bus_lock(&ndns->dev);
size = __nvdimm_namespace_capacity(ndns);
nvdimm_bus_unlock(&ndns->dev);
return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
int i;
bool locked = false;
struct device *dev = &ndns->dev;
struct nd_region *nd_region = to_nd_region(dev->parent);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
locked = true;
}
}
return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%llu\n", (unsigned long long)
nvdimm_namespace_capacity(to_ndns(dev)));
}
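/*
 * Nominally read-only; namespace_visible() upgrades the mode to 0644
 * for pmem namespaces, where the size is writable.
 */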
static DEVICE_ATTR(size, 0444, size_show, size_store);
static uuid_t *namespace_to_uuid(struct device *dev)
{
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
}
return ERR_PTR(-ENXIO);
}
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
uuid_t *uuid = namespace_to_uuid(dev);
if (IS_ERR(uuid))
return PTR_ERR(uuid);
if (uuid)
return sprintf(buf, "%pUb\n", uuid);
return sprintf(buf, "\n");
}
/**
* namespace_update_uuid - check for a unique uuid and whether we're "renaming"
* @nd_region: parent region so we can update all dimms in the set
* @dev: namespace type for generating label_id
* @new_uuid: incoming uuid
* @old_uuid: reference to the uuid storage location in the namespace object
*/
static int namespace_update_uuid(struct nd_region *nd_region,
struct device *dev, uuid_t *new_uuid,
uuid_t **old_uuid)
{
struct nd_label_id old_label_id;
struct nd_label_id new_label_id;
int i;
if (!nd_is_uuid_unique(dev, new_uuid))
return -EINVAL;
if (*old_uuid == NULL)
goto out;
/*
* If we've already written a label with this uuid, then it's
* too late to rename because we can't reliably update the uuid
* without losing the old namespace. Userspace must delete this
* namespace to abandon the old uuid.
*/
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
/*
* This check by itself is sufficient because old_uuid
* would be NULL above if this uuid did not exist in the
* currently written set.
*
* FIXME: can we delete uuid with zero dpa allocated?
*/
if (list_empty(&nd_mapping->labels))
return -EBUSY;
}
nd_label_gen_id(&old_label_id, *old_uuid, 0);
nd_label_gen_id(&new_label_id, new_uuid, 0);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
struct nd_label_id label_id;
uuid_t uuid;
if (!nd_label)
continue;
nsl_get_uuid(ndd, nd_label, &uuid);
nd_label_gen_id(&label_id, &uuid,
nsl_get_flags(ndd, nd_label));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:
*old_uuid = new_uuid;
return 0;
}
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
uuid_t *uuid = NULL;
uuid_t **ns_uuid;
ssize_t rc = 0;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
ns_uuid = &nspm->uuid;
} else
return -ENXIO;
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
if (rc >= 0)
rc = nd_uuid_store(dev, &uuid, buf, len);
if (rc >= 0)
rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
else
kfree(uuid);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct resource *res;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
res = &nspm->nsio.res;
} else if (is_namespace_io(dev)) {
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
res = &nsio->res;
} else
return -ENXIO;
/* no address to convey if the namespace has no allocation */
if (resource_size(res) == 0)
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);
static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nd_size_select_show(nspm->lbasize,
pmem_lbasize_supported, buf);
}
return -ENXIO;
}
static ssize_t sector_size_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
const unsigned long *supported;
unsigned long *lbasize;
ssize_t rc = 0;
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
lbasize = &nspm->lbasize;
supported = pmem_lbasize_supported;
} else
return -ENXIO;
device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
if (rc >= 0)
rc = nd_size_select_store(dev, buf, lbasize, supported);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_label_id label_id;
uuid_t *uuid = NULL;
int count = 0, i;
u32 flags = 0;
nvdimm_bus_lock(dev);
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid = nspm->uuid;
flags = 0;
}
if (!uuid)
goto out;
nd_label_gen_id(&label_id, uuid, flags);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0)
count++;
}
out:
nvdimm_bus_unlock(dev);
return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static int btt_claim_class(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
int i, loop_bitmask = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_index *nsindex;
/*
* If any of the DIMMs do not support labels the only
* possible BTT format is v1.
*/
if (!ndd) {
loop_bitmask = 0;
break;
}
nsindex = to_namespace_index(ndd, ndd->ns_current);
if (nsindex == NULL)
loop_bitmask |= 1;
else {
/* check whether existing labels are v1.1 or v1.2 */
if (__le16_to_cpu(nsindex->major) == 1
&& __le16_to_cpu(nsindex->minor) == 1)
loop_bitmask |= 2;
else
loop_bitmask |= 4;
}
}
/*
* If nsindex is null loop_bitmask's bit 0 will be set, and if an index
* block is found, a v1.1 label for any mapping will set bit 1, and a
* v1.2 label will set bit 2.
*
* At the end of the loop, at most one of the three bits should be set.
* If multiple bits were set, it means the different mappings disagree
* about their labels, and this must be cleaned up first.
*
* If all the label index blocks are found to agree, nsindex of NULL
* implies labels haven't been initialized yet; when they are, they
* will use the v1.2 format, so we can assume BTT2.0
*
* If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
* found, we enforce BTT2.0
*
* If the loop was never entered, default to BTT1.1 (legacy namespaces)
*/
switch (loop_bitmask) {
case 0:
case 2:
return NVDIMM_CCLASS_BTT;
case 1:
case 4:
return NVDIMM_CCLASS_BTT2;
default:
return -ENXIO;
}
}
static ssize_t holder_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(holder);
static int __holder_class_store(struct device *dev, const char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
if (dev->driver || ndns->claim)
return -EBUSY;
if (sysfs_streq(buf, "btt")) {
int rc = btt_claim_class(dev);
if (rc < NVDIMM_CCLASS_NONE)
return rc;
ndns->claim_class = rc;
} else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
else if (sysfs_streq(buf, ""))
ndns->claim_class = NVDIMM_CCLASS_NONE;
else
return -EINVAL;
return 0;
}
static ssize_t holder_class_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
int rc;
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc < 0 ? rc : len;
}
static ssize_t holder_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
(ndns->claim_class == NVDIMM_CCLASS_BTT2))
rc = sprintf(buf, "btt\n");
else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
rc = sprintf(buf, "pfn\n");
else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RW(holder_class);
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
struct device *claim;
char *mode;
ssize_t rc;
device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
else if (claim && is_nd_pfn(claim))
mode = "memory";
else if (claim && is_nd_dax(claim))
mode = "dax";
else if (!claim && pmem_should_map_pages(dev))
mode = "memory";
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(mode);
static ssize_t force_raw_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
bool force_raw;
int rc = kstrtobool(buf, &force_raw);
if (rc)
return rc;
to_ndns(dev)->force_raw = force_raw;
return len;
}
static ssize_t force_raw_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
&dev_attr_nstype.attr,
&dev_attr_size.attr,
&dev_attr_mode.attr,
&dev_attr_uuid.attr,
&dev_attr_holder.attr,
&dev_attr_resource.attr,
&dev_attr_alt_name.attr,
&dev_attr_force_raw.attr,
&dev_attr_sector_size.attr,
&dev_attr_dpa_extents.attr,
&dev_attr_holder_class.attr,
NULL,
};
static umode_t namespace_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
if (is_namespace_pmem(dev)) {
if (a == &dev_attr_size.attr)
return 0644;
return a->mode;
}
/* base is_namespace_io() attributes */
if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
a == &dev_attr_resource.attr)
return a->mode;
return 0;
}
static struct attribute_group nd_namespace_attribute_group = {
.attrs = nd_namespace_attributes,
.is_visible = namespace_visible,
};
static const struct attribute_group *nd_namespace_attribute_groups[] = {
&nd_device_attribute_group,
&nd_namespace_attribute_group,
&nd_numa_attribute_group,
NULL,
};
static const struct device_type namespace_io_device_type = {
.name = "nd_namespace_io",
.release = namespace_io_release,
.groups = nd_namespace_attribute_groups,
};
static const struct device_type namespace_pmem_device_type = {
.name = "nd_namespace_pmem",
.release = namespace_pmem_release,
.groups = nd_namespace_attribute_groups,
};
static bool is_namespace_pmem(const struct device *dev)
{
return dev ? dev->type == &namespace_pmem_device_type : false;
}
static bool is_namespace_io(const struct device *dev)
{
return dev ? dev->type == &namespace_io_device_type : false;
}
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
struct nd_namespace_common *ndns = NULL;
resource_size_t size;
if (nd_btt || nd_pfn || nd_dax) {
if (nd_btt)
ndns = nd_btt->ndns;
else if (nd_pfn)
ndns = nd_pfn->ndns;
else if (nd_dax)
ndns = nd_dax->nd_pfn.ndns;
if (!ndns)
return ERR_PTR(-ENODEV);
/*
* Flush any in-progress probes / removals in the driver
* for the raw personality of this namespace.
*/
device_lock(&ndns->dev);
device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
return ERR_PTR(-EBUSY);
}
if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
"host (%s) vs claim (%s) mismatch\n",
dev_name(dev),
dev_name(ndns->claim)))
return ERR_PTR(-ENXIO);
} else {
ndns = to_ndns(dev);
if (ndns->claim) {
dev_dbg(dev, "claimed by %s, failing probe\n",
dev_name(ndns->claim));
return ERR_PTR(-ENXIO);
}
}
if (nvdimm_namespace_locked(ndns))
return ERR_PTR(-EACCES);
size = nvdimm_namespace_capacity(ndns);
if (size < ND_MIN_NAMESPACE_SIZE) {
dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
&size, ND_MIN_NAMESPACE_SIZE);
return ERR_PTR(-ENODEV);
}
/*
* Note, alignment validation for fsdax and devdax mode
* namespaces happens in nd_pfn_validate() where infoblock
* padding parameters can be applied.
*/
if (pmem_should_map_pages(dev)) {
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct resource *res = &nsio->res;
if (!IS_ALIGNED(res->start | (res->end + 1),
memremap_compat_align())) {
dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
return ERR_PTR(-EOPNOTSUPP);
}
}
if (is_namespace_pmem(&ndns->dev)) {
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(&ndns->dev);
if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
return ERR_PTR(-ENODEV);
}
return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
resource_size_t size)
{
return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);
void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);
static struct device **create_namespace_io(struct nd_region *nd_region)
{
struct nd_namespace_io *nsio;
struct device *dev, **devs;
struct resource *res;
nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
if (!nsio)
return NULL;
devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
if (!devs) {
kfree(nsio);
return NULL;
}
dev = &nsio->common.dev;
dev->type = &namespace_io_device_type;
dev->parent = &nd_region->dev;
res = &nsio->res;
res->name = dev_name(&nd_region->dev);
res->flags = IORESOURCE_MEM;
res->start = nd_region->ndr_start;
res->end = res->start + nd_region->ndr_size - 1;
devs[0] = dev;
return devs;
}
static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
u64 cookie, u16 pos)
{
struct nd_namespace_label *found = NULL;
int i;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent;
bool found_uuid = false;
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
u16 position;
if (!nd_label)
continue;
position = nsl_get_position(ndd, nd_label);
if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
continue;
if (!nsl_uuid_equal(ndd, nd_label, uuid))
continue;
if (!nsl_validate_type_guid(ndd, nd_label,
&nd_set->type_guid))
continue;
if (found_uuid) {
dev_dbg(ndd->dev, "duplicate entry for uuid\n");
return false;
}
found_uuid = true;
if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
continue;
if (position != pos)
continue;
found = nd_label;
break;
}
if (found)
break;
}
return found != NULL;
}
static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
int i;
if (!pmem_id)
return -ENODEV;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_label *nd_label = NULL;
u64 hw_start, hw_end, pmem_start, pmem_end;
struct nd_label_ent *label_ent;
lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
nd_label = label_ent->label;
if (!nd_label)
continue;
if (nsl_uuid_equal(ndd, nd_label, pmem_id))
break;
nd_label = NULL;
}
if (!nd_label) {
WARN_ON(1);
return -EINVAL;
}
/*
* Check that this label is compliant with the dpa
* range published in NFIT
*/
hw_start = nd_mapping->start;
hw_end = hw_start + nd_mapping->size;
pmem_start = nsl_get_dpa(ndd, nd_label);
pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
if (pmem_start >= hw_start && pmem_start < hw_end
&& pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
else {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
dev_name(ndd->dev),
nsl_uuid_raw(ndd, nd_label));
return -EINVAL;
}
/* move recently validated label to the front of the list */
list_move(&label_ent->list, &nd_mapping->labels);
}
return 0;
}
/**
* create_namespace_pmem - validate interleave set labelling, retrieve label0
* @nd_region: region with mappings to validate
* @nd_mapping: dimm mapping whose label set supplied @nd_label
* @nd_label: target pmem namespace label to evaluate
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
struct nd_mapping *nd_mapping,
struct nd_namespace_label *nd_label)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_index *nsindex =
to_namespace_index(ndd, ndd->ns_current);
u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
struct nd_label_ent *label_ent;
struct nd_namespace_pmem *nspm;
resource_size_t size = 0;
struct resource *res;
struct device *dev;
uuid_t uuid;
int rc = 0;
u16 i;
if (cookie == 0) {
dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
return ERR_PTR(-ENXIO);
}
if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
nsl_uuid_raw(ndd, nd_label));
if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
return ERR_PTR(-EAGAIN);
dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
nsl_uuid_raw(ndd, nd_label));
}
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
if (!nspm)
return ERR_PTR(-ENOMEM);
nspm->id = -1;
dev = &nspm->nsio.common.dev;
dev->type = &namespace_pmem_device_type;
dev->parent = &nd_region->dev;
res = &nspm->nsio.res;
res->name = dev_name(&nd_region->dev);
res->flags = IORESOURCE_MEM;
for (i = 0; i < nd_region->ndr_mappings; i++) {
nsl_get_uuid(ndd, nd_label, &uuid);
if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
continue;
if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
continue;
break;
}
if (i < nd_region->ndr_mappings) {
struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
/*
* Give up if we don't find an instance of a uuid at each
* position (from 0 to nd_region->ndr_mappings - 1), or if we
* find a dimm with two instances of the same uuid.
*/
dev_err(&nd_region->dev, "%s missing label for %pUb\n",
nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
rc = -EINVAL;
goto err;
}
/*
* Fix up each mapping's 'labels' to have the validated pmem label for
* that position at labels[0], and NULL at labels[1]. In the process,
* check that the namespace aligns with interleave-set.
*/
nsl_get_uuid(ndd, nd_label, &uuid);
rc = select_pmem_id(nd_region, &uuid);
if (rc)
goto err;
/* Calculate total size and populate namespace properties from label0 */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_namespace_label *label0;
struct nvdimm_drvdata *ndd;
nd_mapping = &nd_region->mapping[i];
label_ent = list_first_entry_or_null(&nd_mapping->labels,
typeof(*label_ent), list);
label0 = label_ent ? label_ent->label : NULL;
if (!label0) {
WARN_ON(1);
continue;
}
ndd = to_ndd(nd_mapping);
size += nsl_get_rawsize(ndd, label0);
if (nsl_get_position(ndd, label0) != 0)
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
NSLABEL_NAME_LEN, GFP_KERNEL);
nsl_get_uuid(ndd, label0, &uuid);
nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
nspm->lbasize = nsl_get_lbasize(ndd, label0);
nspm->nsio.common.claim_class =
nsl_get_claim_class(ndd, label0);
}
if (!nspm->alt_name || !nspm->uuid) {
rc = -ENOMEM;
goto err;
}
nd_namespace_pmem_set_resource(nd_region, nspm, size);
return dev;
err:
namespace_pmem_release(dev);
switch (rc) {
case -EINVAL:
dev_dbg(&nd_region->dev, "invalid label(s)\n");
break;
case -ENODEV:
dev_dbg(&nd_region->dev, "label not found\n");
break;
default:
dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
break;
}
return ERR_PTR(rc);
}
static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
struct nd_namespace_pmem *nspm;
struct resource *res;
struct device *dev;
if (!is_memory(&nd_region->dev))
return NULL;
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
if (!nspm)
return NULL;
dev = &nspm->nsio.common.dev;
dev->type = &namespace_pmem_device_type;
dev->parent = &nd_region->dev;
res = &nspm->nsio.res;
res->name = dev_name(&nd_region->dev);
res->flags = IORESOURCE_MEM;
nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
if (nspm->id < 0) {
kfree(nspm);
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
return dev;
}
static struct lock_class_key nvdimm_namespace_key;
void nd_region_create_ns_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
return;
nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
/*
* Seed creation failures are not fatal; provisioning is simply
* disabled until memory becomes available
*/
if (!nd_region->ns_seed)
dev_err(&nd_region->dev, "failed to create namespace\n");
else {
device_initialize(nd_region->ns_seed);
lockdep_set_class(&nd_region->ns_seed->mutex,
&nvdimm_namespace_key);
nd_device_register(nd_region->ns_seed);
}
}
void nd_region_create_dax_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region->dax_seed = nd_dax_create(nd_region);
/*
* Seed creation failures are not fatal; provisioning is simply
* disabled until memory becomes available
*/
if (!nd_region->dax_seed)
dev_err(&nd_region->dev, "failed to create dax namespace\n");
}
void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region->pfn_seed = nd_pfn_create(nd_region);
/*
* Seed creation failures are not fatal; provisioning is simply
* disabled until memory becomes available
*/
if (!nd_region->pfn_seed)
dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}
void nd_region_create_btt_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region->btt_seed = nd_btt_create(nd_region);
/*
* Seed creation failures are not fatal; provisioning is simply
* disabled until memory becomes available
*/
if (!nd_region->btt_seed)
dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
static int add_namespace_resource(struct nd_region *nd_region,
struct nd_namespace_label *nd_label, struct device **devs,
int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
int i;
for (i = 0; i < count; i++) {
uuid_t *uuid = namespace_to_uuid(devs[i]);
if (IS_ERR(uuid)) {
WARN_ON(1);
continue;
}
if (!nsl_uuid_equal(ndd, nd_label, uuid))
continue;
dev_err(&nd_region->dev,
"error: conflicting extents for uuid: %pUb\n", uuid);
return -ENXIO;
}
return i;
}
static int cmp_dpa(const void *a, const void *b)
{
const struct device *dev_a = *(const struct device **) a;
const struct device *dev_b = *(const struct device **) b;
struct nd_namespace_pmem *nspm_a, *nspm_b;
if (is_namespace_io(dev_a))
return 0;
nspm_a = to_nd_namespace_pmem(dev_a);
nspm_b = to_nd_namespace_pmem(dev_b);
return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
sizeof(resource_size_t));
}
static struct device **scan_labels(struct nd_region *nd_region)
{
int i, count = 0;
struct device *dev, **devs = NULL;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
/* "safe" because create_namespace_pmem() might list_move() label_ent */
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
struct device **__devs;
if (!nd_label)
continue;
/* skip labels that describe extents outside of the region */
if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
nsl_get_dpa(ndd, nd_label) > map_end)
continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
if (i < 0)
goto err;
if (i < count)
continue;
__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
if (!__devs)
goto err;
memcpy(__devs, devs, sizeof(dev) * count);
kfree(devs);
devs = __devs;
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
if (IS_ERR(dev)) {
switch (PTR_ERR(dev)) {
case -EAGAIN:
/* skip invalid labels */
continue;
case -ENODEV:
/* fallthrough to seed creation */
break;
default:
goto err;
}
} else
devs[count++] = dev;
}
dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
count == 1 ? "" : "s");
if (count == 0) {
struct nd_namespace_pmem *nspm;
/* Publish a zero-sized namespace for userspace to configure. */
nd_mapping_free_labels(nd_mapping);
devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
if (!devs)
goto err;
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
if (!nspm)
goto err;
dev = &nspm->nsio.common.dev;
dev->type = &namespace_pmem_device_type;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
dev->parent = &nd_region->dev;
devs[count++] = dev;
} else if (is_memory(&nd_region->dev)) {
/* clean unselected labels */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct list_head *l, *e;
LIST_HEAD(list);
int j;
nd_mapping = &nd_region->mapping[i];
if (list_empty(&nd_mapping->labels)) {
WARN_ON(1);
continue;
}
j = count;
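/* set aside the first count labels (the claimed ones), free the rest */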
list_for_each_safe(l, e, &nd_mapping->labels) {
if (!j--)
break;
list_move_tail(l, &list);
}
nd_mapping_free_labels(nd_mapping);
list_splice_init(&list, &nd_mapping->labels);
}
}
if (count > 1)
sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
return devs;
err:
if (devs) {
for (i = 0; devs[i]; i++)
namespace_pmem_release(devs[i]);
kfree(devs);
}
return NULL;
}
static struct device **create_namespaces(struct nd_region *nd_region)
{
struct nd_mapping *nd_mapping;
struct device **devs;
int i;
if (nd_region->ndr_mappings == 0)
return NULL;
/* lock down all mappings while we scan labels */
for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
mutex_lock_nested(&nd_mapping->lock, i);
}
devs = scan_labels(nd_region);
for (i = 0; i < nd_region->ndr_mappings; i++) {
int reverse = nd_region->ndr_mappings - 1 - i;
nd_mapping = &nd_region->mapping[reverse];
mutex_unlock(&nd_mapping->lock);
}
return devs;
}
static void deactivate_labels(void *region)
{
struct nd_region *nd_region = region;
int i;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = nd_mapping->ndd;
struct nvdimm *nvdimm = nd_mapping->nvdimm;
mutex_lock(&nd_mapping->lock);
nd_mapping_free_labels(nd_mapping);
mutex_unlock(&nd_mapping->lock);
put_ndd(ndd);
nd_mapping->ndd = NULL;
if (ndd)
atomic_dec(&nvdimm->busy);
}
}
static int init_active_labels(struct nd_region *nd_region)
{
int i, rc = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nvdimm *nvdimm = nd_mapping->nvdimm;
struct nd_label_ent *label_ent;
int count, j;
/*
* If the dimm is disabled then we may need to prevent
* the region from being activated.
*/
if (!ndd) {
if (test_bit(NDD_LOCKED, &nvdimm->flags))
/* fail, label data may be unreadable */;
else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */;
else
continue;
dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
dev_name(&nd_mapping->nvdimm->dev),
test_bit(NDD_LOCKED, &nvdimm->flags)
? "locked" : "disabled");
rc = -ENXIO;
goto out;
}
nd_mapping->ndd = ndd;
atomic_inc(&nvdimm->busy);
get_ndd(ndd);
count = nd_label_active_count(ndd);
dev_dbg(ndd->dev, "count: %d\n", count);
if (!count)
continue;
for (j = 0; j < count; j++) {
struct nd_namespace_label *label;
label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
if (!label_ent)
break;
label = nd_label_active(ndd, j);
label_ent->label = label;
mutex_lock(&nd_mapping->lock);
list_add_tail(&label_ent->list, &nd_mapping->labels);
mutex_unlock(&nd_mapping->lock);
}
if (j < count)
break;
}
if (i < nd_region->ndr_mappings)
rc = -ENOMEM;
out:
if (rc) {
deactivate_labels(nd_region);
return rc;
}
return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
nd_region);
}
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
struct device **devs = NULL;
int i, rc = 0, type;
*err = 0;
nvdimm_bus_lock(&nd_region->dev);
rc = init_active_labels(nd_region);
if (rc) {
nvdimm_bus_unlock(&nd_region->dev);
return rc;
}
type = nd_region_to_nstype(nd_region);
switch (type) {
case ND_DEVICE_NAMESPACE_IO:
devs = create_namespace_io(nd_region);
break;
case ND_DEVICE_NAMESPACE_PMEM:
devs = create_namespaces(nd_region);
break;
default:
break;
}
nvdimm_bus_unlock(&nd_region->dev);
if (!devs)
return -ENODEV;
for (i = 0; devs[i]; i++) {
struct device *dev = devs[i];
int id;
if (type == ND_DEVICE_NAMESPACE_PMEM) {
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(dev);
id = ida_simple_get(&nd_region->ns_ida, 0, 0,
GFP_KERNEL);
nspm->id = id;
} else
id = i;
if (id < 0)
break;
dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
device_initialize(dev);
lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
nd_device_register(dev);
}
if (i)
nd_region->ns_seed = devs[0];
if (devs[i]) {
int j;
for (j = i; devs[j]; j++) {
struct device *dev = devs[j];
device_initialize(dev);
put_device(dev);
}
*err = j - i;
/*
* All of the namespaces we tried to register failed, so
* fail region activation.
*/
if (*err == 0)
rc = -ENODEV;
}
kfree(devs);
if (rc == -ENODEV)
return rc;
return i;
}
| linux-master | drivers/nvdimm/namespace_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;
static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;
static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;
static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
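/*
 * Index-block sequence numbers form a 2-bit cycle (1 -> 2 -> 3 -> 1);
 * the newer of two valid sequence numbers is the one that is the
 * increment of the other.
 */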
static u32 best_seq(u32 a, u32 b)
{
a &= NSINDEX_SEQ_MASK;
b &= NSINDEX_SEQ_MASK;
if (a == 0 || a == b)
return b;
else if (b == 0)
return a;
else if (nd_inc_seq(a) == b)
return b;
else
return a;
}
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
return ndd->nslabel_size;
}
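/* an index block is a fixed header plus one free-bit per label slot, rounded up to NSINDEX_ALIGN */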
static size_t __sizeof_namespace_index(u32 nslot)
{
return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
size_t index_size)
{
return (ndd->nsarea.config_size - index_size * 2) /
sizeof_namespace_label(ndd);
}
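/*
 * Overestimate the slot count by ignoring the index blocks, size the
 * index blocks from that estimate, then recompute how many labels
 * actually fit alongside them.
 */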
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
u32 tmp_nslot, n;
tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
u32 nslot, space, size;
/*
* Per UEFI 2.7, the minimum size of the Label Storage Area is large
* enough to hold 2 index blocks and 2 labels. The minimum index
* block size is 256 bytes. The label size is 128 for namespaces
* prior to version 1.2 and at minimum 256 for version 1.2 and later.
*/
nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
size = __sizeof_namespace_index(nslot) * 2;
if (size <= space && nslot >= 2)
return size / 2;
dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
ndd->nsarea.config_size, sizeof_namespace_label(ndd));
return 0;
}
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* On media, the label format consists of two index blocks followed
* by an array of labels. None of these structures are ever
* updated in place. A sequence number tracks the current
* active index and the next one to write, while labels are
* written to free slots.
*
* +------------+
* | |
* | nsindex0 |
* | |
* +------------+
* | |
* | nsindex1 |
* | |
* +------------+
* | label0 |
* +------------+
* | label1 |
* +------------+
* | |
* ....nslot...
* | |
* +------------+
* | labelN |
* +------------+
*/
struct nd_namespace_index *nsindex[] = {
to_namespace_index(ndd, 0),
to_namespace_index(ndd, 1),
};
const int num_index = ARRAY_SIZE(nsindex);
struct device *dev = ndd->dev;
bool valid[2] = { 0 };
int i, num_valid = 0;
u32 seq;
for (i = 0; i < num_index; i++) {
u32 nslot;
u8 sig[NSINDEX_SIG_LEN];
u64 sum_save, sum, size;
unsigned int version, labelsize;
memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
dev_dbg(dev, "nsindex%d signature invalid\n", i);
continue;
}
/* label sizes larger than 128 arrived with v1.2 */
version = __le16_to_cpu(nsindex[i]->major) * 100
+ __le16_to_cpu(nsindex[i]->minor);
if (version >= 102)
labelsize = 1 << (7 + nsindex[i]->labelsize);
else
labelsize = 128;
if (labelsize != sizeof_namespace_label(ndd)) {
dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
i, nsindex[i]->labelsize);
continue;
}
sum_save = __le64_to_cpu(nsindex[i]->checksum);
nsindex[i]->checksum = __cpu_to_le64(0);
sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
nsindex[i]->checksum = __cpu_to_le64(sum_save);
if (sum != sum_save) {
dev_dbg(dev, "nsindex%d checksum invalid\n", i);
continue;
}
seq = __le32_to_cpu(nsindex[i]->seq);
if ((seq & NSINDEX_SEQ_MASK) == 0) {
dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
continue;
}
/* sanity check the index against expected values */
if (__le64_to_cpu(nsindex[i]->myoff)
!= i * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
i, (unsigned long long)
__le64_to_cpu(nsindex[i]->myoff));
continue;
}
if (__le64_to_cpu(nsindex[i]->otheroff)
!= (!i) * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
i, (unsigned long long)
__le64_to_cpu(nsindex[i]->otheroff));
continue;
}
if (__le64_to_cpu(nsindex[i]->labeloff)
!= 2 * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
i, (unsigned long long)
__le64_to_cpu(nsindex[i]->labeloff));
continue;
}
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd)
|| size < sizeof(struct nd_namespace_index)) {
dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
continue;
}
nslot = __le32_to_cpu(nsindex[i]->nslot);
if (nslot * sizeof_namespace_label(ndd)
+ 2 * sizeof_namespace_index(ndd)
> ndd->nsarea.config_size) {
dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
i, nslot, ndd->nsarea.config_size);
continue;
}
valid[i] = true;
num_valid++;
}
switch (num_valid) {
case 0:
break;
case 1:
for (i = 0; i < num_index; i++)
if (valid[i])
return i;
/* can't have num_valid > 0 but valid[] = { false, false } */
WARN_ON(1);
break;
default:
/* pick the best index... */
seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
__le32_to_cpu(nsindex[1]->seq));
if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
return 1;
else
return 0;
break;
}
return -1;
}
static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* In order to probe for and validate namespace index blocks we
* need to know the size of the labels, and we can't trust the
* size of the labels until we validate the index blocks.
* Resolve this dependency loop by probing for known label
* sizes, but default to v1.2 256-byte namespace labels if
* discovery fails.
*/
int label_size[] = { 128, 256 };
int i, rc;
for (i = 0; i < ARRAY_SIZE(label_size); i++) {
ndd->nslabel_size = label_size[i];
rc = __nd_label_validate(ndd);
if (rc >= 0)
return rc;
}
return -1;
}
static void nd_label_copy(struct nvdimm_drvdata *ndd,
struct nd_namespace_index *dst,
struct nd_namespace_index *src)
{
/* just exit if either destination or source is NULL */
if (!dst || !src)
return;
memcpy(dst, src, sizeof_namespace_index(ndd));
}
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
void *base = to_namespace_index(ndd, 0);
return base + 2 * sizeof_namespace_index(ndd);
}
static int to_slot(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
unsigned long label, base;
label = (unsigned long) nd_label;
base = (unsigned long) nd_label_base(ndd);
return (label - base) / sizeof_namespace_label(ndd);
}
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
unsigned long label, base;
base = (unsigned long) nd_label_base(ndd);
label = base + sizeof_namespace_label(ndd) * slot;
return (struct nd_namespace_label *) label;
}
#define for_each_clear_bit_le(bit, addr, size) \
for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
(bit) < (size); \
(bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
/**
* preamble_index - common variable initialization for nd_label_* routines
* @ndd: dimm container for the relevant label set
* @idx: namespace_index index
* @nsindex_out: on return set to the currently active namespace index
* @free: on return set to the free label bitmap in the index
* @nslot: on return set to the number of slots in the label space
*/
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
struct nd_namespace_index **nsindex_out,
unsigned long **free, u32 *nslot)
{
struct nd_namespace_index *nsindex;
nsindex = to_namespace_index(ndd, idx);
if (nsindex == NULL)
return false;
*free = (unsigned long *) nsindex->free;
*nslot = __le32_to_cpu(nsindex->nslot);
*nsindex_out = nsindex;
return true;
}
char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
u32 flags)
{
if (!label_id || !uuid)
return NULL;
snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
return label_id->id;
}
static bool preamble_current(struct nvdimm_drvdata *ndd,
struct nd_namespace_index **nsindex,
unsigned long **free, u32 *nslot)
{
return preamble_index(ndd, ndd->ns_current, nsindex,
free, nslot);
}
static bool preamble_next(struct nvdimm_drvdata *ndd,
struct nd_namespace_index **nsindex,
unsigned long **free, u32 *nslot)
{
return preamble_index(ndd, ndd->ns_next, nsindex,
free, nslot);
}
static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
u64 sum, sum_save;
if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
return true;
sum_save = nsl_get_checksum(ndd, nd_label);
nsl_set_checksum(ndd, nd_label, 0);
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
nsl_set_checksum(ndd, nd_label, sum_save);
return sum == sum_save;
}
static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
u64 sum;
if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
return;
nsl_set_checksum(ndd, nd_label, 0);
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
nsl_set_checksum(ndd, nd_label, sum);
}
static bool slot_valid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 slot)
{
bool valid;
/* check that we are written where we expect to be written */
if (slot != nsl_get_slot(ndd, nd_label))
return false;
valid = nsl_validate_checksum(ndd, nd_label);
if (!valid)
dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
return valid;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot, slot;
if (!preamble_current(ndd, &nsindex, &free, &nslot))
return 0; /* no label, nothing to reserve */
for_each_clear_bit_le(slot, free, nslot) {
struct nd_namespace_label *nd_label;
struct nd_region *nd_region = NULL;
struct nd_label_id label_id;
struct resource *res;
uuid_t label_uuid;
u32 flags;
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot))
continue;
nsl_get_uuid(ndd, nd_label, &label_uuid);
flags = nsl_get_flags(ndd, nd_label);
nd_label_gen_id(&label_id, &label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
nsl_get_dpa(ndd, nd_label),
nsl_get_rawsize(ndd, nd_label));
nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
if (!res)
return -EBUSY;
}
return 0;
}
int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
size_t config_size, read_size, max_xfer, offset;
struct nd_namespace_index *nsindex;
unsigned int i;
int rc = 0;
u32 nslot;
if (ndd->data)
return 0;
if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
ndd->nsarea.max_xfer, ndd->nsarea.config_size);
return -ENXIO;
}
/*
* We need to determine the maximum index area as this is the section
* we must read and validate before we can start processing labels.
*
* If the area is too small to contain the two indexes and 2 labels
* then we abort.
*
* Start at a label size of 128 as this should result in the largest
* possible namespace index size.
*/
ndd->nslabel_size = 128;
read_size = sizeof_namespace_index(ndd) * 2;
if (!read_size)
return -ENXIO;
/* Allocate config data */
config_size = ndd->nsarea.config_size;
ndd->data = kvzalloc(config_size, GFP_KERNEL);
if (!ndd->data)
return -ENOMEM;
/*
* We want to guarantee as few reads as possible while conserving
* memory. To do that we figure out how much unused space will be left
* in the last read, divide that by the total number of reads it is
* going to take given our maximum transfer size, and then reduce our
* maximum transfer size based on that result.
*/
max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
if (read_size < max_xfer) {
/* trim waste */
max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
DIV_ROUND_UP(config_size, max_xfer);
/* make certain we read indexes in exactly 1 read */
if (max_xfer < read_size)
max_xfer = read_size;
}
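/*
 * Illustrative numbers: with config_size = 131072 and max_xfer = 5000,
 * reading takes DIV_ROUND_UP(131072, 5000) = 27 calls and the last
 * read would leave 3928 bytes of its buffer unused; trimming max_xfer
 * by 3928 / 27 = 145 to 4855 still covers the area in 27 reads while
 * spreading the slack across all of them.
 */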
/* Make our initial read size a multiple of max_xfer size */
read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
config_size);
/* Read the index data */
rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
if (rc)
goto out_err;
/* Validate index data, if not valid assume all labels are invalid */
ndd->ns_current = nd_label_validate(ndd);
if (ndd->ns_current < 0)
return 0;
/* Record our index values */
ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
/* Copy "current" index on top of the "next" index */
nsindex = to_current_namespace_index(ndd);
nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
/* Determine starting offset for label data */
offset = __le64_to_cpu(nsindex->labeloff);
nslot = __le32_to_cpu(nsindex->nslot);
/* Loop through the free list pulling in any active labels */
for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
size_t label_read_size;
/* zero out the unused labels */
if (test_bit_le(i, nsindex->free)) {
memset(ndd->data + offset, 0, ndd->nslabel_size);
continue;
}
/* if we already read past here then just continue */
if (offset + ndd->nslabel_size <= read_size)
continue;
/* if we haven't read in a while reset our read_size offset */
if (read_size < offset)
read_size = offset;
/* determine how much more will be read after this next call. */
label_read_size = offset + ndd->nslabel_size - read_size;
label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
max_xfer;
/* truncate last read if needed */
if (read_size + label_read_size > config_size)
label_read_size = config_size - read_size;
/* Read the label data */
rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
read_size, label_read_size);
if (rc)
goto out_err;
/* push read_size to next read offset */
read_size += label_read_size;
}
dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
return rc;
}
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot, slot;
int count = 0;
if (!preamble_current(ndd, &nsindex, &free, &nslot))
return 0;
for_each_clear_bit_le(slot, free, nslot) {
struct nd_namespace_label *nd_label;
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot)) {
u32 label_slot = nsl_get_slot(ndd, nd_label);
u64 size = nsl_get_rawsize(ndd, nd_label);
u64 dpa = nsl_get_dpa(ndd, nd_label);
dev_dbg(ndd->dev,
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
slot, label_slot, dpa, size);
continue;
}
count++;
}
return count;
}
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot, slot;
if (!preamble_current(ndd, &nsindex, &free, &nslot))
return NULL;
for_each_clear_bit_le(slot, free, nslot) {
struct nd_namespace_label *nd_label;
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot))
continue;
if (n-- == 0)
return to_label(ndd, slot);
}
return NULL;
}
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot, slot;
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return UINT_MAX;
WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
slot = find_next_bit_le(free, nslot, 0);
if (slot == nslot)
return UINT_MAX;
clear_bit_le(slot, free);
return slot;
}
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot;
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return false;
WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
if (slot < nslot)
return !test_and_set_bit_le(slot, free);
return false;
}
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
unsigned long *free;
u32 nslot;
WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return nvdimm_num_label_slots(ndd);
return bitmap_weight(free, nslot);
}
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
unsigned long flags)
{
struct nd_namespace_index *nsindex;
unsigned long offset;
u64 checksum;
u32 nslot;
int rc;
nsindex = to_namespace_index(ndd, index);
if (flags & ND_NSINDEX_INIT)
nslot = nvdimm_num_label_slots(ndd);
else
nslot = __le32_to_cpu(nsindex->nslot);
memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
memset(&nsindex->flags, 0, 3);
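/* labelsize is encoded such that the byte size is 1 << (7 + labelsize): 128 -> 0, 256 -> 1 */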
nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
nsindex->seq = __cpu_to_le32(seq);
offset = (unsigned long) nsindex
- (unsigned long) to_namespace_index(ndd, 0);
nsindex->myoff = __cpu_to_le64(offset);
nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
offset = (unsigned long) to_namespace_index(ndd,
nd_label_next_nsindex(index))
- (unsigned long) to_namespace_index(ndd, 0);
nsindex->otheroff = __cpu_to_le64(offset);
offset = (unsigned long) nd_label_base(ndd)
- (unsigned long) to_namespace_index(ndd, 0);
nsindex->labeloff = __cpu_to_le64(offset);
nsindex->nslot = __cpu_to_le32(nslot);
nsindex->major = __cpu_to_le16(1);
if (sizeof_namespace_label(ndd) < 256)
nsindex->minor = __cpu_to_le16(1);
else
nsindex->minor = __cpu_to_le16(2);
nsindex->checksum = __cpu_to_le64(0);
if (flags & ND_NSINDEX_INIT) {
unsigned long *free = (unsigned long *) nsindex->free;
u32 nfree = ALIGN(nslot, BITS_PER_LONG);
int last_bits, i;
memset(nsindex->free, 0xff, nfree / 8);
for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
clear_bit_le(nslot + i, free);
}
checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
nsindex->checksum = __cpu_to_le64(checksum);
rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
nsindex, sizeof_namespace_index(ndd));
if (rc < 0)
return rc;
if (flags & ND_NSINDEX_INIT)
return 0;
/* copy the index we just wrote to the new 'next' */
WARN_ON(index != ndd->ns_next);
nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
WARN_ON(ndd->ns_current == ndd->ns_next);
return 0;
}
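/*
 * Summary of the index-block ping-pong implemented above (this
 * restates the UEFI namespace-label update scheme, it is not extra
 * driver state): two index blocks alternate between 'current' and
 * 'next'.  Updates are always written to 'next' with the next cyclic
 * sequence number (1 -> 2 -> 3 -> 1, with 0 meaning invalid), then
 * the roles swap.  A reader selects the valid index with the newer
 * sequence, so a torn write of 'next' leaves 'current' usable.
 */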
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return (unsigned long) nd_label
- (unsigned long) to_namespace_index(ndd, 0);
}
static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
if (guid_equal(guid, &nvdimm_btt_guid))
return NVDIMM_CCLASS_BTT;
else if (guid_equal(guid, &nvdimm_btt2_guid))
return NVDIMM_CCLASS_BTT2;
else if (guid_equal(guid, &nvdimm_pfn_guid))
return NVDIMM_CCLASS_PFN;
else if (guid_equal(guid, &nvdimm_dax_guid))
return NVDIMM_CCLASS_DAX;
else if (guid_equal(guid, &guid_null))
return NVDIMM_CCLASS_NONE;
return NVDIMM_CCLASS_UNKNOWN;
}
/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
if (uuid_equal(uuid, &nvdimm_btt_uuid))
return NVDIMM_CCLASS_BTT;
else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
return NVDIMM_CCLASS_BTT2;
else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
return NVDIMM_CCLASS_PFN;
else if (uuid_equal(uuid, &nvdimm_dax_uuid))
return NVDIMM_CCLASS_DAX;
else if (uuid_equal(uuid, &uuid_null))
return NVDIMM_CCLASS_NONE;
return NVDIMM_CCLASS_UNKNOWN;
}
static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
guid_t *target)
{
if (claim_class == NVDIMM_CCLASS_BTT)
return &nvdimm_btt_guid;
else if (claim_class == NVDIMM_CCLASS_BTT2)
return &nvdimm_btt2_guid;
else if (claim_class == NVDIMM_CCLASS_PFN)
return &nvdimm_pfn_guid;
else if (claim_class == NVDIMM_CCLASS_DAX)
return &nvdimm_dax_guid;
else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
/*
* If we're modifying a namespace for which we don't
* know the claim_class, don't touch the existing guid.
*/
return target;
} else
return &guid_null;
}
/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
uuid_t *target)
{
if (claim_class == NVDIMM_CCLASS_BTT)
return &nvdimm_btt_uuid;
else if (claim_class == NVDIMM_CCLASS_BTT2)
return &nvdimm_btt2_uuid;
else if (claim_class == NVDIMM_CCLASS_PFN)
return &nvdimm_pfn_uuid;
else if (claim_class == NVDIMM_CCLASS_DAX)
return &nvdimm_dax_uuid;
else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
/*
* If we're modifying a namespace for which we don't
* know the claim_class, don't touch the existing uuid.
*/
return target;
} else
return &uuid_null;
}
static void reap_victim(struct nd_mapping *nd_mapping,
struct nd_label_ent *victim)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
u32 slot = to_slot(ndd, victim->label);
dev_dbg(ndd->dev, "free: %d\n", slot);
nd_label_free_slot(ndd, slot);
victim->label = NULL;
}
static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, guid_t *guid)
{
if (efi_namespace_label_has(ndd, type_guid))
guid_copy(&nd_label->efi.type_guid, guid);
}
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, guid_t *guid)
{
if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
return true;
if (!guid_equal(&nd_label->efi.type_guid, guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
&nd_label->efi.type_guid);
return false;
}
return true;
}
static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
enum nvdimm_claim_class claim_class)
{
if (ndd->cxl) {
uuid_t uuid;
import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
export_uuid(nd_label->cxl.abstraction_uuid,
to_abstraction_uuid(claim_class, &uuid));
return;
}
if (!efi_namespace_label_has(ndd, abstraction_guid))
return;
guid_copy(&nd_label->efi.abstraction_guid,
to_abstraction_guid(claim_class,
&nd_label->efi.abstraction_guid));
}
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
if (ndd->cxl) {
uuid_t uuid;
import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
return uuid_to_nvdimm_cclass(&uuid);
}
if (!efi_namespace_label_has(ndd, abstraction_guid))
return NVDIMM_CCLASS_NONE;
return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
u32 nslot, slot;
size_t offset;
u64 cookie;
int rc;
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return -ENXIO;
cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
nd_label_gen_id(&label_id, nspm->uuid, 0);
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0)
break;
if (!res) {
WARN_ON_ONCE(1);
return -ENXIO;
}
/* allocate and write the label to the staging (next) index */
slot = nd_label_alloc_slot(ndd);
if (slot == UINT_MAX)
return -ENXIO;
dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
nsl_set_uuid(ndd, nd_label, nspm->uuid);
nsl_set_name(ndd, nd_label, nspm->alt_name);
nsl_set_flags(ndd, nd_label, flags);
nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
nsl_set_nrange(ndd, nd_label, 1);
nsl_set_position(ndd, nd_label, pos);
nsl_set_isetcookie(ndd, nd_label, cookie);
nsl_set_rawsize(ndd, nd_label, resource_size(res));
nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
nsl_set_dpa(ndd, nd_label, res->start);
nsl_set_slot(ndd, nd_label, slot);
nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
nsl_calculate_checksum(ndd, nd_label);
nd_dbg_dpa(nd_region, ndd, res, "\n");
/* update label */
offset = nd_label_offset(ndd, nd_label);
rc = nvdimm_set_config_data(ndd, offset, nd_label,
sizeof_namespace_label(ndd));
if (rc < 0)
return rc;
/* Garbage collect the previous label */
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
reap_victim(nd_mapping, label_ent);
}
/* update index */
rc = nd_label_write_index(ndd, ndd->ns_next,
nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
if (rc == 0) {
list_for_each_entry(label_ent, &nd_mapping->labels, list)
if (!label_ent->label) {
label_ent->label = nd_label;
nd_label = NULL;
break;
}
dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
"failed to track label: %d\n",
to_slot(ndd, nd_label));
if (nd_label)
rc = -ENXIO;
}
mutex_unlock(&nd_mapping->lock);
return rc;
}
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
int i, old_num_labels = 0;
struct nd_label_ent *label_ent;
struct nd_namespace_index *nsindex;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list)
old_num_labels++;
mutex_unlock(&nd_mapping->lock);
/*
* We need to preserve all the old labels for the mapping so
* they can be garbage collected after writing the new labels.
*/
for (i = old_num_labels; i < num_labels; i++) {
label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
if (!label_ent)
return -ENOMEM;
mutex_lock(&nd_mapping->lock);
list_add_tail(&label_ent->list, &nd_mapping->labels);
mutex_unlock(&nd_mapping->lock);
}
if (ndd->ns_current == -1 || ndd->ns_next == -1)
/* pass */;
else
return max(num_labels, old_num_labels);
nsindex = to_namespace_index(ndd, 0);
memset(nsindex, 0, ndd->nsarea.config_size);
for (i = 0; i < 2; i++) {
int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
if (rc)
return rc;
}
ndd->ns_next = 1;
ndd->ns_current = 0;
return max(num_labels, old_num_labels);
}
static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent, *e;
struct nd_namespace_index *nsindex;
unsigned long *free;
LIST_HEAD(list);
u32 nslot, slot;
int active = 0;
if (!uuid)
return 0;
/* no index || no labels == nothing to delete */
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return 0;
mutex_lock(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
if (!nd_label)
continue;
active++;
if (!nsl_uuid_equal(ndd, nd_label, uuid))
continue;
active--;
slot = to_slot(ndd, nd_label);
nd_label_free_slot(ndd, slot);
dev_dbg(ndd->dev, "free: %d\n", slot);
list_move_tail(&label_ent->list, &list);
label_ent->label = NULL;
}
list_splice_tail_init(&list, &nd_mapping->labels);
if (active == 0) {
nd_mapping_free_labels(nd_mapping);
dev_dbg(ndd->dev, "no more active labels\n");
}
mutex_unlock(&nd_mapping->lock);
return nd_label_write_index(ndd, ndd->ns_next,
nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
int i, rc;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
int count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
if (rc)
return rc;
continue;
}
for_each_dpa_resource(ndd, res)
if (strncmp(res->name, "pmem", 4) == 0)
count++;
WARN_ON_ONCE(!count);
rc = init_labels(nd_mapping, count);
if (rc < 0)
return rc;
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
NSLABEL_FLAG_UPDATING);
if (rc)
return rc;
}
if (size == 0)
return 0;
/* Clear the UPDATING flag per UEFI 2.7 expectations */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
if (rc)
return rc;
}
return 0;
}
int __init nd_label_init(void)
{
WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));
WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));
return 0;
}
| linux-master | drivers/nvdimm/label.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"
LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
void nvdimm_bus_lock(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (!nvdimm_bus)
return;
mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);
void nvdimm_bus_unlock(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (!nvdimm_bus)
return;
mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);
bool is_nvdimm_bus_locked(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (!nvdimm_bus)
return false;
return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
struct nvdimm_map {
struct nvdimm_bus *nvdimm_bus;
struct list_head list;
resource_size_t offset;
unsigned long flags;
size_t size;
union {
void *mem;
void __iomem *iomem;
};
struct kref kref;
};
static struct nvdimm_map *find_nvdimm_map(struct device *dev,
resource_size_t offset)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_map *nvdimm_map;
list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
if (nvdimm_map->offset == offset)
return nvdimm_map;
return NULL;
}
static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
resource_size_t offset, size_t size, unsigned long flags)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_map *nvdimm_map;
nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
if (!nvdimm_map)
return NULL;
INIT_LIST_HEAD(&nvdimm_map->list);
nvdimm_map->nvdimm_bus = nvdimm_bus;
nvdimm_map->offset = offset;
nvdimm_map->flags = flags;
nvdimm_map->size = size;
kref_init(&nvdimm_map->kref);
if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
&offset, size, dev_name(dev));
goto err_request_region;
}
if (flags)
nvdimm_map->mem = memremap(offset, size, flags);
else
nvdimm_map->iomem = ioremap(offset, size);
if (!nvdimm_map->mem)
goto err_map;
dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
__func__);
list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);
return nvdimm_map;
err_map:
release_mem_region(offset, size);
err_request_region:
kfree(nvdimm_map);
return NULL;
}
static void nvdimm_map_release(struct kref *kref)
{
struct nvdimm_bus *nvdimm_bus;
struct nvdimm_map *nvdimm_map;
nvdimm_map = container_of(kref, struct nvdimm_map, kref);
nvdimm_bus = nvdimm_map->nvdimm_bus;
dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
list_del(&nvdimm_map->list);
if (nvdimm_map->flags)
memunmap(nvdimm_map->mem);
else
iounmap(nvdimm_map->iomem);
release_mem_region(nvdimm_map->offset, nvdimm_map->size);
kfree(nvdimm_map);
}
static void nvdimm_map_put(void *data)
{
struct nvdimm_map *nvdimm_map = data;
struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
nvdimm_bus_lock(&nvdimm_bus->dev);
kref_put(&nvdimm_map->kref, nvdimm_map_release);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
/**
* devm_nvdimm_memremap - map a resource that is shared across regions
* @dev: device that will own a reference to the shared mapping
* @offset: physical base address of the mapping
* @size: mapping size
* @flags: memremap flags, or, if zero, perform an ioremap instead
*/
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags)
{
struct nvdimm_map *nvdimm_map;
nvdimm_bus_lock(dev);
nvdimm_map = find_nvdimm_map(dev, offset);
if (!nvdimm_map)
nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
else
kref_get(&nvdimm_map->kref);
nvdimm_bus_unlock(dev);
if (!nvdimm_map)
return NULL;
if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
return NULL;
return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
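/*
 * Minimal usage sketch for devm_nvdimm_memremap(); the offset and
 * size are hypothetical, a real caller derives them from its bus
 * resources:
 *
 *	void *base = devm_nvdimm_memremap(dev, 0x100000000ULL, SZ_64K,
 *					  MEMREMAP_WB);
 *	if (!base)
 *		return -ENOMEM;
 *
 * Passing flags == 0 produces an ioremap()ed mapping instead, and a
 * second call for the same offset shares the refcounted map rather
 * than creating a new one.
 */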
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
u32 *buf = addr;
u32 lo32 = 0;
u64 hi32 = 0;
int i;
for (i = 0; i < len / sizeof(u32); i++) {
lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
hi32 += lo32;
}
return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
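/*
 * Worked example of nd_fletcher64() on a toy buffer (illustrative
 * only): for two u32 words { 0x1, 0x2 } with le == false the loop
 * computes
 *
 *	i = 0: lo32 = 0x1, hi32 = 0x1
 *	i = 1: lo32 = 0x3, hi32 = 0x4
 *
 * returning (0x4 << 32) | 0x3.  The label code passes le == 1 so the
 * little-endian on-media index block checksums identically on
 * big-endian hosts.
 */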
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
/* struct nvdimm_bus definition is private to libnvdimm */
return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
/* struct nvdimm_bus definition is private to libnvdimm */
return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
/**
* nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
* @dev: container device for the uuid property
* @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
*
* Enforce that uuids can only be changed while the device is disabled
* (driver detached)
* LOCKING: expects device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
size_t len)
{
uuid_t uuid;
int rc;
if (dev->driver)
return -EBUSY;
rc = uuid_parse(buf, &uuid);
if (rc)
return rc;
kfree(*uuid_out);
*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
if (!(*uuid_out))
return -ENOMEM;
return 0;
}
ssize_t nd_size_select_show(unsigned long current_size,
const unsigned long *supported, char *buf)
{
ssize_t len = 0;
int i;
for (i = 0; supported[i]; i++)
if (current_size == supported[i])
len += sprintf(buf + len, "[%ld] ", supported[i]);
else
len += sprintf(buf + len, "%ld ", supported[i]);
len += sprintf(buf + len, "\n");
return len;
}
ssize_t nd_size_select_store(struct device *dev, const char *buf,
unsigned long *current_size, const unsigned long *supported)
{
unsigned long lbasize;
int rc, i;
if (dev->driver)
return -EBUSY;
rc = kstrtoul(buf, 0, &lbasize);
if (rc)
return rc;
for (i = 0; supported[i]; i++)
if (lbasize == supported[i])
break;
if (supported[i]) {
*current_size = lbasize;
return 0;
} else {
return -EINVAL;
}
}
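/*
 * Example of the nd_size_select_show() format (hypothetical supported
 * set { 512, 4096 } with 4096 currently selected):
 *
 *	512 [4096]
 *
 * Every supported size is listed and the active one is bracketed;
 * nd_size_select_store() accepts any bare listed value and rejects
 * everything else with -EINVAL.
 */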
static ssize_t commands_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int cmd, len = 0;
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
len += sprintf(buf + len, "\n");
return len;
}
static DEVICE_ATTR_RO(commands);
static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
struct device *parent = nvdimm_bus->dev.parent;
if (nd_desc->provider_name)
return nd_desc->provider_name;
else if (parent)
return dev_name(parent);
else
return "unknown";
}
static ssize_t provider_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
device_lock(dev);
device_unlock(dev);
return 0;
}
static int flush_regions_dimms(struct device *dev, void *data)
{
device_lock(dev);
device_unlock(dev);
device_for_each_child(dev, NULL, flush_namespaces);
return 0;
}
static ssize_t wait_probe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
int rc;
if (nd_desc->flush_probe) {
rc = nd_desc->flush_probe(nd_desc);
if (rc)
return rc;
}
nd_synchronize();
device_for_each_child(dev, NULL, flush_regions_dimms);
return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);
static struct attribute *nvdimm_bus_attributes[] = {
&dev_attr_commands.attr,
&dev_attr_wait_probe.attr,
&dev_attr_provider.attr,
NULL,
};
static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
static ssize_t capability_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
enum nvdimm_fwa_capability cap;
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
cap = nd_desc->fw_ops->capability(nd_desc);
switch (cap) {
case NVDIMM_FWA_CAP_QUIESCE:
return sprintf(buf, "quiesce\n");
case NVDIMM_FWA_CAP_LIVE:
return sprintf(buf, "live\n");
default:
return -EOPNOTSUPP;
}
}
static DEVICE_ATTR_RO(capability);
static ssize_t activate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
enum nvdimm_fwa_capability cap;
enum nvdimm_fwa_state state;
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
cap = nd_desc->fw_ops->capability(nd_desc);
state = nd_desc->fw_ops->activate_state(nd_desc);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return -EOPNOTSUPP;
switch (state) {
case NVDIMM_FWA_IDLE:
return sprintf(buf, "idle\n");
case NVDIMM_FWA_BUSY:
return sprintf(buf, "busy\n");
case NVDIMM_FWA_ARMED:
return sprintf(buf, "armed\n");
case NVDIMM_FWA_ARM_OVERFLOW:
return sprintf(buf, "overflow\n");
default:
return -ENXIO;
}
}
static int exec_firmware_activate(void *data)
{
struct nvdimm_bus_descriptor *nd_desc = data;
return nd_desc->fw_ops->activate(nd_desc);
}
static ssize_t activate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
enum nvdimm_fwa_state state;
bool quiesce;
ssize_t rc;
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
if (sysfs_streq(buf, "live"))
quiesce = false;
else if (sysfs_streq(buf, "quiesce"))
quiesce = true;
else
return -EINVAL;
state = nd_desc->fw_ops->activate_state(nd_desc);
switch (state) {
case NVDIMM_FWA_BUSY:
rc = -EBUSY;
break;
case NVDIMM_FWA_ARMED:
case NVDIMM_FWA_ARM_OVERFLOW:
if (quiesce)
rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
else
rc = nd_desc->fw_ops->activate(nd_desc);
break;
case NVDIMM_FWA_IDLE:
default:
rc = -ENXIO;
}
if (rc == 0)
rc = len;
return rc;
}
static DEVICE_ATTR_ADMIN_RW(activate);
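/*
 * Userspace view of the two attributes above, assuming a bus named
 * "ndbus0" (the name is hypothetical): reading firmware/activate
 * reports idle/busy/armed/overflow, and once armed an admin triggers
 * activation with, e.g.:
 *
 *	echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate
 *
 * "quiesce" routes through hibernate_quiet_exec() so devices are
 * frozen across the firmware switch, while "live" calls ->activate()
 * directly.
 */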
static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
enum nvdimm_fwa_capability cap;
/*
	 * Both 'activate' and 'capability' disappear when no ops are
	 * detected, or a negative capability is indicated.
*/
if (!nd_desc->fw_ops)
return 0;
cap = nd_desc->fw_ops->capability(nd_desc);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
&dev_attr_activate.attr,
&dev_attr_capability.attr,
NULL,
};
static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
.name = "firmware",
.attrs = nvdimm_bus_firmware_attributes,
.is_visible = nvdimm_bus_firmware_visible,
};
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
&nvdimm_bus_attribute_group,
&nvdimm_bus_firmware_attribute_group,
NULL,
};
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
struct blk_integrity bi;
if (meta_size == 0)
return 0;
memset(&bi, 0, sizeof(bi));
bi.tuple_size = meta_size;
bi.tag_size = meta_size;
blk_integrity_register(disk, &bi);
blk_queue_max_integrity_segments(disk->queue, 1);
return 0;
}
EXPORT_SYMBOL(nd_integrity_init);
#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
return 0;
}
EXPORT_SYMBOL(nd_integrity_init);
#endif
static __init int libnvdimm_init(void)
{
int rc;
rc = nvdimm_bus_init();
if (rc)
return rc;
rc = nvdimm_init();
if (rc)
goto err_dimm;
rc = nd_region_init();
if (rc)
goto err_region;
nd_label_init();
return 0;
err_region:
nvdimm_exit();
err_dimm:
nvdimm_bus_exit();
return rc;
}
static __exit void libnvdimm_exit(void)
{
WARN_ON(!list_empty(&nvdimm_bus_list));
nd_region_exit();
nvdimm_exit();
nvdimm_bus_exit();
nvdimm_devs_exit();
}
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);
| linux-master | drivers/nvdimm/core.c |
// SPDX-License-Identifier: GPL-2.0+
#define pr_fmt(fmt) "of_pmem: " fmt
#include <linux/of.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct of_pmem_private {
struct nvdimm_bus_descriptor bus_desc;
struct nvdimm_bus *bus;
};
static int of_pmem_region_probe(struct platform_device *pdev)
{
struct of_pmem_private *priv;
struct device_node *np;
struct nvdimm_bus *bus;
bool is_volatile;
int i;
np = dev_of_node(&pdev->dev);
if (!np)
return -ENXIO;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
priv->bus = bus = nvdimm_bus_register(&pdev->dev, &priv->bus_desc);
if (!bus) {
kfree(priv);
return -ENODEV;
}
platform_set_drvdata(pdev, priv);
is_volatile = !!of_find_property(np, "volatile", NULL);
dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
is_volatile ? "volatile" : "non-volatile", np);
for (i = 0; i < pdev->num_resources; i++) {
struct nd_region_desc ndr_desc;
struct nd_region *region;
/*
		 * NB: libnvdimm copies the data from ndr_desc into its own
* structures so passing a stack pointer is fine.
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &pdev->resource[i];
ndr_desc.of_node = np;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
if (is_volatile)
region = nvdimm_volatile_region_create(bus, &ndr_desc);
else {
set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
region = nvdimm_pmem_region_create(bus, &ndr_desc);
}
if (!region)
dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
ndr_desc.res, np);
else
dev_dbg(&pdev->dev, "Registered region %pR from %pOF\n",
ndr_desc.res, np);
}
return 0;
}
static int of_pmem_region_remove(struct platform_device *pdev)
{
struct of_pmem_private *priv = platform_get_drvdata(pdev);
nvdimm_bus_unregister(priv->bus);
kfree(priv);
return 0;
}
static const struct of_device_id of_pmem_region_match[] = {
{ .compatible = "pmem-region" },
{ .compatible = "pmem-region-v2" },
{ },
};
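/*
 * Sketch of a device tree node this driver binds against (the unit
 * address and reg values are made up for illustration):
 *
 *	pmem@10000000 {
 *		compatible = "pmem-region";
 *		reg = <0x10000000 0x40000>;
 *		volatile;	(optional: register a volatile region)
 *	};
 *
 * Each reg entry becomes one platform resource and thus one libnvdimm
 * region; the "volatile" property selects
 * nvdimm_volatile_region_create() in the probe path above.
 */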
static struct platform_driver of_pmem_region_driver = {
.probe = of_pmem_region_probe,
.remove = of_pmem_region_remove,
.driver = {
.name = "of_pmem",
.of_match_table = of_pmem_region_match,
},
};
module_platform_driver(of_pmem_region_driver);
MODULE_DEVICE_TABLE(of, of_pmem_region_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
| linux-master | drivers/nvdimm/of_pmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
#include "pfn.h"
int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;
static DEFINE_IDA(nd_ida);
static int to_nd_device_type(const struct device *dev)
{
if (is_nvdimm(dev))
return ND_DEVICE_DIMM;
else if (is_memory(dev))
return ND_DEVICE_REGION_PMEM;
else if (is_nd_dax(dev))
return ND_DEVICE_DAX_PMEM;
else if (is_nd_region(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent));
return 0;
}
static int nvdimm_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
}
static struct module *to_bus_provider(struct device *dev)
{
/* pin bus providers while regions are enabled */
if (is_nd_region(dev)) {
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
return nvdimm_bus->nd_desc->module;
}
return NULL;
}
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
nvdimm_bus->probe_active++;
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
wake_up(&nvdimm_bus->wait);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static int nvdimm_bus_probe(struct device *dev)
{
struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
struct module *provider = to_bus_provider(dev);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
int rc;
if (!try_module_get(provider))
return -ENXIO;
dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
rc = nd_drv->probe(dev);
if ((rc == 0 || rc == -EOPNOTSUPP) &&
dev->parent && is_nd_region(dev->parent))
nd_region_advance_seeds(to_nd_region(dev->parent), dev);
nvdimm_bus_probe_end(nvdimm_bus);
dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
dev_name(dev), rc);
if (rc != 0)
module_put(provider);
return rc;
}
static void nvdimm_bus_remove(struct device *dev)
{
struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
struct module *provider = to_bus_provider(dev);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (nd_drv->remove)
nd_drv->remove(dev);
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
dev_name(dev));
module_put(provider);
}
static void nvdimm_bus_shutdown(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nd_device_driver *nd_drv = NULL;
if (dev->driver)
nd_drv = to_nd_device_driver(dev->driver);
if (nd_drv && nd_drv->shutdown) {
nd_drv->shutdown(dev);
dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
dev->driver->name, dev_name(dev));
}
}
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
device_lock(dev);
if (dev->driver) {
struct nd_device_driver *nd_drv;
nd_drv = to_nd_device_driver(dev->driver);
if (nd_drv->notify)
nd_drv->notify(dev, event);
}
device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
if (!nvdimm_bus)
return;
/* caller is responsible for holding a reference on the device */
nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);
struct clear_badblocks_context {
resource_size_t phys, cleared;
};
static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
{
struct clear_badblocks_context *ctx = data;
struct nd_region *nd_region;
resource_size_t ndr_end;
sector_t sector;
/* make sure device is a region */
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
if (ctx->phys < nd_region->ndr_start ||
(ctx->phys + ctx->cleared - 1) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
return 0;
}
static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
phys_addr_t phys, u64 cleared)
{
struct clear_badblocks_context ctx = {
.phys = phys,
.cleared = cleared,
};
device_for_each_child(&nvdimm_bus->dev, &ctx,
nvdimm_clear_badblocks_region);
}
static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t phys, u64 cleared)
{
if (cleared > 0)
badrange_forget(&nvdimm_bus->badrange, phys, cleared);
if (cleared > 0 && cleared / 512)
nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
}
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc;
struct nd_cmd_clear_error clear_err;
struct nd_cmd_ars_cap ars_cap;
u32 clear_err_unit, mask;
unsigned int noio_flag;
int cmd_rc, rc;
if (!nvdimm_bus)
return -ENXIO;
nd_desc = nvdimm_bus->nd_desc;
/*
* if ndctl does not exist, it's PMEM_LEGACY and
* we want to just pretend everything is handled.
*/
if (!nd_desc->ndctl)
return len;
memset(&ars_cap, 0, sizeof(ars_cap));
ars_cap.address = phys;
ars_cap.length = len;
noio_flag = memalloc_noio_save();
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
sizeof(ars_cap), &cmd_rc);
memalloc_noio_restore(noio_flag);
if (rc < 0)
return rc;
if (cmd_rc < 0)
return cmd_rc;
clear_err_unit = ars_cap.clear_err_unit;
if (!clear_err_unit || !is_power_of_2(clear_err_unit))
return -ENXIO;
mask = clear_err_unit - 1;
if ((phys | len) & mask)
return -ENXIO;
memset(&clear_err, 0, sizeof(clear_err));
clear_err.address = phys;
clear_err.length = len;
noio_flag = memalloc_noio_save();
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
sizeof(clear_err), &cmd_rc);
memalloc_noio_restore(noio_flag);
if (rc < 0)
return rc;
if (cmd_rc < 0)
return cmd_rc;
nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
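/*
 * Example of the alignment rule enforced above (hypothetical values):
 * if ars_cap reports clear_err_unit = 256 then mask = 0xff, and a
 * request with phys = 0x1000, len = 0x200 passes because
 * (0x1000 | 0x200) & 0xff == 0, while phys = 0x1080 fails with
 * -ENXIO: errors can only be cleared in whole error-unit granules.
 */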
static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);
static struct bus_type nvdimm_bus_type = {
.name = "nd",
.uevent = nvdimm_bus_uevent,
.match = nvdimm_bus_match,
.probe = nvdimm_bus_probe,
.remove = nvdimm_bus_remove,
.shutdown = nvdimm_bus_shutdown,
};
static void nvdimm_bus_release(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus;
nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
ida_simple_remove(&nd_ida, nvdimm_bus->id);
kfree(nvdimm_bus);
}
static const struct device_type nvdimm_bus_dev_type = {
.release = nvdimm_bus_release,
.groups = nvdimm_bus_attribute_groups,
};
bool is_nvdimm_bus(struct device *dev)
{
return dev->type == &nvdimm_bus_dev_type;
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
struct device *dev;
for (dev = nd_dev; dev; dev = dev->parent)
if (is_nvdimm_bus(dev))
break;
dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
if (dev)
return to_nvdimm_bus(dev);
return NULL;
}
struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus;
nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
WARN_ON(!is_nvdimm_bus(dev));
return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);
struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
{
return to_nvdimm_bus(nvdimm->dev.parent);
}
EXPORT_SYMBOL_GPL(nvdimm_to_bus);
static struct lock_class_key nvdimm_bus_key;
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nd_desc)
{
struct nvdimm_bus *nvdimm_bus;
int rc;
nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
if (!nvdimm_bus)
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
init_waitqueue_head(&nvdimm_bus->wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
}
mutex_init(&nvdimm_bus->reconfig_mutex);
badrange_init(&nvdimm_bus->badrange);
nvdimm_bus->nd_desc = nd_desc;
nvdimm_bus->dev.parent = parent;
nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
device_initialize(&nvdimm_bus->dev);
lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key);
device_set_pm_not_required(&nvdimm_bus->dev);
rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
if (rc)
goto err;
rc = device_add(&nvdimm_bus->dev);
if (rc) {
dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
goto err;
}
return nvdimm_bus;
err:
put_device(&nvdimm_bus->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
if (!nvdimm_bus)
return;
device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
static int child_unregister(struct device *dev, void *data)
{
/*
* the singular ndctl class device per bus needs to be
* "device_destroy"ed, so skip it here
*
* i.e. remove classless children
*/
if (dev->class)
return 0;
if (is_nvdimm(dev))
nvdimm_delete(to_nvdimm(dev));
else
nd_device_unregister(dev, ND_SYNC);
return 0;
}
static void free_badrange_list(struct list_head *badrange_list)
{
struct badrange_entry *bre, *next;
list_for_each_entry_safe(bre, next, badrange_list, list) {
list_del(&bre->list);
kfree(bre);
}
list_del_init(badrange_list);
}
static void nd_bus_remove(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
mutex_lock(&nvdimm_bus_list_mutex);
list_del_init(&nvdimm_bus->list);
mutex_unlock(&nvdimm_bus_list_mutex);
wait_event(nvdimm_bus->wait,
atomic_read(&nvdimm_bus->ioctl_active) == 0);
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
spin_lock(&nvdimm_bus->badrange.lock);
free_badrange_list(&nvdimm_bus->badrange.list);
spin_unlock(&nvdimm_bus->badrange.lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
}
static int nd_bus_probe(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
int rc;
rc = nvdimm_bus_create_ndctl(nvdimm_bus);
if (rc)
return rc;
mutex_lock(&nvdimm_bus_list_mutex);
list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
mutex_unlock(&nvdimm_bus_list_mutex);
/* enable bus provider attributes to look up their local context */
dev_set_drvdata(dev, nvdimm_bus->nd_desc);
return 0;
}
static struct nd_device_driver nd_bus_driver = {
.probe = nd_bus_probe,
.remove = nd_bus_remove,
.drv = {
.name = "nd_bus",
.suppress_bind_attrs = true,
.bus = &nvdimm_bus_type,
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
};
static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
return true;
return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}
static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);
void nd_synchronize(void)
{
async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);
static void nd_async_device_register(void *d, async_cookie_t cookie)
{
struct device *dev = d;
if (device_add(dev) != 0) {
dev_err(dev, "%s: failed\n", __func__);
put_device(dev);
}
put_device(dev);
if (dev->parent)
put_device(dev->parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
struct device *dev = d;
/* flush bus operations before delete */
nvdimm_bus_lock(dev);
nvdimm_bus_unlock(dev);
device_unregister(dev);
put_device(dev);
}
static void __nd_device_register(struct device *dev, bool sync)
{
if (!dev)
return;
/*
* Ensure that region devices always have their NUMA node set as
* early as possible. This way we are able to make certain that
* any memory associated with the creation and the creation
* itself of the region is associated with the correct node.
*/
if (is_nd_region(dev))
set_dev_node(dev, to_nd_region(dev)->numa_node);
dev->bus = &nvdimm_bus_type;
device_set_pm_not_required(dev);
if (dev->parent) {
get_device(dev->parent);
if (dev_to_node(dev) == NUMA_NO_NODE)
set_dev_node(dev, dev_to_node(dev->parent));
}
get_device(dev);
if (sync)
nd_async_device_register(dev, 0);
else
async_schedule_dev_domain(nd_async_device_register, dev,
&nd_async_domain);
}
void nd_device_register(struct device *dev)
{
__nd_device_register(dev, false);
}
EXPORT_SYMBOL(nd_device_register);
void nd_device_register_sync(struct device *dev)
{
__nd_device_register(dev, true);
}
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
bool killed;
switch (mode) {
case ND_ASYNC:
/*
* In the async case this is being triggered with the
* device lock held and the unregistration work needs to
		 * be moved out of line iff this thread has won the
* race to schedule the deletion.
*/
if (!kill_device(dev))
return;
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
&nd_async_domain);
break;
case ND_SYNC:
/*
* In the sync case the device is being unregistered due
* to a state change of the parent. Claim the kill state
* to synchronize against other unregistration requests,
* or otherwise let the async path handle it if the
* unregistration was already queued.
*/
device_lock(dev);
killed = kill_device(dev);
device_unlock(dev);
if (!killed)
return;
nd_synchronize();
device_unregister(dev);
break;
}
}
EXPORT_SYMBOL(nd_device_unregister);
/**
* __nd_driver_register() - register a region or a namespace driver
* @nd_drv: driver to register
* @owner: automatically set by nd_driver_register() macro
* @mod_name: automatically set by nd_driver_register() macro
*/
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
const char *mod_name)
{
struct device_driver *drv = &nd_drv->drv;
if (!nd_drv->type) {
pr_debug("driver type bitmask not set (%ps)\n",
__builtin_return_address(0));
return -EINVAL;
}
if (!nd_drv->probe) {
pr_debug("%s ->probe() must be specified\n", mod_name);
return -EINVAL;
}
drv->bus = &nvdimm_bus_type;
drv->owner = owner;
drv->mod_name = mod_name;
return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);
void nvdimm_check_and_set_ro(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
int disk_ro = get_disk_ro(disk);
/* catch the disk up with the region ro state */
if (disk_ro == nd_region->ro)
return;
dev_info(dev, "%s read-%s, marking %s read-%s\n",
dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
disk->disk_name, nd_region->ro ? "only" : "write");
set_disk_ro(disk, nd_region->ro);
}
EXPORT_SYMBOL(nvdimm_check_and_set_ro);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);
static struct attribute *nd_device_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_devtype.attr,
NULL,
};
/*
* nd_device_attribute_group - generic attributes for all devices on an nd bus
*/
const struct attribute_group nd_device_attribute_group = {
.attrs = nd_device_attributes,
};
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
static int nvdimm_dev_to_target_node(struct device *dev)
{
struct device *parent = dev->parent;
struct nd_region *nd_region = NULL;
if (is_nd_region(dev))
nd_region = to_nd_region(dev);
else if (parent && is_nd_region(parent))
nd_region = to_nd_region(parent);
if (!nd_region)
return NUMA_NO_NODE;
return nd_region->target_node;
}
static ssize_t target_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
}
static DEVICE_ATTR_RO(target_node);
static struct attribute *nd_numa_attributes[] = {
&dev_attr_numa_node.attr,
&dev_attr_target_node.attr,
NULL,
};
static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
if (!IS_ENABLED(CONFIG_NUMA))
return 0;
if (a == &dev_attr_target_node.attr &&
nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
return 0;
return a->mode;
}
/*
* nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
*/
const struct attribute_group nd_numa_attribute_group = {
.attrs = nd_numa_attributes,
.is_visible = nd_numa_attr_visible,
};
static void ndctl_release(struct device *dev)
{
kfree(dev);
}
static struct lock_class_key nvdimm_ndctl_key;
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
struct device *dev;
int rc;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key);
device_set_pm_not_required(dev);
dev->class = nd_class;
dev->parent = &nvdimm_bus->dev;
dev->devt = devt;
dev->release = ndctl_release;
rc = dev_set_name(dev, "ndctl%d", nvdimm_bus->id);
if (rc)
goto err;
rc = device_add(dev);
if (rc) {
dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %d\n",
nvdimm_bus->id, rc);
goto err;
}
return 0;
err:
put_device(dev);
return rc;
}
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}
static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
.out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,
.out_sizes = { 4, 8, },
},
[ND_CMD_DIMM_FLAGS] = {
.out_num = 2,
.out_sizes = { 4, 4 },
},
[ND_CMD_GET_CONFIG_SIZE] = {
.out_num = 3,
.out_sizes = { 4, 4, 4, },
},
[ND_CMD_GET_CONFIG_DATA] = {
.in_num = 2,
.in_sizes = { 4, 4, },
.out_num = 2,
.out_sizes = { 4, UINT_MAX, },
},
[ND_CMD_SET_CONFIG_DATA] = {
.in_num = 3,
.in_sizes = { 4, 4, UINT_MAX, },
.out_num = 1,
.out_sizes = { 4, },
},
[ND_CMD_VENDOR] = {
.in_num = 3,
.in_sizes = { 4, 4, UINT_MAX, },
.out_num = 3,
.out_sizes = { 4, 4, UINT_MAX, },
},
[ND_CMD_CALL] = {
.in_num = 2,
.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
.out_num = 1,
.out_sizes = { UINT_MAX, },
},
};
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
return &__nd_cmd_dimm_descs[cmd];
return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);
static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_ARS_CAP] = {
.in_num = 2,
.in_sizes = { 8, 8, },
.out_num = 4,
.out_sizes = { 4, 4, 4, 4, },
},
[ND_CMD_ARS_START] = {
.in_num = 5,
.in_sizes = { 8, 8, 2, 1, 5, },
.out_num = 2,
.out_sizes = { 4, 4, },
},
[ND_CMD_ARS_STATUS] = {
.out_num = 3,
.out_sizes = { 4, 4, UINT_MAX, },
},
[ND_CMD_CLEAR_ERROR] = {
.in_num = 2,
.in_sizes = { 8, 8, },
.out_num = 3,
.out_sizes = { 4, 4, 8, },
},
[ND_CMD_CALL] = {
.in_num = 2,
.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
.out_num = 1,
.out_sizes = { UINT_MAX, },
},
};
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
return &__nd_cmd_bus_descs[cmd];
return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, void *buf)
{
if (idx >= desc->in_num)
return UINT_MAX;
if (desc->in_sizes[idx] < UINT_MAX)
return desc->in_sizes[idx];
if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
struct nd_cmd_set_config_hdr *hdr = buf;
return hdr->in_length;
} else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
struct nd_cmd_vendor_hdr *hdr = buf;
return hdr->in_length;
} else if (cmd == ND_CMD_CALL) {
struct nd_cmd_pkg *pkg = buf;
return pkg->nd_size_in;
}
return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
const u32 *out_field, unsigned long remainder)
{
if (idx >= desc->out_num)
return UINT_MAX;
if (desc->out_sizes[idx] < UINT_MAX)
return desc->out_sizes[idx];
if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
return in_field[1];
else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
/*
* Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
* "Size of Output Buffer in bytes, including this
* field."
*/
if (out_field[1] < 4)
return 0;
/*
* ACPI 6.1 is ambiguous if 'status' is included in the
* output size. If we encounter an output size that
* overshoots the remainder by 4 bytes, assume it was
* including 'status'.
*/
if (out_field[1] - 4 == remainder)
return remainder;
return out_field[1] - 8;
} else if (cmd == ND_CMD_CALL) {
struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
return pkg->nd_size_out;
}
return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);
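/*
 * Worked envelope example for ND_CMD_GET_CONFIG_DATA (request sizes
 * are hypothetical, field layout from __nd_cmd_dimm_descs): the input
 * envelope is two fixed u32 fields {in_offset, in_length}, so
 * in_len = 8.  The first output field is a fixed 4-byte status; the
 * second is variable and nd_cmd_out_size() resolves it to
 * in_field[1].  For in_length = 128 the total ioctl buffer is
 * 8 + 4 + 128 = 140 bytes, which __nd_ioctl() below copies from and
 * back to userspace around the ->ndctl() call.
 */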
void wait_nvdimm_bus_probe_idle(struct device *dev)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
do {
if (nvdimm_bus->probe_active == 0)
break;
nvdimm_bus_unlock(dev);
device_unlock(dev);
wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
device_lock(dev);
nvdimm_bus_lock(dev);
} while (true);
}
static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
struct nd_cmd_clear_error *clear_err =
(struct nd_cmd_clear_error *)data;
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
struct nd_namespace_common *ndns = NULL;
struct nd_namespace_io *nsio;
resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;
if (nd_dax || !dev->driver)
return 0;
start = clear_err->address;
end = clear_err->address + clear_err->cleared - 1;
if (nd_btt || nd_pfn || nd_dax) {
if (nd_btt)
ndns = nd_btt->ndns;
else if (nd_pfn)
ndns = nd_pfn->ndns;
else if (nd_dax)
ndns = nd_dax->nd_pfn.ndns;
if (!ndns)
return 0;
} else
ndns = to_ndns(dev);
nsio = to_nd_namespace_io(&ndns->dev);
pstart = nsio->res.start + offset;
pend = nsio->res.end - end_trunc;
if ((pstart >= start) && (pend <= end))
return -EBUSY;
return 0;
}
static int nd_ns_forget_poison_check(struct device *dev, void *data)
{
return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
/* ask the bus provider if it would like to block this request */
if (nd_desc->clear_to_send) {
int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);
if (rc)
return rc;
}
/* require clear error to go through the pmem driver */
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
return device_for_each_child(&nvdimm_bus->dev, data,
nd_ns_forget_poison_check);
if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
return 0;
/* prevent label manipulation while the kernel owns label updates */
wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
if (atomic_read(&nvdimm->busy))
return -EBUSY;
return 0;
}
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
void *buf = NULL;
u64 buf_len = 0;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
cmd_name = nvdimm_cmd_name(cmd);
cmd_mask = nvdimm->cmd_mask;
dimm_name = dev_name(&nvdimm->dev);
} else {
desc = nd_cmd_bus_desc(cmd);
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
dimm_name = "bus";
}
/* Validate command family support against bus declared support */
if (cmd == ND_CMD_CALL) {
unsigned long *mask;
if (copy_from_user(&pkg, p, sizeof(pkg)))
return -EFAULT;
if (nvdimm) {
if (pkg.nd_family > NVDIMM_FAMILY_MAX)
return -EINVAL;
mask = &nd_desc->dimm_family_mask;
} else {
if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
return -EINVAL;
mask = &nd_desc->bus_family_mask;
}
if (!test_bit(pkg.nd_family, mask))
return -EINVAL;
}
if (!desc ||
(desc->out_num + desc->in_num == 0) ||
cmd > ND_CMD_CALL ||
!test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
if (read_only)
switch (cmd) {
case ND_CMD_VENDOR:
case ND_CMD_SET_CONFIG_DATA:
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
dev_dbg(dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
default:
break;
}
/* process an input envelope */
in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
if (!in_env)
return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
u32 in_size, copy;
in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
rc = -ENXIO;
goto out;
}
if (in_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
rc = -EFAULT;
goto out;
}
in_len += in_size;
}
if (cmd == ND_CMD_CALL) {
func = pkg.nd_command;
dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
dimm_name, pkg.nd_command,
in_len, out_len, buf_len);
}
/* process an output envelope */
out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
if (!out_env) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env, 0);
u32 copy;
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
rc = -EFAULT;
goto out;
}
if (out_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
p + in_len + out_len, copy)) {
rc = -EFAULT;
goto out;
}
out_len += out_size;
}
buf_len = (u64) out_len + (u64) in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
rc = -EINVAL;
goto out;
}
buf = vmalloc(buf_len);
if (!buf) {
rc = -ENOMEM;
goto out;
}
if (copy_from_user(buf, p, buf_len)) {
rc = -EFAULT;
goto out;
}
device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
if (rc < 0)
goto out_unlock;
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
struct nd_cmd_clear_error *clear_err = buf;
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
clear_err->cleared);
}
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
out_unlock:
nvdimm_bus_unlock(dev);
device_unlock(dev);
out:
kfree(in_env);
kfree(out_env);
vfree(buf);
return rc;
}
enum nd_ioctl_mode {
BUS_IOCTL,
DIMM_IOCTL,
};
static int match_dimm(struct device *dev, void *data)
{
long id = (long) data;
if (is_nvdimm(dev)) {
struct nvdimm *nvdimm = to_nvdimm(dev);
return nvdimm->id == id;
}
return 0;
}
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
enum nd_ioctl_mode mode)
{
struct nvdimm_bus *nvdimm_bus, *found = NULL;
long id = (long) file->private_data;
struct nvdimm *nvdimm = NULL;
int rc, ro;
ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
if (mode == DIMM_IOCTL) {
struct device *dev;
dev = device_find_child(&nvdimm_bus->dev,
file->private_data, match_dimm);
if (!dev)
continue;
nvdimm = to_nvdimm(dev);
found = nvdimm_bus;
} else if (nvdimm_bus->id == id) {
found = nvdimm_bus;
}
if (found) {
atomic_inc(&nvdimm_bus->ioctl_active);
break;
}
}
mutex_unlock(&nvdimm_bus_list_mutex);
if (!found)
return -ENXIO;
nvdimm_bus = found;
rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
if (nvdimm)
put_device(&nvdimm->dev);
if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
wake_up(&nvdimm_bus->wait);
return rc;
}
static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return nd_ioctl(file, cmd, arg, BUS_IOCTL);
}
static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
}
static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
file->private_data = (void *) minor;
return 0;
}
static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = bus_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = dimm_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
int __init nvdimm_bus_init(void)
{
int rc;
rc = bus_register(&nvdimm_bus_type);
if (rc)
return rc;
rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
if (rc < 0)
goto err_bus_chrdev;
nvdimm_bus_major = rc;
rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
if (rc < 0)
goto err_dimm_chrdev;
nvdimm_major = rc;
nd_class = class_create("nd");
if (IS_ERR(nd_class)) {
rc = PTR_ERR(nd_class);
goto err_class;
}
rc = driver_register(&nd_bus_driver.drv);
if (rc)
goto err_nd_bus;
return 0;
err_nd_bus:
class_destroy(nd_class);
err_class:
unregister_chrdev(nvdimm_major, "dimmctl");
err_dimm_chrdev:
unregister_chrdev(nvdimm_bus_major, "ndctl");
err_bus_chrdev:
bus_unregister(&nvdimm_bus_type);
return rc;
}
void nvdimm_bus_exit(void)
{
driver_unregister(&nd_bus_driver.drv);
class_destroy(nd_class);
unregister_chrdev(nvdimm_bus_major, "ndctl");
unregister_chrdev(nvdimm_major, "dimmctl");
bus_unregister(&nvdimm_bus_type);
ida_destroy(&nd_ida);
}
| linux-master | drivers/nvdimm/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
* irrelevant.
*/
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
struct nd_region_data *ndrd)
{
int i, j;
dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
for (i = 0; i < (1 << ndrd->hints_shift); i++) {
struct resource *res = &nvdimm->flush_wpq[i];
unsigned long pfn = PHYS_PFN(res->start);
void __iomem *flush_page;
/* check if flush hints share a page */
for (j = 0; j < i; j++) {
struct resource *res_j = &nvdimm->flush_wpq[j];
unsigned long pfn_j = PHYS_PFN(res_j->start);
if (pfn == pfn_j)
break;
}
if (j < i)
flush_page = (void __iomem *) ((unsigned long)
ndrd_get_flush_wpq(ndrd, dimm, j)
& PAGE_MASK);
else
flush_page = devm_nvdimm_ioremap(dev,
PFN_PHYS(pfn), PAGE_SIZE);
if (!flush_page)
return -ENXIO;
ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+ (res->start & ~PAGE_MASK));
}
return 0;
}
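/*
 * Illustrative (hypothetical) example of the page-sharing check above:
 * if flush hint 0 lives at 0x10001000 and hint 1 at 0x10001040, both
 * resolve to the same PFN, so hint 1 reuses hint 0's existing mapping at
 * a different offset within the page instead of calling
 * devm_nvdimm_ioremap() a second time.
 */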
static int nd_region_invalidate_memregion(struct nd_region *nd_region)
{
int i, incoherent = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
incoherent++;
break;
}
}
if (!incoherent)
return 0;
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
dev_warn(
&nd_region->dev,
"Bypassing cpu_cache_invalidate_memergion() for testing!\n");
goto out;
} else {
dev_err(&nd_region->dev,
"Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
out:
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
clear_bit(NDD_INCOHERENT, &nvdimm->flags);
}
return 0;
}
int nd_region_activate(struct nd_region *nd_region)
{
int i, j, rc, num_flush = 0;
struct nd_region_data *ndrd;
struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
nvdimm_bus_lock(&nd_region->dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
nvdimm_bus_unlock(&nd_region->dev);
return -EBUSY;
}
/* at least one null hint slot per-dimm for the "no-hint" case */
flush_data_size += sizeof(void *);
num_flush = min_not_zero(num_flush, nvdimm->num_flush);
if (!nvdimm->num_flush)
continue;
flush_data_size += nvdimm->num_flush * sizeof(void *);
}
nvdimm_bus_unlock(&nd_region->dev);
rc = nd_region_invalidate_memregion(nd_region);
if (rc)
return rc;
ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
if (!ndrd)
return -ENOMEM;
dev_set_drvdata(dev, ndrd);
if (!num_flush)
return 0;
ndrd->hints_shift = ilog2(num_flush);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);
if (rc)
return rc;
}
/*
* Clear out entries that are duplicates so that a shared flush page
* is written to once per flush rather than once per mapping.
*/
for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
/* ignore if NULL already */
if (!ndrd_get_flush_wpq(ndrd, i, 0))
continue;
for (j = i + 1; j < nd_region->ndr_mappings; j++)
if (ndrd_get_flush_wpq(ndrd, i, 0) ==
ndrd_get_flush_wpq(ndrd, j, 0))
ndrd_set_flush_wpq(ndrd, j, 0, NULL);
}
return 0;
}
static void nd_region_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev);
u16 i;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
if (!test_bit(ND_REGION_CXL, &nd_region->flags))
memregion_free(nd_region->id);
kfree(nd_region);
}
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
WARN_ON(dev->type->release != nd_region_release);
return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);
struct device *nd_region_dev(struct nd_region *nd_region)
{
if (!nd_region)
return NULL;
return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);
void *nd_region_provider_data(struct nd_region *nd_region)
{
return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);
/**
* nd_region_to_nstype() - region to an integer namespace type
* @nd_region: region-device to interrogate
*
* This is the 'nstype' attribute of a region as well, an input to the
* MODALIAS for namespace devices, and the bit number for an nvdimm_bus
* to match namespace devices with namespace drivers.
*/
int nd_region_to_nstype(struct nd_region *nd_region)
{
if (is_memory(&nd_region->dev)) {
u16 i, label;
for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_LABELING, &nvdimm->flags))
label++;
}
if (label)
return ND_DEVICE_NAMESPACE_PMEM;
else
return ND_DEVICE_NAMESPACE_IO;
}
return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
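/*
* Usage sketch (assumed caller, not from this file): a namespace driver
* can key its probe path off the region type, e.g.:
*
*	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_PMEM)
*		... parse namespace labels from the mapped dimms ...
*	else
*		... treat the region as a raw ND_DEVICE_NAMESPACE_IO ...
*/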
static unsigned long long region_size(struct nd_region *nd_region)
{
if (is_memory(&nd_region->dev)) {
return nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
return nd_mapping->size;
}
return 0;
}
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);
static ssize_t deep_flush_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
/*
* NOTE: in the nvdimm_has_flush() error case this attribute is
* not visible.
*/
return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}
static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
bool flush;
int rc = kstrtobool(buf, &flush);
struct nd_region *nd_region = to_nd_region(dev);
if (rc)
return rc;
if (!flush)
return -EINVAL;
rc = nvdimm_flush(nd_region, NULL);
if (rc)
return rc;
return len;
}
static DEVICE_ATTR_RW(deep_flush);
static ssize_t mappings_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);
static ssize_t nstype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
struct nd_interleave_set *nd_set = nd_region->nd_set;
ssize_t rc = 0;
if (is_memory(dev) && nd_set)
/* pass, should be precluded by region_visible */;
else
return -ENXIO;
/*
* The cookie to show depends on which specification of the
* labels we are using. If there are no labels then default to
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
if (ndd) {
struct nd_namespace_index *nsindex;
nsindex = to_namespace_index(ndd, ndd->ns_current);
rc = sprintf(buf, "%#llx\n",
nd_region_interleave_set_cookie(nd_region,
nsindex));
}
}
nvdimm_bus_unlock(dev);
device_unlock(dev);
if (rc)
return rc;
return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
resource_size_t available;
int i;
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
available = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
/* if a dimm is disabled the available capacity is zero */
if (!ndd)
return 0;
available += nd_pmem_available_dpa(nd_region, nd_mapping);
}
return available;
}
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
resource_size_t avail = 0;
int i;
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa(
nd_region, nd_mapping));
}
return avail * nd_region->ndr_mappings;
}
static ssize_t available_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
/*
* Flush in-flight updates and grab a snapshot of the available
* size. Of course, this value is potentially invalidated the
* moment nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t max_available_extent_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);
static ssize_t init_namespaces_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region_data *ndrd = dev_get_drvdata(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
if (ndrd)
rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
else
rc = -ENXIO;
nvdimm_bus_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
if (nd_region->ns_seed)
rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
else
rc = sprintf(buf, "\n");
nvdimm_bus_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(namespace_seed);
static ssize_t btt_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
if (nd_region->btt_seed)
rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
else
rc = sprintf(buf, "\n");
nvdimm_bus_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(btt_seed);
static ssize_t pfn_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
if (nd_region->pfn_seed)
rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
else
rc = sprintf(buf, "\n");
nvdimm_bus_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(pfn_seed);
static ssize_t dax_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
if (nd_region->dax_seed)
rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
else
rc = sprintf(buf, "\n");
nvdimm_bus_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%d\n", nd_region->ro);
}
static int revalidate_read_only(struct device *dev, void *data)
{
nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
return 0;
}
static ssize_t read_only_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
bool ro;
int rc = kstrtobool(buf, &ro);
struct nd_region *nd_region = to_nd_region(dev);
if (rc)
return rc;
nd_region->ro = ro;
device_for_each_child(dev, NULL, revalidate_read_only);
return len;
}
static DEVICE_ATTR_RW(read_only);
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%#lx\n", nd_region->align);
}
static ssize_t align_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long val, dpa;
u32 mappings, remainder;
int rc;
rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
/*
* Ensure space-align is evenly divisible by the region
* interleave-width because the kernel typically has no facility
* to determine which DIMM(s), dimm-physical-addresses, would
* contribute to the tail capacity in system-physical-address
* space for the namespace.
*/
mappings = max_t(u32, 1, nd_region->ndr_mappings);
dpa = div_u64_rem(val, mappings, &remainder);
if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
|| val > region_size(nd_region) || remainder)
return -EINVAL;
/*
* Given that space allocation consults this value multiple
* times ensure it does not change for the duration of the
* allocation.
*/
nvdimm_bus_lock(dev);
nd_region->align = val;
nvdimm_bus_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(align);
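/*
* Worked example (hypothetical numbers) for the divisibility check in
* align_store(): with a 4-way interleaved region, writing 8M yields
* dpa = 8M / 4 = 2M, a power of two >= PAGE_SIZE with zero remainder, so
* it is accepted; writing 12M yields dpa = 3M, which is not a power of
* two, so the store fails with -EINVAL.
*/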
static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
device_unlock(dev);
return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
return sprintf(buf, "cpu_cache\n");
else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
return sprintf(buf, "memory_controller\n");
else
return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);
static struct attribute *nd_region_attributes[] = {
&dev_attr_size.attr,
&dev_attr_align.attr,
&dev_attr_nstype.attr,
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
&dev_attr_pfn_seed.attr,
&dev_attr_dax_seed.attr,
&dev_attr_deep_flush.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
&dev_attr_max_available_extent.attr,
&dev_attr_namespace_seed.attr,
&dev_attr_init_namespaces.attr,
&dev_attr_badblocks.attr,
&dev_attr_resource.attr,
&dev_attr_persistence_domain.attr,
NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct nd_region *nd_region = to_nd_region(dev);
struct nd_interleave_set *nd_set = nd_region->nd_set;
int type = nd_region_to_nstype(nd_region);
if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
return 0;
if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
return 0;
if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
return 0;
if (a == &dev_attr_resource.attr && !is_memory(dev))
return 0;
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
if (has_flush == 1)
return a->mode;
else if (has_flush == 0)
return 0444;
else
return 0;
}
if (a == &dev_attr_persistence_domain.attr) {
if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
return 0;
return a->mode;
}
if (a == &dev_attr_align.attr)
return a->mode;
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
if (type == ND_DEVICE_NAMESPACE_PMEM &&
a == &dev_attr_available_size.attr)
return a->mode;
else if (is_memory(dev) && nd_set)
return a->mode;
return 0;
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
struct nd_region *nd_region = to_nd_region(dev);
struct nd_mapping *nd_mapping;
struct nvdimm *nvdimm;
if (n >= nd_region->ndr_mappings)
return -ENXIO;
nd_mapping = &nd_region->mapping[n];
nvdimm = nd_mapping->nvdimm;
return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
nd_mapping->start, nd_mapping->size,
nd_mapping->position);
}
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)
/*
* 32 should be enough for a while; even in the presence of socket
* interleave, a 32-way interleave set is a degenerate case.
*/
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
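/*
* For reference, REGION_MAPPING(0) above expands to (sketch):
*
*	static ssize_t mapping0_show(struct device *dev,
*			struct device_attribute *attr, char *buf)
*	{
*		return mappingN(dev, buf, 0);
*	}
*	static DEVICE_ATTR_RO(mapping0);
*/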
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct nd_region *nd_region = to_nd_region(dev);
if (n < nd_region->ndr_mappings)
return a->mode;
return 0;
}
static struct attribute *mapping_attributes[] = {
&dev_attr_mapping0.attr,
&dev_attr_mapping1.attr,
&dev_attr_mapping2.attr,
&dev_attr_mapping3.attr,
&dev_attr_mapping4.attr,
&dev_attr_mapping5.attr,
&dev_attr_mapping6.attr,
&dev_attr_mapping7.attr,
&dev_attr_mapping8.attr,
&dev_attr_mapping9.attr,
&dev_attr_mapping10.attr,
&dev_attr_mapping11.attr,
&dev_attr_mapping12.attr,
&dev_attr_mapping13.attr,
&dev_attr_mapping14.attr,
&dev_attr_mapping15.attr,
&dev_attr_mapping16.attr,
&dev_attr_mapping17.attr,
&dev_attr_mapping18.attr,
&dev_attr_mapping19.attr,
&dev_attr_mapping20.attr,
&dev_attr_mapping21.attr,
&dev_attr_mapping22.attr,
&dev_attr_mapping23.attr,
&dev_attr_mapping24.attr,
&dev_attr_mapping25.attr,
&dev_attr_mapping26.attr,
&dev_attr_mapping27.attr,
&dev_attr_mapping28.attr,
&dev_attr_mapping29.attr,
&dev_attr_mapping30.attr,
&dev_attr_mapping31.attr,
NULL,
};
static const struct attribute_group nd_mapping_attribute_group = {
.is_visible = mapping_visible,
.attrs = mapping_attributes,
};
static const struct attribute_group nd_region_attribute_group = {
.attrs = nd_region_attributes,
.is_visible = region_visible,
};
static const struct attribute_group *nd_region_attribute_groups[] = {
&nd_device_attribute_group,
&nd_region_attribute_group,
&nd_numa_attribute_group,
&nd_mapping_attribute_group,
NULL,
};
static const struct device_type nd_pmem_device_type = {
.name = "nd_pmem",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
static const struct device_type nd_volatile_device_type = {
.name = "nd_volatile",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
bool is_nd_pmem(const struct device *dev)
{
return dev ? dev->type == &nd_pmem_device_type : false;
}
bool is_nd_volatile(const struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
struct nd_namespace_index *nsindex)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (!nd_set)
return 0;
if (nsindex && __le16_to_cpu(nsindex->major) == 1
&& __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
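/*
* Example (assumed label versions): a namespace index with major == 1
* and minor == 1 selects the v1.1 cookie (nd_set->cookie1); any newer
* index, or a NULL @nsindex, selects nd_set->cookie2.
*/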
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
if (nd_set)
return nd_set->altcookie;
return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
struct nd_label_ent *label_ent, *e;
lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
list_del(&label_ent->list);
kfree(label_ent);
}
}
/*
* When a namespace is activated, create new seeds for the next
* namespace, or namespace-personality, to be configured.
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
if (nd_region->ns_seed == &nd_btt->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
if (nd_region->pfn_seed == dev)
nd_region_create_pfn_seed(nd_region);
if (nd_region->ns_seed == &nd_pfn->ndns->dev)
nd_region_create_ns_seed(nd_region);
} else if (is_nd_dax(dev)) {
struct nd_dax *nd_dax = to_nd_dax(dev);
if (nd_region->dax_seed == dev)
nd_region_create_dax_seed(nd_region);
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
nvdimm_bus_unlock(dev);
}
/**
* nd_region_acquire_lane - allocate and lock a lane
* @nd_region: region id and number of lanes possible
*
* A lane correlates to a BLK-data-window and/or a log slot in the BTT.
* We optimize for the common case where there are 256 lanes, one
* per-cpu. For larger systems we need to lock to share lanes. For now
* this implementation assumes the cost of maintaining an allocator for
* free lanes is on the order of the lock hold time, so it implements a
* static lane = cpu % num_lanes mapping.
*
* In the case of a BTT instance on top of a BLK namespace a lane may be
* acquired recursively. We lock on the first instance.
*
* In the case of a BTT instance on top of PMEM, we only acquire a lane
* for the BTT metadata updates.
*/
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
unsigned int cpu, lane;
cpu = get_cpu();
if (nd_region->num_lanes < nr_cpu_ids) {
struct nd_percpu_lane *ndl_lock, *ndl_count;
lane = cpu % nd_region->num_lanes;
ndl_count = per_cpu_ptr(nd_region->lane, cpu);
ndl_lock = per_cpu_ptr(nd_region->lane, lane);
if (ndl_count->count++ == 0)
spin_lock(&ndl_lock->lock);
} else
lane = cpu;
return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
if (nd_region->num_lanes < nr_cpu_ids) {
unsigned int cpu = get_cpu();
struct nd_percpu_lane *ndl_lock, *ndl_count;
ndl_count = per_cpu_ptr(nd_region->lane, cpu);
ndl_lock = per_cpu_ptr(nd_region->lane, lane);
if (--ndl_count->count == 0)
spin_unlock(&ndl_lock->lock);
put_cpu();
}
put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
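/*
* Usage sketch (assumed caller, e.g. a BTT-style I/O path, not from this
* file): media access that needs a log slot brackets itself with a lane:
*
*	lane = nd_region_acquire_lane(nd_region);
*	... update the per-lane BTT log / issue the write ...
*	nd_region_release_lane(nd_region, lane);
*/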
/*
* PowerPC requires this alignment for memremap_pages(). All other archs
* should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
*/
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M
static unsigned long default_align(struct nd_region *nd_region)
{
unsigned long align;
u32 remainder;
int mappings;
align = MEMREMAP_COMPAT_ALIGN_MAX;
if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
align = PAGE_SIZE;
mappings = max_t(u16, 1, nd_region->ndr_mappings);
div_u64_rem(align, mappings, &remainder);
if (remainder)
align *= mappings;
return align;
}
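/*
* Worked example (hypothetical region) for default_align(): with
* ndr_size >= 16M and 3 mappings, 16M % 3 != 0, so the alignment is
* bumped to 16M * 3 = 48M to keep it evenly divisible by the
* interleave width.
*/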
static struct lock_class_key nvdimm_region_key;
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc,
const struct device_type *dev_type, const char *caller)
{
struct nd_region *nd_region;
struct device *dev;
unsigned int i;
int ro = 0;
for (i = 0; i < ndr_desc->num_mappings; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
if ((mapping->start | mapping->size) % PAGE_SIZE) {
dev_err(&nvdimm_bus->dev,
"%s: %s mapping%d is not %ld aligned\n",
caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
return NULL;
}
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
}
nd_region =
kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
GFP_KERNEL);
if (!nd_region)
return NULL;
/* CXL pre-assigns memregion ids before creating nvdimm regions */
if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
nd_region->id = ndr_desc->memregion;
} else {
nd_region->id = memregion_alloc(GFP_KERNEL);
if (nd_region->id < 0)
goto err_id;
}
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
goto err_percpu;
for (i = 0; i < nr_cpu_ids; i++) {
struct nd_percpu_lane *ndl;
ndl = per_cpu_ptr(nd_region->lane, i);
spin_lock_init(&ndl->lock);
ndl->count = 0;
}
for (i = 0; i < ndr_desc->num_mappings; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
nd_region->mapping[i].nvdimm = nvdimm;
nd_region->mapping[i].start = mapping->start;
nd_region->mapping[i].size = mapping->size;
nd_region->mapping[i].position = mapping->position;
INIT_LIST_HEAD(&nd_region->mapping[i].labels);
mutex_init(&nd_region->mapping[i].lock);
get_device(&nvdimm->dev);
}
nd_region->ndr_mappings = ndr_desc->num_mappings;
nd_region->provider_data = ndr_desc->provider_data;
nd_region->nd_set = ndr_desc->nd_set;
nd_region->num_lanes = ndr_desc->num_lanes;
nd_region->flags = ndr_desc->flags;
nd_region->ro = ro;
nd_region->numa_node = ndr_desc->numa_node;
nd_region->target_node = ndr_desc->target_node;
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
ida_init(&nd_region->pfn_ida);
ida_init(&nd_region->dax_ida);
dev = &nd_region->dev;
dev_set_name(dev, "region%d", nd_region->id);
dev->parent = &nvdimm_bus->dev;
dev->type = dev_type;
dev->groups = ndr_desc->attr_groups;
dev->of_node = ndr_desc->of_node;
nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region->ndr_start = ndr_desc->res->start;
nd_region->align = default_align(nd_region);
if (ndr_desc->flush)
nd_region->flush = ndr_desc->flush;
else
nd_region->flush = NULL;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &nvdimm_region_key);
nd_device_register(dev);
return nd_region;
err_percpu:
if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
memregion_free(nd_region->id);
err_id:
kfree(nd_region);
return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc)
{
ndr_desc->num_lanes = ND_MAX_LANES;
return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc)
{
ndr_desc->num_lanes = ND_MAX_LANES;
return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
void nvdimm_region_delete(struct nd_region *nd_region)
{
if (nd_region)
nd_device_unregister(&nd_region->dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_region_delete);
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
if (!nd_region->flush)
rc = generic_nvdimm_flush(nd_region);
else {
if (nd_region->flush(nd_region, bio))
rc = -EIO;
}
return rc;
}
/**
* generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media
* @nd_region: interleaved pmem region
*/
int generic_nvdimm_flush(struct nd_region *nd_region)
{
struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
int i, idx;
/*
* Try to encourage some diversity in flush hint addresses
* across cpus assuming a limited number of flush hints.
*/
idx = this_cpu_read(flush_idx);
idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
/*
* The pmem_wmb() is needed to 'sfence' all
* previous writes such that they are architecturally visible for
* the platform buffer flush. Note that we've already arranged for pmem
* writes to avoid the cache via memcpy_flushcache(). The final
* wmb() ensures ordering for the NVDIMM flush write.
*/
pmem_wmb();
for (i = 0; i < nd_region->ndr_mappings; i++)
if (ndrd_get_flush_wpq(ndrd, i, 0))
writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
wmb();
return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
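/*
* Note on the flush_idx hashing above (illustrative): hash_32(pid + idx, 8)
* yields a value in [0, 255], so each cpu's flush_idx walks a
* pseudo-random sequence; the ndrd_get_flush_wpq() accessor is assumed
* to mask the index down to the number of mapped hints, spreading
* concurrent flushers across distinct hint addresses.
*/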
/**
* nvdimm_has_flush - determine write flushing requirements
* @nd_region: interleaved pmem region
*
* Returns 1 if writes require flushing
* Returns 0 if writes do not require flushing
* Returns -ENXIO if flushing capability can not be determined
*/
int nvdimm_has_flush(struct nd_region *nd_region)
{
int i;
/* no nvdimm or pmem api == flushing capability unknown */
if (nd_region->ndr_mappings == 0
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
/* Test if an explicit flush function is defined */
if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
return 1;
/* Test if any flush hints for the region are available */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
/* flush hints present / available */
if (nvdimm->num_flush)
return 1;
}
/*
* The platform defines dimm devices without hints nor explicit flush,
* assume platform persistence mechanism like ADR
*/
return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
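/*
* Usage sketch: the deep_flush attribute keys off this tri-state, as
* region_visible() earlier in this file does:
*
*	has_flush = nvdimm_has_flush(nd_region);
*	if (has_flush == 1)	// hints/callback present: writable
*	else if (has_flush == 0)	// ADR-style persistence: read-only
*	else			// capability unknown: attribute hidden
*/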
int nvdimm_has_cache(struct nd_region *nd_region)
{
return is_nd_pmem(&nd_region->dev) &&
!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
bool is_nvdimm_sync(struct nd_region *nd_region)
{
if (is_nd_volatile(&nd_region->dev))
return true;
return is_nd_pmem(&nd_region->dev) &&
!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);
struct conflict_context {
struct nd_region *nd_region;
resource_size_t start, size;
};
static int region_conflict(struct device *dev, void *data)
{
struct nd_region *nd_region;
struct conflict_context *ctx = data;
resource_size_t res_end, region_end, region_start;
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
if (nd_region == ctx->nd_region)
return 0;
res_end = ctx->start + ctx->size;
region_start = nd_region->ndr_start;
region_end = region_start + nd_region->ndr_size;
if (ctx->start >= region_start && ctx->start < region_end)
return -EBUSY;
if (res_end > region_start && res_end <= region_end)
return -EBUSY;
return 0;
}
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
resource_size_t size)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
struct conflict_context ctx = {
.nd_region = nd_region,
.start = start,
.size = size,
};
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
MODULE_IMPORT_NS(DEVMEM);
| linux-master | drivers/nvdimm/region_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
*/
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
static void nd_pfn_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
kfree(nd_pfn->uuid);
kfree(nd_pfn);
}
struct nd_pfn *to_nd_pfn(struct device *dev)
{
struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
WARN_ON(!is_nd_pfn(dev));
return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
switch (nd_pfn->mode) {
case PFN_MODE_RAM:
return sprintf(buf, "ram\n");
case PFN_MODE_PMEM:
return sprintf(buf, "pmem\n");
default:
return sprintf(buf, "none\n");
}
}
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
else {
size_t n = len - 1;
if (strncmp(buf, "pmem\n", n) == 0
|| strncmp(buf, "pmem", n) == 0) {
nd_pfn->mode = PFN_MODE_PMEM;
} else if (strncmp(buf, "ram\n", n) == 0
|| strncmp(buf, "ram", n) == 0)
nd_pfn->mode = PFN_MODE_RAM;
else if (strncmp(buf, "none\n", n) == 0
|| strncmp(buf, "none", n) == 0)
nd_pfn->mode = PFN_MODE_NONE;
else
rc = -EINVAL;
}
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
return sprintf(buf, "%ld\n", nd_pfn->align);
}
static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
alignments[0] = PAGE_SIZE;
if (has_transparent_hugepage()) {
alignments[1] = HPAGE_PMD_SIZE;
if (has_transparent_pud_hugepage())
alignments[2] = HPAGE_PUD_SIZE;
}
return alignments;
}
/*
* Use a PMD mapping, if supported, as the default alignment.
*/
static unsigned long nd_pfn_default_alignment(void)
{
if (has_transparent_hugepage())
return HPAGE_PMD_SIZE;
return PAGE_SIZE;
}
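/*
* Example (architecture-dependent): on x86-64 with transparent hugepages
* enabled this returns HPAGE_PMD_SIZE (2M); without THP support it falls
* back to PAGE_SIZE (4K).
*/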
static ssize_t align_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
ssize_t rc;
device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments(aligns));
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
if (nd_pfn->uuid)
return sprintf(buf, "%pUb\n", nd_pfn->uuid);
return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
rc = sprintf(buf, "%s\n", nd_pfn->ndns
? dev_name(&nd_pfn->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RW(namespace);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
struct nd_namespace_common *ndns = nd_pfn->ndns;
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+ start_pad + offset);
} else {
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
struct nd_namespace_common *ndns = nd_pfn->ndns;
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%llu\n", (unsigned long long)
resource_size(&nsio->res) - start_pad
- end_trunc - offset);
} else {
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(size);
static ssize_t supported_alignments_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
return nd_size_select_show(0,
nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);
static struct attribute *nd_pfn_attributes[] = {
&dev_attr_mode.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_size.attr,
&dev_attr_supported_alignments.attr,
NULL,
};
static struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
};
const struct attribute_group *nd_pfn_attribute_groups[] = {
&nd_pfn_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
};
static const struct device_type nd_pfn_device_type = {
.name = "nd_pfn",
.release = nd_pfn_release,
.groups = nd_pfn_attribute_groups,
};
bool is_nd_pfn(struct device *dev)
{
return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
static struct lock_class_key nvdimm_pfn_key;
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
struct device *dev;
if (!nd_pfn)
return NULL;
nd_pfn->mode = PFN_MODE_NONE;
nd_pfn->align = nd_pfn_default_alignment();
dev = &nd_pfn->dev;
device_initialize(&nd_pfn->dev);
lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
dev_name(ndns->claim));
put_device(dev);
return NULL;
}
return dev;
}
static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
struct device *dev;
nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
if (!nd_pfn)
return NULL;
nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
if (nd_pfn->id < 0) {
kfree(nd_pfn);
return NULL;
}
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
dev->type = &nd_pfn_device_type;
dev->parent = &nd_region->dev;
return nd_pfn;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
struct device *dev;
if (!is_memory(&nd_region->dev))
return NULL;
nd_pfn = nd_pfn_alloc(nd_region);
dev = nd_pfn_devinit(nd_pfn, NULL);
nd_device_register(dev);
return dev;
}
/*
* nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
* space associated with the namespace. If the memmap is set to DRAM, then
* this is a no-op. Since the memmap area is freshly initialized during
* probe, we have an opportunity to clear any badblocks in this area.
*/
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
struct nd_namespace_common *ndns = nd_pfn->ndns;
void *zero_page = page_address(ZERO_PAGE(0));
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
int num_bad, meta_num, rc, bb_present;
sector_t first_bad, meta_start;
struct nd_namespace_io *nsio;
if (nd_pfn->mode != PFN_MODE_PMEM)
return 0;
nsio = to_nd_namespace_io(&ndns->dev);
meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
/*
* re-enable the namespace with the correct size so that we can access
* the device memmap area.
*/
devm_namespace_disable(&nd_pfn->dev, ndns);
rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
if (rc)
return rc;
do {
unsigned long zero_len;
u64 nsoff;
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
PAGE_SIZE);
zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
while (zero_len) {
unsigned long chunk = min(zero_len, PAGE_SIZE);
rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
chunk, 0);
if (rc)
break;
zero_len -= chunk;
nsoff += chunk;
}
if (rc) {
dev_err(&nd_pfn->dev,
"error clearing %x badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
}
} while (bb_present);
return 0;
}
static bool nd_supported_alignment(unsigned long align)
{
int i;
unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };
if (align == 0)
return false;
nd_pfn_supported_alignments(supported);
for (i = 0; supported[i]; i++)
if (align == supported[i])
return true;
return false;
}
/**
* nd_pfn_validate - read and validate info-block
* @nd_pfn: fsdax namespace runtime state / properties
* @sig: 'devdax' or 'fsdax' signature
*
* Upon return the info-block buffer contents (->pfn_sb) are
* indeterminate when validation fails, and a coherent info-block
* otherwise.
*/
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
struct resource *res;
enum nd_pfn_mode mode;
resource_size_t res_size;
struct nd_namespace_io *nsio;
unsigned long align, start_pad, end_trunc;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);
if (!pfn_sb || !ndns)
return -ENODEV;
if (!is_memory(nd_pfn->dev.parent))
return -ENODEV;
if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
return -ENXIO;
if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
return -ENODEV;
checksum = le64_to_cpu(pfn_sb->checksum);
pfn_sb->checksum = 0;
if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
return -ENODEV;
pfn_sb->checksum = cpu_to_le64(checksum);
if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
return -ENODEV;
if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
pfn_sb->start_pad = 0;
pfn_sb->end_trunc = 0;
}
if (__le16_to_cpu(pfn_sb->version_minor) < 2)
pfn_sb->align = 0;
if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
pfn_sb->page_struct_size = cpu_to_le16(64);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
}
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
case PFN_MODE_PMEM:
break;
default:
return -ENXIO;
}
align = le32_to_cpu(pfn_sb->align);
offset = le64_to_cpu(pfn_sb->dataoff);
start_pad = le32_to_cpu(pfn_sb->start_pad);
end_trunc = le32_to_cpu(pfn_sb->end_trunc);
if (align == 0)
align = 1UL << ilog2(offset);
mode = le32_to_cpu(pfn_sb->mode);
if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
(mode == PFN_MODE_PMEM)) {
dev_err(&nd_pfn->dev,
"init failed, page size mismatch %d\n",
le32_to_cpu(pfn_sb->page_size));
return -EOPNOTSUPP;
}
if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
(mode == PFN_MODE_PMEM)) {
dev_err(&nd_pfn->dev,
"init failed, struct page size mismatch %d\n",
le16_to_cpu(pfn_sb->page_struct_size));
return -EOPNOTSUPP;
}
/*
* Check whether we support the alignment. For DAX, if the
* superblock alignment does not match, we won't initialize
* the device.
*/
if (!nd_supported_alignment(align) &&
!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
"%ld:%ld\n", nd_pfn->align, align);
return -EOPNOTSUPP;
}
if (!nd_pfn->uuid) {
/*
* When probing a namespace via nd_pfn_probe() the uuid
* is NULL (see: nd_pfn_devinit()), so initialize settings
* from the pfn_sb.
*/
nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
if (!nd_pfn->uuid)
return -ENOMEM;
nd_pfn->align = align;
nd_pfn->mode = mode;
} else {
/*
* When probing a pfn / dax instance we validate the
* live settings against the pfn_sb
*/
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -ENODEV;
/*
* If the uuid validates, but other settings mismatch
* return EINVAL because userspace has managed to change
* the configuration without specifying new
* identification.
*/
if (nd_pfn->align != align || nd_pfn->mode != mode) {
dev_err(&nd_pfn->dev,
"init failed, settings mismatch\n");
dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
nd_pfn->align, align, nd_pfn->mode,
mode);
return -EOPNOTSUPP;
}
}
if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
align, nvdimm_namespace_capacity(ndns));
return -EOPNOTSUPP;
}
/*
* These warnings are verbose because they can only trigger in
* the case where the physical address alignment of the
* namespace has changed since the pfn superblock was
* established.
*/
nsio = to_nd_namespace_io(&ndns->dev);
res = &nsio->res;
res_size = resource_size(res);
if (offset >= res_size) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
dev_name(&ndns->dev));
return -EOPNOTSUPP;
}
if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
return -EOPNOTSUPP;
}
if (!IS_ALIGNED(res->start + start_pad, memremap_compat_align())) {
dev_err(&nd_pfn->dev, "resource start misaligned\n");
return -EOPNOTSUPP;
}
if (!IS_ALIGNED(res->end + 1 - end_trunc, memremap_compat_align())) {
dev_err(&nd_pfn->dev, "resource end misaligned\n");
return -EOPNOTSUPP;
}
if (offset >= (res_size - start_pad - end_trunc)) {
dev_err(&nd_pfn->dev, "bad offset with small namespace\n");
return -EOPNOTSUPP;
}
return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
struct nd_pfn *nd_pfn;
struct device *pfn_dev;
struct nd_pfn_sb *pfn_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
if (ndns->force_raw)
return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_PFN:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev);
nd_pfn = nd_pfn_alloc(nd_region);
pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
nd_pfn = to_nd_pfn(pfn_dev);
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb) {
/* bail out before validation if the info-block buffer allocation fails */
nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
return -ENOMEM;
}
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) {
nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
} else
nd_device_register(pfn_dev);
return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
/*
* We hotplug memory at sub-section granularity, pad the reserved area
* from the previous section base to the namespace base address.
*/
static unsigned long init_altmap_base(resource_size_t base)
{
unsigned long base_pfn = PHYS_PFN(base);
return SUBSECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
{
unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
return reserve;
}
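/*
* Worked example (hypothetical addresses): for a namespace based at
* 0x10010000 with 4K pages and a 2M subsection, base_pfn = 0x10010 and
* SUBSECTION_ALIGN_DOWN(base_pfn) = 0x10000, so the altmap base backs up
* to the subsection start and the reserve grows by the 0x10 intervening
* pages on top of the info-block reserve.
*/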
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
struct range *range = &pgmap->range;
struct vmem_altmap *altmap = &pgmap->altmap;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
u32 reserve = nd_info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
resource_size_t end = nsio->res.end - end_trunc;
struct vmem_altmap __altmap = {
.base_pfn = init_altmap_base(base),
.reserve = init_altmap_reserve(base),
.end_pfn = PHYS_PFN(end),
};
*range = (struct range) {
.start = nsio->res.start + start_pad,
.end = nsio->res.end - end_trunc,
};
pgmap->nr_range = 1;
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
le64_to_cpu(nd_pfn->pfn_sb->npfns),
nd_pfn->npfns);
memcpy(altmap, &__altmap, sizeof(*altmap));
altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
pgmap->flags |= PGMAP_ALTMAP_VALID;
} else
return -ENXIO;
return 0;
}
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t start, size;
struct nd_region *nd_region;
unsigned long npfns, align;
u32 end_trunc;
struct nd_pfn_sb *pfn_sb;
phys_addr_t offset;
const char *sig;
u64 checksum;
int rc;
pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;
nd_pfn->pfn_sb = pfn_sb;
if (is_nd_dax(&nd_pfn->dev))
sig = DAX_SIG;
else
sig = PFN_SIG;
rc = nd_pfn_validate(nd_pfn, sig);
if (rc == 0)
return nd_pfn_clear_memmap_errors(nd_pfn);
if (rc != -ENODEV)
return rc;
/* no info block, do init */
memset(pfn_sb, 0, sizeof(*pfn_sb));
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
"%s is read-only, unable to init metadata\n",
dev_name(&nd_region->dev));
return -ENXIO;
}
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
align = max(nd_pfn->align, memremap_compat_align());
/*
* When @start is misaligned fail namespace creation. See
* the 'struct nd_pfn_sb' commentary on why ->start_pad is not
* an option.
*/
if (!IS_ALIGNED(start, memremap_compat_align())) {
dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
dev_name(&ndns->dev), &start,
memremap_compat_align());
return -EINVAL;
}
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
* Also make sure size of struct page is less than
* MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
* face of production kernel configurations that reduce the
* 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
* kernel configurations that increase the 'struct page' size
* above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
* for continuing with the capacity that will be wasted when
* reverting to a production kernel configuration. Otherwise,
* those configurations are blocked by default.
*/
if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
if (page_struct_override)
page_map_size = sizeof(struct page) * npfns;
else {
dev_err(&nd_pfn->dev,
"Memory debug options prevent using pmem for the page map\n");
return -EINVAL;
}
}
offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
return -ENXIO;
if (offset >= (size - end_trunc)) {
/* This results in zero size devices */
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
dev_name(&ndns->dev));
return -ENXIO;
}
npfns = PHYS_PFN(size - offset - end_trunc);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
pfn_sb->version_minor = cpu_to_le16(4);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
else
pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
rc = nd_pfn_clear_memmap_errors(nd_pfn);
if (rc)
return rc;
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
/*
* Determine the effective resource range and vmem_altmap from an nd_pfn
* instance.
*/
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
int rc;
if (!nd_pfn->uuid || !nd_pfn->ndns)
return -ENODEV;
rc = nd_pfn_init(nd_pfn);
if (rc)
return rc;
/* we need a valid pfn_sb before we can init a dev_pagemap */
return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
| linux-master | drivers/nvdimm/pfn_devs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "label.h"
#include "nd.h"
static int nvdimm_probe(struct device *dev)
{
struct nvdimm_drvdata *ndd;
int rc;
rc = nvdimm_security_setup_events(dev);
if (rc < 0) {
dev_err(dev, "security event setup failed: %d\n", rc);
return rc;
}
rc = nvdimm_check_config_data(dev);
if (rc) {
/* not required for non-aliased nvdimm, ex. NVDIMM-N */
if (rc == -ENOTTY)
rc = 0;
return rc;
}
/*
* The locked status bit reflects explicit status codes from the
* label reading commands; revalidate it each time the driver is
* activated and re-reads the label area.
*/
nvdimm_clear_locked(dev);
ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
if (!ndd)
return -ENOMEM;
dev_set_drvdata(dev, ndd);
ndd->dpa.name = dev_name(dev);
ndd->ns_current = -1;
ndd->ns_next = -1;
ndd->dpa.start = 0;
ndd->dpa.end = -1;
ndd->dev = dev;
get_device(dev);
kref_init(&ndd->kref);
/*
* Attempt to unlock, if the DIMM supports security commands,
* otherwise the locked indication is determined by explicit
* status codes from the label reading commands.
*/
rc = nvdimm_security_unlock(dev);
if (rc < 0)
dev_dbg(dev, "failed to unlock dimm: %d\n", rc);
/*
* EACCES failures reading the namespace label-area-properties
* are interpreted as the DIMM capacity being locked but the
* namespace labels themselves being accessible.
*/
rc = nvdimm_init_nsarea(ndd);
if (rc == -EACCES) {
/*
* See nvdimm_namespace_common_probe() where we fail to
* allow namespaces to probe while the DIMM is locked,
* but we do allow for namespace enumeration.
*/
nvdimm_set_locked(dev);
rc = 0;
}
if (rc)
goto err;
/*
* EACCES failures reading the namespace label-data are
* interpreted as the label area being locked in addition to the
* DIMM capacity. We fail the dimm probe to prevent regions from
* attempting to parse the label area.
*/
rc = nd_label_data_init(ndd);
if (rc == -EACCES)
nvdimm_set_locked(dev);
if (rc)
goto err;
dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);
nvdimm_bus_lock(dev);
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
nvdimm_set_labeling(dev);
}
nvdimm_bus_unlock(dev);
if (rc)
goto err;
return 0;
err:
put_ndd(ndd);
return rc;
}
static void nvdimm_remove(struct device *dev)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
nvdimm_bus_lock(dev);
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
put_ndd(ndd);
}
static struct nd_device_driver nvdimm_driver = {
.probe = nvdimm_probe,
.remove = nvdimm_remove,
.drv = {
.name = "nvdimm",
},
.type = ND_DRIVER_DIMM,
};
int __init nvdimm_init(void)
{
return nd_driver_register(&nvdimm_driver);
}
void nvdimm_exit(void)
{
driver_unregister(&nvdimm_driver.drv);
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DIMM);
| linux-master | drivers/nvdimm/dimm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"
#define NVDIMM_BASE_KEY 0
#define NVDIMM_NEW_KEY 1
static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
static const char zero_key[NVDIMM_PASSPHRASE_LEN];
static void *key_data(struct key *key)
{
struct encrypted_key_payload *epayload = dereference_key_locked(key);
lockdep_assert_held_read(&key->sem);
return epayload->decrypted_data;
}
static void nvdimm_put_key(struct key *key)
{
if (!key)
return;
up_read(&key->sem);
key_put(key);
}
/*
* Retrieve kernel key for DIMM and request from user space if
 * necessary. The returned key is held for read and must be put via
 * nvdimm_put_key() before its usage goes out of scope.
*/
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
struct key *key = NULL;
static const char NVDIMM_PREFIX[] = "nvdimm:";
char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
struct device *dev = &nvdimm->dev;
sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
key = request_key(&key_type_encrypted, desc, "");
if (IS_ERR(key)) {
if (PTR_ERR(key) == -ENOKEY)
dev_dbg(dev, "request_key() found no key\n");
else
dev_dbg(dev, "request_key() upcall failed\n");
key = NULL;
} else {
struct encrypted_key_payload *epayload;
down_read(&key->sem);
epayload = dereference_key_locked(key);
if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
up_read(&key->sem);
key_put(key);
key = NULL;
}
}
return key;
}
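/*
 * Illustrative note (the userspace side is an assumption, not part of
 * this file): the description built above is "nvdimm:" + dimm_id, so a
 * matching encrypted key would typically be loaded with something like:
 *
 *   keyctl add encrypted "nvdimm:<dimm-id>" "load <blob>" @u
 *
 * which request_key() can then find in the kernel keyrings.
 */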
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
struct key **key)
{
*key = nvdimm_request_key(nvdimm);
if (!*key)
return zero_key;
return key_data(*key);
}
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
key_serial_t id, int subclass)
{
key_ref_t keyref;
struct key *key;
struct encrypted_key_payload *epayload;
struct device *dev = &nvdimm->dev;
keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyref))
return NULL;
key = key_ref_to_ptr(keyref);
if (key->type != &key_type_encrypted) {
key_put(key);
return NULL;
}
dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));
down_read_nested(&key->sem, subclass);
epayload = dereference_key_locked(key);
if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
up_read(&key->sem);
key_put(key);
key = NULL;
}
return key;
}
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
key_serial_t id, int subclass, struct key **key)
{
*key = NULL;
if (id == 0) {
if (subclass == NVDIMM_BASE_KEY)
return zero_key;
else
return NULL;
}
*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
if (!*key)
return NULL;
return key_data(*key);
}
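/*
 * Clarifying note: keyid 0 is special-cased above. For the base
 * (current) passphrase slot it means "use the default all-zeroes
 * passphrase" (zero_key); for a new-key slot there is no sensible
 * default, so NULL is returned and callers fail with -ENOKEY.
 */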
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
struct key *key;
int rc;
const void *data;
if (!nvdimm->sec.ops->change_key)
return -EOPNOTSUPP;
data = nvdimm_get_key_payload(nvdimm, &key);
/*
* Send the same key to the hardware as new and old key to
* verify that the key is good.
*/
rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
if (rc < 0) {
nvdimm_put_key(key);
return rc;
}
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return 0;
}
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key;
const void *data;
int rc;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
|| !nvdimm->sec.flags)
return -EIO;
/* cxl_test needs this to pre-populate the security state */
if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/* No need to go further if security is disabled */
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return 0;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
dev_dbg(dev, "Security operation in progress.\n");
return -EBUSY;
}
/*
* If the pre-OS has unlocked the DIMM, attempt to send the key
* from request_key() to the hardware for verification. Failure
* to revalidate the key against the hardware results in a
* freeze of the security configuration. I.e. if the OS does not
* have the key, security is being managed pre-OS.
*/
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
if (!key_revalidate)
return 0;
return nvdimm_key_revalidate(nvdimm);
} else
data = nvdimm_get_key_payload(nvdimm, &key);
rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
int nvdimm_security_unlock(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
int rc;
nvdimm_bus_lock(dev);
rc = __nvdimm_security_unlock(nvdimm);
nvdimm_bus_unlock(dev);
return rc;
}
static int check_security_state(struct nvdimm *nvdimm)
{
struct device *dev = &nvdimm->dev;
if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
dev_dbg(dev, "Incorrect security state: %#lx\n",
nvdimm->sec.flags);
return -EIO;
}
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
dev_dbg(dev, "Security operation in progress.\n");
return -EBUSY;
}
return 0;
}
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid,
enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key;
int rc;
const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.flags)
return -EOPNOTSUPP;
if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable)
return -EOPNOTSUPP;
if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
if (pass_type == NVDIMM_MASTER) {
rc = nvdimm->sec.ops->disable_master(nvdimm, data);
dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
} else {
rc = nvdimm->sec.ops->disable(nvdimm, data);
dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
}
nvdimm_put_key(key);
if (pass_type == NVDIMM_MASTER)
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
else
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
unsigned int new_keyid,
enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key, *newkey;
int rc;
const void *data, *newdata;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
NVDIMM_NEW_KEY, &newkey);
if (!newdata) {
nvdimm_put_key(key);
return -ENOKEY;
}
rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
dev_dbg(dev, "key: %d %d update%s: %s\n",
key_serial(key), key_serial(newkey),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
nvdimm_put_key(newkey);
nvdimm_put_key(key);
if (pass_type == NVDIMM_MASTER)
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
NVDIMM_MASTER);
else
nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
NVDIMM_USER);
return rc;
}
static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key = NULL;
int rc;
const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
&& pass_type == NVDIMM_MASTER) {
dev_dbg(dev,
"Attempt to secure erase in wrong master state.\n");
return -EOPNOTSUPP;
}
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key = NULL;
int rc;
const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
rc = nvdimm->sec.ops->overwrite(nvdimm, data);
if (rc == 0)
set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
if (rc == 0) {
set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
set_bit(NDD_WORK_PENDING, &nvdimm->flags);
set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
/*
		 * Make sure we don't lose the device while the overwrite
		 * query is running.
*/
get_device(dev);
queue_delayed_work(system_wq, &nvdimm->dwork, 0);
}
return rc;
}
static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
int rc;
unsigned int tmo;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
/*
* Abort and release device if we no longer have the overwrite
* flag set. It means the work has been canceled.
*/
if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
return;
tmo = nvdimm->sec.overwrite_tmo;
if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
|| !nvdimm->sec.flags)
return;
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
if (rc == -EBUSY) {
/* setup delayed work again */
tmo += 10;
queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
return;
}
if (rc < 0)
dev_dbg(&nvdimm->dev, "overwrite failed\n");
else
dev_dbg(&nvdimm->dev, "overwrite completed\n");
/*
* Mark the overwrite work done and update dimm security flags,
* then send a sysfs event notification to wake up userspace
	 * poll threads so they pick up the changed state.
*/
nvdimm->sec.overwrite_tmo = 0;
clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
if (nvdimm->sec.overwrite_state)
sysfs_notify_dirent(nvdimm->sec.overwrite_state);
put_device(&nvdimm->dev);
}
void nvdimm_security_overwrite_query(struct work_struct *work)
{
struct nvdimm *nvdimm =
container_of(work, typeof(*nvdimm), dwork.work);
nvdimm_bus_lock(&nvdimm->dev);
__nvdimm_security_overwrite_query(nvdimm);
nvdimm_bus_unlock(&nvdimm->dev);
}
#define OPS \
C( OP_FREEZE, "freeze", 1), \
C( OP_DISABLE, "disable", 2), \
C( OP_DISABLE_MASTER, "disable_master", 2), \
C( OP_UPDATE, "update", 3), \
C( OP_ERASE, "erase", 2), \
C( OP_OVERWRITE, "overwrite", 2), \
C( OP_MASTER_UPDATE, "master_update", 3), \
C( OP_MASTER_ERASE, "master_erase", 2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
const char *name;
int args;
} ops[] = { OPS };
#undef C
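/*
 * Explanatory note: OPS is an "X-macro" list. C() is redefined before
 * each expansion so the single list yields both the enum of operation
 * ids and the parallel name/argument-count table. Roughly equivalent
 * hand-written code (sketch) would be:
 *
 *   enum nvdimmsec_op_ids { OP_FREEZE, OP_DISABLE, ... };
 *   static struct { const char *name; int args; } ops[] = {
 *           { "freeze", 1 }, { "disable", 2 }, ...
 *   };
 */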
#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
ssize_t rc;
char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
nkeystr[KEY_ID_SIZE+1];
unsigned int key, newkey;
int i;
rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
" %"__stringify(KEY_ID_SIZE)"s"
" %"__stringify(KEY_ID_SIZE)"s",
cmd, keystr, nkeystr);
if (rc < 1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ops); i++)
if (sysfs_streq(cmd, ops[i].name))
break;
if (i >= ARRAY_SIZE(ops))
return -EINVAL;
if (ops[i].args > 1)
rc = kstrtouint(keystr, 0, &key);
if (rc >= 0 && ops[i].args > 2)
rc = kstrtouint(nkeystr, 0, &newkey);
if (rc < 0)
return rc;
if (i == OP_FREEZE) {
dev_dbg(dev, "freeze\n");
rc = nvdimm_security_freeze(nvdimm);
} else if (i == OP_DISABLE) {
dev_dbg(dev, "disable %u\n", key);
rc = security_disable(nvdimm, key, NVDIMM_USER);
} else if (i == OP_DISABLE_MASTER) {
dev_dbg(dev, "disable_master %u\n", key);
rc = security_disable(nvdimm, key, NVDIMM_MASTER);
} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
? NVDIMM_USER : NVDIMM_MASTER);
} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
dev_dbg(dev, "%s %u\n", ops[i].name, key);
if (atomic_read(&nvdimm->busy)) {
dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
return -EBUSY;
}
rc = security_erase(nvdimm, key, i == OP_ERASE
? NVDIMM_USER : NVDIMM_MASTER);
} else if (i == OP_OVERWRITE) {
dev_dbg(dev, "overwrite %u\n", key);
if (atomic_read(&nvdimm->busy)) {
dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
return -EBUSY;
}
rc = security_overwrite(nvdimm, key);
} else
return -EINVAL;
if (rc == 0)
rc = len;
return rc;
}
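/*
 * Usage sketch (illustrative; the exact sysfs path is an assumption):
 * the store format is "<op> [keyid] [new keyid]", so for a DIMM device
 * nmemX whose "security" attribute is backed by this routine:
 *
 *   echo "update 27 28" > /sys/bus/nd/devices/nmemX/security
 *   echo "overwrite 27" > /sys/bus/nd/devices/nmemX/security
 *
 * where 27 and 28 are serial numbers of encrypted-type kernel keys.
 */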
| linux-master | drivers/nvdimm/security.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Block Translation Table
* Copyright (c) 2014-2015, Intel Corporation.
*/
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"
enum log_ent_request {
LOG_NEW_ENT = 0,
LOG_OLD_ENT
};
static struct device *to_dev(struct arena_info *arena)
{
return &arena->nd_btt->dev;
}
static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
return offset + nd_btt->initial_offset;
}
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
void *buf, size_t n, unsigned long flags)
{
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
/* arena offsets may be shifted from the base of the device */
offset = adjust_initial_offset(nd_btt, offset);
return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}
static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
void *buf, size_t n, unsigned long flags)
{
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
/* arena offsets may be shifted from the base of the device */
offset = adjust_initial_offset(nd_btt, offset);
return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
int ret;
/*
* infooff and info2off should always be at least 512B aligned.
* We rely on that to make sure rw_bytes does error clearing
* correctly, so make sure that is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
"arena->infooff: %#llx is unaligned\n", arena->infooff);
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
"arena->info2off: %#llx is unaligned\n", arena->info2off);
ret = arena_write_bytes(arena, arena->info2off, super,
sizeof(struct btt_sb), 0);
if (ret)
return ret;
return arena_write_bytes(arena, arena->infooff, super,
sizeof(struct btt_sb), 0);
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
return arena_read_bytes(arena, arena->infooff, super,
sizeof(struct btt_sb), 0);
}
/*
* 'raw' version of btt_map write
* Assumptions:
* mapping is in little-endian
* mapping contains 'E' and 'Z' flags as desired
*/
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
unsigned long flags)
{
u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
if (unlikely(lba >= arena->external_nlba))
dev_err_ratelimited(to_dev(arena),
"%s: lba %#x out of range (max: %#x)\n",
__func__, lba, arena->external_nlba);
return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
u32 ze;
__le32 mapping_le;
/*
* This 'mapping' is supposed to be just the LBA mapping, without
* any flags set, so strip the flag bits.
*/
mapping = ent_lba(mapping);
ze = (z_flag << 1) + e_flag;
switch (ze) {
case 0:
/*
* We want to set neither of the Z or E flags, and
* in the actual layout, this means setting the bit
* positions of both to '1' to indicate a 'normal'
* map entry
*/
mapping |= MAP_ENT_NORMAL;
break;
case 1:
mapping |= (1 << MAP_ERR_SHIFT);
break;
case 2:
mapping |= (1 << MAP_TRIM_SHIFT);
break;
default:
/*
* The case where Z and E are both sent in as '1' could be
* construed as a valid 'normal' case, but we decide not to,
* to avoid confusion
*/
dev_err_ratelimited(to_dev(arena),
"Invalid use of Z and E flags\n");
return -EIO;
}
mapping_le = cpu_to_le32(mapping);
return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}
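/*
 * Summary of the on-media map entry flag encoding, as implied by the
 * switch statements in the write helper above and the read helper
 * below:
 *
 *   Z E  meaning
 *   0 0  initial state, identity mapping (postmap == premap)
 *   0 1  error flag set
 *   1 0  zero/trim flag set
 *   1 1  'normal' entry carrying a plain LBA mapping
 */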
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
int *trim, int *error, unsigned long rwb_flags)
{
int ret;
__le32 in;
u32 raw_mapping, postmap, ze, z_flag, e_flag;
u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
if (unlikely(lba >= arena->external_nlba))
dev_err_ratelimited(to_dev(arena),
"%s: lba %#x out of range (max: %#x)\n",
__func__, lba, arena->external_nlba);
ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
if (ret)
return ret;
raw_mapping = le32_to_cpu(in);
z_flag = ent_z_flag(raw_mapping);
e_flag = ent_e_flag(raw_mapping);
ze = (z_flag << 1) + e_flag;
postmap = ent_lba(raw_mapping);
/* Reuse the {z,e}_flag variables for *trim and *error */
z_flag = 0;
e_flag = 0;
switch (ze) {
case 0:
/* Initial state. Return postmap = premap */
*mapping = lba;
break;
case 1:
*mapping = postmap;
e_flag = 1;
break;
case 2:
*mapping = postmap;
z_flag = 1;
break;
case 3:
*mapping = postmap;
break;
default:
return -EIO;
}
if (trim)
*trim = z_flag;
if (error)
*error = e_flag;
return ret;
}
static int btt_log_group_read(struct arena_info *arena, u32 lane,
struct log_group *log)
{
return arena_read_bytes(arena,
arena->logoff + (lane * LOG_GRP_SIZE), log,
LOG_GRP_SIZE, 0);
}
static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
int idx)
{
char dirname[32];
struct dentry *d;
	/* If, for some reason, the parent bttN was not created, exit */
if (!parent)
return;
snprintf(dirname, 32, "arena%d", idx);
d = debugfs_create_dir(dirname, parent);
if (IS_ERR_OR_NULL(d))
return;
a->debugfs_dir = d;
debugfs_create_x64("size", S_IRUGO, d, &a->size);
debugfs_create_x64("external_lba_start", S_IRUGO, d,
&a->external_lba_start);
debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
debugfs_create_u32("internal_lbasize", S_IRUGO, d,
&a->internal_lbasize);
debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
debugfs_create_u32("external_lbasize", S_IRUGO, d,
&a->external_lbasize);
debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
{
int i = 0;
struct arena_info *arena;
btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
debugfs_root);
if (IS_ERR_OR_NULL(btt->debugfs_dir))
return;
list_for_each_entry(arena, &btt->arena_list, list) {
arena_debugfs_init(arena, btt->debugfs_dir, i);
i++;
}
}
static u32 log_seq(struct log_group *log, int log_idx)
{
return le32_to_cpu(log->ent[log_idx].seq);
}
/*
* This function accepts two log entries, and uses the
* sequence number to find the 'older' entry.
* It also updates the sequence number in this old entry to
* make it the 'new' one if the mark_flag is set.
* Finally, it returns which of the entries was the older one.
*
 * TODO: The logic feels a bit kludgy; make it better.
*/
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
int idx0 = a->log_index[0];
int idx1 = a->log_index[1];
int old;
/*
	 * The first time a log group is ever seen, the entry goes into [0];
	 * the next time, the following logic works out to put this
	 * (next) entry into [1].
*/
if (log_seq(log, idx0) == 0) {
log->ent[idx0].seq = cpu_to_le32(1);
return 0;
}
if (log_seq(log, idx0) == log_seq(log, idx1))
return -EINVAL;
if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
return -EINVAL;
if (log_seq(log, idx0) < log_seq(log, idx1)) {
if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
old = 0;
else
old = 1;
} else {
if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
old = 1;
else
old = 0;
}
return old;
}
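/*
 * Worked example (illustrative): sequence numbers cycle 1 -> 2 -> 3 -> 1,
 * with 0 reserved for "never written". For seqs (1, 2) the difference is
 * one, so slot 0 is older; for (1, 3) the pair has wrapped (3 -> 1), so
 * slot 1 holds the older entry. Equal sequence numbers, or a sum above 5
 * (which no unequal pair drawn from {1, 2, 3} can produce), indicate a
 * corrupt log and yield -EINVAL.
 */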
/*
* This function copies the desired (old/new) log entry into ent if
* it is not NULL. It returns the sub-slot number (0 or 1)
* where the desired log entry was found. Negative return values
* indicate errors.
*/
static int btt_log_read(struct arena_info *arena, u32 lane,
struct log_entry *ent, int old_flag)
{
int ret;
int old_ent, ret_ent;
struct log_group log;
ret = btt_log_group_read(arena, lane, &log);
if (ret)
return -EIO;
old_ent = btt_log_get_old(arena, &log);
if (old_ent < 0 || old_ent > 1) {
dev_err(to_dev(arena),
"log corruption (%d): lane %d seq [%d, %d]\n",
old_ent, lane, log.ent[arena->log_index[0]].seq,
log.ent[arena->log_index[1]].seq);
/* TODO set error state? */
return -EIO;
}
ret_ent = (old_flag ? old_ent : (1 - old_ent));
if (ent != NULL)
memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
return ret_ent;
}
/*
* This function commits a log entry to media
* It does _not_ prepare the freelist entry for the next write
* btt_flog_write is the wrapper for updating the freelist elements
*/
static int __btt_log_write(struct arena_info *arena, u32 lane,
u32 sub, struct log_entry *ent, unsigned long flags)
{
int ret;
u32 group_slot = arena->log_index[sub];
unsigned int log_half = LOG_ENT_SIZE / 2;
void *src = ent;
u64 ns_off;
ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
(group_slot * LOG_ENT_SIZE);
/* split the 16B write into atomic, durable halves */
ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
if (ret)
return ret;
ns_off += log_half;
src += log_half;
return arena_write_bytes(arena, ns_off, src, log_half, flags);
}
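/*
 * Clarifying note (assuming the log_entry layout declared in btt.h:
 * lba, old_map, new_map, seq -- four 4-byte fields): the second 8-byte
 * half carries the sequence number, so an entry only becomes the
 * "newest" one once the second write completes. A power failure between
 * the two halves leaves the old sequence number intact and the prior
 * entry still wins recovery.
 */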
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
struct log_entry *ent)
{
int ret;
ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
if (ret)
return ret;
/* prepare the next free entry */
arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
if (++(arena->freelist[lane].seq) == 4)
arena->freelist[lane].seq = 1;
if (ent_e_flag(le32_to_cpu(ent->old_map)))
arena->freelist[lane].has_err = 1;
arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
return ret;
}
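/*
 * Note: the wrap above keeps the in-core seq cycling 1 -> 2 -> 3 -> 1,
 * preserving 0 as the on-media "uninitialized" marker.
 */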
/*
* This function initializes the BTT map to the initial state, which is
* all-zeroes, and indicates an identity mapping
*/
static int btt_map_init(struct arena_info *arena)
{
int ret = -EINVAL;
void *zerobuf;
size_t offset = 0;
size_t chunk_size = SZ_2M;
size_t mapsize = arena->logoff - arena->mapoff;
zerobuf = kzalloc(chunk_size, GFP_KERNEL);
if (!zerobuf)
return -ENOMEM;
/*
* mapoff should always be at least 512B aligned. We rely on that to
* make sure rw_bytes does error clearing correctly, so make sure that
* is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
"arena->mapoff: %#llx is unaligned\n", arena->mapoff);
while (mapsize) {
size_t size = min(mapsize, chunk_size);
dev_WARN_ONCE(to_dev(arena), size < 512,
"chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
size, 0);
if (ret)
goto free;
offset += size;
mapsize -= size;
cond_resched();
}
free:
kfree(zerobuf);
return ret;
}
/*
* This function initializes the BTT log with 'fake' entries pointing
* to the initial reserved set of blocks as being free
*/
static int btt_log_init(struct arena_info *arena)
{
size_t logsize = arena->info2off - arena->logoff;
size_t chunk_size = SZ_4K, offset = 0;
struct log_entry ent;
void *zerobuf;
int ret;
u32 i;
zerobuf = kzalloc(chunk_size, GFP_KERNEL);
if (!zerobuf)
return -ENOMEM;
/*
* logoff should always be at least 512B aligned. We rely on that to
* make sure rw_bytes does error clearing correctly, so make sure that
* is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
"arena->logoff: %#llx is unaligned\n", arena->logoff);
while (logsize) {
size_t size = min(logsize, chunk_size);
dev_WARN_ONCE(to_dev(arena), size < 512,
"chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
size, 0);
if (ret)
goto free;
offset += size;
logsize -= size;
cond_resched();
}
for (i = 0; i < arena->nfree; i++) {
ent.lba = cpu_to_le32(i);
ent.old_map = cpu_to_le32(arena->external_nlba + i);
ent.new_map = cpu_to_le32(arena->external_nlba + i);
ent.seq = cpu_to_le32(LOG_SEQ_INIT);
ret = __btt_log_write(arena, i, 0, &ent, 0);
if (ret)
goto free;
}
free:
kfree(zerobuf);
return ret;
}
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
int ret = 0;
if (arena->freelist[lane].has_err) {
void *zero_page = page_address(ZERO_PAGE(0));
u32 lba = arena->freelist[lane].block;
u64 nsoff = to_namespace_offset(arena, lba);
unsigned long len = arena->sector_size;
mutex_lock(&arena->err_lock);
while (len) {
unsigned long chunk = min(len, PAGE_SIZE);
ret = arena_write_bytes(arena, nsoff, zero_page,
chunk, 0);
if (ret)
break;
len -= chunk;
nsoff += chunk;
if (len == 0)
arena->freelist[lane].has_err = 0;
}
mutex_unlock(&arena->err_lock);
}
return ret;
}
static int btt_freelist_init(struct arena_info *arena)
{
int new, ret;
struct log_entry log_new;
u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
if (!arena->freelist)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
/* old and new map entries with any flags stripped out */
log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
/* sub points to the next one to be overwritten */
arena->freelist[i].sub = 1 - new;
arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
arena->freelist[i].block = log_oldmap;
/*
* FIXME: if error clearing fails during init, we want to make
* the BTT read-only
*/
if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
!ent_normal(le32_to_cpu(log_new.old_map))) {
arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
"Unable to clear known errors\n");
}
/* This implies a newly created or untouched flog entry */
if (log_oldmap == log_newmap)
continue;
/* Check if map recovery is needed */
ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
NULL, NULL, 0);
if (ret)
return ret;
/*
* The map_entry from btt_read_map is stripped of any flag bits,
* so use the stripped out versions from the log as well for
* testing whether recovery is needed. For restoration, use the
* 'raw' version of the log entries as that captured what we
* were going to write originally.
*/
if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
/*
* Last transaction wrote the flog, but wasn't able
* to complete the map write. So fix up the map.
*/
ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
le32_to_cpu(log_new.new_map), 0, 0, 0);
if (ret)
return ret;
}
}
return 0;
}
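/*
 * Recovery example (illustrative): suppose a crash hit after the flog
 * recorded "lba L: old_map O -> new_map N" but before the corresponding
 * map update. On the next init, btt_map_read(L) still returns O; it
 * matches the logged old_map while differing from new_map, so the map
 * write is replayed with N, completing the interrupted transaction.
 */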
static bool ent_is_padding(struct log_entry *ent)
{
return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
&& (ent->seq == 0);
}
/*
* Detecting valid log indices: We read a log group (see the comments in btt.h
* for a description of a 'log_group' and its 'slots'), and iterate over its
* four slots. We expect that a padding slot will be all-zeroes, and use this
* to detect a padding slot vs. an actual entry.
*
* If a log_group is in the initial state, i.e. hasn't been used since the
* creation of this BTT layout, it will have three of the four slots with
* zeroes. We skip over these log_groups for the detection of log_index. If
* all log_groups are in the initial state (i.e. the BTT has never been
* written to), it is safe to assume the 'new format' of log entries in slots
* (0, 1).
*/
static int log_set_indices(struct arena_info *arena)
{
bool idx_set = false, initial_state = true;
int ret, log_index[2] = {-1, -1};
u32 i, j, next_idx = 0;
struct log_group log;
u32 pad_count = 0;
for (i = 0; i < arena->nfree; i++) {
ret = btt_log_group_read(arena, i, &log);
if (ret < 0)
return ret;
for (j = 0; j < 4; j++) {
if (!idx_set) {
if (ent_is_padding(&log.ent[j])) {
pad_count++;
continue;
} else {
/* Skip if index has been recorded */
if ((next_idx == 1) &&
(j == log_index[0]))
continue;
/* valid entry, record index */
log_index[next_idx] = j;
next_idx++;
}
if (next_idx == 2) {
/* two valid entries found */
idx_set = true;
} else if (next_idx > 2) {
/* too many valid indices */
return -ENXIO;
}
} else {
/*
* once the indices have been set, just verify
* that all subsequent log groups are either in
* their initial state or follow the same
* indices.
*/
if (j == log_index[0]) {
/* entry must be 'valid' */
if (ent_is_padding(&log.ent[j]))
return -ENXIO;
} else if (j == log_index[1]) {
;
/*
* log_index[1] can be padding if the
* lane never got used and it is still
* in the initial state (three 'padding'
* entries)
*/
} else {
/* entry must be invalid (padding) */
if (!ent_is_padding(&log.ent[j]))
return -ENXIO;
}
}
}
/*
* If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
* initial_state
*/
if (pad_count < 3)
initial_state = false;
pad_count = 0;
}
if (!initial_state && !idx_set)
return -ENXIO;
/*
* If all the entries in the log were in the initial state,
* assume new padding scheme
*/
if (initial_state)
log_index[1] = 1;
/*
* Only allow the known permutations of log/padding indices,
* i.e. (0, 1), and (0, 2)
*/
if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
; /* known index possibilities */
else {
dev_err(to_dev(arena), "Found an unknown padding scheme\n");
return -ENXIO;
}
arena->log_index[0] = log_index[0];
arena->log_index[1] = log_index[1];
dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
return 0;
}
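/*
 * Permutation note: with two valid entries per four-slot group, the
 * accepted layouts are (0, 1) -- both entries first, padding last --
 * and (0, 2), where each entry is followed by its own padding slot
 * (the layout produced by older BTT implementations).
 */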
static int btt_rtt_init(struct arena_info *arena)
{
arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
if (arena->rtt == NULL)
return -ENOMEM;
return 0;
}
static int btt_maplocks_init(struct arena_info *arena)
{
u32 i;
arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
GFP_KERNEL);
if (!arena->map_locks)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++)
spin_lock_init(&arena->map_locks[i].lock);
return 0;
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
size_t start, size_t arena_off)
{
struct arena_info *arena;
u64 logsize, mapsize, datasize;
u64 available = size;
arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
if (!arena)
return NULL;
arena->nd_btt = btt->nd_btt;
arena->sector_size = btt->sector_size;
mutex_init(&arena->err_lock);
if (!size)
return arena;
arena->size = size;
arena->external_lba_start = start;
arena->external_lbasize = btt->lbasize;
arena->internal_lbasize = roundup(arena->external_lbasize,
INT_LBASIZE_ALIGNMENT);
arena->nfree = BTT_DEFAULT_NFREE;
arena->version_major = btt->nd_btt->version_major;
arena->version_minor = btt->nd_btt->version_minor;
if (available % BTT_PG_SIZE)
available -= (available % BTT_PG_SIZE);
/* Two pages are reserved for the super block and its copy */
available -= 2 * BTT_PG_SIZE;
/* The log takes a fixed amount of space based on nfree */
logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
available -= logsize;
/* Calculate optimal split between map and data area */
arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
arena->internal_lbasize + MAP_ENT_SIZE);
arena->external_nlba = arena->internal_nlba - arena->nfree;
mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
datasize = available - mapsize;
/* 'Absolute' values, relative to start of storage space */
arena->infooff = arena_off;
arena->dataoff = arena->infooff + BTT_PG_SIZE;
arena->mapoff = arena->dataoff + datasize;
arena->logoff = arena->mapoff + mapsize;
arena->info2off = arena->logoff + logsize;
/* Default log indices are (0,1) */
arena->log_index[0] = 0;
arena->log_index[1] = 1;
return arena;
}
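/*
 * Resulting on-media layout for a freshly sized arena, per the offset
 * arithmetic above (sketch):
 *
 *   infooff  -> info block (BTT_PG_SIZE)
 *   dataoff  -> data area
 *   mapoff   -> map
 *   logoff   -> log
 *   info2off -> info block copy (BTT_PG_SIZE)
 */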
static void free_arenas(struct btt *btt)
{
struct arena_info *arena, *next;
list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
list_del(&arena->list);
kfree(arena->rtt);
kfree(arena->map_locks);
kfree(arena->freelist);
debugfs_remove_recursive(arena->debugfs_dir);
kfree(arena);
}
}
/*
* This function reads an existing valid btt superblock and
* populates the corresponding arena_info struct
*/
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
u64 arena_off)
{
arena->internal_nlba = le32_to_cpu(super->internal_nlba);
arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
arena->external_nlba = le32_to_cpu(super->external_nlba);
arena->external_lbasize = le32_to_cpu(super->external_lbasize);
arena->nfree = le32_to_cpu(super->nfree);
arena->version_major = le16_to_cpu(super->version_major);
arena->version_minor = le16_to_cpu(super->version_minor);
arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
le64_to_cpu(super->nextoff));
arena->infooff = arena_off;
arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
arena->logoff = arena_off + le64_to_cpu(super->logoff);
arena->info2off = arena_off + le64_to_cpu(super->info2off);
arena->size = (le64_to_cpu(super->nextoff) > 0)
? (le64_to_cpu(super->nextoff))
: (arena->info2off - arena->infooff + BTT_PG_SIZE);
arena->flags = le32_to_cpu(super->flags);
}
static int discover_arenas(struct btt *btt)
{
int ret = 0;
struct arena_info *arena;
struct btt_sb *super;
size_t remaining = btt->rawsize;
u64 cur_nlba = 0;
size_t cur_off = 0;
int num_arenas = 0;
super = kzalloc(sizeof(*super), GFP_KERNEL);
if (!super)
return -ENOMEM;
while (remaining) {
/* Alloc memory for arena */
arena = alloc_arena(btt, 0, 0, 0);
if (!arena) {
ret = -ENOMEM;
goto out_super;
}
arena->infooff = cur_off;
ret = btt_info_read(arena, super);
if (ret)
goto out;
if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
if (remaining == btt->rawsize) {
btt->init_state = INIT_NOTFOUND;
dev_info(to_dev(arena), "No existing arenas\n");
goto out;
} else {
dev_err(to_dev(arena),
"Found corrupted metadata!\n");
ret = -ENODEV;
goto out;
}
}
arena->external_lba_start = cur_nlba;
parse_arena_meta(arena, super, cur_off);
ret = log_set_indices(arena);
if (ret) {
dev_err(to_dev(arena),
"Unable to deduce log/padding indices\n");
goto out;
}
ret = btt_freelist_init(arena);
if (ret)
goto out;
ret = btt_rtt_init(arena);
if (ret)
goto out;
ret = btt_maplocks_init(arena);
if (ret)
goto out;
list_add_tail(&arena->list, &btt->arena_list);
remaining -= arena->size;
cur_off += arena->size;
cur_nlba += arena->external_nlba;
num_arenas++;
if (arena->nextoff == 0)
break;
}
btt->num_arenas = num_arenas;
btt->nlba = cur_nlba;
btt->init_state = INIT_READY;
kfree(super);
return ret;
out:
kfree(arena);
free_arenas(btt);
out_super:
kfree(super);
return ret;
}
static int create_arenas(struct btt *btt)
{
size_t remaining = btt->rawsize;
size_t cur_off = 0;
while (remaining) {
struct arena_info *arena;
size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
remaining -= arena_size;
if (arena_size < ARENA_MIN_SIZE)
break;
arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
if (!arena) {
free_arenas(btt);
return -ENOMEM;
}
btt->nlba += arena->external_nlba;
if (remaining >= ARENA_MIN_SIZE)
arena->nextoff = arena->size;
else
arena->nextoff = 0;
cur_off += arena_size;
list_add_tail(&arena->list, &btt->arena_list);
}
return 0;
}
/*
* This function completes arena initialization by writing
* all the metadata.
* It is only called for an uninitialized arena when a write
* to that arena occurs for the first time.
*/
static int btt_arena_write_layout(struct arena_info *arena)
{
int ret;
u64 sum;
struct btt_sb *super;
struct nd_btt *nd_btt = arena->nd_btt;
const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
ret = btt_map_init(arena);
if (ret)
return ret;
ret = btt_log_init(arena);
if (ret)
return ret;
super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
if (!super)
return -ENOMEM;
strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
export_uuid(super->uuid, nd_btt->uuid);
export_uuid(super->parent_uuid, parent_uuid);
super->flags = cpu_to_le32(arena->flags);
super->version_major = cpu_to_le16(arena->version_major);
super->version_minor = cpu_to_le16(arena->version_minor);
super->external_lbasize = cpu_to_le32(arena->external_lbasize);
super->external_nlba = cpu_to_le32(arena->external_nlba);
super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
super->internal_nlba = cpu_to_le32(arena->internal_nlba);
super->nfree = cpu_to_le32(arena->nfree);
super->infosize = cpu_to_le32(sizeof(struct btt_sb));
super->nextoff = cpu_to_le64(arena->nextoff);
/*
* Subtract arena->infooff (arena start) so numbers are relative
* to 'this' arena
*/
super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
super->flags = 0;
sum = nd_sb_checksum((struct nd_gen_sb *) super);
super->checksum = cpu_to_le64(sum);
ret = btt_info_write(arena, super);
kfree(super);
return ret;
}
/*
* This function completes the initialization for the BTT namespace
* such that it is ready to accept IOs
*/
static int btt_meta_init(struct btt *btt)
{
int ret = 0;
struct arena_info *arena;
mutex_lock(&btt->init_lock);
list_for_each_entry(arena, &btt->arena_list, list) {
ret = btt_arena_write_layout(arena);
if (ret)
goto unlock;
ret = btt_freelist_init(arena);
if (ret)
goto unlock;
ret = btt_rtt_init(arena);
if (ret)
goto unlock;
ret = btt_maplocks_init(arena);
if (ret)
goto unlock;
}
btt->init_state = INIT_READY;
unlock:
mutex_unlock(&btt->init_lock);
return ret;
}
static u32 btt_meta_size(struct btt *btt)
{
return btt->lbasize - btt->sector_size;
}
/*
* This function calculates the arena in which the given LBA lies
* by doing a linear walk. This is acceptable since we expect only
* a few arenas. If we have backing devices that get much larger,
* we can construct a balanced binary tree of arenas at init time
* so that this range search becomes faster.
*/
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
struct arena_info **arena)
{
struct arena_info *arena_list;
__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
list_for_each_entry(arena_list, &btt->arena_list, list) {
if (lba < arena_list->external_nlba) {
*arena = arena_list;
*premap = lba;
return 0;
}
lba -= arena_list->external_nlba;
}
return -EIO;
}
/*
* The following (lock_map, unlock_map) are mostly just to improve
* readability, since they index into an array of locks
*/
static void lock_map(struct arena_info *arena, u32 premap)
__acquires(&arena->map_locks[idx].lock)
{
u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
spin_lock(&arena->map_locks[idx].lock);
}
static void unlock_map(struct arena_info *arena, u32 premap)
__releases(&arena->map_locks[idx].lock)
{
u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
spin_unlock(&arena->map_locks[idx].lock);
}
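/*
 * Striping example (illustrative): map entries are MAP_ENT_SIZE bytes,
 * so premap * MAP_ENT_SIZE / L1_CACHE_BYTES maps every entry sharing a
 * cacheline to the same index, and the modulo spreads those cachelines
 * over the nfree spinlocks. Updates to map entries that share a
 * cacheline are therefore always serialized by the same lock.
 */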
static int btt_data_read(struct arena_info *arena, struct page *page,
unsigned int off, u32 lba, u32 len)
{
int ret;
u64 nsoff = to_namespace_offset(arena, lba);
void *mem = kmap_atomic(page);
ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
kunmap_atomic(mem);
return ret;
}
static int btt_data_write(struct arena_info *arena, u32 lba,
struct page *page, unsigned int off, u32 len)
{
int ret;
u64 nsoff = to_namespace_offset(arena, lba);
void *mem = kmap_atomic(page);
ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
kunmap_atomic(mem);
return ret;
}
static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
void *mem = kmap_atomic(page);
memset(mem + off, 0, len);
kunmap_atomic(mem);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
struct arena_info *arena, u32 postmap, int rw)
{
unsigned int len = btt_meta_size(btt);
u64 meta_nsoff;
int ret = 0;
if (bip == NULL)
return 0;
meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
while (len) {
unsigned int cur_len;
struct bio_vec bv;
void *mem;
bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
/*
* The 'bv' obtained from bvec_iter_bvec has its .bv_len and
* .bv_offset already adjusted for iter->bi_bvec_done, and we
* can use those directly
*/
cur_len = min(len, bv.bv_len);
mem = bvec_kmap_local(&bv);
if (rw)
ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
else
ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
NVDIMM_IO_ATOMIC);
kunmap_local(mem);
if (ret)
return ret;
len -= cur_len;
meta_nsoff += cur_len;
if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
return -EIO;
}
return ret;
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
struct arena_info *arena, u32 postmap, int rw)
{
return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int off, sector_t sector,
unsigned int len)
{
int ret = 0;
int t_flag, e_flag;
struct arena_info *arena = NULL;
u32 lane = 0, premap, postmap;
while (len) {
u32 cur_len;
lane = nd_region_acquire_lane(btt->nd_region);
ret = lba_to_arena(btt, sector, &premap, &arena);
if (ret)
goto out_lane;
cur_len = min(btt->sector_size, len);
ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
NVDIMM_IO_ATOMIC);
if (ret)
goto out_lane;
/*
* We loop to make sure that the post map LBA didn't change
* from under us between writing the RTT and doing the actual
* read.
*/
while (1) {
u32 new_map;
int new_t, new_e;
if (t_flag) {
zero_fill_data(page, off, cur_len);
goto out_lane;
}
if (e_flag) {
ret = -EIO;
goto out_lane;
}
arena->rtt[lane] = RTT_VALID | postmap;
/*
			 * Barrier to make sure this write is not reordered
			 * such that the verification map_read happens before
			 * the RTT store
*/
barrier();
ret = btt_map_read(arena, premap, &new_map, &new_t,
&new_e, NVDIMM_IO_ATOMIC);
if (ret)
goto out_rtt;
if ((postmap == new_map) && (t_flag == new_t) &&
(e_flag == new_e))
break;
postmap = new_map;
t_flag = new_t;
e_flag = new_e;
}
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
/* Media error - set the e_flag */
if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
dev_warn_ratelimited(to_dev(arena),
"Error persistently tracking bad blocks at %#x\n",
premap);
goto out_rtt;
}
if (bip) {
ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
if (ret)
goto out_rtt;
}
arena->rtt[lane] = RTT_INVALID;
nd_region_release_lane(btt->nd_region, lane);
len -= cur_len;
off += cur_len;
sector += btt->sector_size >> SECTOR_SHIFT;
}
return 0;
out_rtt:
arena->rtt[lane] = RTT_INVALID;
out_lane:
nd_region_release_lane(btt->nd_region, lane);
return ret;
}
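/*
 * Reader/writer interplay (summary of the loop above): a reader posts
 * RTT_VALID | postmap in its lane's RTT slot and then re-reads the map
 * entry. A writer about to reuse a free block spins while any RTT slot
 * still advertises that block (see btt_write_pg), so once the re-read
 * matches, the data read cannot race with reallocation of the block.
 */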
/*
* Normally, arena_{read,write}_bytes will take care of the initial offset
* adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
* we need the final, raw namespace offset here
*/
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
u32 postmap)
{
u64 nsoff = adjust_initial_offset(arena->nd_btt,
to_namespace_offset(arena, postmap));
sector_t phys_sector = nsoff >> 9;
return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
sector_t sector, struct page *page, unsigned int off,
unsigned int len)
{
int ret = 0;
struct arena_info *arena = NULL;
u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
struct log_entry log;
int sub;
while (len) {
u32 cur_len;
int e_flag;
retry:
lane = nd_region_acquire_lane(btt->nd_region);
ret = lba_to_arena(btt, sector, &premap, &arena);
if (ret)
goto out_lane;
cur_len = min(btt->sector_size, len);
if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
ret = -EIO;
goto out_lane;
}
if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
arena->freelist[lane].has_err = 1;
if (mutex_is_locked(&arena->err_lock)
|| arena->freelist[lane].has_err) {
nd_region_release_lane(btt->nd_region, lane);
ret = arena_clear_freelist_error(arena, lane);
if (ret)
return ret;
/* OK to acquire a different lane/free block */
goto retry;
}
new_postmap = arena->freelist[lane].block;
/* Wait if the new block is being read from */
for (i = 0; i < arena->nfree; i++)
while (arena->rtt[i] == (RTT_VALID | new_postmap))
cpu_relax();
if (new_postmap >= arena->internal_nlba) {
ret = -EIO;
goto out_lane;
}
ret = btt_data_write(arena, new_postmap, page, off, cur_len);
if (ret)
goto out_lane;
if (bip) {
ret = btt_rw_integrity(btt, bip, arena, new_postmap,
WRITE);
if (ret)
goto out_lane;
}
lock_map(arena, premap);
ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
NVDIMM_IO_ATOMIC);
if (ret)
goto out_map;
if (old_postmap >= arena->internal_nlba) {
ret = -EIO;
goto out_map;
}
if (e_flag)
set_e_flag(old_postmap);
log.lba = cpu_to_le32(premap);
log.old_map = cpu_to_le32(old_postmap);
log.new_map = cpu_to_le32(new_postmap);
log.seq = cpu_to_le32(arena->freelist[lane].seq);
sub = arena->freelist[lane].sub;
ret = btt_flog_write(arena, lane, sub, &log);
if (ret)
goto out_map;
ret = btt_map_write(arena, premap, new_postmap, 0, 0,
NVDIMM_IO_ATOMIC);
if (ret)
goto out_map;
unlock_map(arena, premap);
nd_region_release_lane(btt->nd_region, lane);
if (e_flag) {
ret = arena_clear_freelist_error(arena, lane);
if (ret)
return ret;
}
len -= cur_len;
off += cur_len;
sector += btt->sector_size >> SECTOR_SHIFT;
}
return 0;
out_map:
unlock_map(arena, premap);
out_lane:
nd_region_release_lane(btt->nd_region, lane);
return ret;
}
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
enum req_op op, sector_t sector)
{
int ret;
if (!op_is_write(op)) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
ret = btt_write_pg(btt, bip, sector, page, off, len);
}
return ret;
}
static void btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
int err = 0;
bool do_acct;
if (!bio_integrity_prep(bio))
return;
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
if (len > PAGE_SIZE || len < btt->sector_size ||
len % btt->sector_size) {
dev_err_ratelimited(&btt->nd_btt->dev,
"unaligned bio segment (len: %d)\n", len);
bio->bi_status = BLK_STS_IOERR;
break;
}
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), iter.bi_sector);
if (err) {
dev_err(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
(op_is_write(bio_op(bio))) ? "WRITE" :
"READ",
(unsigned long long) iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
}
if (do_acct)
bio_end_io_acct(bio, start);
bio_endio(bio);
}
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
return 0;
}
static const struct block_device_operations btt_fops = {
.owner = THIS_MODULE,
.submit_bio = btt_submit_bio,
.getgeo = btt_getgeo,
};
static int btt_blk_init(struct btt *btt)
{
struct nd_btt *nd_btt = btt->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
int rc = -ENOMEM;
btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
if (!btt->btt_disk)
return -ENOMEM;
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
if (btt_meta_size(btt)) {
rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
if (rc)
goto out_cleanup_disk;
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
if (rc)
goto out_cleanup_disk;
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
nvdimm_check_and_set_ro(btt->btt_disk);
return 0;
out_cleanup_disk:
put_disk(btt->btt_disk);
return rc;
}
static void btt_blk_cleanup(struct btt *btt)
{
del_gendisk(btt->btt_disk);
put_disk(btt->btt_disk);
}
/**
* btt_init - initialize a block translation table for the given device
* @nd_btt: device with BTT geometry and backing device info
* @rawsize: raw size in bytes of the backing device
* @lbasize: lba size of the backing device
* @uuid: A uuid for the backing device - this is stored on media
 * @nd_region: parent region of the backing device, provides the lanes
 *		used to manage parallel requests
*
* Initialize a Block Translation Table on a backing device to provide
* single sector power fail atomicity.
*
* Context:
* Might sleep.
*
* Returns:
* Pointer to a new struct btt on success, NULL on failure.
*/
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
u32 lbasize, uuid_t *uuid,
struct nd_region *nd_region)
{
int ret;
struct btt *btt;
struct nd_namespace_io *nsio;
struct device *dev = &nd_btt->dev;
btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
if (!btt)
return NULL;
btt->nd_btt = nd_btt;
btt->rawsize = rawsize;
btt->lbasize = lbasize;
btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
INIT_LIST_HEAD(&btt->arena_list);
mutex_init(&btt->init_lock);
btt->nd_region = nd_region;
nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
btt->phys_bb = &nsio->bb;
ret = discover_arenas(btt);
if (ret) {
dev_err(dev, "init: error in arena_discover: %d\n", ret);
return NULL;
}
if (btt->init_state != INIT_READY && nd_region->ro) {
dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
dev_name(&nd_region->dev));
return NULL;
} else if (btt->init_state != INIT_READY) {
btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
btt->num_arenas, rawsize);
ret = create_arenas(btt);
if (ret) {
dev_info(dev, "init: create_arenas: %d\n", ret);
return NULL;
}
ret = btt_meta_init(btt);
if (ret) {
dev_err(dev, "init: error in meta_init: %d\n", ret);
return NULL;
}
}
ret = btt_blk_init(btt);
if (ret) {
dev_err(dev, "init: error in blk_init: %d\n", ret);
return NULL;
}
btt_debugfs_init(btt);
return btt;
}
/**
* btt_fini - de-initialize a BTT
* @btt: the BTT handle that was generated by btt_init
*
* De-initialize a Block Translation Table on device removal
*
* Context:
* Might sleep.
*/
static void btt_fini(struct btt *btt)
{
if (btt) {
btt_blk_cleanup(btt);
free_arenas(btt);
debugfs_remove_recursive(btt->debugfs_dir);
}
}
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct nd_region *nd_region;
struct btt_sb *btt_sb;
struct btt *btt;
size_t size, rawsize;
int rc;
if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
return -ENODEV;
}
btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
if (!btt_sb)
return -ENOMEM;
size = nvdimm_namespace_capacity(ndns);
rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
if (rc)
return rc;
/*
* If this returns < 0, that is ok as it just means there wasn't
* an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version-dependent fields in nd_btt to be
	 * set correctly based on the holder class.
*/
nd_btt_version(nd_btt, ndns, btt_sb);
rawsize = size - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
dev_name(&ndns->dev),
ARENA_MIN_SIZE + nd_btt->initial_offset);
return -ENXIO;
}
nd_region = to_nd_region(nd_btt->dev.parent);
btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
nd_region);
if (!btt)
return -ENOMEM;
nd_btt->btt = btt;
return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
struct btt *btt = nd_btt->btt;
btt_fini(btt);
nd_btt->btt = NULL;
return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
int rc = 0;
debugfs_root = debugfs_create_dir("btt", NULL);
if (IS_ERR_OR_NULL(debugfs_root))
rc = -ENXIO;
return rc;
}
static void __exit nd_btt_exit(void)
{
debugfs_remove_recursive(debugfs_root);
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <[email protected]>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);
| linux-master | drivers/nvdimm/btt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nd_perf.c: NVDIMM Device Performance Monitoring Unit support
*
* Perf interface to expose nvdimm performance stats.
*
* Copyright (C) 2021 IBM Corporation
*/
#define pr_fmt(fmt) "nvdimm_pmu: " fmt
#include <linux/nd.h>
#include <linux/platform_device.h>
#define EVENT(_name, _code) enum{_name = _code}
/*
* NVDIMM Events codes.
*/
/* Controller Reset Count */
EVENT(CTL_RES_CNT, 0x1);
/* Controller Reset Elapsed Time */
EVENT(CTL_RES_TM, 0x2);
/* Power-on Seconds */
EVENT(POWERON_SECS, 0x3);
/* Life Remaining */
EVENT(MEM_LIFE, 0x4);
/* Critical Resource Utilization */
EVENT(CRI_RES_UTIL, 0x5);
/* Host Load Count */
EVENT(HOST_L_CNT, 0x6);
/* Host Store Count */
EVENT(HOST_S_CNT, 0x7);
/* Host Store Duration */
EVENT(HOST_S_DUR, 0x8);
/* Host Load Duration */
EVENT(HOST_L_DUR, 0x9);
/* Media Read Count */
EVENT(MED_R_CNT, 0xa);
/* Media Write Count */
EVENT(MED_W_CNT, 0xb);
/* Media Read Duration */
EVENT(MED_R_DUR, 0xc);
/* Media Write Duration */
EVENT(MED_W_DUR, 0xd);
/* Cache Read Hit Count */
EVENT(CACHE_RH_CNT, 0xe);
/* Cache Write Hit Count */
EVENT(CACHE_WH_CNT, 0xf);
/* Fast Write Count */
EVENT(FAST_W_CNT, 0x10);
NVDIMM_EVENT_ATTR(ctl_res_cnt, CTL_RES_CNT);
NVDIMM_EVENT_ATTR(ctl_res_tm, CTL_RES_TM);
NVDIMM_EVENT_ATTR(poweron_secs, POWERON_SECS);
NVDIMM_EVENT_ATTR(mem_life, MEM_LIFE);
NVDIMM_EVENT_ATTR(cri_res_util, CRI_RES_UTIL);
NVDIMM_EVENT_ATTR(host_l_cnt, HOST_L_CNT);
NVDIMM_EVENT_ATTR(host_s_cnt, HOST_S_CNT);
NVDIMM_EVENT_ATTR(host_s_dur, HOST_S_DUR);
NVDIMM_EVENT_ATTR(host_l_dur, HOST_L_DUR);
NVDIMM_EVENT_ATTR(med_r_cnt, MED_R_CNT);
NVDIMM_EVENT_ATTR(med_w_cnt, MED_W_CNT);
NVDIMM_EVENT_ATTR(med_r_dur, MED_R_DUR);
NVDIMM_EVENT_ATTR(med_w_dur, MED_W_DUR);
NVDIMM_EVENT_ATTR(cache_rh_cnt, CACHE_RH_CNT);
NVDIMM_EVENT_ATTR(cache_wh_cnt, CACHE_WH_CNT);
NVDIMM_EVENT_ATTR(fast_w_cnt, FAST_W_CNT);
static struct attribute *nvdimm_events_attr[] = {
NVDIMM_EVENT_PTR(CTL_RES_CNT),
NVDIMM_EVENT_PTR(CTL_RES_TM),
NVDIMM_EVENT_PTR(POWERON_SECS),
NVDIMM_EVENT_PTR(MEM_LIFE),
NVDIMM_EVENT_PTR(CRI_RES_UTIL),
NVDIMM_EVENT_PTR(HOST_L_CNT),
NVDIMM_EVENT_PTR(HOST_S_CNT),
NVDIMM_EVENT_PTR(HOST_S_DUR),
NVDIMM_EVENT_PTR(HOST_L_DUR),
NVDIMM_EVENT_PTR(MED_R_CNT),
NVDIMM_EVENT_PTR(MED_W_CNT),
NVDIMM_EVENT_PTR(MED_R_DUR),
NVDIMM_EVENT_PTR(MED_W_DUR),
NVDIMM_EVENT_PTR(CACHE_RH_CNT),
NVDIMM_EVENT_PTR(CACHE_WH_CNT),
NVDIMM_EVENT_PTR(FAST_W_CNT),
NULL
};
static struct attribute_group nvdimm_pmu_events_group = {
.name = "events",
.attrs = nvdimm_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-4");
static struct attribute *nvdimm_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group nvdimm_pmu_format_group = {
.name = "format",
.attrs = nvdimm_pmu_format_attr,
};
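/*
 * The "config:0-4" format spec above means the event code occupies bits
 * 0-4 of perf_event_attr.config.  Illustrative userspace usage, assuming
 * a registered PMU instance named "nmem0" (the instance name is
 * platform-provided, not defined in this file):
 *
 *	perf stat -e nmem0/event=0x4/ -a sleep 1
 */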
ssize_t nvdimm_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
static ssize_t nvdimm_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct nvdimm_pmu *nd_pmu;
nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
}
static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
struct nvdimm_pmu *nd_pmu;
u32 target;
int nodeid;
const struct cpumask *cpumask;
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
	/* Clear it, in case the given cpu is set in nd_pmu->arch_cpumask */
cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
	/*
	 * If the given cpu is not the same as the current designated cpu
	 * for counter access, just return.
	 */
if (cpu != nd_pmu->cpu)
return 0;
/* Check for any active cpu in nd_pmu->arch_cpumask */
target = cpumask_any(&nd_pmu->arch_cpumask);
	/*
	 * In case there is no active cpu in nd_pmu->arch_cpumask,
	 * check the given cpu's NUMA node cpumask.
	 */
if (target >= nr_cpu_ids) {
nodeid = cpu_to_node(cpu);
cpumask = cpumask_of_node(nodeid);
target = cpumask_any_but(cpumask, cpu);
}
nd_pmu->cpu = target;
/* Migrate nvdimm pmu events to the new target cpu if valid */
if (target >= 0 && target < nr_cpu_ids)
perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);
return 0;
}
static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct nvdimm_pmu *nd_pmu;
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
if (nd_pmu->cpu >= nr_cpu_ids)
nd_pmu->cpu = cpu;
return 0;
}
static int create_cpumask_attr_group(struct nvdimm_pmu *nd_pmu)
{
struct perf_pmu_events_attr *pmu_events_attr;
struct attribute **attrs_group;
struct attribute_group *nvdimm_pmu_cpumask_group;
pmu_events_attr = kzalloc(sizeof(*pmu_events_attr), GFP_KERNEL);
if (!pmu_events_attr)
return -ENOMEM;
attrs_group = kzalloc(2 * sizeof(struct attribute *), GFP_KERNEL);
if (!attrs_group) {
kfree(pmu_events_attr);
return -ENOMEM;
}
/* Allocate memory for cpumask attribute group */
nvdimm_pmu_cpumask_group = kzalloc(sizeof(*nvdimm_pmu_cpumask_group), GFP_KERNEL);
if (!nvdimm_pmu_cpumask_group) {
kfree(pmu_events_attr);
kfree(attrs_group);
return -ENOMEM;
}
sysfs_attr_init(&pmu_events_attr->attr.attr);
pmu_events_attr->attr.attr.name = "cpumask";
pmu_events_attr->attr.attr.mode = 0444;
pmu_events_attr->attr.show = nvdimm_pmu_cpumask_show;
attrs_group[0] = &pmu_events_attr->attr.attr;
attrs_group[1] = NULL;
nvdimm_pmu_cpumask_group->attrs = attrs_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group;
return 0;
}
static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu)
{
int nodeid, rc;
const struct cpumask *cpumask;
	/*
	 * For the cpu hotplug feature, arch specific code can provide
	 * the cpumask used to pick the designated cpu for counter
	 * access.  Check for any active cpu in nd_pmu->arch_cpumask.
	 */
if (!cpumask_empty(&nd_pmu->arch_cpumask)) {
nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
} else {
		/* Pick an active cpu from the cpumask of the device's NUMA node. */
nodeid = dev_to_node(nd_pmu->dev);
cpumask = cpumask_of_node(nodeid);
nd_pmu->cpu = cpumask_any(cpumask);
}
rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/nvdimm:online",
nvdimm_pmu_cpu_online, nvdimm_pmu_cpu_offline);
if (rc < 0)
return rc;
nd_pmu->cpuhp_state = rc;
/* Register the pmu instance for cpu hotplug */
rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
if (rc) {
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
return rc;
}
/* Create cpumask attribute group */
rc = create_cpumask_attr_group(nd_pmu);
if (rc) {
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
return rc;
}
return 0;
}
static void nvdimm_pmu_free_hotplug_memory(struct nvdimm_pmu *nd_pmu)
{
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR])
kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs);
kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]);
}
int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
{
int rc;
if (!nd_pmu || !pdev)
return -EINVAL;
/* event functions like add/del/read/event_init and pmu name should not be NULL */
if (WARN_ON_ONCE(!(nd_pmu->pmu.event_init && nd_pmu->pmu.add &&
nd_pmu->pmu.del && nd_pmu->pmu.read && nd_pmu->pmu.name)))
return -EINVAL;
nd_pmu->pmu.attr_groups = kzalloc((NVDIMM_PMU_NULL_ATTR + 1) *
sizeof(struct attribute_group *), GFP_KERNEL);
if (!nd_pmu->pmu.attr_groups)
return -ENOMEM;
	/*
	 * Add the platform_device->dev pointer to nvdimm_pmu so the
	 * event functions can access device data.
	 */
nd_pmu->dev = &pdev->dev;
/* Fill attribute groups for the nvdimm pmu device */
nd_pmu->pmu.attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_EVENT_ATTR] = &nvdimm_pmu_events_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL;
/* Fill attribute group for cpumask */
rc = nvdimm_pmu_cpu_hotplug_init(nd_pmu);
if (rc) {
pr_info("cpu hotplug feature failed for device: %s\n", nd_pmu->pmu.name);
kfree(nd_pmu->pmu.attr_groups);
return rc;
}
rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
if (rc) {
nvdimm_pmu_free_hotplug_memory(nd_pmu);
kfree(nd_pmu->pmu.attr_groups);
return rc;
}
pr_info("%s NVDIMM performance monitor support registered\n",
nd_pmu->pmu.name);
return 0;
}
EXPORT_SYMBOL_GPL(register_nvdimm_pmu);
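/*
 * Registration sketch (illustrative, not part of this file): a platform
 * driver fills in struct nvdimm_pmu and calls register_nvdimm_pmu().
 * The my_* callbacks and the "nmem0" name are hypothetical stand-ins
 * for the driver's own perf handlers and naming.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct nvdimm_pmu *nd_pmu;
 *
 *		nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
 *		if (!nd_pmu)
 *			return -ENOMEM;
 *
 *		nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
 *		nd_pmu->pmu.name = "nmem0";
 *		nd_pmu->pmu.event_init = my_event_init;
 *		nd_pmu->pmu.add = my_event_add;
 *		nd_pmu->pmu.del = my_event_del;
 *		nd_pmu->pmu.read = my_event_read;
 *
 *		return register_nvdimm_pmu(nd_pmu, pdev);
 *	}
 */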
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
{
perf_pmu_unregister(&nd_pmu->pmu);
nvdimm_pmu_free_hotplug_memory(nd_pmu);
kfree(nd_pmu->pmu.attr_groups);
kfree(nd_pmu);
}
EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu);
| linux-master | drivers/nvdimm/nd_perf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helpers for the host side of a virtio ring.
*
* Since these may be in userspace, we use (inline) accessors.
*/
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
static DEFINE_RATELIMIT_STATE(vringh_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
if (__ratelimit(&vringh_rs)) {
va_list ap;
va_start(ap, fmt);
		printk(KERN_NOTICE "vringh: ");
vprintk(fmt, ap);
va_end(ap);
}
}
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
int (*getu16)(const struct vringh *vrh,
u16 *val, const __virtio16 *p),
u16 *last_avail_idx)
{
u16 avail_idx, i, head;
int err;
err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
if (err) {
vringh_bad("Failed to access avail idx at %p",
&vrh->vring.avail->idx);
return err;
}
if (*last_avail_idx == avail_idx)
return vrh->vring.num;
/* Only get avail ring entries after they have been exposed by guest. */
virtio_rmb(vrh->weak_barriers);
i = *last_avail_idx & (vrh->vring.num - 1);
err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
if (err) {
vringh_bad("Failed to read head: idx %d address %p",
*last_avail_idx, &vrh->vring.avail->ring[i]);
return err;
}
if (head >= vrh->vring.num) {
vringh_bad("Guest says index %u > %u is available",
head, vrh->vring.num);
return -EINVAL;
}
(*last_avail_idx)++;
return head;
}
/**
* vringh_kiov_advance - skip bytes from vring_kiov
* @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
* @len: the maximum length to advance
*/
void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
{
while (len && iov->i < iov->used) {
size_t partlen = min(iov->iov[iov->i].iov_len, len);
iov->consumed += partlen;
iov->iov[iov->i].iov_len -= partlen;
iov->iov[iov->i].iov_base += partlen;
if (!iov->iov[iov->i].iov_len) {
/* Fix up old iov element then increment. */
iov->iov[iov->i].iov_len = iov->consumed;
iov->iov[iov->i].iov_base -= iov->consumed;
iov->consumed = 0;
iov->i++;
}
len -= partlen;
}
}
EXPORT_SYMBOL(vringh_kiov_advance);
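/*
 * Usage sketch (illustrative): skip over bytes that were already
 * consumed out-of-band, e.g. a fixed-size request header, before
 * copying the rest of the payload.  "struct my_req_hdr" is a
 * hypothetical type.
 *
 *	vringh_kiov_advance(&riov, sizeof(struct my_req_hdr));
 *	copied = vringh_iov_pull_kern(&riov, payload, payload_len);
 */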
/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
struct vringh_kiov *iov,
void *ptr, size_t len,
int (*xfer)(const struct vringh *vrh,
void *addr, void *ptr,
size_t len))
{
int err, done = 0;
while (len && iov->i < iov->used) {
size_t partlen;
partlen = min(iov->iov[iov->i].iov_len, len);
err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
if (err)
return err;
done += partlen;
len -= partlen;
ptr += partlen;
vringh_kiov_advance(iov, partlen);
}
return done;
}
/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
struct vringh_range *range,
bool (*getrange)(struct vringh *,
u64, struct vringh_range *))
{
if (addr < range->start || addr > range->end_incl) {
if (!getrange(vrh, addr, range))
return false;
}
BUG_ON(addr < range->start || addr > range->end_incl);
/* To end of memory? */
if (unlikely(addr + *len == 0)) {
if (range->end_incl == -1ULL)
return true;
goto truncate;
}
/* Otherwise, don't wrap. */
if (addr + *len < addr) {
vringh_bad("Wrapping descriptor %zu@0x%llx",
*len, (unsigned long long)addr);
return false;
}
if (unlikely(addr + *len - 1 > range->end_incl))
goto truncate;
return true;
truncate:
*len = range->end_incl + 1 - addr;
return true;
}
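/*
 * Worked example (illustrative): with range->start = 0x1000 and
 * range->end_incl = 0x1fff, checking addr = 0x1800, *len = 0x1000 does
 * not wrap but runs past end_incl, so the length is truncated to
 * *len = 0x1fff + 1 - 0x1800 = 0x800 and the call still returns true;
 * the caller then re-checks the remainder against the next range.
 */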
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
struct vringh_range *range,
bool (*getrange)(struct vringh *,
u64, struct vringh_range *))
{
return true;
}
/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
int *up_next, u16 *i, void *addr,
const struct vring_desc *desc,
struct vring_desc **descs, int *desc_max)
{
u32 len;
/* Indirect tables can't have indirect. */
if (*up_next != -1) {
vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
return -EINVAL;
}
len = vringh32_to_cpu(vrh, desc->len);
if (unlikely(len % sizeof(struct vring_desc))) {
vringh_bad("Strange indirect len %u", desc->len);
return -EINVAL;
}
/* We will check this when we follow it! */
if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
*up_next = vringh16_to_cpu(vrh, desc->next);
else
*up_next = -2;
*descs = addr;
*desc_max = len / sizeof(struct vring_desc);
/* Now, start at the first indirect. */
*i = 0;
return 0;
}
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
struct kvec *new;
unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;
if (new_num < 8)
new_num = 8;
flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
if (flag)
new = krealloc_array(iov->iov, new_num,
sizeof(struct iovec), gfp);
else {
new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
if (new) {
memcpy(new, iov->iov,
iov->max_num * sizeof(struct iovec));
flag = VRINGH_IOV_ALLOCATED;
}
}
if (!new)
return -ENOMEM;
iov->iov = new;
iov->max_num = (new_num | flag);
return 0;
}
static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
struct vring_desc **descs, int *desc_max)
{
u16 i = *up_next;
*up_next = -1;
*descs = vrh->vring.desc;
*desc_max = vrh->vring.num;
return i;
}
static int slow_copy(struct vringh *vrh, void *dst, const void *src,
bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
struct vringh_range *range,
bool (*getrange)(struct vringh *vrh,
u64,
struct vringh_range *)),
bool (*getrange)(struct vringh *vrh,
u64 addr,
struct vringh_range *r),
struct vringh_range *range,
int (*copy)(const struct vringh *vrh,
void *dst, const void *src, size_t len))
{
size_t part, len = sizeof(struct vring_desc);
do {
u64 addr;
int err;
part = len;
addr = (u64)(unsigned long)src - range->offset;
if (!rcheck(vrh, addr, &part, range, getrange))
return -EINVAL;
err = copy(vrh, dst, src, part);
if (err)
return err;
dst += part;
src += part;
len -= part;
} while (len);
return 0;
}
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
struct vringh_range *range,
bool (*getrange)(struct vringh *, u64,
struct vringh_range *)),
bool (*getrange)(struct vringh *, u64, struct vringh_range *),
gfp_t gfp,
int (*copy)(const struct vringh *vrh,
void *dst, const void *src, size_t len))
{
int err, count = 0, indirect_count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
struct vringh_range range = { -1ULL, 0 }, slowrange;
bool slow = false;
/* We start traversing vring's descriptor table. */
descs = vrh->vring.desc;
desc_max = vrh->vring.num;
up_next = -1;
/* You must want something! */
if (WARN_ON(!riov && !wiov))
return -EINVAL;
if (riov)
riov->i = riov->used = riov->consumed = 0;
if (wiov)
wiov->i = wiov->used = wiov->consumed = 0;
for (;;) {
void *addr;
struct vringh_kiov *iov;
size_t len;
if (unlikely(slow))
err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
&slowrange, copy);
else
err = copy(vrh, &desc, &descs[i], sizeof(desc));
if (unlikely(err))
goto fail;
if (unlikely(desc.flags &
cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
u64 a = vringh64_to_cpu(vrh, desc.addr);
/* Make sure it's OK, and get offset. */
len = vringh32_to_cpu(vrh, desc.len);
if (!rcheck(vrh, a, &len, &range, getrange)) {
err = -EINVAL;
goto fail;
}
if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
slow = true;
/* We need to save this range to use offset */
slowrange = range;
}
addr = (void *)(long)(a + range.offset);
err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
&descs, &desc_max);
if (err)
goto fail;
continue;
}
if (up_next == -1)
count++;
else
indirect_count++;
if (count > vrh->vring.num || indirect_count > desc_max) {
vringh_bad("Descriptor loop in %p", descs);
err = -ELOOP;
goto fail;
}
if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
iov = wiov;
else {
iov = riov;
if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;
goto fail;
}
}
if (!iov) {
vringh_bad("Unexpected %s desc",
!wiov ? "writable" : "readable");
err = -EPROTO;
goto fail;
}
again:
/* Make sure it's OK, and get offset. */
len = vringh32_to_cpu(vrh, desc.len);
if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
getrange)) {
err = -EINVAL;
goto fail;
}
addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
range.offset);
if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
err = resize_iovec(iov, gfp);
if (err)
goto fail;
}
iov->iov[iov->used].iov_base = addr;
iov->iov[iov->used].iov_len = len;
iov->used++;
if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
desc.len = cpu_to_vringh32(vrh,
vringh32_to_cpu(vrh, desc.len) - len);
desc.addr = cpu_to_vringh64(vrh,
vringh64_to_cpu(vrh, desc.addr) + len);
goto again;
}
if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
i = vringh16_to_cpu(vrh, desc.next);
} else {
/* Just in case we need to finish traversing above. */
if (unlikely(up_next > 0)) {
i = return_from_indirect(vrh, &up_next,
&descs, &desc_max);
slow = false;
indirect_count = 0;
} else
break;
}
if (i >= desc_max) {
vringh_bad("Chained index %u > %u", i, desc_max);
err = -EINVAL;
goto fail;
}
}
return 0;
fail:
return err;
}
static inline int __vringh_complete(struct vringh *vrh,
const struct vring_used_elem *used,
unsigned int num_used,
int (*putu16)(const struct vringh *vrh,
__virtio16 *p, u16 val),
int (*putused)(const struct vringh *vrh,
struct vring_used_elem *dst,
const struct vring_used_elem
*src, unsigned num))
{
struct vring_used *used_ring;
int err;
u16 used_idx, off;
used_ring = vrh->vring.used;
used_idx = vrh->last_used_idx + vrh->completed;
off = used_idx % vrh->vring.num;
/* Compiler knows num_used == 1 sometimes, hence extra check */
if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
u16 part = vrh->vring.num - off;
err = putused(vrh, &used_ring->ring[off], used, part);
if (!err)
err = putused(vrh, &used_ring->ring[0], used + part,
num_used - part);
} else
err = putused(vrh, &used_ring->ring[off], used, num_used);
if (err) {
vringh_bad("Failed to write %u used entries %u at %p",
num_used, off, &used_ring->ring[off]);
return err;
}
/* Make sure buffer is written before we update index. */
virtio_wmb(vrh->weak_barriers);
err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
if (err) {
vringh_bad("Failed to update used index at %p",
&vrh->vring.used->idx);
return err;
}
vrh->completed += num_used;
return 0;
}
static inline int __vringh_need_notify(struct vringh *vrh,
int (*getu16)(const struct vringh *vrh,
u16 *val,
const __virtio16 *p))
{
bool notify;
u16 used_event;
int err;
/* Flush out used index update. This is paired with the
* barrier that the Guest executes when enabling
* interrupts. */
virtio_mb(vrh->weak_barriers);
/* Old-style, without event indices. */
if (!vrh->event_indices) {
u16 flags;
err = getu16(vrh, &flags, &vrh->vring.avail->flags);
if (err) {
vringh_bad("Failed to get flags at %p",
&vrh->vring.avail->flags);
return err;
}
return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
}
/* Modern: we know when other side wants to know. */
err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
if (err) {
vringh_bad("Failed to get used event idx at %p",
&vring_used_event(&vrh->vring));
return err;
}
/* Just in case we added so many that we wrap. */
if (unlikely(vrh->completed > 0xffff))
notify = true;
else
notify = vring_need_event(used_event,
vrh->last_used_idx + vrh->completed,
vrh->last_used_idx);
vrh->last_used_idx += vrh->completed;
vrh->completed = 0;
return notify;
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
int (*getu16)(const struct vringh *vrh,
u16 *val, const __virtio16 *p),
int (*putu16)(const struct vringh *vrh,
__virtio16 *p, u16 val))
{
u16 avail;
if (!vrh->event_indices) {
/* Old-school; update flags. */
if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
vringh_bad("Clearing used flags %p",
&vrh->vring.used->flags);
return true;
}
} else {
if (putu16(vrh, &vring_avail_event(&vrh->vring),
vrh->last_avail_idx) != 0) {
vringh_bad("Updating avail event index %p",
&vring_avail_event(&vrh->vring));
return true;
}
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
virtio_mb(vrh->weak_barriers);
if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
vringh_bad("Failed to check avail idx at %p",
&vrh->vring.avail->idx);
return true;
}
/* This is unlikely, so we just leave notifications enabled
* (if we're using event_indices, we'll only get one
* notification anyway). */
return avail == vrh->last_avail_idx;
}
static inline void __vringh_notify_disable(struct vringh *vrh,
int (*putu16)(const struct vringh *vrh,
__virtio16 *p, u16 val))
{
if (!vrh->event_indices) {
/* Old-school; update flags. */
if (putu16(vrh, &vrh->vring.used->flags,
VRING_USED_F_NO_NOTIFY)) {
vringh_bad("Setting used flags %p",
&vrh->vring.used->flags);
}
}
}
/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
__virtio16 v = 0;
int rc = get_user(v, (__force __virtio16 __user *)p);
*val = vringh16_to_cpu(vrh, v);
return rc;
}
static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
__virtio16 v = cpu_to_vringh16(vrh, val);
return put_user(v, (__force __virtio16 __user *)p);
}
static inline int copydesc_user(const struct vringh *vrh,
void *dst, const void *src, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
static inline int putused_user(const struct vringh *vrh,
struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
return copy_to_user((__force void __user *)dst, src,
sizeof(*dst) * num) ? -EFAULT : 0;
}
static inline int xfer_from_user(const struct vringh *vrh, void *src,
void *dst, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
static inline int xfer_to_user(const struct vringh *vrh,
void *dst, void *src, size_t len)
{
return copy_to_user((__force void __user *)dst, src, len) ?
-EFAULT : 0;
}
/**
* vringh_init_user - initialize a vringh for a userspace vring.
* @vrh: the vringh to initialize.
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
* @desc: the userspace descriptor pointer.
* @avail: the userspace avail pointer.
* @used: the userspace used pointer.
*
* Returns an error if num is invalid: you should check pointers
* yourself!
*/
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
vring_desc_t __user *desc,
vring_avail_t __user *avail,
vring_used_t __user *used)
{
/* Sane power of 2 please! */
if (!num || num > 0xffff || (num & (num - 1))) {
vringh_bad("Bad ring size %u", num);
return -EINVAL;
}
vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
vrh->last_avail_idx = 0;
vrh->last_used_idx = 0;
vrh->vring.num = num;
/* vring expects kernel addresses, but only used via accessors. */
vrh->vring.desc = (__force struct vring_desc *)desc;
vrh->vring.avail = (__force struct vring_avail *)avail;
vrh->vring.used = (__force struct vring_used *)used;
return 0;
}
EXPORT_SYMBOL(vringh_init_user);
/**
* vringh_getdesc_user - get next available descriptor from userspace ring.
* @vrh: the userspace vring.
* @riov: where to put the readable descriptors (or NULL)
* @wiov: where to put the writable descriptors (or NULL)
* @getrange: function to call to check ranges.
* @head: head index we received, for passing to vringh_complete_user().
*
* Returns 0 if there was no descriptor, 1 if there was, or -errno.
*
* Note that on error return, you can tell the difference between an
* invalid ring and a single invalid descriptor: in the former case,
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
* Note that you can reuse riov and wiov with subsequent calls. Content is
* overwritten and memory reallocated if more space is needed.
 * When you no longer need riov and wiov, clean them up by calling
 * vringh_iov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_user(struct vringh *vrh,
struct vringh_iov *riov,
struct vringh_iov *wiov,
bool (*getrange)(struct vringh *vrh,
u64 addr, struct vringh_range *r),
u16 *head)
{
int err;
*head = vrh->vring.num;
err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
if (err < 0)
return err;
/* Empty... */
if (err == vrh->vring.num)
return 0;
	/* We need the layouts to be identical for this to work */
BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
offsetof(struct vringh_iov, iov));
BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
offsetof(struct vringh_iov, i));
BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
offsetof(struct vringh_iov, used));
BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
offsetof(struct vringh_iov, max_num));
BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
offsetof(struct kvec, iov_base));
BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
offsetof(struct kvec, iov_len));
BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
!= sizeof(((struct kvec *)NULL)->iov_base));
BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
!= sizeof(((struct kvec *)NULL)->iov_len));
*head = err;
err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
(struct vringh_kiov *)wiov,
range_check, getrange, GFP_KERNEL, copydesc_user);
if (err)
return err;
return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
/**
* vringh_iov_pull_user - copy bytes from vring_iov.
* @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
* @dst: the place to copy.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
/**
* vringh_iov_push_user - copy bytes into vring_iov.
* @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
* @src: the place to copy from.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
const void *src, size_t len)
{
return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
(void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
/**
* vringh_abandon_user - we've decided not to handle the descriptor(s).
* @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 * vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
*/
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
/* We only update vring_avail_event(vr) when we want to be notified,
* so we haven't changed that yet. */
vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);
/**
* vringh_complete_user - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_user.
* @len: the length of data we have written.
*
* You should check vringh_need_notify_user() after one or more calls
* to this function.
*/
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
used.id = cpu_to_vringh32(vrh, head);
used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
/**
* vringh_complete_multi_user - we've finished with many descriptors.
* @vrh: the vring.
* @used: the head, length pairs.
* @num_used: the number of used elements.
*
* You should check vringh_need_notify_user() after one or more calls
* to this function.
*/
int vringh_complete_multi_user(struct vringh *vrh,
const struct vring_used_elem used[],
unsigned num_used)
{
return __vringh_complete(vrh, used, num_used,
putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);
/**
* vringh_notify_enable_user - we want to know if something changes.
* @vrh: the vring.
*
* This always enables notifications, but returns false if there are
* now more buffers available in the vring.
*/
bool vringh_notify_enable_user(struct vringh *vrh)
{
return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);
/**
* vringh_notify_disable_user - don't tell us if something changes.
* @vrh: the vring.
*
* This is our normal running state: we disable and then only enable when
* we're going to sleep.
*/
void vringh_notify_disable_user(struct vringh *vrh)
{
__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);
/**
* vringh_need_notify_user - must we tell the other side about used buffers?
* @vrh: the vring we've called vringh_complete_user() on.
*
* Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
*/
int vringh_need_notify_user(struct vringh *vrh)
{
return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
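/*
 * Typical per-descriptor flow for the *_user API (illustrative sketch,
 * error handling elided; vrh, riov, wiov and getrange are assumed to
 * have been set up already, e.g. via vringh_init_user()):
 *
 *	while (vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head) == 1) {
 *		len = vringh_iov_pull_user(&riov, req, sizeof(req));
 *		... process req, build a resp_len byte response ...
 *		vringh_iov_push_user(&wiov, resp, resp_len);
 *		vringh_complete_user(&vrh, head, resp_len);
 *	}
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		... notify the other side (e.g. signal an eventfd) ...
 */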
/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
u16 *val, const __virtio16 *p)
{
*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0;
}
static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0;
}
static inline int copydesc_kern(const struct vringh *vrh,
void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
}
static inline int putused_kern(const struct vringh *vrh,
struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
memcpy(dst, src, num * sizeof(*dst));
return 0;
}
static inline int xfer_kern(const struct vringh *vrh, void *src,
void *dst, size_t len)
{
memcpy(dst, src, len);
return 0;
}
static inline int kern_xfer(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
}
/**
* vringh_init_kern - initialize a vringh for a kernelspace vring.
* @vrh: the vringh to initialize.
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
*
* Returns an error if num is invalid.
*/
int vringh_init_kern(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
struct vring_used *used)
{
/* Sane power of 2 please! */
if (!num || num > 0xffff || (num & (num - 1))) {
vringh_bad("Bad ring size %u", num);
return -EINVAL;
}
vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
vrh->weak_barriers = weak_barriers;
vrh->completed = 0;
vrh->last_avail_idx = 0;
vrh->last_used_idx = 0;
vrh->vring.num = num;
vrh->vring.desc = desc;
vrh->vring.avail = avail;
vrh->vring.used = used;
return 0;
}
EXPORT_SYMBOL(vringh_init_kern);
/**
* vringh_getdesc_kern - get next available descriptor from kernelspace ring.
* @vrh: the kernelspace vring.
* @riov: where to put the readable descriptors (or NULL)
* @wiov: where to put the writable descriptors (or NULL)
* @head: head index we received, for passing to vringh_complete_kern().
* @gfp: flags for allocating larger riov/wiov.
*
* Returns 0 if there was no descriptor, 1 if there was, or -errno.
*
* Note that on error return, you can tell the difference between an
* invalid ring and a single invalid descriptor: in the former case,
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
* Note that you can reuse riov and wiov with subsequent calls. Content is
* overwritten and memory reallocated if more space is needed.
 * When you no longer need riov and wiov, clean them up by calling
 * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
u16 *head,
gfp_t gfp)
{
int err;
err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
if (err < 0)
return err;
/* Empty... */
if (err == vrh->vring.num)
return 0;
*head = err;
err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
gfp, copydesc_kern);
if (err)
return err;
return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
/**
* vringh_iov_pull_kern - copy bytes from vring_iov.
* @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
* @dst: the place to copy.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);
/**
* vringh_iov_push_kern - copy bytes into vring_iov.
* @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
* @src: the place to copy from.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len)
{
return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
/**
* vringh_abandon_kern - we've decided not to handle the descriptor(s).
* @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 * vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
*/
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
/* We only update vring_avail_event(vr) when we want to be notified,
* so we haven't changed that yet. */
vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);
/**
* vringh_complete_kern - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_kern.
* @len: the length of data we have written.
*
* You should check vringh_need_notify_kern() after one or more calls
* to this function.
*/
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
used.id = cpu_to_vringh32(vrh, head);
used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);
/**
* vringh_notify_enable_kern - we want to know if something changes.
* @vrh: the vring.
*
* This always enables notifications, but returns false if there are
* now more buffers available in the vring.
*/
bool vringh_notify_enable_kern(struct vringh *vrh)
{
return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);
/**
* vringh_notify_disable_kern - don't tell us if something changes.
* @vrh: the vring.
*
* This is our normal running state: we disable and then only enable when
* we're going to sleep.
*/
void vringh_notify_disable_kern(struct vringh *vrh)
{
__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);
/**
* vringh_need_notify_kern - must we tell the other side about used buffers?
* @vrh: the vring we've called vringh_complete_kern() on.
*
* Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
*/
int vringh_need_notify_kern(struct vringh *vrh)
{
return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
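/*
 * Sleep/wake pattern for the *_kern API (illustrative sketch): keep
 * notifications disabled while busy and re-enable them only right
 * before sleeping.  vringh_notify_enable_kern() returning false means
 * more buffers arrived in the meantime, so service the ring again
 * instead of sleeping.
 *
 *	for (;;) {
 *		vringh_notify_disable_kern(&vrh);
 *		... drain the ring with vringh_getdesc_kern() ...
 *		if (!vringh_notify_enable_kern(&vrh))
 *			continue;
 *		... block until the other side notifies us ...
 *	}
 */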
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
struct iotlb_vec {
union {
struct iovec *iovec;
struct bio_vec *bvec;
} iov;
size_t count;
};
static int iotlb_translate(const struct vringh *vrh,
u64 addr, u64 len, u64 *translated,
struct iotlb_vec *ivec, u32 perm)
{
struct vhost_iotlb_map *map;
struct vhost_iotlb *iotlb = vrh->iotlb;
int ret = 0;
u64 s = 0, last = addr + len - 1;
spin_lock(vrh->iotlb_lock);
while (len > s) {
uintptr_t io_addr;
size_t io_len;
u64 size;
if (unlikely(ret >= ivec->count)) {
ret = -ENOBUFS;
break;
}
map = vhost_iotlb_itree_first(iotlb, addr, last);
if (!map || map->start > addr) {
ret = -EINVAL;
break;
} else if (!(map->perm & perm)) {
ret = -EPERM;
break;
}
size = map->size - addr + map->start;
io_len = min(len - s, size);
io_addr = map->addr - map->start + addr;
if (vrh->use_va) {
struct iovec *iovec = ivec->iov.iovec;
iovec[ret].iov_len = io_len;
iovec[ret].iov_base = (void __user *)io_addr;
} else {
u64 pfn = io_addr >> PAGE_SHIFT;
struct bio_vec *bvec = ivec->iov.bvec;
bvec_set_page(&bvec[ret], pfn_to_page(pfn), io_len,
io_addr & (PAGE_SIZE - 1));
}
s += size;
addr += size;
++ret;
}
spin_unlock(vrh->iotlb_lock);
if (translated)
*translated = min(len, s);
return ret;
}
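/*
 * Illustrative outcome: translating a buffer that spans two IOTLB maps
 * produces one iovec/bio_vec entry per contiguous mapped chunk (two
 * here), and *translated reports how many bytes those entries cover.
 * Callers such as copy_from_iotlb() loop until the full length has
 * been translated.
 */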
#define IOTLB_IOV_STRIDE 16
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
struct iotlb_vec ivec;
union {
struct iovec iovec[IOTLB_IOV_STRIDE];
struct bio_vec bvec[IOTLB_IOV_STRIDE];
} iov;
u64 total_translated = 0;
ivec.iov.iovec = iov.iovec;
ivec.count = IOTLB_IOV_STRIDE;
while (total_translated < len) {
struct iov_iter iter;
u64 translated;
int ret;
ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
len - total_translated, &translated,
&ivec, VHOST_MAP_RO);
if (ret == -ENOBUFS)
ret = IOTLB_IOV_STRIDE;
else if (ret < 0)
return ret;
if (vrh->use_va) {
iov_iter_init(&iter, ITER_SOURCE, ivec.iov.iovec, ret,
translated);
} else {
iov_iter_bvec(&iter, ITER_SOURCE, ivec.iov.bvec, ret,
translated);
}
ret = copy_from_iter(dst, translated, &iter);
if (ret < 0)
return ret;
src += translated;
dst += translated;
total_translated += translated;
}
return total_translated;
}
static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
struct iotlb_vec ivec;
union {
struct iovec iovec[IOTLB_IOV_STRIDE];
struct bio_vec bvec[IOTLB_IOV_STRIDE];
} iov;
u64 total_translated = 0;
ivec.iov.iovec = iov.iovec;
ivec.count = IOTLB_IOV_STRIDE;
while (total_translated < len) {
struct iov_iter iter;
u64 translated;
int ret;
ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
len - total_translated, &translated,
&ivec, VHOST_MAP_WO);
if (ret == -ENOBUFS)
ret = IOTLB_IOV_STRIDE;
else if (ret < 0)
return ret;
if (vrh->use_va) {
iov_iter_init(&iter, ITER_DEST, ivec.iov.iovec, ret,
translated);
} else {
iov_iter_bvec(&iter, ITER_DEST, ivec.iov.bvec, ret,
translated);
}
ret = copy_to_iter(src, translated, &iter);
if (ret < 0)
return ret;
src += translated;
dst += translated;
total_translated += translated;
}
return total_translated;
}
static inline int getu16_iotlb(const struct vringh *vrh,
u16 *val, const __virtio16 *p)
{
struct iotlb_vec ivec;
union {
struct iovec iovec[1];
struct bio_vec bvec[1];
} iov;
__virtio16 tmp;
int ret;
ivec.iov.iovec = iov.iovec;
ivec.count = 1;
/* Atomic read is needed for getu16 */
ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
NULL, &ivec, VHOST_MAP_RO);
if (ret < 0)
return ret;
if (vrh->use_va) {
ret = __get_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
if (ret)
return ret;
} else {
void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
void *from = kaddr + ivec.iov.bvec[0].bv_offset;
tmp = READ_ONCE(*(__virtio16 *)from);
kunmap_local(kaddr);
}
*val = vringh16_to_cpu(vrh, tmp);
return 0;
}
static inline int putu16_iotlb(const struct vringh *vrh,
__virtio16 *p, u16 val)
{
struct iotlb_vec ivec;
union {
struct iovec iovec;
struct bio_vec bvec;
} iov;
__virtio16 tmp;
int ret;
ivec.iov.iovec = &iov.iovec;
ivec.count = 1;
/* Atomic write is needed for putu16 */
ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      NULL, &ivec, VHOST_MAP_WO);
if (ret < 0)
return ret;
tmp = cpu_to_vringh16(vrh, val);
if (vrh->use_va) {
ret = __put_user(tmp, (__virtio16 __user *)ivec.iov.iovec[0].iov_base);
if (ret)
return ret;
} else {
void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
void *to = kaddr + ivec.iov.bvec[0].bv_offset;
WRITE_ONCE(*(__virtio16 *)to, tmp);
kunmap_local(kaddr);
}
return 0;
}
static inline int copydesc_iotlb(const struct vringh *vrh,
void *dst, const void *src, size_t len)
{
int ret;
ret = copy_from_iotlb(vrh, dst, (void *)src, len);
if (ret != len)
return -EFAULT;
return 0;
}
static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
void *dst, size_t len)
{
int ret;
ret = copy_from_iotlb(vrh, dst, src, len);
if (ret != len)
return -EFAULT;
return 0;
}
static inline int xfer_to_iotlb(const struct vringh *vrh,
void *dst, void *src, size_t len)
{
int ret;
ret = copy_to_iotlb(vrh, dst, src, len);
if (ret != len)
return -EFAULT;
return 0;
}
static inline int putused_iotlb(const struct vringh *vrh,
struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
int size = num * sizeof(*dst);
int ret;
	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
if (ret != size)
return -EFAULT;
return 0;
}
/**
* vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
* @vrh: the vringh to initialize.
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the IOTLB-translated descriptor pointer.
 * @avail: the IOTLB-translated avail pointer.
 * @used: the IOTLB-translated used pointer.
*
* Returns an error if num is invalid.
*/
int vringh_init_iotlb(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
struct vring_used *used)
{
vrh->use_va = false;
return vringh_init_kern(vrh, features, num, weak_barriers,
desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);
/**
* vringh_init_iotlb_va - initialize a vringh for a ring with IOTLB containing
* user VA.
* @vrh: the vringh to initialize.
* @features: the feature bits for this ring.
* @num: the number of elements.
* @weak_barriers: true if we only need memory barriers, not I/O.
* @desc: the userspace descriptor pointer.
* @avail: the userspace avail pointer.
* @used: the userspace used pointer.
*
* Returns an error if num is invalid.
*/
int vringh_init_iotlb_va(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
struct vring_used *used)
{
vrh->use_va = true;
return vringh_init_kern(vrh, features, num, weak_barriers,
desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb_va);
/**
* vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
* @vrh: the vring
* @iotlb: iotlb associated with this vring
* @iotlb_lock: spinlock to synchronize the iotlb accesses
*/
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
spinlock_t *iotlb_lock)
{
vrh->iotlb = iotlb;
vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);
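/*
 * Setup sketch for the IOTLB variant (illustrative; vDPA simulators
 * such as drivers/vdpa/vdpa_sim use this pattern): initialize the ring,
 * then attach the iotlb before any descriptor processing.
 *
 *	vringh_init_iotlb(&vrh, features, num, false, desc, avail, used);
 *	vringh_set_iotlb(&vrh, iotlb, &iotlb_lock);
 *	...
 *	vringh_getdesc_iotlb(&vrh, &riov, &wiov, &head, GFP_ATOMIC);
 */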
/**
* vringh_getdesc_iotlb - get next available descriptor from ring with
* IOTLB.
* @vrh: the kernelspace vring.
* @riov: where to put the readable descriptors (or NULL)
* @wiov: where to put the writable descriptors (or NULL)
* @head: head index we received, for passing to vringh_complete_iotlb().
* @gfp: flags for allocating larger riov/wiov.
*
* Returns 0 if there was no descriptor, 1 if there was, or -errno.
*
* Note that on error return, you can tell the difference between an
* invalid ring and a single invalid descriptor: in the former case,
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
* Note that you can reuse riov and wiov with subsequent calls. Content is
* overwritten and memory reallocated if more space is needed.
 * When you no longer need riov and wiov, clean them up by calling
 * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_iotlb(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
u16 *head,
gfp_t gfp)
{
int err;
err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
if (err < 0)
return err;
/* Empty... */
if (err == vrh->vring.num)
return 0;
*head = err;
err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
gfp, copydesc_iotlb);
if (err)
return err;
return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);
/**
* vringh_iov_pull_iotlb - copy bytes from vring_iov.
* @vrh: the vring.
* @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
* @dst: the place to copy.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
struct vringh_kiov *riov,
void *dst, size_t len)
{
return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);
/**
* vringh_iov_push_iotlb - copy bytes into vring_iov.
* @vrh: the vring.
* @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
* @src: the place to copy from.
* @len: the maximum length to copy.
*
* Returns the bytes copied <= len or a negative errno.
*/
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
struct vringh_kiov *wiov,
const void *src, size_t len)
{
return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);
/**
* vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
* @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 * vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
*/
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
/* We only update vring_avail_event(vr) when we want to be notified,
* so we haven't changed that yet.
*/
vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);
/**
* vringh_complete_iotlb - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_iotlb.
* @len: the length of data we have written.
*
* You should check vringh_need_notify_iotlb() after one or more calls
* to this function.
*/
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
struct vring_used_elem used;
used.id = cpu_to_vringh32(vrh, head);
used.len = cpu_to_vringh32(vrh, len);
return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);
/**
* vringh_notify_enable_iotlb - we want to know if something changes.
* @vrh: the vring.
*
* This always enables notifications, but returns false if there are
* now more buffers available in the vring.
*/
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);
/**
* vringh_notify_disable_iotlb - don't tell us if something changes.
* @vrh: the vring.
*
* This is our normal running state: we disable and then only enable when
* we're going to sleep.
*/
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);
/**
* vringh_need_notify_iotlb - must we tell the other side about used buffers?
* @vrh: the vring we've called vringh_complete_iotlb() on.
*
* Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
*/
int vringh_need_notify_iotlb(struct vringh *vrh)
{
return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
#endif
MODULE_LICENSE("GPL");
| linux-master | drivers/vhost/vringh.c |
// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
* (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
* Authors: Nicholas A. Bellinger <[email protected]>
* Stefan Hajnoczi <[email protected]>
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include "vhost.h"
#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving the others with
 * requests.
*/
#define VHOST_SCSI_WEIGHT 256
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp;
/* Refcount for the inflight reqs */
struct kref kref;
};
struct vhost_scsi_cmd {
/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
int tvc_vq_desc;
/* virtio-scsi initiator task attribute */
int tvc_task_attr;
/* virtio-scsi response incoming iovecs */
int tvc_in_iovs;
/* virtio-scsi initiator data direction */
enum dma_data_direction tvc_data_direction;
/* Expected data transfer length from virtio-scsi header */
u32 tvc_exp_data_len;
/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
u64 tvc_tag;
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
u32 tvc_prot_sgl_count;
/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
u32 tvc_lun;
u32 copied_iov:1;
const void *saved_iter_addr;
struct iov_iter saved_iter;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
struct iovec *tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
struct vhost_virtqueue *tvc_vq;
/* Pointer to vhost nexus memory */
struct vhost_scsi_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd;
/* Copy of the incoming SCSI command descriptor block (CDB) */
unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/* Completed commands list, serviced from vhost worker thread */
struct llist_node tvc_completion_list;
/* Used to track inflight cmd */
struct vhost_scsi_inflight *inflight;
};
struct vhost_scsi_nexus {
/* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess;
};
struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
	/* Used to track the number of TPG Port/Lun links w.r.t. explicit I_T Nexus shutdown */
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
/* Used for enabling T10-PI with legacy devices */
int tv_fabric_prot_type;
/* list for vhost_scsi_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
struct mutex tv_tpg_mutex;
/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
struct vhost_scsi_nexus *tpg_nexus;
/* Pointer back to vhost_scsi_tport */
struct vhost_scsi_tport *tport;
/* Returned by vhost_scsi_make_tpg() */
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
};
struct vhost_scsi_tport {
/* SCSI protocol the tport is providing */
u8 tport_proto_id;
/* Binary World Wide unique Port Name for Vhost Target port */
u64 tport_wwpn;
/* ASCII formatted WWPN for Vhost Target port */
char tport_name[VHOST_SCSI_NAMELEN];
/* Returned by vhost_scsi_make_tport() */
struct se_wwn tport_wwn;
};
struct vhost_scsi_evt {
/* event to be sent to guest */
struct virtio_scsi_event event;
/* event list, serviced from vhost worker thread */
struct llist_node list;
};
enum {
VHOST_SCSI_VQ_CTL = 0,
VHOST_SCSI_VQ_EVT = 1,
VHOST_SCSI_VQ_IO = 2,
};
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
(1ULL << VIRTIO_SCSI_F_T10_PI)
};
#define VHOST_SCSI_MAX_TARGET 256
#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
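/*
 * Illustrative: the limit can be raised at module load time, e.g.
 *
 *	modprobe vhost_scsi max_io_vqs=256
 */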
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
struct vhost_scsi *vs;
/*
	 * Reference counting for inflight reqs, used for the flush operation.
	 * At any time, one reference tracks newly submitted commands, while
	 * we wait for the other one to reach 0.
*/
struct vhost_scsi_inflight inflights[2];
/*
* Indicate current inflight in use, protected by vq->mutex.
* Writers must also take dev mutex and flush under it.
*/
int inflight_idx;
struct vhost_scsi_cmd *scsi_cmds;
struct sbitmap scsi_tags;
int max_cmds;
struct vhost_work completion_work;
struct llist_head completion_list;
};
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
struct vhost_scsi_tpg **vs_tpg;
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
struct vhost_scsi_virtqueue *vqs;
struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_event_work; /* evt injection work item */
struct llist_head vs_event_list; /* evt injection queue */
bool vs_events_missed; /* any missed events, protected by vq->mutex */
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
struct vhost_scsi_tmf {
struct vhost_work vwork;
struct vhost_scsi *vhost;
struct vhost_scsi_virtqueue *svq;
struct se_cmd se_cmd;
u8 scsi_resp;
struct vhost_scsi_inflight *inflight;
struct iovec resp_iov;
int in_iovs;
int vq_desc;
};
/*
* Context for processing request and control queue operations.
*/
struct vhost_scsi_ctx {
int head;
unsigned int out, in;
size_t req_size, rsp_size;
size_t out_size, in_size;
u8 *target, *lunp;
void *req;
struct iov_iter out_iter;
};
/*
* Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
* configfs management operations.
*/
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
static void vhost_scsi_done_inflight(struct kref *kref)
{
struct vhost_scsi_inflight *inflight;
inflight = container_of(kref, struct vhost_scsi_inflight, kref);
complete(&inflight->comp);
}
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_scsi_inflight *old_inflight[])
{
struct vhost_scsi_inflight *new_inflight;
struct vhost_virtqueue *vq;
int idx, i;
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
		/* store old inflight */
idx = vs->vqs[i].inflight_idx;
if (old_inflight)
old_inflight[i] = &vs->vqs[i].inflights[idx];
		/* set up new inflight */
vs->vqs[i].inflight_idx = idx ^ 1;
new_inflight = &vs->vqs[i].inflights[idx ^ 1];
kref_init(&new_inflight->kref);
init_completion(&new_inflight->comp);
mutex_unlock(&vq->mutex);
}
}
static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
struct vhost_scsi_inflight *inflight;
struct vhost_scsi_virtqueue *svq;
svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
inflight = &svq->inflights[svq->inflight_idx];
kref_get(&inflight->kref);
return inflight;
}
static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
struct vhost_scsi_tport *tport = tpg->tport;
return &tport->tport_name[0];
}
static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
return tpg->tport_tpgt;
}
static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
return tpg->tv_fabric_prot_type;
}
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
int i;
if (tv_cmd->tvc_sgl_count) {
for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
if (tv_cmd->copied_iov)
__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
else
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
}
kfree(tv_cmd->saved_iter_addr);
}
if (tv_cmd->tvc_prot_sgl_count) {
for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
}
sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
vhost_scsi_put_inflight(inflight);
}
static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
struct vhost_scsi_inflight *inflight = tmf->inflight;
kfree(tmf);
vhost_scsi_put_inflight(inflight);
}
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct vhost_scsi_tmf *tmf = container_of(se_cmd,
struct vhost_scsi_tmf, se_cmd);
struct vhost_virtqueue *vq = &tmf->svq->vq;
vhost_vq_work_queue(vq, &tmf->vwork);
} else {
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
struct vhost_scsi_virtqueue, vq);
llist_add(&cmd->tvc_completion_list, &svq->completion_list);
vhost_vq_work_queue(&svq->vq, &svq->completion_work);
}
}
static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
/* Go ahead and process the write immediately */
target_execute_cmd(se_cmd);
return 0;
}
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
se_cmd);
tmf->scsi_resp = se_cmd->se_tmr_req->response;
transport_generic_free_cmd(&tmf->se_cmd, 0);
}
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
return;
}
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
vs->vs_events_nr--;
kfree(evt);
}
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
u32 event, u32 reason)
{
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct vhost_scsi_evt *evt;
if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
vs->vs_events_missed = true;
return NULL;
}
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt) {
vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
vs->vs_events_missed = true;
return NULL;
}
evt->event.event = cpu_to_vhost32(vq, event);
evt->event.reason = cpu_to_vhost32(vq, reason);
vs->vs_events_nr++;
return evt;
}
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
}
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
unsigned out, in;
int head, ret;
if (!vhost_vq_get_backend(vq)) {
vs->vs_events_missed = true;
return;
}
again:
vhost_disable_notify(&vs->dev, vq);
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
if (head < 0) {
vs->vs_events_missed = true;
return;
}
if (head == vq->num) {
if (vhost_enable_notify(&vs->dev, vq))
goto again;
vs->vs_events_missed = true;
return;
}
if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
vq->iov[out].iov_len);
vs->vs_events_missed = true;
return;
}
if (vs->vs_events_missed) {
event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
vs->vs_events_missed = false;
}
eventp = vq->iov[out].iov_base;
ret = __copy_to_user(eventp, event, sizeof(*event));
if (!ret)
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}
static void vhost_scsi_evt_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work);
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct vhost_scsi_evt *evt, *t;
struct llist_node *llnode;
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
llist_for_each_entry_safe(evt, t, llnode, list) {
vhost_scsi_do_evt_work(vs, evt);
vhost_scsi_free_evt(vs, evt);
}
mutex_unlock(&vq->mutex);
}
static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
struct iov_iter *iter = &cmd->saved_iter;
struct scatterlist *sg = cmd->tvc_sgl;
struct page *page;
size_t len;
int i;
for (i = 0; i < cmd->tvc_sgl_count; i++) {
page = sg_page(&sg[i]);
len = sg[i].length;
if (copy_page_to_iter(page, 0, len, iter) != len) {
pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
len);
return -1;
}
}
return 0;
}
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
* process mm and can access the vring.
*/
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi_virtqueue *svq = container_of(work,
struct vhost_scsi_virtqueue, completion_work);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
struct se_cmd *se_cmd;
struct iov_iter iov_iter;
bool signal = false;
int ret;
llnode = llist_del_all(&svq->completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
cmd, se_cmd->residual_count, se_cmd->scsi_status);
memset(&v_rsp, 0, sizeof(v_rsp));
if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
} else {
v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
se_cmd->residual_count);
/* TODO is status_qualifier field needed? */
v_rsp.status = se_cmd->scsi_status;
v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
se_cmd->scsi_sense_length);
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
}
iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
signal = true;
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_release_cmd_res(se_cmd);
}
if (signal)
vhost_signal(&svq->vs->dev, &svq->vq);
}
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
u32 exp_data_len, int data_direction)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg, *prot_sg;
struct iovec *tvc_resp_iov;
struct page **pages;
int tag;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Unable to locate active struct vhost_scsi_nexus\n");
return ERR_PTR(-EIO);
}
tag = sbitmap_get(&svq->scsi_tags);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
}
cmd = &svq->scsi_cmds[tag];
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
tvc_resp_iov = cmd->tvc_resp_iov;
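/* Zero the command while preserving the pre-allocated arrays saved above. */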
memset(cmd, 0, sizeof(*cmd));
cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
cmd->tvc_exp_data_len = exp_data_len;
cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus;
cmd->inflight = vhost_scsi_get_inflight(vq);
cmd->tvc_resp_iov = tvc_resp_iov;
memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
return cmd;
}
/*
* Map a user memory range into a scatterlist
*
* Returns the number of scatterlist entries used or -errno on error.
*/
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
struct iov_iter *iter,
struct scatterlist *sgl,
bool is_prot)
{
struct page **pages = cmd->tvc_upages;
struct scatterlist *sg = sgl;
ssize_t bytes, mapped_bytes;
size_t offset, mapped_offset;
unsigned int npages = 0;
bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
mapped_bytes = bytes;
mapped_offset = offset;
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
/*
* The block layer requires bios/requests to be a multiple of
* 512 bytes, but Windows can send us vecs that are misaligned.
* This can result in bios and later requests with misaligned
* sizes if we have to break up a cmd/scatterlist into multiple
* bios.
*
* We currently only break up a command into multiple bios if
* we hit the vec/seg limit, so check if our sgl_count is
* greater than the max and if a vec in the cmd has a
* misaligned offset/size.
*/
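/*
 * e.g. a vec starting at page offset 256 is misaligned, since
 * 256 & (SECTOR_SIZE - 1) != 0, and trips this check once the command
 * needs more than BIO_MAX_VECS segments.
 */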
if (!is_prot &&
(offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
cmd->tvc_sgl_count > BIO_MAX_VECS) {
WARN_ONCE(true,
"vhost-scsi detected misaligned IO. Performance may be degraded.");
goto revert_iter_get_pages;
}
sg_set_page(sg++, pages[npages++], n, offset);
bytes -= n;
offset = 0;
}
return npages;
revert_iter_get_pages:
iov_iter_revert(iter, mapped_bytes);
npages = 0;
while (mapped_bytes) {
unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
mapped_bytes);
put_page(pages[npages++]);
mapped_bytes -= n;
mapped_offset = 0;
}
return -EINVAL;
}
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
int sgl_count = 0;
if (!iter || !iter_iov(iter)) {
pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
" present\n", __func__, bytes);
return -EINVAL;
}
sgl_count = iov_iter_npages(iter, 0xffff);
if (sgl_count > max_sgls) {
pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
" max_sgls: %d\n", __func__, sgl_count, max_sgls);
return -EINVAL;
}
return sgl_count;
}
static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
struct scatterlist *sg, int sg_count)
{
size_t len = iov_iter_count(iter);
unsigned int nbytes = 0;
struct page *page;
int i;
if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
GFP_KERNEL);
if (!cmd->saved_iter_addr)
return -ENOMEM;
}
for (i = 0; i < sg_count; i++) {
page = alloc_page(GFP_KERNEL);
if (!page) {
i--;
goto err;
}
nbytes = min_t(unsigned int, PAGE_SIZE, len);
sg_set_page(&sg[i], page, nbytes, 0);
if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
goto err;
len -= nbytes;
}
cmd->copied_iov = 1;
return 0;
err:
pr_err("Could not read %u bytes while handling misaligned cmd\n",
nbytes);
for (; i >= 0; i--)
__free_page(sg_page(&sg[i]));
kfree(cmd->saved_iter_addr);
return -ENOMEM;
}
static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
struct scatterlist *sg, int sg_count, bool is_prot)
{
struct scatterlist *p = sg;
size_t revert_bytes;
int ret;
while (iov_iter_count(iter)) {
ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
if (ret < 0) {
revert_bytes = 0;
while (p < sg) {
struct page *page = sg_page(p);
if (page) {
put_page(page);
revert_bytes += p->length;
}
p++;
}
iov_iter_revert(iter, revert_bytes);
return ret;
}
sg += ret;
}
return 0;
}
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
size_t prot_bytes, struct iov_iter *prot_iter,
size_t data_bytes, struct iov_iter *data_iter)
{
int sgl_count, ret;
if (prot_bytes) {
sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
VHOST_SCSI_PREALLOC_PROT_SGLS);
if (sgl_count < 0)
return sgl_count;
sg_init_table(cmd->tvc_prot_sgl, sgl_count);
cmd->tvc_prot_sgl_count = sgl_count;
pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
cmd->tvc_prot_sgl,
cmd->tvc_prot_sgl_count, true);
if (ret < 0) {
cmd->tvc_prot_sgl_count = 0;
return ret;
}
}
sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
VHOST_SCSI_PREALLOC_SGLS);
if (sgl_count < 0)
return sgl_count;
sg_init_table(cmd->tvc_sgl, sgl_count);
cmd->tvc_sgl_count = sgl_count;
pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
cmd->tvc_sgl, cmd->tvc_sgl_count);
ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
cmd->tvc_sgl_count, false);
if (ret == -EINVAL) {
sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
cmd->tvc_sgl_count);
}
if (ret < 0) {
cmd->tvc_sgl_count = 0;
return ret;
}
return 0;
}
static int vhost_scsi_to_tcm_attr(int attr)
{
switch (attr) {
case VIRTIO_SCSI_S_SIMPLE:
return TCM_SIMPLE_TAG;
case VIRTIO_SCSI_S_ORDERED:
return TCM_ORDERED_TAG;
case VIRTIO_SCSI_S_HEAD:
return TCM_HEAD_TAG;
case VIRTIO_SCSI_S_ACA:
return TCM_ACA_TAG;
default:
break;
}
return TCM_SIMPLE_TAG;
}
static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
/* FIXME: BIDI operation */
if (cmd->tvc_sgl_count) {
sg_ptr = cmd->tvc_sgl;
if (cmd->tvc_prot_sgl_count)
sg_prot_ptr = cmd->tvc_prot_sgl;
else
se_cmd->prot_pto = true;
} else {
sg_ptr = NULL;
}
tv_nexus = cmd->tvc_nexus;
se_cmd->tag = 0;
target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
cmd->tvc_prot_sgl_count, GFP_KERNEL))
return;
target_queue_submission(se_cmd);
}
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
int head, unsigned out)
{
struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
int ret;
memset(&rsp, 0, sizeof(rsp));
rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
resp = vq->iov[out].iov_base;
ret = __copy_to_user(resp, &rsp, sizeof(rsp));
if (!ret)
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
int ret = -ENXIO;
vc->head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
NULL, NULL);
pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
vc->head, vc->out, vc->in);
/* On error, stop handling until the next kick. */
if (unlikely(vc->head < 0))
goto done;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (vc->head == vq->num) {
if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
vhost_disable_notify(&vs->dev, vq);
ret = -EAGAIN;
}
goto done;
}
/*
* Get the size of request and response buffers.
* FIXME: Not correct for BIDI operation
*/
vc->out_size = iov_length(vq->iov, vc->out);
vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
/*
* Copy over the virtio-scsi request header, which for a
* ANY_LAYOUT enabled guest may span multiple iovecs, or a
* single iovec may contain both the header + outgoing
* WRITE payloads.
*
* copy_from_iter() will advance out_iter, so that it will
* point at the start of the outgoing WRITE payload, if
* DMA_TO_DEVICE is set.
*/
iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
ret = 0;
done:
return ret;
}
static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
if (unlikely(vc->in_size < vc->rsp_size)) {
vq_err(vq,
"Response buf too small, need min %zu bytes got %zu",
vc->rsp_size, vc->in_size);
return -EINVAL;
} else if (unlikely(vc->out_size < vc->req_size)) {
vq_err(vq,
"Request buf too small, need min %zu bytes got %zu",
vc->req_size, vc->out_size);
return -EIO;
}
return 0;
}
static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
struct vhost_scsi_tpg **tpgp)
{
int ret = -EIO;
if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
&vc->out_iter))) {
vq_err(vq, "Faulted on copy_from_iter_full\n");
} else if (unlikely(*vc->lunp != 1)) {
/* virtio-scsi spec requires byte 0 of the lun to be 1 */
vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
} else {
struct vhost_scsi_tpg **vs_tpg, *tpg;
vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
tpg = READ_ONCE(vs_tpg[*vc->target]);
if (unlikely(!tpg)) {
vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
} else {
if (tpgp)
*tpgp = tpg;
ret = 0;
}
}
return ret;
}
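/*
 * Decode the 14-bit LUN from bytes 2-3 of the virtio-scsi 8-byte LUN
 * field (byte 0 must be 1, byte 1 addresses the target). E.g.
 * lun_buf[2] = 0x40 and lun_buf[3] = 0x05 give (0x4005 & 0x3FFF),
 * i.e. LUN 5.
 */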
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
struct vhost_scsi_tpg **vs_tpg, *tpg;
struct virtio_scsi_cmd_req v_req;
struct virtio_scsi_cmd_req_pi v_req_pi;
struct vhost_scsi_ctx vc;
struct vhost_scsi_cmd *cmd;
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
int ret, prot_bytes, i, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
void *cdb;
mutex_lock(&vq->mutex);
/*
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
vs_tpg = vhost_vq_get_backend(vq);
if (!vs_tpg)
goto out;
memset(&vc, 0, sizeof(vc));
vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
vhost_disable_notify(&vs->dev, vq);
do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
/*
* Setup pointers and values based upon different virtio-scsi
* request header if T10_PI is enabled in KVM guest.
*/
if (t10_pi) {
vc.req = &v_req_pi;
vc.req_size = sizeof(v_req_pi);
vc.lunp = &v_req_pi.lun[0];
vc.target = &v_req_pi.lun[1];
} else {
vc.req = &v_req;
vc.req_size = sizeof(v_req);
vc.lunp = &v_req.lun[0];
vc.target = &v_req.lun[1];
}
/*
* Validate the size of request and response buffers.
* Check for a sane response buffer so we can report
* early errors back to the guest.
*/
ret = vhost_scsi_chk_size(vq, &vc);
if (ret)
goto err;
ret = vhost_scsi_get_req(vq, &vc, &tpg);
if (ret)
goto err;
ret = -EIO; /* bad target on any error from here on */
/*
* Determine data_direction by calculating the total outgoing
* iovec sizes + incoming iovec sizes vs. virtio-scsi request +
* response headers respectively.
*
* For DMA_TO_DEVICE this is out_iter, which is already pointing
* to the right place.
*
* For DMA_FROM_DEVICE, the iovec will be just past the end
* of the virtio-scsi response header in either the same
* or immediately following iovec.
*
* Any associated T10_PI bytes for the outgoing / incoming
* payloads are included in calculation of exp_data_len here.
*/
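/*
 * E.g. a WRITE carrying a 4 KiB payload arrives with out_size ==
 * vc.req_size + 4096, giving DMA_TO_DEVICE and exp_data_len == 4096,
 * while a READ instead shows up as in_size == vc.rsp_size + payload.
 */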
prot_bytes = 0;
if (vc.out_size > vc.req_size) {
data_direction = DMA_TO_DEVICE;
exp_data_len = vc.out_size - vc.req_size;
data_iter = vc.out_iter;
} else if (vc.in_size > vc.rsp_size) {
data_direction = DMA_FROM_DEVICE;
exp_data_len = vc.in_size - vc.rsp_size;
iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
vc.rsp_size + exp_data_len);
iov_iter_advance(&in_iter, vc.rsp_size);
data_iter = in_iter;
} else {
data_direction = DMA_NONE;
exp_data_len = 0;
}
/*
* If T10_PI header + payload is present, setup prot_iter values
* and recalculate data_iter for vhost_scsi_mapal() mapping to
* host scatterlists via get_user_pages_fast().
*/
if (t10_pi) {
if (v_req_pi.pi_bytesout) {
if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero pi_bytesout,"
" but wrong data_direction\n");
goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero pi_bytesin,"
" but wrong data_direction\n");
goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
/*
* Set prot_iter to data_iter and truncate it to
* prot_bytes, and advance data_iter past any
* preceding prot_bytes that may be present.
*
* Also fix up the exp_data_len to reflect only the
* actual data payload length.
*/
if (prot_bytes) {
exp_data_len -= prot_bytes;
prot_iter = data_iter;
iov_iter_truncate(&prot_iter, prot_bytes);
iov_iter_advance(&data_iter, prot_bytes);
}
tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
lun = vhost_buf_to_lun(v_req_pi.lun);
} else {
tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
lun = vhost_buf_to_lun(v_req.lun);
}
/*
* Check that the received CDB size does not exceed our
* hardcoded max for vhost-scsi, then get a pre-allocated
* cmd descriptor for the new virtio-scsi tag.
*
* TODO what if cdb was too small for varlen cdb header?
*/
if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
vq_err(vq, "Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
goto err;
}
cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes,
data_direction);
if (IS_ERR(cmd)) {
vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
PTR_ERR(cmd));
goto err;
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
for (i = 0; i < vc.in ; i++)
cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
cmd->tvc_in_iovs = vc.in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cmd->tvc_cdb[0], cmd->tvc_lun);
pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
" %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) {
if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
&prot_iter, exp_data_len,
&data_iter))) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
}
}
/*
* Save the descriptor from vhost_get_vq_desc() to be used to
* complete the virtio-scsi request in TCM callback context via
* vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
cmd->tvc_vq_desc = vc.head;
vhost_scsi_target_queue_cmd(cmd);
ret = 0;
err:
/*
* ENXIO: No more requests, or read error, wait for next kick
* EINVAL: Invalid response buffer, drop the request
* EIO: Respond with bad target
* EAGAIN: Pending request
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
int in_iovs, int vq_desc, struct iovec *resp_iov,
int tmf_resp_code)
{
struct virtio_scsi_ctrl_tmf_resp rsp;
struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
rsp.response = tmf_resp_code;
iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}
static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
vwork);
struct vhost_virtqueue *ctl_vq, *vq;
int resp_code, i;
if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
/*
* Flush IO vqs that don't share a worker with the ctl to make
* sure they have sent their responses before us.
*/
ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
vq = &tmf->vhost->vqs[i].vq;
if (vhost_vq_is_setup(vq) &&
vq->worker != ctl_vq->worker)
vhost_vq_flush(vq);
}
resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
}
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
vhost_scsi_release_tmf_res(tmf);
}
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
struct vhost_virtqueue *vq,
struct virtio_scsi_ctrl_tmf_req *vtmf,
struct vhost_scsi_ctx *vc)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_tmf *tmf;
if (vhost32_to_cpu(vq, vtmf->subtype) !=
VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
goto send_reject;
if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
goto send_reject;
}
tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
if (!tmf)
goto send_reject;
vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
tmf->vhost = vs;
tmf->svq = svq;
tmf->resp_iov = vq->iov[vc->out];
tmf->vq_desc = vc->head;
tmf->in_iovs = vc->in;
tmf->inflight = vhost_scsi_get_inflight(vq);
if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
vhost_buf_to_lun(vtmf->lun), NULL,
TMR_LUN_RESET, GFP_KERNEL, 0,
TARGET_SCF_ACK_KREF) < 0) {
vhost_scsi_release_tmf_res(tmf);
goto send_reject;
}
return;
send_reject:
vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
VIRTIO_SCSI_S_FUNCTION_REJECTED);
}
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
struct virtio_scsi_ctrl_an_resp rsp;
struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
rsp.response = VIRTIO_SCSI_S_OK;
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
struct vhost_scsi_tpg *tpg;
union {
__virtio32 type;
struct virtio_scsi_ctrl_an_req an;
struct virtio_scsi_ctrl_tmf_req tmf;
} v_req;
struct vhost_scsi_ctx vc;
size_t typ_size;
int ret, c = 0;
mutex_lock(&vq->mutex);
/*
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
if (!vhost_vq_get_backend(vq))
goto out;
memset(&vc, 0, sizeof(vc));
vhost_disable_notify(&vs->dev, vq);
do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
/*
* Get the request type first in order to setup
* other parameters dependent on the type.
*/
vc.req = &v_req.type;
typ_size = sizeof(v_req.type);
if (unlikely(!copy_from_iter_full(vc.req, typ_size,
&vc.out_iter))) {
vq_err(vq, "Faulted on copy_from_iter tmf type\n");
/*
* The size of the response buffer depends on the
* request type and must be validated against it.
* Since the request type is not known, don't send
* a response.
*/
continue;
}
switch (vhost32_to_cpu(vq, v_req.type)) {
case VIRTIO_SCSI_T_TMF:
vc.req = &v_req.tmf;
vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
vc.lunp = &v_req.tmf.lun[0];
vc.target = &v_req.tmf.lun[1];
break;
case VIRTIO_SCSI_T_AN_QUERY:
case VIRTIO_SCSI_T_AN_SUBSCRIBE:
vc.req = &v_req.an;
vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
vc.lunp = &v_req.an.lun[0];
vc.target = NULL;
break;
default:
vq_err(vq, "Unknown control request %d", v_req.type);
continue;
}
/*
* Validate the size of request and response buffers.
* Check for a sane response buffer so we can report
* early errors back to the guest.
*/
ret = vhost_scsi_chk_size(vq, &vc);
if (ret)
goto err;
/*
* Get the rest of the request now that its size is known.
*/
vc.req += typ_size;
vc.req_size -= typ_size;
ret = vhost_scsi_get_req(vq, &vc, &tpg);
if (ret)
goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
else
vhost_scsi_send_an_resp(vs, vq, &vc);
err:
/*
* ENXIO: No more requests, or read error, wait for next kick
* EINVAL: Invalid response buffer, drop the request
* EIO: Respond with bad target
* EAGAIN: Pending request
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
pr_debug("%s: The handling func for control queue.\n", __func__);
vhost_scsi_ctl_handle_vq(vs, vq);
}
static void
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
struct vhost_scsi_tpg *tpg, struct se_lun *lun,
u32 event, u32 reason)
{
struct vhost_scsi_evt *evt;
evt = vhost_scsi_allocate_evt(vs, event, reason);
if (!evt)
return;
if (tpg && lun) {
/* TODO: share lun setup code with virtio-scsi.ko */
/*
* Note: evt->event is zeroed when we allocate it and
* lun[4-7] need to be zero according to virtio-scsi spec.
*/
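/* e.g. unpacked_lun 300 (0x12c) encodes as lun[2] = 0x41, lun[3] = 0x2c */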
evt->event.lun[0] = 0x01;
evt->event.lun[1] = tpg->tport_tpgt;
if (lun->unpacked_lun >= 256)
evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
evt->event.lun[3] = lun->unpacked_lun & 0xFF;
}
llist_add(&evt->list, &vs->vs_event_list);
vhost_vq_work_queue(vq, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
mutex_lock(&vq->mutex);
if (!vhost_vq_get_backend(vq))
goto out;
if (vs->vs_events_missed)
vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
0);
out:
mutex_unlock(&vq->mutex);
}
static void vhost_scsi_handle_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
vhost_scsi_handle_vq(vs, vq);
}
/* Callers must hold dev mutex */
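/*
 * Each inflight kref starts at 1 via kref_init() and every outstanding
 * command holds one extra reference, so dropping the initial reference
 * below makes inflight->comp fire exactly when the last command calls
 * vhost_scsi_put_inflight().
 */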
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
int i;
/* Init new inflight and remember the old inflight */
vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
for (i = 0; i < vs->dev.nvqs; i++)
kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < vs->dev.nvqs; i++)
wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
if (!svq->scsi_cmds)
return;
for (i = 0; i < svq->max_cmds; i++) {
tv_cmd = &svq->scsi_cmds[i];
kfree(tv_cmd->tvc_sgl);
kfree(tv_cmd->tvc_prot_sgl);
kfree(tv_cmd->tvc_upages);
kfree(tv_cmd->tvc_resp_iov);
}
sbitmap_free(&svq->scsi_tags);
kfree(svq->scsi_cmds);
svq->scsi_cmds = NULL;
}
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
if (svq->scsi_cmds)
return 0;
if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
NUMA_NO_NODE, false, true))
return -ENOMEM;
svq->max_cmds = max_cmds;
svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
if (!svq->scsi_cmds) {
sbitmap_free(&svq->scsi_tags);
return -ENOMEM;
}
for (i = 0; i < max_cmds; i++) {
tv_cmd = &svq->scsi_cmds[i];
tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
sizeof(struct scatterlist),
GFP_KERNEL);
if (!tv_cmd->tvc_sgl) {
pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
goto out;
}
tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
sizeof(struct page *),
GFP_KERNEL);
if (!tv_cmd->tvc_upages) {
pr_err("Unable to allocate tv_cmd->tvc_upages\n");
goto out;
}
tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
sizeof(struct iovec),
GFP_KERNEL);
if (!tv_cmd->tvc_resp_iov) {
pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
goto out;
}
tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
sizeof(struct scatterlist),
GFP_KERNEL);
if (!tv_cmd->tvc_prot_sgl) {
pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
goto out;
}
}
return 0;
out:
vhost_scsi_destroy_vq_cmds(vq);
return -ENOMEM;
}
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* vhost_scsi_tpg with an active struct vhost_scsi_nexus
*
* The lock nesting rule is:
* vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
*/
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t)
{
struct se_portal_group *se_tpg;
struct vhost_scsi_tport *tv_tport;
struct vhost_scsi_tpg *tpg;
struct vhost_scsi_tpg **vs_tpg;
struct vhost_virtqueue *vq;
int index, ret, i, len;
bool match = false;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT;
goto out;
}
}
len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
vs_tpg = kzalloc(len, GFP_KERNEL);
if (!vs_tpg) {
ret = -ENOMEM;
goto out;
}
if (vs->vs_tpg)
memcpy(vs_tpg, vs->vs_tpg, len);
mutex_lock(&vhost_scsi_mutex);
list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
mutex_lock(&tpg->tv_tpg_mutex);
if (!tpg->tpg_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
continue;
}
if (tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
continue;
}
tv_tport = tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
mutex_unlock(&tpg->tv_tpg_mutex);
mutex_unlock(&vhost_scsi_mutex);
ret = -EEXIST;
goto undepend;
}
/*
* In order to ensure individual vhost-scsi configfs
* groups cannot be removed while in use by vhost ioctl,
* go ahead and take an explicit se_tpg->tpg_group.cg_item
* dependency now.
*/
se_tpg = &tpg->se_tpg;
ret = target_depend_item(&se_tpg->tpg_group.cg_item);
if (ret) {
pr_warn("target_depend_item() failed: %d\n", ret);
mutex_unlock(&tpg->tv_tpg_mutex);
mutex_unlock(&vhost_scsi_mutex);
goto undepend;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
}
mutex_unlock(&vhost_scsi_mutex);
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
if (!vhost_vq_is_setup(vq))
continue;
ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
if (ret)
goto destroy_vq_cmds;
}
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
} else {
ret = -EEXIST;
}
/*
* Act as synchronize_rcu to make sure access to
* old vs->vs_tpg is finished.
*/
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;
goto out;
destroy_vq_cmds:
for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
if (!vhost_vq_get_backend(&vs->vqs[i].vq))
vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
}
undepend:
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
tpg = vs_tpg[i];
if (tpg) {
mutex_lock(&tpg->tv_tpg_mutex);
tpg->vhost_scsi = NULL;
tpg->tv_tpg_vhost_count--;
mutex_unlock(&tpg->tv_tpg_mutex);
target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
}
}
kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex);
return ret;
}
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t)
{
struct se_portal_group *se_tpg;
struct vhost_scsi_tport *tv_tport;
struct vhost_scsi_tpg *tpg;
struct vhost_virtqueue *vq;
bool match = false;
int index, ret, i;
u8 target;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
ret = -EFAULT;
goto err_dev;
}
}
if (!vs->vs_tpg) {
ret = 0;
goto err_dev;
}
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
tpg = vs->vs_tpg[target];
if (!tpg)
continue;
tv_tport = tpg->tport;
if (!tv_tport) {
ret = -ENODEV;
goto err_dev;
}
if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
tv_tport->tport_name, tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
ret = -EINVAL;
goto err_dev;
}
match = true;
}
if (!match)
goto free_vs_tpg;
/* Prevent new cmds from starting and accessing the tpgs/sessions */
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
/* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
/*
* We can now release our hold on the tpg and sessions and userspace
* can free them after this point.
*/
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
tpg = vs->vs_tpg[target];
if (!tpg)
continue;
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_vhost_count--;
tpg->vhost_scsi = NULL;
vs->vs_tpg[target] = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
se_tpg = &tpg->se_tpg;
target_undepend_item(&se_tpg->tpg_group.cg_item);
}
free_vs_tpg:
/*
* Act as synchronize_rcu to make sure access to
* old vs->vs_tpg is finished.
*/
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = NULL;
WARN_ON(vs->vs_events_nr);
mutex_unlock(&vs->dev.mutex);
return 0;
err_dev:
mutex_unlock(&vs->dev.mutex);
return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
struct vhost_virtqueue *vq;
int i;
if (features & ~VHOST_SCSI_FEATURES)
return -EOPNOTSUPP;
mutex_lock(&vs->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vs->dev)) {
mutex_unlock(&vs->dev.mutex);
return -EFAULT;
}
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
}
mutex_unlock(&vs->dev.mutex);
return 0;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi_virtqueue *svq;
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
VHOST_SCSI_MAX_IO_VQ);
nvqs = VHOST_SCSI_MAX_IO_VQ;
} else if (nvqs == 0) {
pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
nvqs = 1;
}
nvqs += VHOST_SCSI_VQ_IO;
vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
GFP_KERNEL | __GFP_ZERO);
if (!vs->old_inflight)
goto err_inflight;
vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
GFP_KERNEL | __GFP_ZERO);
if (!vs->vqs)
goto err_vqs;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_local_vqs;
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
vs->vs_events_nr = 0;
vs->vs_events_missed = false;
vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
svq = &vs->vqs[i];
vqs[i] = &svq->vq;
svq->vs = vs;
init_llist_head(&svq->completion_list);
vhost_work_init(&svq->completion_work,
vhost_scsi_complete_cmd_work);
svq->vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
f->private_data = vs;
return 0;
err_local_vqs:
kfree(vs->vqs);
err_vqs:
kfree(vs->old_inflight);
err_inflight:
kvfree(vs);
err_vs:
return r;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs = f->private_data;
struct vhost_scsi_target t;
mutex_lock(&vs->dev.mutex);
memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
mutex_unlock(&vs->dev.mutex);
vhost_scsi_clear_endpoint(vs, &t);
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
kfree(vs->vqs);
kfree(vs->old_inflight);
kvfree(vs);
return 0;
}
static long
vhost_scsi_ioctl(struct file *f,
unsigned int ioctl,
unsigned long arg)
{
struct vhost_scsi *vs = f->private_data;
struct vhost_scsi_target backend;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u32 __user *eventsp = argp;
u32 events_missed;
u64 features;
int r, abi_version = VHOST_SCSI_ABI_VERSION;
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
switch (ioctl) {
case VHOST_SCSI_SET_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;
return vhost_scsi_set_endpoint(vs, &backend);
case VHOST_SCSI_CLEAR_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;
return vhost_scsi_clear_endpoint(vs, &backend);
case VHOST_SCSI_GET_ABI_VERSION:
if (copy_to_user(argp, &abi_version, sizeof abi_version))
return -EFAULT;
return 0;
case VHOST_SCSI_SET_EVENTS_MISSED:
if (get_user(events_missed, eventsp))
return -EFAULT;
mutex_lock(&vq->mutex);
vs->vs_events_missed = events_missed;
mutex_unlock(&vq->mutex);
return 0;
case VHOST_SCSI_GET_EVENTS_MISSED:
mutex_lock(&vq->mutex);
events_missed = vs->vs_events_missed;
mutex_unlock(&vq->mutex);
if (put_user(events_missed, eventsp))
return -EFAULT;
return 0;
case VHOST_GET_FEATURES:
features = VHOST_SCSI_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
return vhost_scsi_set_features(vs, features);
case VHOST_NEW_WORKER:
case VHOST_FREE_WORKER:
case VHOST_ATTACH_VRING_WORKER:
case VHOST_GET_VRING_WORKER:
mutex_lock(&vs->dev.mutex);
r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
mutex_unlock(&vs->dev.mutex);
return r;
default:
mutex_lock(&vs->dev.mutex);
r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
/* TODO: flush backend after dev ioctl. */
if (r == -ENOIOCTLCMD)
r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
mutex_unlock(&vs->dev.mutex);
return r;
}
}
static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
static struct miscdevice vhost_scsi_misc = {
MISC_DYNAMIC_MINOR,
"vhost-scsi",
&vhost_scsi_fops,
};
static int __init vhost_scsi_register(void)
{
return misc_register(&vhost_scsi_misc);
}
static void vhost_scsi_deregister(void)
{
misc_deregister(&vhost_scsi_misc);
}
static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS:
return "SAS";
case SCSI_PROTOCOL_FCP:
return "FCP";
case SCSI_PROTOCOL_ISCSI:
return "iSCSI";
default:
break;
}
return "Unknown";
}
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
struct se_lun *lun, bool plug)
{
struct vhost_scsi *vs = tpg->vhost_scsi;
struct vhost_virtqueue *vq;
u32 reason;
if (!vs)
return;
if (plug)
reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
else
reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex);
/*
* We can't queue events if the backend has been cleared, because
* we could end up queueing an event after the flush.
*/
if (!vhost_vq_get_backend(vq))
goto unlock;
if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
vhost_scsi_send_evt(vs, vq, tpg, lun,
VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
unlock:
mutex_unlock(&vq->mutex);
}
static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
vhost_scsi_do_plug(tpg, lun, true);
}
static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
vhost_scsi_do_plug(tpg, lun, false);
}
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
vhost_scsi_hotplug(tpg, lun);
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;
}
static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
vhost_scsi_hotunplug(tpg, lun);
mutex_unlock(&tpg->tv_tpg_mutex);
}
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
struct config_item *item, const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
unsigned long val;
int ret = kstrtoul(page, 0, &val);
if (ret) {
pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
return ret;
}
if (val != 0 && val != 1 && val != 3) {
pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
return -EINVAL;
}
tpg->tv_fabric_prot_type = val;
return count;
}
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}
CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
NULL,
};
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
}
tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate struct vhost_scsi_nexus\n");
return -ENOMEM;
}
/*
* Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
(unsigned char *)name, tv_nexus, NULL);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
return -ENOMEM;
}
tpg->tpg_nexus = tv_nexus;
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;
}
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
struct se_session *se_sess;
struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENODEV;
}
se_sess = tv_nexus->tvn_se_sess;
if (!se_sess) {
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENODEV;
}
if (tpg->tv_tpg_port_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG port count: %d\n",
tpg->tv_tpg_port_count);
return -EBUSY;
}
if (tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG vhost count: %d\n",
tpg->tv_tpg_vhost_count);
return -EBUSY;
}
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
return 0;
}
static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
struct vhost_scsi_nexus *tv_nexus;
ssize_t ret;
mutex_lock(&tpg->tv_tpg_mutex);
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENODEV;
}
ret = sysfs_emit(page, "%s\n",
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
mutex_unlock(&tpg->tv_tpg_mutex);
return ret;
}
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
struct vhost_scsi_tport *tport_wwn = tpg->tport;
unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
int ret;
/*
* Shutdown the active I_T nexus if 'NULL' is passed..
*/
if (!strncmp(page, "NULL", 4)) {
ret = vhost_scsi_drop_nexus(tpg);
return (!ret) ? count : ret;
}
/*
* Otherwise make sure the passed virtual Initiator port WWN matches
* the fabric protocol_id set in vhost_scsi_make_tport(), and call
* vhost_scsi_make_nexus().
*/
if (strlen(page) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, VHOST_SCSI_NAMELEN);
return -EINVAL;
}
snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
goto check_newline;
}
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
* Clear any trailing newline for the NAA WWN
*/
check_newline:
if (i_port[strlen(i_port)-1] == '\n')
i_port[strlen(i_port)-1] = '\0';
ret = vhost_scsi_make_nexus(tpg, port_ptr);
if (ret < 0)
return ret;
return count;
}
CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
&vhost_scsi_tpg_attr_nexus,
NULL,
};
static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
struct vhost_scsi_tpg *tpg;
u16 tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
if (ret < 0) {
kfree(tpg);
return NULL;
}
mutex_lock(&vhost_scsi_mutex);
list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg;
}
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
mutex_lock(&vhost_scsi_mutex);
list_del(&tpg->tv_tpg_list);
mutex_unlock(&vhost_scsi_mutex);
/*
* Release the virtual I_T Nexus for this vhost TPG
*/
vhost_scsi_drop_nexus(tpg);
/*
* Deregister the se_tpg from TCM..
*/
core_tpg_deregister(se_tpg);
kfree(tpg);
}
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct vhost_scsi_tport *tport;
char *ptr;
u64 wwpn = 0;
int off = 0;
/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport) {
pr_err("Unable to allocate struct vhost_scsi_tport");
return ERR_PTR(-ENOMEM);
}
tport->tport_wwpn = wwpn;
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
*/
ptr = strstr(name, "naa.");
if (ptr) {
tport->tport_proto_id = SCSI_PROTOCOL_SAS;
goto check_len;
}
ptr = strstr(name, "fc.");
if (ptr) {
tport->tport_proto_id = SCSI_PROTOCOL_FCP;
off = 3; /* Skip over "fc." */
goto check_len;
}
ptr = strstr(name, "iqn.");
if (ptr) {
tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
goto check_len;
}
pr_err("Unable to locate prefix for emulated Target Port:"
" %s\n", name);
kfree(tport);
return ERR_PTR(-EINVAL);
check_len:
if (strlen(name) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated %s Address: %s, exceeds"
" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
VHOST_SCSI_NAMELEN);
kfree(tport);
return ERR_PTR(-EINVAL);
}
snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
return &tport->tport_wwn;
}
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
tport->tport_name);
kfree(tport);
}
static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
}
CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
&vhost_scsi_wwn_attr_version,
NULL,
};
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.fabric_name = "vhost",
.max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,
.tpg_check_demo_mode_cache = vhost_scsi_check_true,
.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
.release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
.sess_get_initiator_sid = NULL,
.write_pending = vhost_scsi_write_pending,
.queue_data_in = vhost_scsi_queue_data_in,
.queue_status = vhost_scsi_queue_status,
.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
.aborted_task = vhost_scsi_aborted_task,
/*
* Setup callers for generic logic in target_core_fabric_configfs.c
*/
.fabric_make_wwn = vhost_scsi_make_tport,
.fabric_drop_wwn = vhost_scsi_drop_tport,
.fabric_make_tpg = vhost_scsi_make_tpg,
.fabric_drop_tpg = vhost_scsi_drop_tpg,
.fabric_post_link = vhost_scsi_port_link,
.fabric_pre_unlink = vhost_scsi_port_unlink,
.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
};
static int __init vhost_scsi_init(void)
{
int ret = -ENOMEM;
pr_debug("TCM_VHOST fabric module %s on %s/%s"
" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
ret = vhost_scsi_register();
if (ret < 0)
goto out;
ret = target_register_template(&vhost_scsi_ops);
if (ret < 0)
goto out_vhost_scsi_deregister;
return 0;
out_vhost_scsi_deregister:
vhost_scsi_deregister();
out:
return ret;
}
static void vhost_scsi_exit(void)
{
target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
}
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);
| linux-master | drivers/vhost/scsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Red Hat, Inc.
* Author: Jason Wang <[email protected]>
*
* IOTLB implementation for vhost.
*/
#include <linux/slab.h>
#include <linux/vhost_iotlb.h>
#include <linux/module.h>
#define MOD_VERSION "0.1"
#define MOD_DESC "VHOST IOTLB"
#define MOD_AUTHOR "Jason Wang <[email protected]>"
#define MOD_LICENSE "GPL v2"
#define START(map) ((map)->start)
#define LAST(map) ((map)->last)
INTERVAL_TREE_DEFINE(struct vhost_iotlb_map,
rb, __u64, __subtree_last,
START, LAST, static inline, vhost_iotlb_itree);
/**
* vhost_iotlb_map_free - remove a map node and free it
* @iotlb: the IOTLB
* @map: the map to be removed and freed
*/
void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
struct vhost_iotlb_map *map)
{
vhost_iotlb_itree_remove(map, &iotlb->root);
list_del(&map->link);
kfree(map);
iotlb->nmaps--;
}
EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
/**
* vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
* @addr: the address that is mapped to @start
* @perm: access permission of this range
* @opaque: the opaque pointer for the new mapping
*
* Returns an error if @last is smaller than @start or if memory
* allocation fails
*/
int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
u64 start, u64 last,
u64 addr, unsigned int perm,
void *opaque)
{
struct vhost_iotlb_map *map;
if (last < start)
return -EFAULT;
/* If the range being mapped is [0, ULONG_MAX], split it into two entries,
* otherwise its size would overflow u64.
*/
if (start == 0 && last == ULONG_MAX) {
u64 mid = last / 2;
int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
perm, opaque);
if (err)
return err;
addr += mid + 1;
start = mid + 1;
}
if (iotlb->limit &&
iotlb->nmaps == iotlb->limit &&
iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
map = list_first_entry(&iotlb->list, typeof(*map), link);
vhost_iotlb_map_free(iotlb, map);
}
map = kmalloc(sizeof(*map), GFP_ATOMIC);
if (!map)
return -ENOMEM;
map->start = start;
map->size = last - start + 1;
map->last = last;
map->addr = addr;
map->perm = perm;
map->opaque = opaque;
iotlb->nmaps++;
vhost_iotlb_itree_insert(map, &iotlb->root);
INIT_LIST_HEAD(&map->link);
list_add_tail(&map->link, &iotlb->list);
return 0;
}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
u64 start, u64 last,
u64 addr, unsigned int perm)
{
return vhost_iotlb_add_range_ctx(iotlb, start, last,
addr, perm, NULL);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
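/*
 * Illustrative usage sketch (not part of this file; the values and the
 * elided error handling are made up): a consumer such as a vDPA parent
 * driver might build and tear down an IOTLB roughly like this:
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *
 *	if (!iotlb)
 *		return -ENOMEM;
 *	map IOVA [0x1000, 0x1fff] read/write to host address 0xdead0000:
 *	vhost_iotlb_add_range(iotlb, 0x1000, 0x1fff, 0xdead0000,
 *			      VHOST_ACCESS_RW);
 *	...
 *	vhost_iotlb_free(iotlb);
 */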
/**
* vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
*/
void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
{
struct vhost_iotlb_map *map;
while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
start, last)))
vhost_iotlb_map_free(iotlb, map);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
/**
* vhost_iotlb_init - initialize a vhost IOTLB
* @iotlb: the IOTLB that needs to be initialized
* @limit: maximum number of IOTLB entries
* @flags: VHOST_IOTLB_FLAG_XXX
*/
void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
unsigned int flags)
{
iotlb->root = RB_ROOT_CACHED;
iotlb->limit = limit;
iotlb->nmaps = 0;
iotlb->flags = flags;
INIT_LIST_HEAD(&iotlb->list);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_init);
/**
* vhost_iotlb_alloc - add a new vhost IOTLB
* @limit: maximum number of IOTLB entries
* @flags: VHOST_IOTLB_FLAG_XXX
*
* Returns NULL if memory allocation fails
*/
struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
{
struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL);
if (!iotlb)
return NULL;
vhost_iotlb_init(iotlb, limit, flags);
return iotlb;
}
EXPORT_SYMBOL_GPL(vhost_iotlb_alloc);
/**
* vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries)
* @iotlb: the IOTLB to be reset
*/
void vhost_iotlb_reset(struct vhost_iotlb *iotlb)
{
vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_reset);
/**
* vhost_iotlb_free - reset and free vhost IOTLB
* @iotlb: the IOTLB to be freed
*/
void vhost_iotlb_free(struct vhost_iotlb *iotlb)
{
if (iotlb) {
vhost_iotlb_reset(iotlb);
kfree(iotlb);
}
}
EXPORT_SYMBOL_GPL(vhost_iotlb_free);
/**
* vhost_iotlb_itree_first - return the first overlapped range
* @iotlb: the IOTLB
* @start: start of IOVA range
* @last: last byte in IOVA range
*/
struct vhost_iotlb_map *
vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
{
return vhost_iotlb_itree_iter_first(&iotlb->root, start, last);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
/**
* vhost_iotlb_itree_next - return the next overlapped range
* @map: the starting map node
* @start: start of IOVA range
* @last: last byte of the IOVA range
*/
struct vhost_iotlb_map *
vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
{
return vhost_iotlb_itree_iter_next(map, start, last);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next);
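/*
 * Illustrative sketch (assumed caller context, locking elided): all maps
 * overlapping [start, last] are typically walked with the two helpers
 * above:
 *
 *	struct vhost_iotlb_map *map;
 *
 *	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
 *	     map = vhost_iotlb_itree_next(map, start, last))
 *		pr_debug("iova [%llx, %llx] -> %llx\n",
 *			 map->start, map->last, map->addr);
 */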
MODULE_VERSION(MOD_VERSION);
MODULE_DESCRIPTION(MOD_DESC);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_LICENSE(MOD_LICENSE);
| linux-master | drivers/vhost/iotlb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2020 Intel Corporation.
* Copyright (C) 2020 Red Hat, Inc.
*
* Author: Tiwei Bie <[email protected]>
* Jason Wang <[email protected]>
*
* Thanks to Michael S. Tsirkin for the valuable comments and
* suggestions, and thanks to Cunming Liang and Zhihong Wang for all
* their support.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include "vhost.h"
enum {
VHOST_VDPA_BACKEND_FEATURES =
(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
#define VHOST_VDPA_IOTLB_BUCKETS 16
struct vhost_vdpa_as {
struct hlist_node hash_link;
struct vhost_iotlb iotlb;
u32 id;
};
struct vhost_vdpa {
struct vhost_dev vdev;
struct iommu_domain *domain;
struct vhost_virtqueue *vqs;
struct completion completion;
struct vdpa_device *vdpa;
struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
struct device dev;
struct cdev cdev;
atomic_t opened;
u32 nvqs;
int virtio_id;
int minor;
struct eventfd_ctx *config_ctx;
int in_batch;
struct vdpa_iova_range range;
u32 batch_asid;
};
static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb, u64 start,
u64 last, u32 asid);
static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
struct vhost_vdpa_as *as = container_of(iotlb, struct vhost_vdpa_as, iotlb);
return as->id;
}
static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
struct vhost_vdpa_as *as;
hlist_for_each_entry(as, head, hash_link)
if (as->id == asid)
return as;
return NULL;
}
static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
struct vhost_vdpa_as *as = asid_to_as(v, asid);
if (!as)
return NULL;
return &as->iotlb;
}
static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
struct vhost_vdpa_as *as;
if (asid_to_as(v, asid))
return NULL;
if (asid >= v->vdpa->nas)
return NULL;
as = kmalloc(sizeof(*as), GFP_KERNEL);
if (!as)
return NULL;
vhost_iotlb_init(&as->iotlb, 0, 0);
as->id = asid;
hlist_add_head(&as->hash_link, head);
return as;
}
static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
u32 asid)
{
struct vhost_vdpa_as *as = asid_to_as(v, asid);
if (as)
return as;
return vhost_vdpa_alloc_as(v, asid);
}
static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
struct vhost_vdpa_as *as = asid_to_as(v, asid);
if (!as)
return -EINVAL;
hlist_del(&as->hash_link);
vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
kfree(as);
return 0;
}
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
const struct vdpa_config_ops *ops = v->vdpa->config;
ops->kick_vq(v->vdpa, vq - v->vqs);
}
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
struct vhost_virtqueue *vq = private;
struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
eventfd_signal(call_ctx, 1);
return IRQ_HANDLED;
}
static irqreturn_t vhost_vdpa_config_cb(void *private)
{
struct vhost_vdpa *v = private;
struct eventfd_ctx *config_ctx = v->config_ctx;
if (config_ctx)
eventfd_signal(config_ctx, 1);
return IRQ_HANDLED;
}
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
struct vhost_virtqueue *vq = &v->vqs[qid];
const struct vdpa_config_ops *ops = v->vdpa->config;
struct vdpa_device *vdpa = v->vdpa;
int ret, irq;
if (!ops->get_vq_irq)
return;
irq = ops->get_vq_irq(vdpa, qid);
if (irq < 0)
return;
irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
if (unlikely(ret))
dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
qid, vq->call_ctx.producer.token, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
struct vhost_virtqueue *vq = &v->vqs[qid];
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
v->in_batch = 0;
return vdpa_reset(vdpa);
}
static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!vdpa->use_va || !ops->bind_mm)
return 0;
return ops->bind_mm(vdpa, v->vdev.mm);
}
static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!vdpa->use_va || !ops->unbind_mm)
return;
ops->unbind_mm(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u32 device_id;
device_id = ops->get_device_id(vdpa);
if (copy_to_user(argp, &device_id, sizeof(device_id)))
return -EFAULT;
return 0;
}
static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u8 status;
status = ops->get_status(vdpa);
if (copy_to_user(statusp, &status, sizeof(status)))
return -EFAULT;
return 0;
}
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u8 status, status_old;
u32 nvqs = v->nvqs;
int ret;
u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
return -EFAULT;
status_old = ops->get_status(vdpa);
/*
* Userspace shouldn't remove status bits unless it resets the
* status to 0.
*/
if (status != 0 && (status_old & ~status) != 0)
return -EINVAL;
if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
ret = vdpa_reset(vdpa);
if (ret)
return ret;
} else
vdpa_set_status(vdpa, status);
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
vhost_vdpa_setup_vq_irq(v, i);
return 0;
}
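/*
 * Illustrative userspace sketch of the rule enforced above (fd is an
 * assumed open vhost-vdpa character device, error handling elided):
 * status bits may only be added, never removed, except by writing 0,
 * which performs a full reset:
 *
 *	__u8 s = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 *	s |= VIRTIO_CONFIG_S_FEATURES_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);	- adding a bit: accepted
 *	s = VIRTIO_CONFIG_S_ACKNOWLEDGE;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);	- drops DRIVER: -EINVAL
 *	s = 0;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);	- full reset: accepted
 */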
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vhost_vdpa_config *c)
{
struct vdpa_device *vdpa = v->vdpa;
size_t size = vdpa->config->get_config_size(vdpa);
if (c->len == 0 || c->off > size)
return -EINVAL;
if (c->len > size - c->off)
return -E2BIG;
return 0;
}
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
if (copy_from_user(&config, c, size))
return -EFAULT;
if (vhost_vdpa_config_validate(v, &config))
return -EINVAL;
buf = kvzalloc(config.len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
vdpa_get_config(vdpa, config.off, buf, config.len);
if (copy_to_user(c->buf, buf, config.len)) {
kvfree(buf);
return -EFAULT;
}
kvfree(buf);
return 0;
}
static long vhost_vdpa_set_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
if (copy_from_user(&config, c, size))
return -EFAULT;
if (vhost_vdpa_config_validate(v, &config))
return -EINVAL;
buf = vmemdup_user(c->buf, config.len);
if (IS_ERR(buf))
return PTR_ERR(buf);
vdpa_set_config(vdpa, config.off, buf, config.len);
kvfree(buf);
return 0;
}
static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
return ops->suspend;
}
static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
return ops->resume;
}
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u64 features;
features = ops->get_device_features(vdpa);
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
return 0;
}
static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!ops->get_backend_features)
return 0;
else
return ops->get_backend_features(vdpa);
}
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_dev *d = &v->vdev;
u64 actual_features;
u64 features;
int i;
/*
* It's not allowed to change the features after they have
* been negotiated.
*/
if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
return -EBUSY;
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (vdpa_set_features(vdpa, features))
return -EINVAL;
/* let the vqs know what has been configured */
actual_features = ops->get_driver_features(vdpa);
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq = d->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_features = actual_features;
mutex_unlock(&vq->mutex);
}
return 0;
}
static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u16 num;
num = ops->get_vq_num_max(vdpa);
if (copy_to_user(argp, &num, sizeof(num)))
return -EFAULT;
return 0;
}
static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
if (v->config_ctx) {
eventfd_ctx_put(v->config_ctx);
v->config_ctx = NULL;
}
}
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
struct vdpa_callback cb;
int fd;
struct eventfd_ctx *ctx;
cb.callback = vhost_vdpa_config_cb;
cb.private = v;
if (copy_from_user(&fd, argp, sizeof(fd)))
return -EFAULT;
ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
swap(ctx, v->config_ctx);
if (!IS_ERR_OR_NULL(ctx))
eventfd_ctx_put(ctx);
if (IS_ERR(v->config_ctx)) {
long ret = PTR_ERR(v->config_ctx);
v->config_ctx = NULL;
return ret;
}
v->vdpa->config->set_config_cb(v->vdpa, &cb);
return 0;
}
static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
struct vhost_vdpa_iova_range range = {
.first = v->range.first,
.last = v->range.last,
};
if (copy_to_user(argp, &range, sizeof(range)))
return -EFAULT;
return 0;
}
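/*
 * Illustrative userspace sketch (assumed open vhost-vdpa fd, error
 * handling elided): query the usable IOVA window before programming any
 * IOTLB mappings:
 *
 *	struct vhost_vdpa_iova_range r;
 *
 *	ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, &r);
 *
 * Every later mapping must then satisfy r.first <= iova and
 * iova + size - 1 <= r.last, which is exactly what
 * vhost_vdpa_process_iotlb_update() checks later in this file.
 */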
static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u32 size;
size = ops->get_config_size(vdpa);
if (copy_to_user(argp, &size, sizeof(size)))
return -EFAULT;
return 0;
}
static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
return -EFAULT;
return 0;
}
/* After a successful return of this ioctl the device must not process more
* virtqueue descriptors. The device can answer reads or writes of config
* fields as if it were not suspended. In particular, writing to "queue_enable"
* with a value of 1 will not make the device start processing buffers.
*/
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!ops->suspend)
return -EOPNOTSUPP;
return ops->suspend(vdpa);
}
/* After a successful return of this ioctl the device resumes processing
* virtqueue descriptors. The device becomes fully operational the same way it
* was before it was suspended.
*/
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!ops->resume)
return -EOPNOTSUPP;
return ops->resume(vdpa);
}
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vdpa_vq_state vq_state;
struct vdpa_callback cb;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
u32 idx;
long r;
r = get_user(idx, (u32 __user *)argp);
if (r < 0)
return r;
if (idx >= v->nvqs)
return -ENOBUFS;
idx = array_index_nospec(idx, v->nvqs);
vq = &v->vqs[idx];
switch (cmd) {
case VHOST_VDPA_SET_VRING_ENABLE:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
case VHOST_VDPA_GET_VRING_GROUP:
if (!ops->get_vq_group)
return -EOPNOTSUPP;
s.index = idx;
s.num = ops->get_vq_group(vdpa, idx);
if (s.num >= vdpa->ngroups)
return -EIO;
else if (copy_to_user(argp, &s, sizeof(s)))
return -EFAULT;
return 0;
case VHOST_VDPA_SET_GROUP_ASID:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
if (s.num >= vdpa->nas)
return -EINVAL;
if (!ops->set_group_asid)
return -EOPNOTSUPP;
return ops->set_group_asid(vdpa, idx, s.num);
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
return r;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq->last_avail_idx = vq_state.packed.last_avail_idx |
(vq_state.packed.last_avail_counter << 15);
vq->last_used_idx = vq_state.packed.last_used_idx |
(vq_state.packed.last_used_counter << 15);
} else {
vq->last_avail_idx = vq_state.split.avail_index;
}
break;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
if (r)
return r;
switch (cmd) {
case VHOST_SET_VRING_ADDR:
if (ops->set_vq_address(vdpa, idx,
(u64)(uintptr_t)vq->desc,
(u64)(uintptr_t)vq->avail,
(u64)(uintptr_t)vq->used))
r = -EINVAL;
break;
case VHOST_SET_VRING_BASE:
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
} else {
vq_state.split.avail_index = vq->last_avail_idx;
}
r = ops->set_vq_state(vdpa, idx, &vq_state);
break;
case VHOST_SET_VRING_CALL:
if (vq->call_ctx.ctx) {
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
} else {
cb.callback = NULL;
cb.private = NULL;
cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
ops->set_vq_num(vdpa, idx, vq->num);
break;
}
return r;
}
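/*
 * Worked example of the packed ring encoding used above (illustrative):
 * the 15-bit ring index and the 1-bit wrap counter are multiplexed into
 * one 16-bit value with the counter in bit 15. A last_avail_idx of
 * 0x8005 therefore decodes to index 5 with wrap counter 1, and encoding
 * index 5 with counter 1 yields (5 & 0x7fff) | (1 << 15) = 0x8005 again.
 */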
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct vhost_vdpa *v = filep->private_data;
struct vhost_dev *d = &v->vdev;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u64 features;
long r = 0;
if (cmd == VHOST_SET_BACKEND_FEATURES) {
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
BIT_ULL(VHOST_BACKEND_F_RESUME) |
BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
!vhost_vdpa_can_resume(v))
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
}
mutex_lock(&d->mutex);
switch (cmd) {
case VHOST_VDPA_GET_DEVICE_ID:
r = vhost_vdpa_get_device_id(v, argp);
break;
case VHOST_VDPA_GET_STATUS:
r = vhost_vdpa_get_status(v, argp);
break;
case VHOST_VDPA_SET_STATUS:
r = vhost_vdpa_set_status(v, argp);
break;
case VHOST_VDPA_GET_CONFIG:
r = vhost_vdpa_get_config(v, argp);
break;
case VHOST_VDPA_SET_CONFIG:
r = vhost_vdpa_set_config(v, argp);
break;
case VHOST_GET_FEATURES:
r = vhost_vdpa_get_features(v, argp);
break;
case VHOST_SET_FEATURES:
r = vhost_vdpa_set_features(v, argp);
break;
case VHOST_VDPA_GET_VRING_NUM:
r = vhost_vdpa_get_vring_num(v, argp);
break;
case VHOST_VDPA_GET_GROUP_NUM:
if (copy_to_user(argp, &v->vdpa->ngroups,
sizeof(v->vdpa->ngroups)))
r = -EFAULT;
break;
case VHOST_VDPA_GET_AS_NUM:
if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
r = -EFAULT;
break;
case VHOST_SET_LOG_BASE:
case VHOST_SET_LOG_FD:
r = -ENOIOCTLCMD;
break;
case VHOST_VDPA_SET_CONFIG_CALL:
r = vhost_vdpa_set_config_call(v, argp);
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
if (vhost_vdpa_can_suspend(v))
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
case VHOST_VDPA_GET_IOVA_RANGE:
r = vhost_vdpa_get_iova_range(v, argp);
break;
case VHOST_VDPA_GET_CONFIG_SIZE:
r = vhost_vdpa_get_config_size(v, argp);
break;
case VHOST_VDPA_GET_VQS_COUNT:
r = vhost_vdpa_get_vqs_count(v, argp);
break;
case VHOST_VDPA_SUSPEND:
r = vhost_vdpa_suspend(v);
break;
case VHOST_VDPA_RESUME:
r = vhost_vdpa_resume(v);
break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
r = vhost_vdpa_vring_ioctl(v, cmd, argp);
break;
}
if (r)
goto out;
switch (cmd) {
case VHOST_SET_OWNER:
r = vhost_vdpa_bind_mm(v);
if (r)
vhost_dev_reset_owner(d, NULL);
break;
}
out:
mutex_unlock(&d->mutex);
return r;
}
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
struct vhost_iotlb_map *map, u32 asid)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (ops->dma_map) {
ops->dma_unmap(vdpa, asid, map->start, map->size);
} else if (ops->set_map == NULL) {
iommu_unmap(v->domain, map->start, map->size);
}
}
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
u64 start, u64 last, u32 asid)
{
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb_map *map;
struct page *page;
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
pinned = PFN_DOWN(map->size);
for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_vdpa_general_unmap(v, map, asid);
vhost_iotlb_map_free(iotlb, map);
}
}
static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
u64 start, u64 last, u32 asid)
{
struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
map_file = (struct vdpa_map_file *)map->opaque;
fput(map_file->file);
kfree(map_file);
vhost_vdpa_general_unmap(v, map, asid);
vhost_iotlb_map_free(iotlb, map);
}
}
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb, u64 start,
u64 last, u32 asid)
{
struct vdpa_device *vdpa = v->vdpa;
if (vdpa->use_va)
return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}
static int perm_to_iommu_flags(u32 perm)
{
int flags = 0;
switch (perm) {
case VHOST_ACCESS_WO:
flags |= IOMMU_WRITE;
break;
case VHOST_ACCESS_RO:
flags |= IOMMU_READ;
break;
case VHOST_ACCESS_RW:
flags |= (IOMMU_WRITE | IOMMU_READ);
break;
default:
WARN(1, "invalidate vhost IOTLB permission\n");
break;
}
return flags | IOMMU_CACHE;
}
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u32 asid = iotlb_to_asid(iotlb);
int r = 0;
r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm), GFP_KERNEL);
}
if (r) {
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
return r;
}
if (!vdpa->use_va)
atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
return 0;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u32 asid = iotlb_to_asid(iotlb);
vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
if (ops->set_map) {
if (!v->in_batch)
ops->set_map(vdpa, asid, iotlb);
}
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
u64 offset, map_size, map_iova = iova;
struct vdpa_map_file *map_file;
struct vm_area_struct *vma;
int ret = 0;
mmap_read_lock(dev->mm);
while (size) {
vma = find_vma(dev->mm, uaddr);
if (!vma) {
ret = -EINVAL;
break;
}
map_size = min(size, vma->vm_end - uaddr);
if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
goto next;
map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
if (!map_file) {
ret = -ENOMEM;
break;
}
offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
map_file->offset = offset;
map_file->file = get_file(vma->vm_file);
ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
perm, map_file);
if (ret) {
fput(map_file->file);
kfree(map_file);
break;
}
next:
size -= map_size;
uaddr += map_size;
map_iova += map_size;
}
if (ret)
vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
mmap_read_unlock(dev->mm);
return ret;
}
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
struct page **page_list;
unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
unsigned long npages, cur_base, map_pfn, last_pfn = 0;
unsigned long lock_limit, sz2pin, nchunks, i;
u64 start = iova;
long pinned;
int ret = 0;
/* Limit the use of memory for bookkeeping */
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list)
return -ENOMEM;
if (perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
npages = PFN_UP(size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
}
mmap_read_lock(dev->mm);
lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
}
cur_base = uaddr & PAGE_MASK;
iova &= PAGE_MASK;
nchunks = 0;
while (npages) {
sz2pin = min_t(unsigned long, npages, list_size);
pinned = pin_user_pages(cur_base, sz2pin,
gup_flags, page_list);
if (sz2pin != pinned) {
if (pinned < 0) {
ret = pinned;
} else {
unpin_user_pages(page_list, pinned);
ret = -ENOMEM;
}
goto out;
}
nchunks++;
if (!last_pfn)
map_pfn = page_to_pfn(page_list[0]);
for (i = 0; i < pinned; i++) {
unsigned long this_pfn = page_to_pfn(page_list[i]);
u64 csize;
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iotlb, iova, csize,
PFN_PHYS(map_pfn),
perm, NULL);
if (ret) {
/*
* Unpin the pages that are left unmapped
* from this point on in the current
* page_list. The remaining outstanding
* ones which may stride across several
* chunks will be covered in the common
* error path subsequently.
*/
unpin_user_pages(&page_list[i],
pinned - i);
goto out;
}
map_pfn = this_pfn;
iova += csize;
nchunks = 0;
}
last_pfn = this_pfn;
}
cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
/* Pin the rest chunk */
ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
if (nchunks) {
unsigned long pfn;
/*
* Unpin the outstanding pages that were pinned but
* never mapped, due to a vhost_vdpa_map() or
* pin_user_pages() failure.
*
* Mapped pages are accounted in vdpa_map(), hence
* the corresponding unpinning will be handled by
* vdpa_unmap().
*/
WARN_ON(!last_pfn);
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
vhost_vdpa_unmap(v, iotlb, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
free:
free_page((unsigned long)page_list);
return ret;
}
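/*
 * Worked example of the chunking above (illustrative pfns): if
 * pin_user_pages() returns pages with pfns 100, 101, 102, 200 and 201,
 * the loop issues one vhost_vdpa_map() call for the physically
 * contiguous 3-page chunk at PFN_PHYS(100), then a second call (the
 * "rest chunk") for the 2-page chunk at PFN_PHYS(200), advancing the
 * iova by the size of each mapped chunk.
 */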
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
struct vhost_iotlb_msg *msg)
{
struct vdpa_device *vdpa = v->vdpa;
if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last)
return -EINVAL;
if (vhost_iotlb_itree_first(iotlb, msg->iova,
msg->iova + msg->size - 1))
return -EEXIST;
if (vdpa->use_va)
return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
msg->uaddr, msg->perm);
return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
msg->perm);
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_iotlb *iotlb = NULL;
struct vhost_vdpa_as *as = NULL;
int r = 0;
mutex_lock(&dev->mutex);
r = vhost_dev_check_owner(dev);
if (r)
goto unlock;
if (msg->type == VHOST_IOTLB_UPDATE ||
msg->type == VHOST_IOTLB_BATCH_BEGIN) {
as = vhost_vdpa_find_alloc_as(v, asid);
if (!as) {
dev_err(&v->dev, "can't find and alloc asid %d\n",
asid);
r = -EINVAL;
goto unlock;
}
iotlb = &as->iotlb;
} else
iotlb = asid_to_iotlb(v, asid);
if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
if (v->in_batch && v->batch_asid != asid) {
dev_info(&v->dev, "batch id %d asid %d\n",
v->batch_asid, asid);
}
if (!iotlb)
dev_err(&v->dev, "no iotlb for asid %d\n", asid);
r = -EINVAL;
goto unlock;
}
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
break;
case VHOST_IOTLB_INVALIDATE:
vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
break;
case VHOST_IOTLB_BATCH_BEGIN:
v->batch_asid = asid;
v->in_batch = true;
break;
case VHOST_IOTLB_BATCH_END:
if (v->in_batch && ops->set_map)
ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
break;
default:
r = -EINVAL;
break;
}
unlock:
mutex_unlock(&dev->mutex);
return r;
}
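/*
 * Illustrative message flow (simplified, for a parent driver that
 * implements ops->set_map()): once VHOST_BACKEND_F_IOTLB_BATCH has been
 * negotiated, userspace can commit several updates with a single
 * ops->set_map() call:
 *
 *	VHOST_IOTLB_BATCH_BEGIN		- in_batch = true, batch_asid latched
 *	VHOST_IOTLB_UPDATE (iova A)	- added to the IOTLB only
 *	VHOST_IOTLB_UPDATE (iova B)	- added to the IOTLB only
 *	VHOST_IOTLB_BATCH_END		- one ops->set_map(), in_batch = false
 */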
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct vhost_vdpa *v = file->private_data;
struct vhost_dev *dev = &v->vdev;
return vhost_chr_write_iter(dev, from);
}
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct device *dma_dev = vdpa_get_dma_dev(vdpa);
const struct bus_type *bus;
int ret;
/* Device wants to do DMA by itself */
if (ops->set_map || ops->dma_map)
return 0;
bus = dma_dev->bus;
if (!bus)
return -EFAULT;
if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
dev_warn_once(&v->dev,
"Failed to allocate domain, device is not IOMMU cache coherent capable\n");
return -ENOTSUPP;
}
v->domain = iommu_domain_alloc(bus);
if (!v->domain)
return -EIO;
ret = iommu_attach_device(v->domain, dma_dev);
if (ret)
goto err_attach;
return 0;
err_attach:
iommu_domain_free(v->domain);
v->domain = NULL;
return ret;
}
static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
struct device *dma_dev = vdpa_get_dma_dev(vdpa);
if (v->domain) {
iommu_detach_device(v->domain, dma_dev);
iommu_domain_free(v->domain);
}
v->domain = NULL;
}
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
struct vdpa_iova_range *range = &v->range;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (ops->get_iova_range) {
*range = ops->get_iova_range(vdpa);
} else if (v->domain && v->domain->geometry.force_aperture) {
range->first = v->domain->geometry.aperture_start;
range->last = v->domain->geometry.aperture_end;
} else {
range->first = 0;
range->last = ULLONG_MAX;
}
}
static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
struct vhost_vdpa_as *as;
u32 asid;
for (asid = 0; asid < v->vdpa->nas; asid++) {
as = asid_to_as(v, asid);
if (as)
vhost_vdpa_remove_as(v, asid);
}
vhost_vdpa_free_domain(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
int r, opened;
u32 i, nvqs;
v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
opened = atomic_cmpxchg(&v->opened, 0, 1);
if (opened)
return -EBUSY;
nvqs = v->nvqs;
r = vhost_vdpa_reset(v);
if (r)
goto err;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
r = -ENOMEM;
goto err;
}
dev = &v->vdev;
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
r = vhost_vdpa_alloc_domain(v);
if (r)
goto err_alloc_domain;
vhost_vdpa_set_iova_range(v);
filep->private_data = v;
return 0;
err_alloc_domain:
vhost_vdpa_cleanup(v);
err:
atomic_dec(&v->opened);
return r;
}
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
u32 i;
for (i = 0; i < v->nvqs; i++)
vhost_vdpa_unsetup_vq_irq(v, i);
}
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v = filep->private_data;
struct vhost_dev *d = &v->vdev;
mutex_lock(&d->mutex);
filep->private_data = NULL;
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
vhost_vdpa_unbind_mm(v);
vhost_vdpa_config_put(v);
vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);
atomic_dec(&v->opened);
complete(&v->completion);
return 0;
}
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vdpa_notification_area notify;
struct vm_area_struct *vma = vmf->vma;
u16 index = vma->vm_pgoff;
notify = ops->get_vq_notification(vdpa, index);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct vhost_vdpa_vm_ops = {
.fault = vhost_vdpa_fault,
};
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
struct vhost_vdpa *v = vma->vm_file->private_data;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vdpa_notification_area notify;
unsigned long index = vma->vm_pgoff;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
if (vma->vm_flags & VM_READ)
return -EINVAL;
if (index > 65535)
return -EINVAL;
if (!ops->get_vq_notification)
return -ENOTSUPP;
/* To be safe and easily modelled by userspace, we only
* support a doorbell that sits on a page boundary and
* does not share the page with other registers.
*/
notify = ops->get_vq_notification(vdpa, index);
if (notify.addr & (PAGE_SIZE - 1))
return -EINVAL;
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
#endif /* CONFIG_MMU */
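/*
 * Illustrative userspace sketch (assumed open vhost-vdpa fd, error
 * handling elided): the doorbell of virtqueue N is mapped by using N as
 * the page offset, write-only, shared and exactly one page long, per
 * the checks above:
 *
 *	void *db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			fd, N * page_size);
 *
 * A subsequent store to db kicks the queue directly in hardware; the
 * value to store is device specific (virtio-pci style devices expect
 * the queue index).
 */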
static const struct file_operations vhost_vdpa_fops = {
.owner = THIS_MODULE,
.open = vhost_vdpa_open,
.release = vhost_vdpa_release,
.write_iter = vhost_vdpa_chr_write_iter,
.unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
.mmap = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
.compat_ioctl = compat_ptr_ioctl,
};
static void vhost_vdpa_release_dev(struct device *device)
{
struct vhost_vdpa *v =
container_of(device, struct vhost_vdpa, dev);
ida_simple_remove(&vhost_vdpa_ida, v->minor);
kfree(v->vqs);
kfree(v);
}
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa *v;
int minor;
int i, r;
/* We can't support a platform IOMMU device with more than 1
* group or address space.
*/
if (!ops->set_map && !ops->dma_map &&
(vdpa->ngroups > 1 || vdpa->nas > 1))
return -EOPNOTSUPP;
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!v)
return -ENOMEM;
minor = ida_simple_get(&vhost_vdpa_ida, 0,
VHOST_VDPA_DEV_MAX, GFP_KERNEL);
if (minor < 0) {
kfree(v);
return minor;
}
atomic_set(&v->opened, 0);
v->minor = minor;
v->vdpa = vdpa;
v->nvqs = vdpa->nvqs;
v->virtio_id = ops->get_device_id(vdpa);
device_initialize(&v->dev);
v->dev.release = vhost_vdpa_release_dev;
v->dev.parent = &vdpa->dev;
v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
GFP_KERNEL);
if (!v->vqs) {
r = -ENOMEM;
goto err;
}
r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
if (r)
goto err;
cdev_init(&v->cdev, &vhost_vdpa_fops);
v->cdev.owner = THIS_MODULE;
r = cdev_device_add(&v->cdev, &v->dev);
if (r)
goto err;
init_completion(&v->completion);
vdpa_set_drvdata(vdpa, v);
for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
INIT_HLIST_HEAD(&v->as[i]);
return 0;
err:
put_device(&v->dev);
ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
int opened;
cdev_device_del(&v->cdev, &v->dev);
do {
opened = atomic_cmpxchg(&v->opened, 0, 1);
if (!opened)
break;
wait_for_completion(&v->completion);
} while (1);
put_device(&v->dev);
}
static struct vdpa_driver vhost_vdpa_driver = {
.driver = {
.name = "vhost_vdpa",
},
.probe = vhost_vdpa_probe,
.remove = vhost_vdpa_remove,
};
static int __init vhost_vdpa_init(void)
{
int r;
r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
"vhost-vdpa");
if (r)
goto err_alloc_chrdev;
r = vdpa_register_driver(&vhost_vdpa_driver);
if (r)
goto err_vdpa_register_driver;
return 0;
err_vdpa_register_driver:
unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
return r;
}
module_init(vhost_vdpa_init);
static void __exit vhost_vdpa_exit(void)
{
vdpa_unregister_driver(&vhost_vdpa_driver);
unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
| linux-master | drivers/vhost/vdpa.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <[email protected]>
*
* virtio-net server in host kernel.
*/
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/xdp.h>
#include "vhost.h"
static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
" 1 -Enable; 0 - Disable");
/* Max number of bytes transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others with small
* pkts.
*/
#define VHOST_NET_PKT_WEIGHT 256
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
/*
* For transmit, used buffer len is unused; we override it to track buffer
* status internally; used for zerocopy tx only.
*/
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
(1ULL << VIRTIO_F_ACCESS_PLATFORM) |
(1ULL << VIRTIO_F_RING_RESET)
};
enum {
VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
VHOST_NET_VQ_MAX = 2,
};
struct vhost_net_ubuf_ref {
/* refcount follows semantics similar to kref:
* 0: object is released
* 1: no outstanding ubufs
* >1: outstanding ubufs
*/
atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
};
#define VHOST_NET_BATCH 64
struct vhost_net_buf {
void **queue;
int tail;
int head;
};
struct vhost_net_virtqueue {
struct vhost_virtqueue vq;
size_t vhost_hlen;
size_t sock_hlen;
/* vhost zerocopy support fields below: */
/* last used idx for outstanding DMA zerocopy buffers */
int upend_idx;
/* For TX, first used idx for DMA done zerocopy buffers
* For RX, number of batched heads
*/
int done_idx;
/* Number of XDP frames batched */
int batched_xdp;
/* an array of userspace buffers info */
struct ubuf_info_msgzc *ubuf_info;
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
/* Batched XDP buffs */
struct xdp_buff *xdp;
};
struct vhost_net {
struct vhost_dev dev;
struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
struct vhost_poll poll[VHOST_NET_VQ_MAX];
/* Number of TX recently submitted.
* Protected by tx vq lock. */
unsigned tx_packets;
/* Number of times zerocopy TX recently failed.
* Protected by tx vq lock. */
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
/* Private page frag */
struct page_frag page_frag;
/* Refcount bias of page frag */
int refcnt_bias;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
if (rxq->tail != rxq->head)
return rxq->queue[rxq->head];
else
return NULL;
}
static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
return rxq->tail - rxq->head;
}
static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
return rxq->tail == rxq->head;
}
static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
void *ret = vhost_net_buf_get_ptr(rxq);
++rxq->head;
return ret;
}
static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
rxq->head = 0;
rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
VHOST_NET_BATCH);
return rxq->tail;
}
static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
vhost_net_buf_get_size(rxq),
tun_ptr_free);
rxq->head = rxq->tail = 0;
}
}
static int vhost_net_buf_peek_len(void *ptr)
{
if (tun_is_xdp_frame(ptr)) {
struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
return xdpf->len;
}
return __skb_array_len_with_tag(ptr);
}
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
if (!vhost_net_buf_is_empty(rxq))
goto out;
if (!vhost_net_buf_produce(nvq))
return 0;
out:
return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
rxq->head = rxq->tail = 0;
}
static void vhost_net_enable_zcopy(int vq)
{
vhost_net_zcopy_mask |= 0x1 << vq;
}
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
struct vhost_net_ubuf_ref *ubufs;
/* No zero copy backend? Nothing to count. */
if (!zcopy)
return NULL;
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
int r = atomic_sub_return(1, &ubufs->refcount);
if (unlikely(!r))
wake_up(&ubufs->wait);
return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put(ubufs);
wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put_and_wait(ubufs);
kfree(ubufs);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
int i;
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
kfree(n->vqs[i].ubuf_info);
n->vqs[i].ubuf_info = NULL;
}
}
static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
bool zcopy;
int i;
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
zcopy = vhost_net_zcopy_mask & (0x1 << i);
if (!zcopy)
continue;
n->vqs[i].ubuf_info =
kmalloc_array(UIO_MAXIOV,
sizeof(*n->vqs[i].ubuf_info),
GFP_KERNEL);
if (!n->vqs[i].ubuf_info)
goto err;
}
return 0;
err:
vhost_net_clear_ubuf_info(n);
return -ENOMEM;
}
static void vhost_net_vq_reset(struct vhost_net *n)
{
int i;
vhost_net_clear_ubuf_info(n);
for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
n->vqs[i].done_idx = 0;
n->vqs[i].upend_idx = 0;
n->vqs[i].ubufs = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
vhost_net_buf_init(&n->vqs[i].rxq);
}
}
static void vhost_net_tx_packet(struct vhost_net *net)
{
++net->tx_packets;
if (net->tx_packets < 1024)
return;
net->tx_packets = 0;
net->tx_zcopy_err = 0;
}
static void vhost_net_tx_err(struct vhost_net *net)
{
++net->tx_zcopy_err;
}
static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
/* TX flush waits for outstanding DMAs to be done.
* Don't start new DMAs.
*/
return !net->tx_flush &&
net->tx_packets / 64 >= net->tx_zcopy_err;
}
static bool vhost_sock_zcopy(struct socket *sock)
{
return unlikely(experimental_zcopytx) &&
sock_flag(sock->sk, SOCK_ZEROCOPY);
}
static bool vhost_sock_xdp(struct socket *sock)
{
return sock_flag(sock->sk, SOCK_XDP);
}
/* The lower device driver may, for some reason, complete DMAs out of order.
* upend_idx is used to track the end of the used idx, done_idx is used to
* track the head of the used idx. Once the lower device has completed DMAs
* contiguously, we signal the used idx to the KVM guest.
*/
static void vhost_zerocopy_signal_used(struct vhost_net *net,
struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
int i, add;
int j = 0;
for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
++j;
} else
break;
}
while (j) {
add = min(UIO_MAXIOV - nvq->done_idx, j);
vhost_add_used_and_signal_n(vq->dev, vq,
&vq->heads[nvq->done_idx], add);
nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
j -= add;
}
}
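/*
 * Worked example (illustrative): with done_idx = 2 and upend_idx = 6 the
 * loop scans heads 2..5. If heads 2 and 3 are DONE but head 4 is still
 * VHOST_DMA_IN_PROGRESS, j ends up as 2, so only heads 2 and 3 are
 * signalled and done_idx advances to 4: completions are reported to the
 * guest strictly in order even when the lower device finishes later
 * heads first.
 */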
static void vhost_zerocopy_callback(struct sk_buff *skb,
struct ubuf_info *ubuf_base, bool success)
{
struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
int cnt;
rcu_read_lock_bh();
/* set len to mark this descriptor's buffers as done with DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
* in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after every 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of the time).
*/
if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
rcu_read_unlock_bh();
}
static inline unsigned long busy_clock(void)
{
return local_clock() >> 10;
}
static bool vhost_can_busy_poll(unsigned long endtime)
{
return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
!signal_pending(current));
}
static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
if (!vhost_vq_get_backend(vq))
return;
vhost_poll_stop(poll);
}
static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
sock = vhost_vq_get_backend(vq);
if (!sock)
return 0;
return vhost_poll_start(poll, sock->file);
}
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_dev *dev = vq->dev;
if (!nvq->done_idx)
return;
vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
nvq->done_idx = 0;
}
static void vhost_tx_batch(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct socket *sock,
struct msghdr *msghdr)
{
struct tun_msg_ctl ctl = {
.type = TUN_MSG_PTR,
.num = nvq->batched_xdp,
.ptr = nvq->xdp,
};
int i, err;
if (nvq->batched_xdp == 0)
goto signal_used;
msghdr->msg_control = &ctl;
msghdr->msg_controllen = sizeof(ctl);
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
/* free pages owned by XDP; since this is an unlikely error path,
* keep it simple and avoid more complex bulk update for the
* used pages
*/
for (i = 0; i < nvq->batched_xdp; ++i)
put_page(virt_to_head_page(nvq->xdp[i].data));
nvq->batched_xdp = 0;
nvq->done_idx = 0;
return;
}
signal_used:
vhost_net_signal_used(nvq);
nvq->batched_xdp = 0;
}
static int sock_has_rx_data(struct socket *sock)
{
if (unlikely(!sock))
return 0;
if (sock->ops->peek_len)
return sock->ops->peek_len(sock);
return !skb_queue_empty(&sock->sk->sk_receive_queue);
}
static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
struct vhost_virtqueue *vq)
{
if (!vhost_vq_avail_empty(&net->dev, vq)) {
vhost_poll_queue(&vq->poll);
} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
vhost_poll_queue(&vq->poll);
}
}
static void vhost_net_busy_poll(struct vhost_net *net,
struct vhost_virtqueue *rvq,
struct vhost_virtqueue *tvq,
bool *busyloop_intr,
bool poll_rx)
{
unsigned long busyloop_timeout;
unsigned long endtime;
struct socket *sock;
struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
/* Try to hold the vq mutex of the paired virtqueue. We can't
* use mutex_lock() here since we can't guarantee a
* consistent lock ordering.
*/
if (!mutex_trylock(&vq->mutex))
return;
vhost_disable_notify(&net->dev, vq);
sock = vhost_vq_get_backend(rvq);
busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
tvq->busyloop_timeout;
preempt_disable();
endtime = busy_clock() + busyloop_timeout;
while (vhost_can_busy_poll(endtime)) {
if (vhost_vq_has_work(vq)) {
*busyloop_intr = true;
break;
}
if ((sock_has_rx_data(sock) &&
!vhost_vq_avail_empty(&net->dev, rvq)) ||
!vhost_vq_avail_empty(&net->dev, tvq))
break;
cpu_relax();
}
preempt_enable();
if (poll_rx || sock_has_rx_data(sock))
vhost_net_busy_poll_try_queue(net, vq);
else if (!poll_rx) /* On tx here, sock has no rx data. */
vhost_enable_notify(&net->dev, rvq);
mutex_unlock(&vq->mutex);
}
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_net_virtqueue *tnvq,
unsigned int *out_num, unsigned int *in_num,
struct msghdr *msghdr, bool *busyloop_intr)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
vhost_tx_batch(net, tnvq,
vhost_vq_get_backend(tvq),
msghdr);
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
}
return r;
}
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
size_t hdr_size, int out)
{
/* Skip header. TODO: support TSO. */
size_t len = iov_length(vq->iov, out);
iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
iov_iter_advance(iter, hdr_size);
return iov_iter_count(iter);
}
static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
unsigned int *out, unsigned int *in,
size_t *len, bool *busyloop_intr)
{
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
if (ret < 0 || ret == vq->num)
return ret;
if (*in) {
vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
*out, *in);
return -EFAULT;
}
/* Sanity check */
*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
if (*len == 0) {
vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
*len, nvq->vhost_hlen);
return -EFAULT;
}
return ret;
}
static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
return total_len < VHOST_NET_WEIGHT &&
!vhost_vq_avail_empty(vq->dev, vq);
}
static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
struct page_frag *pfrag, gfp_t gfp)
{
if (pfrag->page) {
if (pfrag->offset + sz <= pfrag->size)
return true;
__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
}
pfrag->offset = 0;
net->refcnt_bias = 0;
if (SKB_FRAG_PAGE_ORDER) {
/* Avoid direct reclaim but allow kswapd to wake */
pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
__GFP_COMP | __GFP_NOWARN |
__GFP_NORETRY,
SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
goto done;
}
}
pfrag->page = alloc_page(gfp);
if (likely(pfrag->page)) {
pfrag->size = PAGE_SIZE;
goto done;
}
return false;
done:
net->refcnt_bias = USHRT_MAX;
page_ref_add(pfrag->page, USHRT_MAX - 1);
return true;
}
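/*
 * Note on refcnt_bias (illustrative): a successful refill leaves the
 * frag page holding USHRT_MAX references (the allocation's one plus
 * page_ref_add(USHRT_MAX - 1)) and sets refcnt_bias = USHRT_MAX. Each
 * XDP buffer carved out of the frag then consumes one unit of bias
 * (--net->refcnt_bias) instead of a per-packet atomic page_ref_add(),
 * and __page_frag_cache_drain() returns the unused remainder when the
 * frag is replaced.
 */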
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct iov_iter *from)
{
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
size_t len = iov_iter_count(from);
int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
int sock_hlen = nvq->sock_hlen;
void *buf;
int copied;
if (unlikely(len < nvq->sock_hlen))
return -EFAULT;
if (SKB_DATA_ALIGN(len + pad) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
return -ENOSPC;
buflen += SKB_DATA_ALIGN(len + pad);
alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
if (unlikely(!vhost_net_page_frag_refill(net, buflen,
alloc_frag, GFP_KERNEL)))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
copied = copy_page_from_iter(alloc_frag->page,
alloc_frag->offset +
offsetof(struct tun_xdp_hdr, gso),
sock_hlen, from);
if (copied != sock_hlen)
return -EFAULT;
hdr = buf;
gso = &hdr->gso;
if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2 >
vhost16_to_cpu(vq, gso->hdr_len)) {
gso->hdr_len = cpu_to_vhost16(vq,
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2);
if (vhost16_to_cpu(vq, gso->hdr_len) > len)
return -EINVAL;
}
len -= sock_hlen;
copied = copy_page_from_iter(alloc_frag->page,
alloc_frag->offset + pad,
len, from);
if (copied != len)
return -EFAULT;
xdp_init_buff(xdp, buflen, NULL);
xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
--net->refcnt_bias;
alloc_frag->offset += buflen;
++nvq->batched_xdp;
return 0;
}
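/*
 * Resulting buffer layout (illustrative, offsets within one page-frag
 * allocation of buflen bytes):
 *
 *	[ tun_xdp_hdr (sock_hlen copied into ->gso) | padding up to 'pad' |
 *	  packet data (len bytes) | aligned room for skb_shared_info ]
 *
 * 'pad' covers VHOST_NET_RX_PAD plus XDP_PACKET_HEADROOM when the
 * socket has an XDP program attached, so the frame can later be
 * adjusted in place without copying.
 */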
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned out, in;
int head;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL,
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
size_t len, total_len = 0;
int err;
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
do {
bool busyloop_intr = false;
if (nvq->done_idx == VHOST_NET_BATCH)
vhost_tx_batch(net, nvq, sock, &msg);
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
&busyloop_intr);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
if (unlikely(busyloop_intr)) {
vhost_poll_queue(&vq->poll);
} else if (unlikely(vhost_enable_notify(&net->dev,
vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
}
break;
}
total_len += len;
/* For simplicity, TX batching is only enabled if
* sndbuf is unlimited.
*/
if (sock_can_batch) {
err = vhost_net_build_xdp(nvq, &msg.msg_iter);
if (!err) {
goto done;
} else if (unlikely(err != -ENOSPC)) {
vhost_tx_batch(net, nvq, sock, &msg);
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
}
/* We can't build XDP buff, go for single
* packet path but let's flush batched
* packets.
*/
vhost_tx_batch(net, nvq, sock, &msg);
msg.msg_control = NULL;
} else {
if (tx_can_batch(vq, total_len))
msg.msg_flags |= MSG_MORE;
else
msg.msg_flags &= ~MSG_MORE;
}
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
}
pr_debug("Fail to send packet: err %d", err);
} else if (unlikely(err != len))
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
done:
vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->done_idx].len = 0;
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
vhost_tx_batch(net, nvq, sock, &msg);
}
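/* Completion batching in handle_tx_copy(): finished heads are only
 * recorded in vq->heads[] at done_idx, and vhost_tx_batch() flushes
 * them to the used ring with a single guest signal once VHOST_NET_BATCH
 * entries have accumulated or the loop exits, amortizing notification
 * cost across a burst of packets.
 */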
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned out, in;
int head;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL,
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
struct tun_msg_ctl ctl;
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
struct ubuf_info_msgzc *ubuf;
bool zcopy_used;
int sent_pkts = 0;
do {
bool busyloop_intr;
/* Release DMAs done buffers first */
vhost_zerocopy_signal_used(net, vq);
busyloop_intr = false;
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
&busyloop_intr);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
if (unlikely(busyloop_intr)) {
vhost_poll_queue(&vq->poll);
} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
}
break;
}
		zcopy_used = len >= VHOST_GOODCOPY_LEN &&
			     !vhost_exceeds_maxpend(net) &&
			     vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
ubuf = nvq->ubuf_info + nvq->upend_idx;
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
ubuf->ubuf.callback = vhost_zerocopy_callback;
ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
refcount_set(&ubuf->ubuf.refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
ctl.ptr = &ubuf->ubuf;
msg.msg_controllen = sizeof(ctl);
ubufs = nvq->ubufs;
atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
} else {
msg.msg_control = NULL;
ubufs = NULL;
}
total_len += len;
if (tx_can_batch(vq, total_len) &&
likely(!vhost_exceeds_maxpend(net))) {
msg.msg_flags |= MSG_MORE;
} else {
msg.msg_flags &= ~MSG_MORE;
}
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
if (zcopy_used) {
if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
vhost_net_ubuf_put(ubufs);
if (retry)
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
else
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
if (retry) {
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
}
pr_debug("Fail to send packet: err %d", err);
} else if (unlikely(err != len))
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
if (!zcopy_used)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
if (!vq_meta_prefetch(vq))
goto out;
vhost_disable_notify(&net->dev, vq);
vhost_net_disable_vq(net, vq);
if (vhost_sock_zcopy(sock))
handle_tx_zerocopy(net, sock);
else
handle_tx_copy(net, sock);
out:
mutex_unlock(&vq->mutex);
}
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
struct sk_buff *head;
int len = 0;
unsigned long flags;
if (rvq->rx_ring)
return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
if (likely(head)) {
len = head->len;
if (skb_vlan_tag_present(head))
len += VLAN_HLEN;
}
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
return len;
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
bool *busyloop_intr)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
int len = peek_head_len(rnvq, sk);
if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
vhost_net_signal_used(rnvq);
/* Both tx vq and rx socket were polled here */
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
len = peek_head_len(rnvq, sk);
}
return len;
}
/* This is a multi-buffer version of vhost_get_vq_desc, which works if
* vq has read descriptors only.
* @vq - the relevant virtqueue
* @datalen - data length we'll be reading
* @iovcount - returned count of io vectors we fill
* @log - vhost log
* @log_num - log offset
* @quota - headcount quota, 1 for big buffer
* returns number of buffer heads allocated, negative on error
*/
static int get_rx_bufs(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
int datalen,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
unsigned int quota)
{
unsigned int out, in;
int seg = 0;
int headcount = 0;
unsigned d;
int r, nlogs = 0;
/* len is always initialized before use since we are always called with
* datalen > 0.
*/
u32 len;
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
r = -ENOBUFS;
goto err;
}
r = vhost_get_vq_desc(vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
if (unlikely(r < 0))
goto err;
d = r;
if (d == vq->num) {
r = 0;
goto err;
}
if (unlikely(out || in <= 0)) {
vq_err(vq, "unexpected descriptor format for RX: "
"out %d, in %d\n", out, in);
r = -EINVAL;
goto err;
}
if (unlikely(log)) {
nlogs += *log_num;
log += *log_num;
}
heads[headcount].id = cpu_to_vhost32(vq, d);
len = iov_length(vq->iov + seg, in);
heads[headcount].len = cpu_to_vhost32(vq, len);
datalen -= len;
++headcount;
seg += in;
}
heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
/* Detect overrun */
if (unlikely(datalen > 0)) {
r = UIO_MAXIOV + 1;
goto err;
}
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
return r;
}
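/* Worked example for the trimming above: datalen = 5000 with 1500-byte
 * guest buffers consumes four heads (4 x 1500 bytes of capacity). After
 * the loop, datalen == -1000 and len == 1500, so the final head is
 * trimmed to len + datalen == 500, the number of bytes actually needed
 * from it.
 */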
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned in, log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_control = NULL, /* FIXME: get and handle RX aux data. */
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
struct virtio_net_hdr hdr = {
.flags = 0,
.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, mergeable;
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
bool busyloop_intr = false;
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
if (!vq_meta_prefetch(vq))
goto out;
vhost_disable_notify(&net->dev, vq);
vhost_net_disable_vq(net, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
&busyloop_intr);
if (!sock_len)
break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
vhost_len, &in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(busyloop_intr)) {
vhost_poll_queue(&vq->poll);
} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
goto out;
}
busyloop_intr = false;
if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
err = sock->ops->recvmsg(sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
continue;
}
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
fixup = msg.msg_iter;
		if (unlikely(vhost_hlen)) {
/* We will supply the header ourselves
* TODO: support TSO.
*/
iov_iter_advance(&msg.msg_iter, vhost_hlen);
}
err = sock->ops->recvmsg(sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
* it's not supposed to do this usually, but might be hard
* to prevent. Discard data we got (if any) and keep going. */
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
vhost_discard_vq_desc(vq, headcount);
continue;
}
/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
if (unlikely(vhost_hlen)) {
if (copy_to_iter(&hdr, sizeof(hdr),
&fixup) != sizeof(hdr)) {
vq_err(vq, "Unable to write vnet_hdr "
"at addr %p\n", vq->iov->iov_base);
goto out;
}
} else {
/* Header came from socket; we'll need to patch
* ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
*/
iov_iter_advance(&fixup, sizeof(hdr));
}
/* TODO: Should check and handle checksum. */
num_buffers = cpu_to_vhost16(vq, headcount);
if (likely(mergeable) &&
copy_to_iter(&num_buffers, sizeof num_buffers,
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
goto out;
}
nvq->done_idx += headcount;
if (nvq->done_idx > VHOST_NET_BATCH)
vhost_net_signal_used(nvq);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
if (unlikely(busyloop_intr))
vhost_poll_queue(&vq->poll);
else if (!sock_len)
vhost_net_enable_vq(net, vq);
out:
vhost_net_signal_used(nvq);
mutex_unlock(&vq->mutex);
}
static void handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
handle_tx(net);
}
static void handle_rx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
handle_rx(net);
}
static void handle_tx_net(struct vhost_work *work)
{
struct vhost_net *net = container_of(work, struct vhost_net,
poll[VHOST_NET_VQ_TX].work);
handle_tx(net);
}
static void handle_rx_net(struct vhost_work *work)
{
struct vhost_net *net = container_of(work, struct vhost_net,
poll[VHOST_NET_VQ_RX].work);
handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
void **queue;
struct xdp_buff *xdp;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!n)
return -ENOMEM;
vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kvfree(n);
return -ENOMEM;
}
queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
kvfree(n);
return -ENOMEM;
}
n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
if (!xdp) {
kfree(vqs);
kvfree(n);
kfree(queue);
return -ENOMEM;
}
n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
dev = &n->dev;
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
n->vqs[i].ubufs = NULL;
n->vqs[i].ubuf_info = NULL;
n->vqs[i].upend_idx = 0;
n->vqs[i].done_idx = 0;
n->vqs[i].batched_xdp = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
n->vqs[i].rx_ring = NULL;
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
UIO_MAXIOV + VHOST_NET_BATCH,
VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
NULL);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
vqs[VHOST_NET_VQ_TX]);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
n->page_frag.page = NULL;
n->refcnt_bias = 0;
return 0;
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct socket *sock;
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
sock = vhost_vq_get_backend(vq);
vhost_net_disable_vq(n, vq);
vhost_vq_set_backend(vq, NULL);
vhost_net_buf_unproduce(nvq);
nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
return sock;
}
static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
struct socket **rx_sock)
{
*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
static void vhost_net_flush(struct vhost_net *n)
{
vhost_dev_flush(&n->dev);
if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = true;
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
/* Wait for all lower device DMAs done. */
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
static int vhost_net_release(struct inode *inode, struct file *f)
{
struct vhost_net *n = f->private_data;
struct socket *tx_sock;
struct socket *rx_sock;
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
vhost_net_vq_reset(n);
if (tx_sock)
sockfd_put(tx_sock);
if (rx_sock)
sockfd_put(rx_sock);
/* Make sure no callbacks are outstanding */
synchronize_rcu();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
if (n->page_frag.page)
__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
kvfree(n);
return 0;
}
static struct socket *get_raw_socket(int fd)
{
int r;
struct socket *sock = sockfd_lookup(fd, &r);
if (!sock)
return ERR_PTR(-ENOTSOCK);
/* Parameter checking */
if (sock->sk->sk_type != SOCK_RAW) {
r = -ESOCKTNOSUPPORT;
goto err;
}
if (sock->sk->sk_family != AF_PACKET) {
r = -EPFNOSUPPORT;
goto err;
}
return sock;
err:
sockfd_put(sock);
return ERR_PTR(r);
}
static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
ring = tap_get_ptr_ring(file);
if (!IS_ERR(ring))
goto out;
ring = NULL;
out:
return ring;
}
static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
struct socket *sock;
if (!file)
return ERR_PTR(-EBADF);
sock = tun_get_socket(file);
if (!IS_ERR(sock))
return sock;
sock = tap_get_socket(file);
if (IS_ERR(sock))
fput(file);
return sock;
}
static struct socket *get_socket(int fd)
{
struct socket *sock;
/* special case to disable backend */
if (fd == -1)
return NULL;
sock = get_raw_socket(fd);
if (!IS_ERR(sock))
return sock;
sock = get_tap_socket(fd);
if (!IS_ERR(sock))
return sock;
return ERR_PTR(-ENOTSOCK);
}
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
struct socket *sock, *oldsock;
struct vhost_virtqueue *vq;
struct vhost_net_virtqueue *nvq;
struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
int r;
mutex_lock(&n->dev.mutex);
r = vhost_dev_check_owner(&n->dev);
if (r)
goto err;
if (index >= VHOST_NET_VQ_MAX) {
r = -ENOBUFS;
goto err;
}
vq = &n->vqs[index].vq;
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
if (fd == -1)
vhost_clear_msg(&n->dev);
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
goto err_vq;
}
sock = get_socket(fd);
if (IS_ERR(sock)) {
r = PTR_ERR(sock);
goto err_vq;
}
/* start polling new socket */
oldsock = vhost_vq_get_backend(vq);
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
if (IS_ERR(ubufs)) {
r = PTR_ERR(ubufs);
goto err_ubufs;
}
vhost_net_disable_vq(n, vq);
vhost_vq_set_backend(vq, sock);
vhost_net_buf_unproduce(nvq);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
if (index == VHOST_NET_VQ_RX) {
if (sock)
nvq->rx_ring = get_tap_ptr_ring(sock->file);
else
nvq->rx_ring = NULL;
}
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
n->tx_packets = 0;
n->tx_zcopy_err = 0;
n->tx_flush = false;
}
mutex_unlock(&vq->mutex);
if (oldubufs) {
vhost_net_ubuf_put_wait_and_free(oldubufs);
mutex_lock(&vq->mutex);
vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
}
if (oldsock) {
vhost_dev_flush(&n->dev);
sockfd_put(oldsock);
}
mutex_unlock(&n->dev.mutex);
return 0;
err_used:
vhost_vq_set_backend(vq, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
if (sock)
sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
mutex_unlock(&n->dev.mutex);
return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
umem = vhost_dev_reset_owner_prepare();
if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
if (tx_sock)
sockfd_put(tx_sock);
if (rx_sock)
sockfd_put(rx_sock);
return err;
}
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
int i;
hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
(1ULL << VIRTIO_F_VERSION_1))) ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
sizeof(struct virtio_net_hdr);
if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
/* vhost provides vnet_hdr */
vhost_hlen = hdr_len;
sock_hlen = 0;
} else {
/* socket provides vnet_hdr */
vhost_hlen = 0;
sock_hlen = hdr_len;
}
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&n->dev))
goto out_unlock;
if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
if (vhost_init_device_iotlb(&n->dev))
goto out_unlock;
}
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
n->vqs[i].vq.acked_features = features;
n->vqs[i].vhost_hlen = vhost_hlen;
n->vqs[i].sock_hlen = sock_hlen;
mutex_unlock(&n->vqs[i].vq.mutex);
}
mutex_unlock(&n->dev.mutex);
return 0;
out_unlock:
mutex_unlock(&n->dev.mutex);
return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
{
int r;
mutex_lock(&n->dev.mutex);
if (vhost_dev_has_owner(&n->dev)) {
r = -EBUSY;
goto out;
}
r = vhost_net_set_ubuf_info(n);
if (r)
goto out;
r = vhost_dev_set_owner(&n->dev);
if (r)
vhost_net_clear_ubuf_info(n);
vhost_net_flush(n);
out:
mutex_unlock(&n->dev.mutex);
return r;
}
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
struct vhost_net *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
struct vhost_vring_file backend;
u64 features;
int r;
switch (ioctl) {
case VHOST_NET_SET_BACKEND:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_NET_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_NET_BACKEND_FEATURES;
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
return 0;
case VHOST_SET_BACKEND_FEATURES:
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
if (features & ~VHOST_NET_BACKEND_FEATURES)
return -EOPNOTSUPP;
vhost_set_backend_features(&n->dev, features);
return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
return vhost_net_set_owner(n);
default:
mutex_lock(&n->dev.mutex);
r = vhost_dev_ioctl(&n->dev, ioctl, argp);
if (r == -ENOIOCTLCMD)
r = vhost_vring_ioctl(&n->dev, ioctl, argp);
else
vhost_net_flush(n);
mutex_unlock(&n->dev.mutex);
return r;
}
}
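/* A minimal userspace bring-up sketch for the ioctls handled above (a
 * hypothetical, already-configured tap fd is assumed; kick/call eventfd
 * and ring setup via VHOST_SET_VRING_* and all error handling are
 * omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	uint64_t features;
 *
 *	ioctl(vhost, VHOST_SET_OWNER);
 *	ioctl(vhost, VHOST_GET_FEATURES, &features);
 *	// mask down to what the VMM supports, then acknowledge:
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);
 *
 *	struct vhost_vring_file backend = { .index = 0, .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend); // index 0 == RX vq
 */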
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
int noblock = file->f_flags & O_NONBLOCK;
return vhost_chr_read_iter(dev, to, noblock);
}
static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
return vhost_chr_write_iter(dev, from);
}
static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
.read_iter = vhost_net_chr_read_iter,
.write_iter = vhost_net_chr_write_iter,
.poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = vhost_net_open,
.llseek = noop_llseek,
};
static struct miscdevice vhost_net_misc = {
.minor = VHOST_NET_MINOR,
.name = "vhost-net",
.fops = &vhost_net_fops,
};
static int __init vhost_net_init(void)
{
if (experimental_zcopytx)
vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);
static void __exit vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");
| linux-master | drivers/vhost/net.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <[email protected]>
*
* test virtio server in host kernel.
*/
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include "test.h"
#include "vhost.h"
/* Max number of bytes transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others with
 * small pkts.
*/
#define VHOST_TEST_PKT_WEIGHT 256
enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
};
struct vhost_test {
struct vhost_dev dev;
struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
unsigned out, in;
int head;
size_t len, total_len = 0;
void *private;
mutex_lock(&vq->mutex);
private = vhost_vq_get_backend(vq);
if (!private) {
mutex_unlock(&vq->mutex);
return;
}
vhost_disable_notify(&n->dev, vq);
for (;;) {
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
if (unlikely(vhost_enable_notify(&n->dev, vq))) {
vhost_disable_notify(&n->dev, vq);
continue;
}
break;
}
if (in) {
vq_err(vq, "Unexpected descriptor format for TX: "
"out %d, int %d\n", out, in);
break;
}
len = iov_length(vq->iov, out);
/* Sanity check */
if (!len) {
vq_err(vq, "Unexpected 0 len for TX\n");
break;
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
}
mutex_unlock(&vq->mutex);
}
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
handle_vq(n);
}
static int vhost_test_open(struct inode *inode, struct file *f)
{
struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
if (!n)
return -ENOMEM;
vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kfree(n);
return -ENOMEM;
}
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
f->private_data = n;
return 0;
}
static void *vhost_test_stop_vq(struct vhost_test *n,
struct vhost_virtqueue *vq)
{
void *private;
mutex_lock(&vq->mutex);
private = vhost_vq_get_backend(vq);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
return private;
}
static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
static void vhost_test_flush(struct vhost_test *n)
{
vhost_dev_flush(&n->dev);
}
static int vhost_test_release(struct inode *inode, struct file *f)
{
struct vhost_test *n = f->private_data;
void *private;
vhost_test_stop(n, &private);
vhost_test_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
kfree(n->dev.vqs);
kfree(n);
return 0;
}
static long vhost_test_run(struct vhost_test *n, int test)
{
void *priv, *oldpriv;
struct vhost_virtqueue *vq;
int r, index;
if (test < 0 || test > 1)
return -EINVAL;
mutex_lock(&n->dev.mutex);
r = vhost_dev_check_owner(&n->dev);
if (r)
goto err;
for (index = 0; index < n->dev.nvqs; ++index) {
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(&n->vqs[index])) {
r = -EFAULT;
goto err;
}
}
for (index = 0; index < n->dev.nvqs; ++index) {
vq = n->vqs + index;
mutex_lock(&vq->mutex);
priv = test ? n : NULL;
		/* start polling the new backend */
oldpriv = vhost_vq_get_backend(vq);
vhost_vq_set_backend(vq, priv);
r = vhost_vq_init_access(&n->vqs[index]);
mutex_unlock(&vq->mutex);
if (r)
goto err;
if (oldpriv) {
vhost_test_flush(n);
}
}
mutex_unlock(&n->dev.mutex);
return 0;
err:
mutex_unlock(&n->dev.mutex);
return r;
}
static long vhost_test_reset_owner(struct vhost_test *n)
{
void *priv = NULL;
long err;
struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
umem = vhost_dev_reset_owner_prepare();
if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
return err;
}
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
struct vhost_virtqueue *vq;
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&n->dev)) {
mutex_unlock(&n->dev.mutex);
return -EFAULT;
}
vq = &n->vqs[VHOST_TEST_VQ];
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
mutex_unlock(&n->dev.mutex);
return 0;
}
static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
static void *backend;
const bool enable = fd != -1;
struct vhost_virtqueue *vq;
int r;
mutex_lock(&n->dev.mutex);
r = vhost_dev_check_owner(&n->dev);
if (r)
goto err;
if (index >= VHOST_TEST_VQ_MAX) {
r = -ENOBUFS;
goto err;
}
vq = &n->vqs[index];
mutex_lock(&vq->mutex);
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
goto err_vq;
}
if (!enable) {
vhost_poll_stop(&vq->poll);
backend = vhost_vq_get_backend(vq);
vhost_vq_set_backend(vq, NULL);
} else {
vhost_vq_set_backend(vq, backend);
r = vhost_vq_init_access(vq);
if (r == 0)
r = vhost_poll_start(&vq->poll, vq->kick);
}
mutex_unlock(&vq->mutex);
if (enable) {
vhost_test_flush(n);
}
mutex_unlock(&n->dev.mutex);
return 0;
err_vq:
mutex_unlock(&vq->mutex);
err:
mutex_unlock(&n->dev.mutex);
return r;
}
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
int test;
u64 features;
int r;
switch (ioctl) {
case VHOST_TEST_RUN:
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
case VHOST_TEST_SET_BACKEND:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
if (features & ~VHOST_FEATURES)
return -EOPNOTSUPP;
return vhost_test_set_features(n, features);
case VHOST_RESET_OWNER:
return vhost_test_reset_owner(n);
default:
mutex_lock(&n->dev.mutex);
r = vhost_dev_ioctl(&n->dev, ioctl, argp);
if (r == -ENOIOCTLCMD)
r = vhost_vring_ioctl(&n->dev, ioctl, argp);
vhost_test_flush(n);
mutex_unlock(&n->dev.mutex);
return r;
}
}
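/* A minimal userspace sketch for driving this test device (assumes the
 * VHOST_TEST_RUN definition from test.h, an open /dev/vhost-test
 * descriptor, prior VHOST_SET_OWNER and ring setup; error handling
 * omitted):
 *
 *	#include <sys/ioctl.h>
 *	#include "test.h"
 *
 *	int run = 1;
 *	ioctl(vhost_test_fd, VHOST_TEST_RUN, &run);	// attach and start
 *	// ... exercise the virtqueue from the guest side ...
 *	run = 0;
 *	ioctl(vhost_test_fd, VHOST_TEST_RUN, &run);	// detach the backend
 */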
static const struct file_operations vhost_test_fops = {
.owner = THIS_MODULE,
.release = vhost_test_release,
.unlocked_ioctl = vhost_test_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = vhost_test_open,
.llseek = noop_llseek,
};
static struct miscdevice vhost_test_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-test",
	.fops = &vhost_test_fops,
};
module_misc_device(vhost_test_misc);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");
| linux-master | drivers/vhost/test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vhost transport for vsock
*
* Copyright (C) 2013-2015 Red Hat, Inc.
* Author: Asias He <[email protected]>
* Stefan Hajnoczi <[email protected]>
*/
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>
#include <net/af_vsock.h>
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
/* Max number of bytes transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others with
* small pkts.
*/
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES |
(1ULL << VIRTIO_F_ACCESS_PLATFORM) |
(1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};
enum {
VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
struct hlist_node hash;
struct vhost_work send_pkt_work;
struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
atomic_t queued_replies;
u32 guest_cid;
bool seqpacket_allow;
};
static u32 vhost_transport_get_local_cid(void)
{
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
/* Callers that dereference the return value must hold vhost_vsock_mutex or the
* RCU read lock.
*/
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
u32 other_cid = vsock->guest_cid;
/* Skip instances that have no CID yet */
if (other_cid == 0)
continue;
if (other_cid == guest_cid)
return vsock;
}
return NULL;
}
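/* Typical lookup pattern for vhost_vsock_get(), matching its locking
 * contract:
 *
 *	rcu_read_lock();
 *	vsock = vhost_vsock_get(cid);
 *	if (vsock)
 *		... use vsock without sleeping ...
 *	rcu_read_unlock();
 *
 * Writers instead hold vhost_vsock_mutex around hash updates and call
 * synchronize_rcu() before an instance can be freed.
 */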
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
{
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
int pkts = 0, total_len = 0;
bool added = false;
bool restart_tx = false;
mutex_lock(&vq->mutex);
if (!vhost_vq_get_backend(vq))
goto out;
if (!vq_meta_prefetch(vq))
goto out;
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
do {
struct virtio_vsock_hdr *hdr;
size_t iov_len, payload_len;
struct iov_iter iov_iter;
u32 flags_to_restore = 0;
struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
int head;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
if (!skb) {
vhost_enable_notify(&vsock->dev, vq);
break;
}
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
if (head < 0) {
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
if (head == vq->num) {
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
/* We cannot finish yet if more buffers snuck in while
* re-enabling notify.
*/
if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
vhost_disable_notify(&vsock->dev, vq);
continue;
}
break;
}
if (out) {
kfree_skb(skb);
vq_err(vq, "Expected 0 output buffers, got %u\n", out);
break;
}
iov_len = iov_length(&vq->iov[out], in);
if (iov_len < sizeof(*hdr)) {
kfree_skb(skb);
vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
break;
}
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
payload_len = skb->len;
hdr = virtio_vsock_hdr(skb);
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
if (payload_len > iov_len - sizeof(*hdr)) {
payload_len = iov_len - sizeof(*hdr);
			/* Since we are copying pieces of a large packet's
			 * buffer into small rx buffers, the headers of the
			 * packets placed in the rx queue are created
			 * dynamically, initialized from the current packet's
			 * header (except for the length). For SOCK_SEQPACKET
			 * we must also clear the message delimiter bit
			 * (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
			 * (VIRTIO_VSOCK_SEQ_EOR) if set; otherwise a whole
			 * sequence of packets would carry these bits. Once the
			 * initialized header has been copied to the rx buffer,
			 * the required bits are restored below.
			 */
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
}
}
}
/* Set the correct length in the header */
hdr->len = cpu_to_le32(payload_len);
nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
if (nbytes != sizeof(*hdr)) {
kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt hdr\n");
break;
}
nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
}
/* Deliver to monitoring devices all packets that we
* will transmit.
*/
virtio_transport_deliver_tap_pkt(skb);
vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;
skb_pull(skb, payload_len);
total_len += payload_len;
/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
if (skb->len > 0) {
hdr->flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same skb to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
*/
virtio_vsock_skb_clear_tap_delivered(skb);
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
} else {
if (virtio_vsock_skb_reply(skb)) {
int val;
val = atomic_dec_return(&vsock->queued_replies);
/* Do we have resources to resume tx
* processing?
*/
if (val + 1 == tx_vq->num)
restart_tx = true;
}
consume_skb(skb);
}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
vhost_signal(&vsock->dev, vq);
out:
mutex_unlock(&vq->mutex);
if (restart_tx)
vhost_poll_queue(&tx_vq->poll);
}
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
struct vhost_virtqueue *vq;
struct vhost_vsock *vsock;
vsock = container_of(work, struct vhost_vsock, send_pkt_work);
vq = &vsock->vqs[VSOCK_VQ_RX];
vhost_transport_do_send_pkt(vsock, vq);
}
static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vhost_vsock *vsock;
int len = skb->len;
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
if (!vsock) {
rcu_read_unlock();
kfree_skb(skb);
return -ENODEV;
}
if (virtio_vsock_skb_reply(skb))
atomic_inc(&vsock->queued_replies);
virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
rcu_read_unlock();
return len;
}
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct vhost_vsock *vsock;
int cnt = 0;
int ret = -ENODEV;
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
if (!vsock)
goto out;
cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
if (cnt) {
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
int new_cnt;
new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
vhost_poll_queue(&tx_vq->poll);
}
ret = 0;
out:
rcu_read_unlock();
return ret;
}
static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in)
{
struct virtio_vsock_hdr *hdr;
struct iov_iter iov_iter;
struct sk_buff *skb;
size_t payload_len;
size_t nbytes;
size_t len;
if (in != 0) {
vq_err(vq, "Expected 0 input buffers, got %u\n", in);
return NULL;
}
len = iov_length(vq->iov, out);
/* len contains both payload and hdr */
skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
hdr = virtio_vsock_hdr(skb);
nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
if (nbytes != sizeof(*hdr)) {
vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
sizeof(*hdr), nbytes);
kfree_skb(skb);
return NULL;
}
payload_len = le32_to_cpu(hdr->len);
/* No payload */
if (!payload_len)
return skb;
/* The pkt is too big or the length in the header is invalid */
if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
payload_len + sizeof(*hdr) > len) {
kfree_skb(skb);
return NULL;
}
virtio_vsock_skb_rx_put(skb);
nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
payload_len, nbytes);
kfree_skb(skb);
return NULL;
}
return skb;
}
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
int val;
smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
val = atomic_read(&vsock->queued_replies);
return val < vq->num;
}
static bool vhost_transport_seqpacket_allow(u32 remote_cid);
static struct virtio_transport vhost_transport = {
.transport = {
.module = THIS_MODULE,
.get_local_cid = vhost_transport_get_local_cid,
.init = virtio_transport_do_socket_init,
.destruct = virtio_transport_destruct,
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
.cancel_pkt = vhost_transport_cancel_pkt,
.dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_dequeue = virtio_transport_dgram_dequeue,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_allow = virtio_transport_dgram_allow,
.stream_enqueue = virtio_transport_stream_enqueue,
.stream_dequeue = virtio_transport_stream_dequeue,
.stream_has_data = virtio_transport_stream_has_data,
.stream_has_space = virtio_transport_stream_has_space,
.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
.seqpacket_allow = vhost_transport_seqpacket_allow,
.seqpacket_has_data = virtio_transport_seqpacket_has_data,
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
.notify_send_init = virtio_transport_notify_send_init,
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.notify_buffer_size = virtio_transport_notify_buffer_size,
.read_skb = virtio_transport_read_skb,
},
.send_pkt = vhost_transport_send_pkt,
};
static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
struct vhost_vsock *vsock;
bool seqpacket_allow = false;
rcu_read_lock();
vsock = vhost_vsock_get(remote_cid);
if (vsock)
seqpacket_allow = vsock->seqpacket_allow;
rcu_read_unlock();
return seqpacket_allow;
}
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
int head, pkts = 0, total_len = 0;
unsigned int out, in;
struct sk_buff *skb;
bool added = false;
mutex_lock(&vq->mutex);
if (!vhost_vq_get_backend(vq))
goto out;
if (!vq_meta_prefetch(vq))
goto out;
vhost_disable_notify(&vsock->dev, vq);
do {
struct virtio_vsock_hdr *hdr;
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
* callbacks disabled.
*/
goto no_more_replies;
}
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
if (head < 0)
break;
if (head == vq->num) {
if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
vhost_disable_notify(&vsock->dev, vq);
continue;
}
break;
}
skb = vhost_vsock_alloc_skb(vq, out, in);
if (!skb) {
vq_err(vq, "Faulted on pkt\n");
continue;
}
total_len += sizeof(*hdr) + skb->len;
/* Deliver to monitoring devices all received packets */
virtio_transport_deliver_tap_pkt(skb);
hdr = virtio_vsock_hdr(skb);
/* Only accept correctly addressed packets */
if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
le64_to_cpu(hdr->dst_cid) ==
vhost_transport_get_local_cid())
virtio_transport_recv_pkt(&vhost_transport, skb);
else
kfree_skb(skb);
vhost_add_used(vq, head, 0);
added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
no_more_replies:
if (added)
vhost_signal(&vsock->dev, vq);
out:
mutex_unlock(&vq->mutex);
}
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
poll.work);
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
vhost_transport_do_send_pkt(vsock, vq);
}
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
struct vhost_virtqueue *vq;
size_t i;
int ret;
mutex_lock(&vsock->dev.mutex);
ret = vhost_dev_check_owner(&vsock->dev);
if (ret)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
goto err_vq;
}
if (!vhost_vq_get_backend(vq)) {
vhost_vq_set_backend(vq, vsock);
ret = vhost_vq_init_access(vq);
if (ret)
goto err_vq;
}
mutex_unlock(&vq->mutex);
}
/* Some packets may have been queued before the device was started,
* let's kick the send worker to send them.
*/
vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);
mutex_unlock(&vsock->dev.mutex);
return 0;
err_vq:
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
err:
mutex_unlock(&vsock->dev.mutex);
return ret;
}
static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
size_t i;
int ret = 0;
mutex_lock(&vsock->dev.mutex);
if (check_owner) {
ret = vhost_dev_check_owner(&vsock->dev);
if (ret)
goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
struct vhost_virtqueue *vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
err:
mutex_unlock(&vsock->dev.mutex);
return ret;
}
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
kvfree(vsock);
}
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
struct vhost_virtqueue **vqs;
struct vhost_vsock *vsock;
int ret;
/* This struct is large and allocation could fail, fall back to vmalloc
* if there is no other way.
*/
vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!vsock)
return -ENOMEM;
vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
ret = -ENOMEM;
goto out;
}
vsock->guest_cid = 0; /* no CID assigned yet */
atomic_set(&vsock->queued_replies, 0);
vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
VHOST_VSOCK_WEIGHT, true, NULL);
file->private_data = vsock;
skb_queue_head_init(&vsock->send_pkt_queue);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
return 0;
out:
vhost_vsock_free(vsock);
return ret;
}
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
vhost_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
{
struct vsock_sock *vsk = vsock_sk(sk);
/* vmci_transport.c doesn't take sk_lock here either. At least we're
* under vsock_table_lock so the sock cannot disappear while we're
* executing.
*/
/* If the peer is still valid, no need to reset connection */
if (vhost_vsock_get(vsk->remote_addr.svm_cid))
return;
/* If the close timeout is pending, let it expire. This avoids races
* with the timeout callback.
*/
if (vsk->close_work_scheduled)
return;
sock_set_flag(sk, SOCK_DONE);
vsk->peer_shutdown = SHUTDOWN_MASK;
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ECONNRESET;
sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
struct vhost_vsock *vsock = file->private_data;
mutex_lock(&vhost_vsock_mutex);
if (vsock->guest_cid)
hash_del_rcu(&vsock->hash);
mutex_unlock(&vhost_vsock_mutex);
/* Wait for other CPUs to finish using vsock */
synchronize_rcu();
/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(&vhost_transport.transport,
vhost_vsock_reset_orphans);
/* Don't check the owner, because we are in the release path, so we
* need to stop the vsock device in any case.
* vhost_vsock_stop() can not fail in this case, so we don't need to
* check the return code.
*/
vhost_vsock_stop(vsock, false);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
vhost_dev_cleanup(&vsock->dev);
kfree(vsock->dev.vqs);
vhost_vsock_free(vsock);
return 0;
}
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
struct vhost_vsock *other;
/* Refuse reserved CIDs */
if (guest_cid <= VMADDR_CID_HOST ||
guest_cid == U32_MAX)
return -EINVAL;
/* 64-bit CIDs are not yet supported */
if (guest_cid > U32_MAX)
return -EINVAL;
/* Refuse if CID is assigned to the guest->host transport (i.e. nested
* VM), to make the loopback work.
*/
if (vsock_find_cid(guest_cid))
return -EADDRINUSE;
/* Refuse if CID is already in use */
mutex_lock(&vhost_vsock_mutex);
other = vhost_vsock_get(guest_cid);
if (other && other != vsock) {
mutex_unlock(&vhost_vsock_mutex);
return -EADDRINUSE;
}
if (vsock->guest_cid)
hash_del_rcu(&vsock->hash);
vsock->guest_cid = guest_cid;
hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
mutex_unlock(&vhost_vsock_mutex);
return 0;
}
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
struct vhost_virtqueue *vq;
int i;
if (features & ~VHOST_VSOCK_FEATURES)
return -EOPNOTSUPP;
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
goto err;
}
if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
if (vhost_init_device_iotlb(&vsock->dev))
goto err;
}
if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
vsock->seqpacket_allow = true;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
}
mutex_unlock(&vsock->dev.mutex);
return 0;
err:
mutex_unlock(&vsock->dev.mutex);
return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
struct vhost_vsock *vsock = f->private_data;
void __user *argp = (void __user *)arg;
u64 guest_cid;
u64 features;
int start;
int r;
switch (ioctl) {
case VHOST_VSOCK_SET_GUEST_CID:
if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
return -EFAULT;
return vhost_vsock_set_cid(vsock, guest_cid);
case VHOST_VSOCK_SET_RUNNING:
if (copy_from_user(&start, argp, sizeof(start)))
return -EFAULT;
if (start)
return vhost_vsock_start(vsock);
else
return vhost_vsock_stop(vsock, true);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VSOCK_BACKEND_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
return -EFAULT;
return 0;
case VHOST_SET_BACKEND_FEATURES:
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
return -EOPNOTSUPP;
vhost_set_backend_features(&vsock->dev, features);
return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
if (r == -ENOIOCTLCMD)
r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
else
vhost_vsock_flush(vsock);
mutex_unlock(&vsock->dev.mutex);
return r;
}
}
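/* A minimal userspace bring-up sketch for the ioctls above (ring and
 * eventfd setup via VHOST_SET_VRING_* and error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int vhost = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// first CID above the reserved ones
 *	int start = 1;
 *
 *	ioctl(vhost, VHOST_SET_OWNER);
 *	ioctl(vhost, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	ioctl(vhost, VHOST_VSOCK_SET_RUNNING, &start);
 */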
static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
int noblock = file->f_flags & O_NONBLOCK;
return vhost_chr_read_iter(dev, to, noblock);
}
static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
return vhost_chr_write_iter(dev, from);
}
static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_vsock *vsock = file->private_data;
struct vhost_dev *dev = &vsock->dev;
return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
.release = vhost_vsock_dev_release,
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.read_iter = vhost_vsock_chr_read_iter,
.write_iter = vhost_vsock_chr_write_iter,
.poll = vhost_vsock_chr_poll,
};
static struct miscdevice vhost_vsock_misc = {
.minor = VHOST_VSOCK_MINOR,
.name = "vhost-vsock",
.fops = &vhost_vsock_fops,
};
static int __init vhost_vsock_init(void)
{
int ret;
ret = vsock_core_register(&vhost_transport.transport,
VSOCK_TRANSPORT_F_H2G);
if (ret < 0)
return ret;
ret = misc_register(&vhost_vsock_misc);
if (ret) {
vsock_core_unregister(&vhost_transport.transport);
return ret;
}
return 0;
}
static void __exit vhost_vsock_exit(void)
{
misc_deregister(&vhost_vsock_misc);
vsock_core_unregister(&vhost_transport.transport);
};
module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");
| linux-master | drivers/vhost/vsock.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
* Copyright (C) 2006 Rusty Russell IBM Corporation
*
* Author: Michael S. Tsirkin <[email protected]>
*
* Inspiration, some code, and most witty comments come from
* Documentation/virtual/lguest/lguest.c, by Rusty Russell
*
* Generic code for virtio server in host kernel.
*/
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>
#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
"Maximum number of iotlb entries. (default: 2048)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
};
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
vq->user_be = !virtio_legacy_is_little_endian();
}
static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
vq->user_be = true;
}
static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
vq->user_be = false;
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
struct vhost_vring_state s;
if (vq->private_data)
return -EBUSY;
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
s.num != VHOST_VRING_BIG_ENDIAN)
return -EINVAL;
if (s.num == VHOST_VRING_BIG_ENDIAN)
vhost_enable_cross_endian_big(vq);
else
vhost_enable_cross_endian_little(vq);
return 0;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{
struct vhost_vring_state s = {
.index = idx,
.num = vq->user_be
};
if (copy_to_user(argp, &s, sizeof(s)))
return -EFAULT;
return 0;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
/* Note for legacy virtio: user_be is initialized at reset time
* according to the host endianness. If userspace does not set an
* explicit endianness, the default behavior is native endian, as
* expected by legacy virtio.
*/
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
return -ENOIOCTLCMD;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{
return -ENOIOCTLCMD;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
vhost_init_is_le(vq);
}
struct vhost_flush_struct {
struct vhost_work work;
struct completion wait_event;
};
static void vhost_flush_work(struct vhost_work *work)
{
struct vhost_flush_struct *s;
s = container_of(work, struct vhost_flush_struct, work);
complete(&s->wait_event);
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct vhost_poll *poll;
poll = container_of(pt, struct vhost_poll, table);
poll->wqh = wqh;
add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
void *key)
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
struct vhost_work *work = &poll->work;
if (!(key_to_poll(key) & poll->mask))
return 0;
if (!poll->dev->use_worker)
work->fn(work);
else
vhost_poll_queue(poll);
return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
__poll_t mask, struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
poll->wqh = NULL;
poll->vq = vq;
vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to the file's wait queue. The caller
* must keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
__poll_t mask;
if (poll->wqh)
return 0;
mask = vfs_poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
vhost_poll_stop(poll);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop the
* file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
if (poll->wqh) {
remove_wait_queue(poll->wqh, &poll->wait);
poll->wqh = NULL;
}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
static void vhost_worker_queue(struct vhost_worker *worker,
struct vhost_work *work)
{
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
/* We can only add the work to the list after we're
* sure it was not in the list.
* test_and_set_bit() implies a memory barrier.
*/
llist_add(&work->node, &worker->work_list);
vhost_task_wake(worker->vtsk);
}
}
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
struct vhost_worker *worker;
bool queued = false;
rcu_read_lock();
worker = rcu_dereference(vq->worker);
if (worker) {
queued = true;
vhost_worker_queue(worker, work);
}
rcu_read_unlock();
return queued;
}
EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
void vhost_vq_flush(struct vhost_virtqueue *vq)
{
struct vhost_flush_struct flush;
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
if (vhost_vq_work_queue(vq, &flush.work))
wait_for_completion(&flush.wait_event);
}
EXPORT_SYMBOL_GPL(vhost_vq_flush);
/**
* vhost_worker_flush - flush a worker
* @worker: worker to flush
*
* This does not use RCU to protect the worker, so the device or worker
* mutex must be held.
*/
static void vhost_worker_flush(struct vhost_worker *worker)
{
struct vhost_flush_struct flush;
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
vhost_worker_queue(worker, &flush.work);
wait_for_completion(&flush.wait_event);
}
void vhost_dev_flush(struct vhost_dev *dev)
{
struct vhost_worker *worker;
unsigned long i;
xa_for_each(&dev->worker_xa, i, worker) {
mutex_lock(&worker->mutex);
if (!worker->attachment_cnt) {
mutex_unlock(&worker->mutex);
continue;
}
vhost_worker_flush(worker);
mutex_unlock(&worker->mutex);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
struct vhost_worker *worker;
bool has_work = false;
rcu_read_lock();
worker = rcu_dereference(vq->worker);
if (worker && !llist_empty(&worker->work_list))
has_work = true;
rcu_read_unlock();
return has_work;
}
EXPORT_SYMBOL_GPL(vhost_vq_has_work);
void vhost_poll_queue(struct vhost_poll *poll)
{
vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
int j;
for (j = 0; j < VHOST_NUM_ADDRS; j++)
vq->meta_iotlb[j] = NULL;
}
static void vhost_vq_meta_reset(struct vhost_dev *d)
{
int i;
for (i = 0; i < d->nvqs; ++i)
__vhost_vq_meta_reset(d->vqs[i]);
}
static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
call_ctx->ctx = NULL;
memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}
bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
vq->num = 1;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
vq->signalled_used_valid = false;
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
vq->private_data = NULL;
vq->acked_features = 0;
vq->acked_backend_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->kick = NULL;
vq->log_ctx = NULL;
vhost_disable_cross_endian(vq);
vhost_reset_is_le(vq);
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
rcu_assign_pointer(vq->worker, NULL);
vhost_vring_call_reset(&vq->call_ctx);
__vhost_vq_meta_reset(vq);
}
static bool vhost_worker(void *data)
{
struct vhost_worker *worker = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
node = llist_del_all(&worker->work_list);
if (node) {
__set_current_state(TASK_RUNNING);
node = llist_reverse_order(node);
/* make sure flag is seen after deletion */
smp_wmb();
llist_for_each_entry_safe(work, work_next, node, node) {
clear_bit(VHOST_WORK_QUEUED, &work->flags);
kcov_remote_start_common(worker->kcov_handle);
work->fn(work);
kcov_remote_stop();
cond_resched();
}
}
return !!node;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->indirect);
vq->indirect = NULL;
kfree(vq->log);
vq->log = NULL;
kfree(vq->heads);
vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
struct vhost_virtqueue *vq;
int i;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
GFP_KERNEL);
vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
}
return 0;
err_nomem:
for (; i >= 0; --i)
vhost_vq_free_iovecs(dev->vqs[i]);
return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i)
vhost_vq_free_iovecs(dev->vqs[i]);
}
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
int pkts, int total_len)
{
struct vhost_dev *dev = vq->dev;
if ((dev->byte_weight && total_len >= dev->byte_weight) ||
pkts >= dev->weight) {
vhost_poll_queue(&vq->poll);
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
unsigned int num)
{
size_t event __maybe_unused =
vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return size_add(struct_size(vq->avail, ring, num), event);
}
static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
unsigned int num)
{
size_t event __maybe_unused =
vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return size_add(struct_size(vq->used, ring, num), event);
}
static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
unsigned int num)
{
return sizeof(*vq->desc) * num;
}
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
bool use_worker,
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
int i;
dev->vqs = vqs;
dev->nvqs = nvqs;
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
dev->umem = NULL;
dev->iotlb = NULL;
dev->mm = NULL;
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
dev->use_worker = use_worker;
dev->msg_handler = msg_handler;
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
INIT_LIST_HEAD(&dev->pending_list);
spin_lock_init(&dev->iotlb_lock);
xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->log = NULL;
vq->indirect = NULL;
vq->heads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
EPOLLIN, dev, vq);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
/* Are you the owner? If not, I don't think you mean to do that */
return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
static void vhost_attach_mm(struct vhost_dev *dev)
{
/* No owner, become one */
if (dev->use_worker) {
dev->mm = get_task_mm(current);
} else {
/* vDPA devices do not use a worker thread, so there's
* no need to hold the address space for the mm. This
* helps avoid a deadlock in the case of mmap(), which
* may hold a refcount on the file and depend on the
* release method to remove the vma.
*/
dev->mm = current->mm;
mmgrab(dev->mm);
}
}
static void vhost_detach_mm(struct vhost_dev *dev)
{
if (!dev->mm)
return;
if (dev->use_worker)
mmput(dev->mm);
else
mmdrop(dev->mm);
dev->mm = NULL;
}
static void vhost_worker_destroy(struct vhost_dev *dev,
struct vhost_worker *worker)
{
if (!worker)
return;
WARN_ON(!llist_empty(&worker->work_list));
xa_erase(&dev->worker_xa, worker->id);
vhost_task_stop(worker->vtsk);
kfree(worker);
}
static void vhost_workers_free(struct vhost_dev *dev)
{
struct vhost_worker *worker;
unsigned long i;
if (!dev->use_worker)
return;
for (i = 0; i < dev->nvqs; i++)
rcu_assign_pointer(dev->vqs[i]->worker, NULL);
/*
* Free the default worker we created, and clean up workers that
* userspace created but couldn't clean up (it forgot or crashed).
*/
xa_for_each(&dev->worker_xa, i, worker)
vhost_worker_destroy(dev, worker);
xa_destroy(&dev->worker_xa);
}
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
struct vhost_worker *worker;
struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
int ret;
u32 id;
worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
if (!worker)
return NULL;
snprintf(name, sizeof(name), "vhost-%d", current->pid);
vtsk = vhost_task_create(vhost_worker, worker, name);
if (!vtsk)
goto free_worker;
mutex_init(&worker->mutex);
init_llist_head(&worker->work_list);
worker->kcov_handle = kcov_common_handle();
worker->vtsk = vtsk;
vhost_task_start(vtsk);
ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
if (ret < 0)
goto stop_worker;
worker->id = id;
return worker;
stop_worker:
vhost_task_stop(vtsk);
free_worker:
kfree(worker);
return NULL;
}
/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_worker *worker)
{
struct vhost_worker *old_worker;
old_worker = rcu_dereference_check(vq->worker,
lockdep_is_held(&vq->dev->mutex));
mutex_lock(&worker->mutex);
worker->attachment_cnt++;
mutex_unlock(&worker->mutex);
rcu_assign_pointer(vq->worker, worker);
if (!old_worker)
return;
/*
* Take the worker mutex to make sure we see the work queued from
* device-wide flushes, which don't use RCU for execution.
*/
mutex_lock(&old_worker->mutex);
old_worker->attachment_cnt--;
/*
* We don't want to call synchronize_rcu for every vq during setup
* because it will slow down VM startup. If we haven't done
* VHOST_SET_VRING_KICK or the driver-specific
* SET_ENDPOINT/RUNNING, we can skip the sync since no work will
* have been queued for scsi and net.
*/
mutex_lock(&vq->mutex);
if (!vhost_vq_get_backend(vq) && !vq->kick) {
mutex_unlock(&vq->mutex);
mutex_unlock(&old_worker->mutex);
/*
* vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
* Warn if it adds support for multiple workers but forgets to
* handle the early queueing case.
*/
WARN_ON(!old_worker->attachment_cnt &&
!llist_empty(&old_worker->work_list));
return;
}
mutex_unlock(&vq->mutex);
/* Make sure new vq queue/flush/poll calls see the new worker */
synchronize_rcu();
/* Make sure whatever was queued gets run */
vhost_worker_flush(old_worker);
mutex_unlock(&old_worker->mutex);
}
/* Caller must have device mutex */
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_vring_worker *info)
{
unsigned long index = info->worker_id;
struct vhost_dev *dev = vq->dev;
struct vhost_worker *worker;
if (!dev->use_worker)
return -EINVAL;
worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
if (!worker || worker->id != info->worker_id)
return -ENODEV;
__vhost_vq_attach_worker(vq, worker);
return 0;
}
/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
{
struct vhost_worker *worker;
worker = vhost_worker_create(dev);
if (!worker)
return -ENOMEM;
info->worker_id = worker->id;
return 0;
}
/* Caller must have device mutex */
static int vhost_free_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
{
unsigned long index = info->worker_id;
struct vhost_worker *worker;
worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
if (!worker || worker->id != info->worker_id)
return -ENODEV;
mutex_lock(&worker->mutex);
if (worker->attachment_cnt) {
mutex_unlock(&worker->mutex);
return -EBUSY;
}
mutex_unlock(&worker->mutex);
vhost_worker_destroy(dev, worker);
return 0;
}
static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
struct vhost_virtqueue **vq, u32 *id)
{
u32 __user *idxp = argp;
u32 idx;
long r;
r = get_user(idx, idxp);
if (r < 0)
return r;
if (idx >= dev->nvqs)
return -ENOBUFS;
idx = array_index_nospec(idx, dev->nvqs);
*vq = dev->vqs[idx];
*id = idx;
return 0;
}
/* Caller must have device mutex */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
void __user *argp)
{
struct vhost_vring_worker ring_worker;
struct vhost_worker_state state;
struct vhost_worker *worker;
struct vhost_virtqueue *vq;
long ret;
u32 idx;
if (!dev->use_worker)
return -EINVAL;
if (!vhost_dev_has_owner(dev))
return -EINVAL;
ret = vhost_dev_check_owner(dev);
if (ret)
return ret;
switch (ioctl) {
/* dev worker ioctls */
case VHOST_NEW_WORKER:
ret = vhost_new_worker(dev, &state);
if (!ret && copy_to_user(argp, &state, sizeof(state)))
ret = -EFAULT;
return ret;
case VHOST_FREE_WORKER:
if (copy_from_user(&state, argp, sizeof(state)))
return -EFAULT;
return vhost_free_worker(dev, &state);
/* vring worker ioctls */
case VHOST_ATTACH_VRING_WORKER:
case VHOST_GET_VRING_WORKER:
break;
default:
return -ENOIOCTLCMD;
}
ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
if (ret)
return ret;
switch (ioctl) {
case VHOST_ATTACH_VRING_WORKER:
if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
ret = -EFAULT;
break;
}
ret = vhost_vq_attach_worker(vq, &ring_worker);
break;
case VHOST_GET_VRING_WORKER:
worker = rcu_dereference_check(vq->worker,
lockdep_is_held(&dev->mutex));
if (!worker) {
ret = -EINVAL;
break;
}
ring_worker.index = idx;
ring_worker.worker_id = worker->id;
if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
ret = -EFAULT;
break;
default:
ret = -ENOIOCTLCMD;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
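/*
 * Illustrative userspace sketch, not part of the driver: how a VMM might
 * exercise the worker ioctls handled above on a vhost fd it owns. Ioctl
 * names and struct layouts are assumed from <linux/vhost.h> on kernels
 * that provide VHOST_NEW_WORKER; the device path is a placeholder and
 * error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
        struct vhost_worker_state state = { 0 };
        struct vhost_vring_worker ring_worker = { 0 };
        int fd = open("/dev/vhost-vsock", O_RDWR);      /* placeholder device */

        if (fd < 0 || ioctl(fd, VHOST_SET_OWNER) < 0)
                return 1;
        /* VHOST_NEW_WORKER fills in the id of the freshly created worker. */
        if (ioctl(fd, VHOST_NEW_WORKER, &state) < 0)
                return 1;
        ring_worker.index = 0;                  /* attach it to vring 0 */
        ring_worker.worker_id = state.worker_id;
        if (ioctl(fd, VHOST_ATTACH_VRING_WORKER, &ring_worker) < 0)
                return 1;
        printf("vring 0 now served by worker %u\n", state.worker_id);
        return 0;
}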
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct vhost_worker *worker;
int err, i;
/* Is there an owner already? */
if (vhost_dev_has_owner(dev)) {
err = -EBUSY;
goto err_mm;
}
vhost_attach_mm(dev);
err = vhost_dev_alloc_iovecs(dev);
if (err)
goto err_iovecs;
if (dev->use_worker) {
/*
* This should be done last, because vsock can queue work
* before VHOST_SET_OWNER so it simplifies the failure path
* below since we don't have to worry about vsock queueing
* while we free the worker.
*/
worker = vhost_worker_create(dev);
if (!worker) {
err = -ENOMEM;
goto err_worker;
}
for (i = 0; i < dev->nvqs; i++)
__vhost_vq_attach_worker(dev->vqs[i], worker);
}
return 0;
err_worker:
vhost_dev_free_iovecs(dev);
err_iovecs:
vhost_detach_mm(dev);
err_mm:
return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
static struct vhost_iotlb *iotlb_alloc(void)
{
return vhost_iotlb_alloc(max_iotlb_entries,
VHOST_IOTLB_FLAG_RETIRE);
}
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
int i;
vhost_dev_cleanup(dev);
dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
*/
for (i = 0; i < dev->nvqs; ++i)
dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
vhost_poll_stop(&dev->vqs[i]->poll);
}
vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
spin_lock(&dev->iotlb_lock);
list_for_each_entry_safe(node, n, &dev->read_list, node) {
list_del(&node->node);
kfree(node);
}
list_for_each_entry_safe(node, n, &dev->pending_list, node) {
list_del(&node->node);
kfree(node);
}
spin_unlock(&dev->iotlb_lock);
}
EXPORT_SYMBOL_GPL(vhost_clear_msg);
void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->error_ctx)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
if (dev->vqs[i]->call_ctx.ctx)
eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
/* No one will access memory at this point */
vhost_iotlb_free(dev->umem);
dev->umem = NULL;
vhost_iotlb_free(dev->iotlb);
dev->iotlb = NULL;
vhost_clear_msg(dev);
wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
vhost_workers_free(dev);
vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
u64 a = addr / VHOST_PAGE_SIZE / 8;
/* Make sure 64 bit math will not overflow. */
if (a > ULONG_MAX - (unsigned long)log_base ||
a + (unsigned long)log_base > ULONG_MAX)
return false;
return access_ok(log_base + a,
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
if (uaddr > ULONG_MAX || size > ULONG_MAX)
return true;
if (!size)
return false;
return uaddr > ULONG_MAX - size + 1;
}
/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
int log_all)
{
struct vhost_iotlb_map *map;
if (!umem)
return false;
list_for_each_entry(map, &umem->list, link) {
unsigned long a = map->addr;
if (vhost_overflow(map->addr, map->size))
return false;
if (!access_ok((void __user *)a, map->size))
return false;
else if (log_all && !log_access_ok(log_base,
map->start,
map->size))
return false;
}
return true;
}
static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
u64 addr, unsigned int size,
int type)
{
const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
if (!map)
return NULL;
return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
int log_all)
{
int i;
for (i = 0; i < d->nvqs; ++i) {
bool ok;
bool log;
mutex_lock(&d->vqs[i]->mutex);
log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
ok = vq_memory_access_ok(d->vqs[i]->log_base,
umem, log);
else
ok = true;
mutex_unlock(&d->vqs[i]->mutex);
if (!ok)
return false;
}
return true;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{
int ret;
if (!vq->iotlb)
return __copy_to_user(to, from, size);
else {
/* This function should be called after iotlb
* prefetch, which means we're sure that all vq
* metadata can be accessed through the iotlb, so
* -EAGAIN should not happen in this case.
*/
struct iov_iter t;
void __user *uaddr = vhost_vq_meta_fetch(vq,
(u64)(uintptr_t)to, size,
VHOST_ADDR_USED);
if (uaddr)
return __copy_to_user(uaddr, from, size);
ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
ARRAY_SIZE(vq->iotlb_iov),
VHOST_ACCESS_WO);
if (ret < 0)
goto out;
iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
ret = copy_to_iter(from, size, &t);
if (ret == size)
ret = 0;
}
out:
return ret;
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
void __user *from, unsigned size)
{
int ret;
if (!vq->iotlb)
return __copy_from_user(to, from, size);
else {
/* This function should be called after iotlb
* prefetch, which means we're sure that the vq
* can be accessed through the iotlb, so -EAGAIN
* should not happen in this case.
*/
void __user *uaddr = vhost_vq_meta_fetch(vq,
(u64)(uintptr_t)from, size,
VHOST_ADDR_DESC);
struct iov_iter f;
if (uaddr)
return __copy_from_user(to, uaddr, size);
ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
ARRAY_SIZE(vq->iotlb_iov),
VHOST_ACCESS_RO);
if (ret < 0) {
vq_err(vq, "IOTLB translation failure: uaddr "
"%p size 0x%llx\n", from,
(unsigned long long) size);
goto out;
}
iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
ret = copy_from_iter(to, size, &f);
if (ret == size)
ret = 0;
}
out:
return ret;
}
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
void __user *addr, unsigned int size,
int type)
{
int ret;
ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
ARRAY_SIZE(vq->iotlb_iov),
VHOST_ACCESS_RO);
if (ret < 0) {
vq_err(vq, "IOTLB translation failure: uaddr "
"%p size 0x%llx\n", addr,
(unsigned long long) size);
return NULL;
}
if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
vq_err(vq, "Non atomic userspace memory access: uaddr "
"%p size 0x%llx\n", addr,
(unsigned long long) size);
return NULL;
}
return vq->iotlb_iov[0].iov_base;
}
/* This function should be called after iotlb
* prefetch, which means we're sure that the vq
* can be accessed through the iotlb, so -EAGAIN
* should not happen in this case.
*/
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
void __user *addr, unsigned int size,
int type)
{
void __user *uaddr = vhost_vq_meta_fetch(vq,
(u64)(uintptr_t)addr, size, type);
if (uaddr)
return uaddr;
return __vhost_get_user_slow(vq, addr, size, type);
}
#define vhost_put_user(vq, x, ptr) \
({ \
int ret; \
if (!vq->iotlb) { \
ret = __put_user(x, ptr); \
} else { \
__typeof__(ptr) to = \
(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
sizeof(*ptr), VHOST_ADDR_USED); \
if (to != NULL) \
ret = __put_user(x, to); \
else \
ret = -EFAULT; \
} \
ret; \
})
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{
return vhost_copy_to_user(vq, vq->used->ring + idx, head,
count * sizeof(*head));
}
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
&vq->used->flags);
}
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
&vq->used->idx);
}
#define vhost_get_user(vq, x, ptr, type) \
({ \
int ret; \
if (!vq->iotlb) { \
ret = __get_user(x, ptr); \
} else { \
__typeof__(ptr) from = \
(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
sizeof(*ptr), \
type); \
if (from != NULL) \
ret = __get_user(x, from); \
else \
ret = -EFAULT; \
} \
ret; \
})
#define vhost_get_avail(vq, x, ptr) \
vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
#define vhost_get_used(vq, x, ptr) \
vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
int i;
for (i = 0; i < d->nvqs; ++i)
mutex_lock_nested(&d->vqs[i]->mutex, i);
}
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
int i;
for (i = 0; i < d->nvqs; ++i)
mutex_unlock(&d->vqs[i]->mutex);
}
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
return vhost_get_avail(vq, *idx, &vq->avail->idx);
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
__virtio16 *head, int idx)
{
return vhost_get_avail(vq, *head,
&vq->avail->ring[idx & (vq->num - 1)]);
}
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
__virtio16 *flags)
{
return vhost_get_avail(vq, *flags, &vq->avail->flags);
}
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
__virtio16 *event)
{
return vhost_get_avail(vq, *event, vhost_used_event(vq));
}
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
return vhost_get_used(vq, *idx, &vq->used->idx);
}
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
struct vring_desc *desc, int idx)
{
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
struct vhost_iotlb_msg *msg)
{
struct vhost_msg_node *node, *n;
spin_lock(&d->iotlb_lock);
list_for_each_entry_safe(node, n, &d->pending_list, node) {
struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
if (msg->iova <= vq_msg->iova &&
msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
vhost_poll_queue(&node->vq->poll);
list_del(&node->node);
kfree(node);
}
}
spin_unlock(&d->iotlb_lock);
}
static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
unsigned long a = uaddr;
/* Make sure 64 bit math will not overflow. */
if (vhost_overflow(uaddr, size))
return false;
if ((access & VHOST_ACCESS_RO) &&
!access_ok((void __user *)a, size))
return false;
if ((access & VHOST_ACCESS_WO) &&
!access_ok((void __user *)a, size))
return false;
return true;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
int ret = 0;
if (asid != 0)
return -EINVAL;
mutex_lock(&dev->mutex);
vhost_dev_lock_vqs(dev);
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
if (!dev->iotlb) {
ret = -EFAULT;
break;
}
if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
ret = -EFAULT;
break;
}
vhost_vq_meta_reset(dev);
if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1,
msg->uaddr, msg->perm)) {
ret = -ENOMEM;
break;
}
vhost_iotlb_notify_vq(dev, msg);
break;
case VHOST_IOTLB_INVALIDATE:
if (!dev->iotlb) {
ret = -EFAULT;
break;
}
vhost_vq_meta_reset(dev);
vhost_iotlb_del_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1);
break;
default:
ret = -EINVAL;
break;
}
vhost_dev_unlock_vqs(dev);
mutex_unlock(&dev->mutex);
return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from)
{
struct vhost_iotlb_msg msg;
size_t offset;
int type, ret;
u32 asid = 0;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type)) {
ret = -EINVAL;
goto done;
}
switch (type) {
case VHOST_IOTLB_MSG:
/* There may be a hole after 'type' for the V1 message format,
* so skip it here.
*/
offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
break;
case VHOST_IOTLB_MSG_V2:
if (vhost_backend_has_feature(dev->vqs[0],
VHOST_BACKEND_F_IOTLB_ASID)) {
ret = copy_from_iter(&asid, sizeof(asid), from);
if (ret != sizeof(asid)) {
ret = -EINVAL;
goto done;
}
offset = 0;
} else
offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
goto done;
}
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
if (ret != sizeof(msg)) {
ret = -EINVAL;
goto done;
}
if ((msg.type == VHOST_IOTLB_UPDATE ||
msg.type == VHOST_IOTLB_INVALIDATE) &&
msg.size == 0) {
ret = -EINVAL;
goto done;
}
if (dev->msg_handler)
ret = dev->msg_handler(dev, asid, &msg);
else
ret = vhost_process_iotlb_msg(dev, asid, &msg);
if (ret) {
ret = -EFAULT;
goto done;
}
ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
sizeof(struct vhost_msg_v2);
done:
return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
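/*
 * Illustrative userspace sketch, not part of the driver: feeding one
 * VHOST_IOTLB_UPDATE message to the device via write(), matching the
 * parsing done by vhost_chr_write_iter() above. Struct layouts and
 * constants are assumed from <linux/vhost_types.h>; no
 * VHOST_BACKEND_F_IOTLB_ASID negotiation is shown, so asid stays 0.
 */
#include <string.h>
#include <unistd.h>
#include <linux/vhost_types.h>

static int send_iotlb_update(int vhost_fd, __u64 iova, __u64 size, __u64 uaddr)
{
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.asid = 0;                           /* default address space */
        msg.iotlb.iova = iova;
        msg.iotlb.size = size;
        msg.iotlb.uaddr = uaddr;
        msg.iotlb.perm = VHOST_ACCESS_RW;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;

        /* The kernel consumes whole messages, so expect a full write. */
        return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}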
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{
__poll_t mask = 0;
poll_wait(file, &dev->wait, wait);
if (!list_empty(&dev->read_list))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock)
{
DEFINE_WAIT(wait);
struct vhost_msg_node *node;
ssize_t ret = 0;
unsigned size = sizeof(struct vhost_msg);
if (iov_iter_count(to) < size)
return 0;
while (1) {
if (!noblock)
prepare_to_wait(&dev->wait, &wait,
TASK_INTERRUPTIBLE);
node = vhost_dequeue_msg(dev, &dev->read_list);
if (node)
break;
if (noblock) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (!dev->iotlb) {
ret = -EBADFD;
break;
}
schedule();
}
if (!noblock)
finish_wait(&dev->wait, &wait);
if (node) {
struct vhost_iotlb_msg *msg;
void *start = &node->msg;
switch (node->msg.type) {
case VHOST_IOTLB_MSG:
size = sizeof(node->msg);
msg = &node->msg.iotlb;
break;
case VHOST_IOTLB_MSG_V2:
size = sizeof(node->msg_v2);
msg = &node->msg_v2.iotlb;
break;
default:
BUG();
break;
}
ret = copy_to_iter(start, size, to);
if (ret != size || msg->type != VHOST_IOTLB_MISS) {
kfree(node);
return ret;
}
vhost_enqueue_msg(dev, &dev->pending_list, node);
}
return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
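/*
 * Illustrative userspace sketch, not part of the driver: the read side
 * that pairs with vhost_chr_read_iter() above. The VMM blocks in read()
 * until the kernel queues an IOTLB miss, then answers with an update
 * (see send_iotlb_update() sketched earlier). The lookup itself is left
 * as a stub; layouts are assumed from <linux/vhost_types.h>.
 */
#include <unistd.h>
#include <linux/vhost_types.h>

static void serve_one_miss(int vhost_fd)
{
        struct vhost_msg_v2 msg;

        if (read(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
                return;
        if (msg.type != VHOST_IOTLB_MSG_V2 ||
            msg.iotlb.type != VHOST_IOTLB_MISS)
                return;
        /* Translate msg.iotlb.iova against the VMM's memory map here,
         * then reply with a VHOST_IOTLB_UPDATE for the covering range. */
}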
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
struct vhost_dev *dev = vq->dev;
struct vhost_msg_node *node;
struct vhost_iotlb_msg *msg;
bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
if (!node)
return -ENOMEM;
if (v2) {
node->msg_v2.type = VHOST_IOTLB_MSG_V2;
msg = &node->msg_v2.iotlb;
} else {
msg = &node->msg.iotlb;
}
msg->type = VHOST_IOTLB_MISS;
msg->iova = iova;
msg->perm = access;
vhost_enqueue_msg(dev, &dev->read_list, node);
return 0;
}
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
vring_desc_t __user *desc,
vring_avail_t __user *avail,
vring_used_t __user *used)
{
/* If an IOTLB device is present, the vring addresses are
* GIOVAs. Access validation occurs at prefetch time. */
if (vq->iotlb)
return true;
return access_ok(desc, vhost_get_desc_size(vq, num)) &&
access_ok(avail, vhost_get_avail_size(vq, num)) &&
access_ok(used, vhost_get_used_size(vq, num));
}
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
const struct vhost_iotlb_map *map,
int type)
{
int access = (type == VHOST_ADDR_USED) ?
VHOST_ACCESS_WO : VHOST_ACCESS_RO;
if (likely(map->perm & access))
vq->meta_iotlb[type] = map;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
int access, u64 addr, u64 len, int type)
{
const struct vhost_iotlb_map *map;
struct vhost_iotlb *umem = vq->iotlb;
u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
if (vhost_vq_meta_fetch(vq, addr, len, type))
return true;
while (len > s) {
map = vhost_iotlb_itree_first(umem, addr, last);
if (map == NULL || map->start > addr) {
vhost_iotlb_miss(vq, addr, access);
return false;
} else if (!(map->perm & access)) {
/* Report the possible access violation by
* requesting another translation from userspace.
*/
return false;
}
size = map->size - addr + map->start;
if (orig_addr == addr && size >= len)
vhost_vq_meta_update(vq, map, type);
s += size;
addr += size;
}
return true;
}
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
unsigned int num = vq->num;
if (!vq->iotlb)
return 1;
return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
vhost_get_avail_size(vq, num),
VHOST_ADDR_AVAIL) &&
iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
void __user *log_base,
bool log_used,
u64 log_addr)
{
/* If an IOTLB device is present, log_addr is a GIOVA that
* will never be logged by log_used(). */
if (vq->iotlb)
return true;
return !log_used || log_access_ok(log_base, log_addr,
vhost_get_used_size(vq, vq->num));
}
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
void __user *log_base)
{
return vq_memory_access_ok(log_base, vq->umem,
vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
if (!vq_log_access_ok(vq, vq->log_base))
return false;
return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem;
struct vhost_memory_region *region;
struct vhost_iotlb *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
if (copy_from_user(&mem, m, size))
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
if (mem.nregions > max_mem_regions)
return -E2BIG;
newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
GFP_KERNEL);
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
flex_array_size(newmem, regions, mem.nregions))) {
kvfree(newmem);
return -EFAULT;
}
newumem = iotlb_alloc();
if (!newumem) {
kvfree(newmem);
return -ENOMEM;
}
for (region = newmem->regions;
region < newmem->regions + mem.nregions;
region++) {
if (vhost_iotlb_add_range(newumem,
region->guest_phys_addr,
region->guest_phys_addr +
region->memory_size - 1,
region->userspace_addr,
VHOST_MAP_RW))
goto err;
}
if (!memory_access_ok(d, newumem, 0))
goto err;
oldumem = d->umem;
d->umem = newumem;
/* All memory accesses are done under some VQ mutex. */
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->umem = newumem;
mutex_unlock(&d->vqs[i]->mutex);
}
kvfree(newmem);
vhost_iotlb_free(oldumem);
return 0;
err:
vhost_iotlb_free(newumem);
kvfree(newmem);
return -EFAULT;
}
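/*
 * Illustrative userspace sketch, not part of the driver: building the
 * one-region table that vhost_set_memory() above parses. Struct layouts
 * and VHOST_SET_MEM_TABLE are assumed from <linux/vhost.h>; mapping the
 * whole region at guest physical address 0 is an arbitrary choice for
 * the example.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_one_region(int vhost_fd, void *hva, size_t len)
{
        struct vhost_memory *mem;
        int ret;

        mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
        if (!mem)
                return -1;
        mem->nregions = 1;
        mem->regions[0].guest_phys_addr = 0;    /* GPA 0 for the demo */
        mem->regions[0].memory_size = len;
        mem->regions[0].userspace_addr = (unsigned long)hva;

        ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
        free(mem);
        return ret;
}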
static long vhost_vring_set_num(struct vhost_dev *d,
struct vhost_virtqueue *vq,
void __user *argp)
{
struct vhost_vring_state s;
/* Resizing ring with an active backend?
* You don't want to do that. */
if (vq->private_data)
return -EBUSY;
if (copy_from_user(&s, argp, sizeof s))
return -EFAULT;
if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
return -EINVAL;
vq->num = s.num;
return 0;
}
static long vhost_vring_set_addr(struct vhost_dev *d,
struct vhost_virtqueue *vq,
void __user *argp)
{
struct vhost_vring_addr a;
if (copy_from_user(&a, argp, sizeof a))
return -EFAULT;
if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
return -EOPNOTSUPP;
/* For 32bit, verify that the top 32bits of the user
data are set to zero. */
if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
(u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
(u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
return -EFAULT;
/* Make sure it's safe to cast pointers to vring types. */
BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
(a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
(a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
return -EINVAL;
/* We only verify access here if backend is configured.
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
if (!vq_access_ok(vq, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr))
return -EINVAL;
/* Also validate log access for used ring if enabled. */
if (!vq_log_used_access_ok(vq, vq->log_base,
a.flags & (0x1 << VHOST_VRING_F_LOG),
a.log_guest_addr))
return -EINVAL;
}
vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
vq->log_addr = a.log_guest_addr;
vq->used = (void __user *)(unsigned long)a.used_user_addr;
return 0;
}
static long vhost_vring_set_num_addr(struct vhost_dev *d,
struct vhost_virtqueue *vq,
unsigned int ioctl,
void __user *argp)
{
long r;
mutex_lock(&vq->mutex);
switch (ioctl) {
case VHOST_SET_VRING_NUM:
r = vhost_vring_set_num(d, vq, argp);
break;
case VHOST_SET_VRING_ADDR:
r = vhost_vring_set_addr(d, vq, argp);
break;
default:
BUG();
}
mutex_unlock(&vq->mutex);
return r;
}
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL;
bool pollstart = false, pollstop = false;
struct eventfd_ctx *ctx = NULL;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
struct vhost_vring_file f;
u32 idx;
long r;
r = vhost_get_vq_from_user(d, argp, &vq, &idx);
if (r < 0)
return r;
if (ioctl == VHOST_SET_VRING_NUM ||
ioctl == VHOST_SET_VRING_ADDR) {
return vhost_vring_set_num_addr(d, vq, ioctl, argp);
}
mutex_lock(&vq->mutex);
switch (ioctl) {
case VHOST_SET_VRING_BASE:
/* Moving base with an active backend?
* You don't want to do that. */
if (vq->private_data) {
r = -EBUSY;
break;
}
if (copy_from_user(&s, argp, sizeof s)) {
r = -EFAULT;
break;
}
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq->last_avail_idx = s.num & 0xffff;
vq->last_used_idx = (s.num >> 16) & 0xffff;
} else {
if (s.num > 0xffff) {
r = -EINVAL;
break;
}
vq->last_avail_idx = s.num;
}
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
else
s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
case VHOST_SET_VRING_KICK:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != vq->kick) {
pollstop = (filep = vq->kick) != NULL;
pollstart = (vq->kick = eventfp) != NULL;
} else
filep = eventfp;
break;
case VHOST_SET_VRING_CALL:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
}
swap(ctx, vq->call_ctx.ctx);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
}
swap(ctx, vq->error_ctx);
break;
case VHOST_SET_VRING_ENDIAN:
r = vhost_set_vring_endian(vq, argp);
break;
case VHOST_GET_VRING_ENDIAN:
r = vhost_get_vring_endian(vq, idx, argp);
break;
case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
if (copy_from_user(&s, argp, sizeof(s))) {
r = -EFAULT;
break;
}
vq->busyloop_timeout = s.num;
break;
case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
s.index = idx;
s.num = vq->busyloop_timeout;
if (copy_to_user(argp, &s, sizeof(s)))
r = -EFAULT;
break;
default:
r = -ENOIOCTLCMD;
}
if (pollstop && vq->handle_kick)
vhost_poll_stop(&vq->poll);
if (!IS_ERR_OR_NULL(ctx))
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
if (pollstart && vq->handle_kick)
r = vhost_poll_start(&vq->poll, vq->kick);
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
vhost_dev_flush(vq->poll.dev);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
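/*
 * Illustrative userspace sketch, not part of the driver: the usual setup
 * order against vhost_vring_ioctl() above: ring size and base while no
 * backend is attached, then the ring addresses, then the kick/call
 * eventfds. Ioctls and structs are assumed from <linux/vhost.h>; the
 * ring pointers are placeholders the caller is assumed to have laid out
 * in virtio split-ring format.
 */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int setup_vring(int vhost_fd, unsigned int idx, unsigned int num,
                       void *desc, void *avail, void *used)
{
        struct vhost_vring_state state = { .index = idx, .num = num };
        struct vhost_vring_addr addr = {
                .index = idx,
                .desc_user_addr = (unsigned long)desc,
                .avail_user_addr = (unsigned long)avail,
                .used_user_addr = (unsigned long)used,
        };
        struct vhost_vring_file kick = { .index = idx };
        struct vhost_vring_file call = { .index = idx };

        if (ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state) < 0)
                return -1;
        state.num = 0;                  /* start from avail index 0 */
        if (ioctl(vhost_fd, VHOST_SET_VRING_BASE, &state) < 0)
                return -1;
        if (ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr) < 0)
                return -1;
        kick.fd = eventfd(0, 0);
        call.fd = eventfd(0, 0);
        if (kick.fd < 0 || call.fd < 0)
                return -1;
        if (ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick) < 0 ||
            ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call) < 0)
                return -1;
        return 0;
}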
int vhost_init_device_iotlb(struct vhost_dev *d)
{
struct vhost_iotlb *niotlb, *oiotlb;
int i;
niotlb = iotlb_alloc();
if (!niotlb)
return -ENOMEM;
oiotlb = d->iotlb;
d->iotlb = niotlb;
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq = d->vqs[i];
mutex_lock(&vq->mutex);
vq->iotlb = niotlb;
__vhost_vq_meta_reset(vq);
mutex_unlock(&vq->mutex);
}
vhost_iotlb_free(oiotlb);
return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct eventfd_ctx *ctx;
u64 p;
long r;
int i, fd;
/* If you are not the owner, you can become one */
if (ioctl == VHOST_SET_OWNER) {
r = vhost_dev_set_owner(d);
goto done;
}
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
if (r)
goto done;
switch (ioctl) {
case VHOST_SET_MEM_TABLE:
r = vhost_set_memory(d, argp);
break;
case VHOST_SET_LOG_BASE:
if (copy_from_user(&p, argp, sizeof p)) {
r = -EFAULT;
break;
}
if ((u64)(unsigned long)p != p) {
r = -EFAULT;
break;
}
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq;
void __user *base = (void __user *)(unsigned long)p;
vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
if (vq->private_data && !vq_log_access_ok(vq, base))
r = -EFAULT;
else
vq->log_base = base;
mutex_unlock(&vq->mutex);
}
break;
case VHOST_SET_LOG_FD:
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
}
swap(ctx, d->log_ctx);
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->log_ctx = d->log_ctx;
mutex_unlock(&d->vqs[i]->mutex);
}
if (ctx)
eventfd_ctx_put(ctx);
break;
default:
r = -ENOIOCTLCMD;
break;
}
done:
return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
* returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
*/
static int set_bit_to_user(int nr, void __user *addr)
{
unsigned long log = (unsigned long)addr;
struct page *page;
void *base;
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
base = kmap_atomic(page);
set_bit(bit, base);
kunmap_atomic(base);
unpin_user_pages_dirty_lock(&page, 1, true);
return 0;
}
static int log_write(void __user *log_base,
u64 write_address, u64 write_length)
{
u64 write_page = write_address / VHOST_PAGE_SIZE;
int r;
if (!write_length)
return 0;
write_length += write_address % VHOST_PAGE_SIZE;
for (;;) {
u64 base = (u64)(unsigned long)log_base;
u64 log = base + write_page / 8;
int bit = write_page % 8;
if ((u64)(unsigned long)log != log)
return -EFAULT;
r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
if (r < 0)
return r;
if (write_length <= VHOST_PAGE_SIZE)
break;
write_length -= VHOST_PAGE_SIZE;
write_page += 1;
}
return r;
}
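/*
 * Illustrative sketch, not part of the driver: the bitmap arithmetic
 * that log_write() performs, as a plain helper that can run in
 * isolation. One bit per page: byte = page / 8, bit = page % 8. The
 * 4096-byte page mirrors the driver's VHOST_PAGE_SIZE.
 */
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096

static void demo_log_write(uint8_t *bitmap, uint64_t addr, uint64_t len)
{
        uint64_t page, first, last;

        if (!len)
                return;
        first = addr / DEMO_PAGE_SIZE;
        last = (addr + len - 1) / DEMO_PAGE_SIZE;
        for (page = first; page <= last; page++)
                bitmap[page / 8] |= 1u << (page % 8);   /* mark page dirty */
}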
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
struct vhost_iotlb *umem = vq->umem;
struct vhost_iotlb_map *u;
u64 start, end, l, min;
int r;
bool hit = false;
while (len) {
min = len;
/* More than one GPA can be mapped into a single HVA, so
* iterate over all possible mappings here to be safe.
*/
list_for_each_entry(u, &umem->list, link) {
if (u->addr > hva - 1 + len ||
u->addr - 1 + u->size < hva)
continue;
start = max(u->addr, hva);
end = min(u->addr - 1 + u->size, hva - 1 + len);
l = end - start + 1;
r = log_write(vq->log_base,
u->start + start - u->addr,
l);
if (r < 0)
return r;
hit = true;
min = min(l, min);
}
if (!hit)
return -EFAULT;
len -= min;
hva += min;
}
return 0;
}
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
struct iovec *iov = vq->log_iov;
int i, ret;
if (!vq->iotlb)
return log_write(vq->log_base, vq->log_addr + used_offset, len);
ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
len, iov, 64, VHOST_ACCESS_WO);
if (ret < 0)
return ret;
for (i = 0; i < ret; i++) {
ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
iov[i].iov_len);
if (ret)
return ret;
}
return 0;
}
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len, struct iovec *iov, int count)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
if (vq->iotlb) {
for (i = 0; i < count; i++) {
r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
iov[i].iov_len);
if (r < 0)
return r;
}
return 0;
}
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
if (r < 0)
return r;
len -= l;
if (!len) {
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
return 0;
}
}
/* Length written exceeds what we have stored. This is a bug. */
BUG();
return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
if (vhost_put_used_flags(vq))
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
log_used(vq, (used - (void __user *)vq->used),
sizeof vq->used->flags);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return 0;
}
static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
if (vhost_put_avail_event(vq))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
/* Make sure the event is seen before log. */
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
log_used(vq, (used - (void __user *)vq->used),
sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return 0;
}
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
bool is_le = vq->is_le;
if (!vq->private_data)
return 0;
vhost_init_is_le(vq);
r = vhost_update_used_flags(vq);
if (r)
goto err;
vq->signalled_used_valid = false;
if (!vq->iotlb &&
!access_ok(&vq->used->idx, sizeof vq->used->idx)) {
r = -EFAULT;
goto err;
}
r = vhost_get_used_idx(vq, &last_used_idx);
if (r) {
vq_err(vq, "Can't access used idx at %p\n",
&vq->used->idx);
goto err;
}
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
err:
vq->is_le = is_le;
return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access)
{
const struct vhost_iotlb_map *map;
struct vhost_dev *dev = vq->dev;
struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0, last = addr + len - 1;
int ret = 0;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
map = vhost_iotlb_itree_first(umem, addr, last);
if (map == NULL || map->start > addr) {
if (umem != dev->iotlb) {
ret = -EFAULT;
break;
}
ret = -EAGAIN;
break;
} else if (!(map->perm & access)) {
ret = -EPERM;
break;
}
_iov = iov + ret;
size = map->size - addr + map->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(map->addr + addr - map->start);
s += size;
addr += size;
++ret;
}
if (ret == -EAGAIN)
vhost_iotlb_miss(vq, addr, access);
return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors. This
* function returns the next descriptor in the chain,
* or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
return -1U;
/* Check they're not leading us off the end of the descriptor table. */
next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
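/*
 * Illustrative sketch, not part of the driver: the chain walk that
 * next_desc() enables and vhost_get_vq_desc() performs, reduced to a
 * toy in-memory table so the termination and loop-detection logic can
 * be run in isolation. The flag value matches VRING_DESC_F_NEXT from
 * <linux/virtio_ring.h>; endianness handling is omitted.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_F_NEXT 1   /* VRING_DESC_F_NEXT */

struct demo_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
};

static int demo_walk_chain(const struct demo_desc *table, unsigned int num,
                           unsigned int head)
{
        unsigned int i = head, found = 0;

        for (;;) {
                if (i >= num || ++found > num)
                        return -1;      /* bad index or loop detected */
                printf("desc %u: addr 0x%llx len %u\n",
                       i, (unsigned long long)table[i].addr, table[i].len);
                if (!(table[i].flags & DEMO_F_NEXT))
                        return 0;       /* end of chain */
                i = table[i].next;
        }
}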
static int get_indirect(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
struct iov_iter from;
int ret, access;
/* Sanity check */
if (unlikely(len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
(unsigned long long)len,
sizeof desc);
return -EINVAL;
}
ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
UIO_MAXIOV, VHOST_ACCESS_RO);
if (unlikely(ret < 0)) {
if (ret != -EAGAIN)
vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
vq_err(vq, "Indirect buffer length too big: %d\n",
indirect->len);
return -E2BIG;
}
do {
unsigned iov_count = *in_num + *out_num;
if (unlikely(++found > count)) {
vq_err(vq, "Loop detected: last one at %u "
"indirect size %u\n",
i, count);
return -EINVAL;
}
if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
access = VHOST_ACCESS_WO;
else
access = VHOST_ACCESS_RO;
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count, access);
if (unlikely(ret < 0)) {
if (ret != -EAGAIN)
vq_err(vq, "Translation failure %d indirect idx %d\n",
ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
if (access == VHOST_ACCESS_WO) {
*in_num += ret;
if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
vq_err(vq, "Indirect descriptor "
"has out after in: idx %d\n", i);
return -EINVAL;
}
*out_num += ret;
}
} while ((i = next_desc(vq, &desc)) != -1);
return 0;
}
/* This looks in the virtqueue for the first available buffer and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
* iovecs, but we pack them into one and note how many of each there were.
*
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
{
struct vring_desc desc;
unsigned int i, head, found = 0;
u16 last_avail_idx;
__virtio16 avail_idx;
__virtio16 ring_head;
int ret, access;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
if (vq->avail_idx == vq->last_avail_idx) {
if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
}
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
vq_err(vq, "Guest moved used index from %u to %u",
last_avail_idx, vq->avail_idx);
return -EFAULT;
}
/* If there's nothing new since last we looked, return
* invalid.
*/
if (vq->avail_idx == last_avail_idx)
return vq->num;
/* Only get avail ring entries after they have been
* exposed by guest.
*/
smp_rmb();
}
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
return -EFAULT;
}
head = vhost16_to_cpu(vq, ring_head);
/* If their number is silly, that's an error. */
if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
head, vq->num);
return -EINVAL;
}
/* When we start there are no input or output descriptors. */
*out_num = *in_num = 0;
if (unlikely(log))
*log_num = 0;
i = head;
do {
unsigned iov_count = *in_num + *out_num;
if (unlikely(i >= vq->num)) {
vq_err(vq, "Desc index is %u > %u, head = %u",
i, vq->num, head);
return -EINVAL;
}
if (unlikely(++found > vq->num)) {
vq_err(vq, "Loop detected: last one at %u "
"vq size %u head %u\n",
i, vq->num, head);
return -EINVAL;
}
ret = vhost_get_desc(vq, &desc, i);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
return -EFAULT;
}
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
if (ret != -EAGAIN)
vq_err(vq, "Failure detected "
"in indirect descriptor at idx %d\n", i);
return ret;
}
continue;
}
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
access = VHOST_ACCESS_WO;
else
access = VHOST_ACCESS_RO;
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count, access);
if (unlikely(ret < 0)) {
if (ret != -EAGAIN)
vq_err(vq, "Translation failure %d descriptor idx %d\n",
ret, i);
return ret;
}
if (access == VHOST_ACCESS_WO) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
vq_err(vq, "Descriptor has out after in: "
"idx %d\n", i);
return -EINVAL;
}
*out_num += ret;
}
} while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
/* Assume notifications from guest are disabled at this point;
* if they aren't, we would need to update the avail_event index. */
BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it. We'll then
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
struct vring_used_elem heads = {
cpu_to_vhost32(vq, head),
cpu_to_vhost32(vq, len)
};
return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
unsigned count)
{
vring_used_elem_t __user *used;
u16 old, new;
int start;
start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
if (vhost_put_used(vq, heads, start, count)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
log_used(vq, ((void __user *)used - (void __user *)vq->used),
count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
/* If the driver never bothers to signal in a very long while,
* used index might wrap around. If that happens, invalidate
* signalled_used index we stored. TODO: make sure driver
* signals at least once in 2^16 and remove this. */
if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
vq->signalled_used_valid = false;
return 0;
}
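/*
 * Worked example for the wrap check above (u16 arithmetic, mod 2^16):
 * old = 0xfff0 and count = 0x20 give new = 0x0010. A stored
 * signalled_used of 0x0000 lies inside the window (old, new]:
 * (u16)(new - signalled_used) = 0x0010 < (u16)(new - old) = 0x0020,
 * so the stored value is stale and signalled_used_valid is cleared.
 */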
/* After we've used one of their buffers, we tell them about it. We'll then
* want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
unsigned count)
{
int start, n, r;
start = vq->last_used_idx & (vq->num - 1);
n = vq->num - start;
if (n < count) {
r = __vhost_add_used_n(vq, heads, n);
if (r < 0)
return r;
heads += n;
count -= n;
}
r = __vhost_add_used_n(vq, heads, count);
/* Make sure buffer is written before we update index. */
smp_wmb();
if (vhost_put_used_idx(vq)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure used idx is seen before log. */
smp_wmb();
/* Log used index update. */
log_used(vq, offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__u16 old, new;
__virtio16 event;
bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
return true;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
if (vhost_get_avail_flags(vq, &flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
}
old = vq->signalled_used;
v = vq->signalled_used_valid;
new = vq->signalled_used = vq->last_used_idx;
vq->signalled_used_valid = true;
if (unlikely(!v))
return true;
if (vhost_get_used_event(vq, &event)) {
vq_err(vq, "Failed to get used event idx");
return true;
}
return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
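/*
 * Worked example (assuming vring_need_event() from
 * include/uapi/linux/virtio_ring.h, i.e.
 * (u16)(new - event_idx - 1) < (u16)(new - old)): with old = 4 and
 * new = 6, event = 5 gives (u16)0 < (u16)2, so crossing the guest's
 * event index signals; event = 8 gives (u16)0xfffd, so it does not.
 */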
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the Guest to tell them we used something up. */
if (vq->call_ctx.ctx && vhost_notify(dev, vq))
eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned int head, int len)
{
vhost_add_used(vq, head, len);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
struct vring_used_elem *heads, unsigned count)
{
vhost_add_used_n(vq, heads, count);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__virtio16 avail_idx;
int r;
if (vq->avail_idx != vq->last_avail_idx)
return false;
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__virtio16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r) {
vq_err(vq, "Failed to enable notification at %p: %d\n",
&vq->used->flags, r);
return false;
}
} else {
r = vhost_update_avail_event(vq);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
return false;
}
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
r = vhost_get_avail_idx(vq, &avail_idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
&vq->avail->idx, r);
return false;
}
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
return vq->avail_idx != vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
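/*
 * Illustrative sketch (not part of this file): the canonical poll loop the
 * in-tree backends build from the notify helpers, closing the race between
 * "ring looks empty" and "guest just added a buffer":
 *
 *	vhost_disable_notify(&dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, ...);
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(&dev, vq))) {
 *				vhost_disable_notify(&dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		... process the buffer, then ...
 *		vhost_add_used_and_signal(&dev, vq, head, len);
 *	}
 */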
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
vq_err(vq, "Failed to disable notification at %p: %d\n",
&vq->used->flags, r);
}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
/* Make sure all padding within the structure is initialized. */
struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
node->vq = vq;
node->msg.type = type;
return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
struct vhost_msg_node *node)
{
spin_lock(&dev->iotlb_lock);
list_add_tail(&node->node, head);
spin_unlock(&dev->iotlb_lock);
wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head)
{
struct vhost_msg_node *node = NULL;
spin_lock(&dev->iotlb_lock);
if (!list_empty(head)) {
node = list_first_entry(head, struct vhost_msg_node,
node);
list_del(&node->node);
}
spin_unlock(&dev->iotlb_lock);
return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
struct vhost_virtqueue *vq;
int i;
mutex_lock(&dev->mutex);
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_backend_features = features;
mutex_unlock(&vq->mutex);
}
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
return 0;
}
static void __exit vhost_exit(void)
{
}
module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");
| linux-master | drivers/vhost/vhost.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU debugfs core infrastructure
*
* Copyright (C) 2018 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <[email protected]>
*/
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/debugfs.h>
struct dentry *iommu_debugfs_dir;
EXPORT_SYMBOL_GPL(iommu_debugfs_dir);
/**
* iommu_debugfs_setup - create the top-level iommu directory in debugfs
*
* Provide base enablement for using debugfs to expose internal data of an
* IOMMU driver. When called, this function creates the
* /sys/kernel/debug/iommu directory.
*
* Emit a strong warning at boot time to indicate that this feature is
* enabled.
*
* This function is called from iommu_init; drivers may then use
* iommu_debugfs_dir to instantiate a vendor-specific directory to be used
* to expose internal data.
*/
void iommu_debugfs_setup(void)
{
if (!iommu_debugfs_dir) {
iommu_debugfs_dir = debugfs_create_dir("iommu", NULL);
pr_warn("\n");
pr_warn("*************************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** IOMMU DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
pr_warn("** **\n");
pr_warn("** This means that this kernel is built to expose internal **\n");
pr_warn("** IOMMU data structures, which may compromise security on **\n");
pr_warn("** your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging the **\n");
pr_warn("** kernel, report this immediately to your vendor! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("*************************************************************\n");
}
}
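/*
 * Illustrative sketch (not part of this file): a vendor driver would hang
 * its own directory off iommu_debugfs_dir (names hypothetical):
 *
 *	struct dentry *d = debugfs_create_dir("my-iommu", iommu_debugfs_dir);
 *
 *	debugfs_create_u32("fault_count", 0444, d, &fault_count);
 */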
| linux-master | drivers/iommu/iommu-debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
#include <asm/processor.h>
#include <asm/x86_init.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include "irq_remapping.h"
int irq_remapping_enabled;
int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
int disable_irq_post = 0;
static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
static void irq_remapping_restore_boot_irq_mode(void)
{
/*
* With interrupt-remapping, for now we will use virtual wire A
* mode, as virtual wire B is a little more complex (it needs both
* the IOAPIC RTE and the interrupt-remapping table entry configured).
* As this gets called during crash dump, keep this simple for
* now.
*/
if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
disconnect_bsp_APIC(0);
}
static void __init irq_remapping_modify_x86_ops(void)
{
x86_apic_ops.restore = irq_remapping_restore_boot_irq_mode;
}
static __init int setup_nointremap(char *str)
{
disable_irq_remap = 1;
return 0;
}
early_param("nointremap", setup_nointremap);
static __init int setup_irqremap(char *str)
{
if (!str)
return -EINVAL;
while (*str) {
if (!strncmp(str, "on", 2)) {
disable_irq_remap = 0;
disable_irq_post = 0;
} else if (!strncmp(str, "off", 3)) {
disable_irq_remap = 1;
disable_irq_post = 1;
} else if (!strncmp(str, "nosid", 5))
disable_sourceid_checking = 1;
else if (!strncmp(str, "no_x2apic_optout", 16))
no_x2apic_optout = 1;
else if (!strncmp(str, "nopost", 6))
disable_irq_post = 1;
str += strcspn(str, ",");
while (*str == ',')
str++;
}
return 0;
}
early_param("intremap", setup_irqremap);
void set_irq_remapping_broken(void)
{
irq_remap_broken = 1;
}
bool irq_remapping_cap(enum irq_remap_cap cap)
{
if (!remap_ops || disable_irq_post)
return false;
return (remap_ops->capability & (1 << cap));
}
EXPORT_SYMBOL_GPL(irq_remapping_cap);
int __init irq_remapping_prepare(void)
{
if (disable_irq_remap)
return -ENOSYS;
if (intel_irq_remap_ops.prepare() == 0)
remap_ops = &intel_irq_remap_ops;
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
amd_iommu_irq_ops.prepare() == 0)
remap_ops = &amd_iommu_irq_ops;
else if (IS_ENABLED(CONFIG_HYPERV_IOMMU) &&
hyperv_irq_remap_ops.prepare() == 0)
remap_ops = &hyperv_irq_remap_ops;
else
return -ENOSYS;
return 0;
}
int __init irq_remapping_enable(void)
{
int ret;
if (!remap_ops->enable)
return -ENODEV;
ret = remap_ops->enable();
if (irq_remapping_enabled)
irq_remapping_modify_x86_ops();
return ret;
}
void irq_remapping_disable(void)
{
if (irq_remapping_enabled && remap_ops->disable)
remap_ops->disable();
}
int irq_remapping_reenable(int mode)
{
if (irq_remapping_enabled && remap_ops->reenable)
return remap_ops->reenable(mode);
return 0;
}
int __init irq_remap_enable_fault_handling(void)
{
if (!irq_remapping_enabled)
return 0;
if (!remap_ops->enable_faulting)
return -ENODEV;
return remap_ops->enable_faulting();
}
void panic_if_irq_remap(const char *msg)
{
if (irq_remapping_enabled)
panic(msg);
}
| linux-master | drivers/iommu/irq_remapping.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic page table allocator for IOMMUs.
*
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <[email protected]>
*/
#include <linux/bug.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/types.h>
static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_DART
[APPLE_DART] = &io_pgtable_apple_dart_init_fns,
[APPLE_DART2] = &io_pgtable_apple_dart_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
#ifdef CONFIG_AMD_IOMMU
[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
[AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
void *cookie)
{
struct io_pgtable *iop;
const struct io_pgtable_init_fns *fns;
if (fmt >= IO_PGTABLE_NUM_FMTS)
return NULL;
fns = io_pgtable_init_table[fmt];
if (!fns)
return NULL;
iop = fns->alloc(cfg, cookie);
if (!iop)
return NULL;
iop->fmt = fmt;
iop->cookie = cookie;
iop->cfg = *cfg;
return &iop->ops;
}
EXPORT_SYMBOL_GPL(alloc_io_pgtable_ops);
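/*
 * Illustrative sketch (not part of this file): a minimal caller, assuming
 * the usual io_pgtable_cfg fields; real drivers also supply TLB flush
 * callbacks (my_tlb_ops and my_cookie are hypothetical):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops =
 *		alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_cookie);
 *	if (!ops)
 *		return -ENOMEM;
 *	...
 *	free_io_pgtable_ops(ops);
 */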
/*
* It is the IOMMU driver's responsibility to ensure that the page table
* is no longer accessible to the walker by this point.
*/
void free_io_pgtable_ops(struct io_pgtable_ops *ops)
{
struct io_pgtable *iop;
if (!ops)
return;
iop = io_pgtable_ops_to_pgtable(ops);
io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
EXPORT_SYMBOL_GPL(free_io_pgtable_ops);
| linux-master | drivers/iommu/io-pgtable.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A fairly generic DMA-API to IOMMU-API glue layer.
*
* Copyright (C) 2014-2015 ARM Ltd.
*
* based in part on arch/arm/mm/dma-mapping.c:
* Copyright (C) 2000-2004 Russell King
*/
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include "dma-iommu.h"
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
phys_addr_t phys;
};
enum iommu_dma_cookie_type {
IOMMU_DMA_IOVA_COOKIE,
IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
enum iommu_dma_cookie_type type;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
struct {
struct iova_domain iovad;
struct iova_fq __percpu *fq; /* Flush queue */
/* Number of TLB flushes that have been started */
atomic64_t fq_flush_start_cnt;
/* Number of TLB flushes that have been finished */
atomic64_t fq_flush_finish_cnt;
/* Timer to regularly empty the flush queues */
struct timer_list fq_timer;
/* 1 when timer is active, 0 when not */
atomic_t fq_timer_on;
};
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;
static int __init iommu_dma_forcedac_setup(char *str)
{
int ret = kstrtobool(str, &iommu_dma_forcedac);
if (!ret && iommu_dma_forcedac)
pr_info("Forcing DAC for PCI devices\n");
return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
/* Number of entries per flush queue */
#define IOVA_FQ_SIZE 256
/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT 10
/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
struct list_head freelist;
u64 counter; /* Flush counter when this entry was added */
};
/* Per-CPU flush queue structure */
struct iova_fq {
struct iova_fq_entry entries[IOVA_FQ_SIZE];
unsigned int head, tail;
spinlock_t lock;
};
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
static inline bool fq_full(struct iova_fq *fq)
{
assert_spin_locked(&fq->lock);
return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}
static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
unsigned int idx = fq->tail;
assert_spin_locked(&fq->lock);
fq->tail = (idx + 1) % IOVA_FQ_SIZE;
return idx;
}
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
unsigned int idx;
assert_spin_locked(&fq->lock);
fq_ring_for_each(idx, fq) {
if (fq->entries[idx].counter >= counter)
break;
put_pages_list(&fq->entries[idx].freelist);
free_iova_fast(&cookie->iovad,
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
}
}
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
atomic64_inc(&cookie->fq_flush_start_cnt);
cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
atomic64_inc(&cookie->fq_flush_finish_cnt);
}
static void fq_flush_timeout(struct timer_list *t)
{
struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
int cpu;
atomic_set(&cookie->fq_timer_on, 0);
fq_flush_iotlb(cookie);
for_each_possible_cpu(cpu) {
unsigned long flags;
struct iova_fq *fq;
fq = per_cpu_ptr(cookie->fq, cpu);
spin_lock_irqsave(&fq->lock, flags);
fq_ring_free(cookie, fq);
spin_unlock_irqrestore(&fq->lock, flags);
}
}
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
struct list_head *freelist)
{
struct iova_fq *fq;
unsigned long flags;
unsigned int idx;
/*
* Order against the IOMMU driver's pagetable update from unmapping
* @pte, to guarantee that fq_flush_iotlb() observes that if called
* from a different CPU before we release the lock below. Full barrier
* so it also pairs with iommu_dma_init_fq() to avoid seeing partially
* written fq state here.
*/
smp_mb();
fq = raw_cpu_ptr(cookie->fq);
spin_lock_irqsave(&fq->lock, flags);
/*
* First remove all entries from the flush queue that have already been
* flushed out on another CPU. This makes the fq_full() check below less
* likely to be true.
*/
fq_ring_free(cookie, fq);
if (fq_full(fq)) {
fq_flush_iotlb(cookie);
fq_ring_free(cookie, fq);
}
idx = fq_ring_add(fq);
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
/* Avoid false sharing as much as possible. */
if (!atomic_read(&cookie->fq_timer_on) &&
!atomic_xchg(&cookie->fq_timer_on, 1))
mod_timer(&cookie->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
int cpu, idx;
if (!cookie->fq)
return;
del_timer_sync(&cookie->fq_timer);
/* The IOVAs will be torn down separately, so just free our queued pages */
for_each_possible_cpu(cpu) {
struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
fq_ring_for_each(idx, fq)
put_pages_list(&fq->entries[idx].freelist);
}
free_percpu(cookie->fq);
}
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_fq __percpu *queue;
int i, cpu;
if (cookie->fq_domain)
return 0;
atomic64_set(&cookie->fq_flush_start_cnt, 0);
atomic64_set(&cookie->fq_flush_finish_cnt, 0);
queue = alloc_percpu(struct iova_fq);
if (!queue) {
pr_warn("iova flush queue initialization failed\n");
return -ENOMEM;
}
for_each_possible_cpu(cpu) {
struct iova_fq *fq = per_cpu_ptr(queue, cpu);
fq->head = 0;
fq->tail = 0;
spin_lock_init(&fq->lock);
for (i = 0; i < IOVA_FQ_SIZE; i++)
INIT_LIST_HEAD(&fq->entries[i].freelist);
}
cookie->fq = queue;
timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
atomic_set(&cookie->fq_timer_on, 0);
/*
* Prevent incomplete fq state being observable. Pairs with path from
* __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
*/
smp_wmb();
WRITE_ONCE(cookie->fq_domain, domain);
return 0;
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
return cookie->iovad.granule;
return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
struct iommu_dma_cookie *cookie;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
return cookie;
}
/**
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
* @domain: IOMMU domain to prepare for DMA-API usage
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
if (domain->iova_cookie)
return -EEXIST;
domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
if (!domain->iova_cookie)
return -ENOMEM;
mutex_init(&domain->iova_cookie->mutex);
return 0;
}
/**
* iommu_get_msi_cookie - Acquire just MSI remapping resources
* @domain: IOMMU domain to prepare
* @base: Start address of IOVA region for MSI mappings
*
* Users who manage their own IOVA allocation and do not want DMA API support,
* but would still like to take advantage of automatic MSI remapping, can use
* this to initialise their own domain appropriately. Users should reserve a
* contiguous IOVA region, starting at @base, large enough to accommodate the
* number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
* used by the devices attached to @domain.
*/
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
struct iommu_dma_cookie *cookie;
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
if (domain->iova_cookie)
return -EEXIST;
cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
cookie->msi_iova = base;
domain->iova_cookie = cookie;
return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
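/*
 * Illustrative sketch (not part of this file): a VFIO-style user of an
 * unmanaged domain reserving an MSI window (the base address is a
 * hypothetical placeholder):
 *
 *	domain = iommu_domain_alloc(bus);
 *	if (iommu_get_msi_cookie(domain, 0x8000000))
 *		goto out_free_domain;
 */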
/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
* iommu_get_msi_cookie()
*/
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi, *tmp;
if (!cookie)
return;
if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
iommu_dma_free_fq(cookie);
put_iova_domain(&cookie->iovad);
}
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
list_del(&msi->list);
kfree(msi);
}
kfree(cookie);
domain->iova_cookie = NULL;
}
/**
* iommu_dma_get_resv_regions - Reserved region driver helper
* @dev: Device from iommu_get_resv_regions()
* @list: Reserved region list from iommu_get_resv_regions()
*
* IOMMU drivers can use this to implement their .get_resv_regions callback
* for general non-IOMMU-specific reservations. Currently, this covers GICv3
* ITS region reservation on ACPI based ARM platforms that may require HW MSI
* reservation.
*/
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
iort_iommu_get_resv_regions(dev, list);
if (dev->of_node)
of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
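/*
 * Illustrative sketch (not part of this file): an IOMMU driver's
 * .get_resv_regions callback typically appends its own regions and then
 * delegates here (my_get_resv_regions is hypothetical):
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		... append driver-specific iommu_resv_region entries ...
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */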
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
phys_addr_t start, phys_addr_t end)
{
struct iova_domain *iovad = &cookie->iovad;
struct iommu_dma_msi_page *msi_page;
int i, num_pages;
start -= iova_offset(iovad, start);
num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
for (i = 0; i < num_pages; i++) {
msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return -ENOMEM;
msi_page->phys = start;
msi_page->iova = start;
INIT_LIST_HEAD(&msi_page->list);
list_add(&msi_page->list, &cookie->msi_page_list);
start += iovad->granule;
}
return 0;
}
static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
return res_a->res->start > res_b->res->start;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
struct iova_domain *iovad)
{
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
struct resource_entry *window;
unsigned long lo, hi;
phys_addr_t start = 0, end;
resource_list_for_each_entry(window, &bridge->windows) {
if (resource_type(window->res) != IORESOURCE_MEM)
continue;
lo = iova_pfn(iovad, window->res->start - window->offset);
hi = iova_pfn(iovad, window->res->end - window->offset);
reserve_iova(iovad, lo, hi);
}
/* Get reserved DMA windows from host bridge */
list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
resv_iova:
if (end > start) {
lo = iova_pfn(iovad, start);
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
} else if (end < start) {
/* DMA ranges should be non-overlapping */
dev_err(&dev->dev,
"Failed to reserve IOVA [%pa-%pa]\n",
&start, &end);
return -EINVAL;
}
start = window->res->end - window->offset + 1;
/* If window is last entry */
if (window->node.next == &bridge->dma_ranges &&
end != ~(phys_addr_t)0) {
end = ~(phys_addr_t)0;
goto resv_iova;
}
}
return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct iommu_resv_region *region;
LIST_HEAD(resv_regions);
int ret = 0;
if (dev_is_pci(dev)) {
ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
if (ret)
return ret;
}
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
unsigned long lo, hi;
/* We ARE the software that manages these! */
if (region->type == IOMMU_RESV_SW_MSI)
continue;
lo = iova_pfn(iovad, region->start);
hi = iova_pfn(iovad, region->start + region->length - 1);
reserve_iova(iovad, lo, hi);
if (region->type == IOMMU_RESV_MSI)
ret = cookie_init_hw_msi_region(cookie, region->start,
region->start + region->length);
if (ret)
break;
}
iommu_put_resv_regions(dev, &resv_regions);
return ret;
}
static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
static bool dev_use_swiotlb(struct device *dev, size_t size,
enum dma_data_direction dir)
{
return IS_ENABLED(CONFIG_SWIOTLB) &&
(dev_is_untrusted(dev) ||
dma_kmalloc_needs_bounce(dev, size, dir));
}
static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
if (!IS_ENABLED(CONFIG_SWIOTLB))
return false;
if (dev_is_untrusted(dev))
return true;
/*
* If kmalloc() buffers are not DMA-safe for this device and
* direction, check the individual lengths in the sg list. If any
* element is deemed unsafe, use the swiotlb for bouncing.
*/
if (!dma_kmalloc_safe(dev, dir)) {
for_each_sg(sg, s, nents, i)
if (!dma_kmalloc_size_aligned(s->length))
return true;
}
return false;
}
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
* @base: IOVA at which the mappable address space starts
* @limit: Last address of the IOVA space
* @dev: Device the domain is being initialised for
*
* @base and @limit + 1 should be exact multiples of IOMMU page granularity to
* avoid rounding surprises. If necessary, we reserve the page at address 0
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
dma_addr_t limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
int ret;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
iovad = &cookie->iovad;
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
/* Check the domain allows at least some access to the device... */
if (domain->geometry.force_aperture) {
if (base > domain->geometry.aperture_end ||
limit < domain->geometry.aperture_start) {
pr_warn("specified DMA range outside IOMMU capability\n");
return -EFAULT;
}
/* ...then finally give it a kicking to make sure it fits */
base_pfn = max_t(unsigned long, base_pfn,
domain->geometry.aperture_start >> order);
}
/* start_pfn is always nonzero for an already-initialised domain */
mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
ret = -EFAULT;
goto done_unlock;
}
ret = 0;
goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA;
ret = iova_reserve_iommu_regions(dev, domain);
done_unlock:
mutex_unlock(&cookie->mutex);
return ret;
}
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
* @dir: Direction of DMA transfer
* @coherent: Is the DMA master cache-coherent?
* @attrs: DMA attributes for the mapping
*
* Return: corresponding IOMMU API page protection flags
*/
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
return prot | IOMMU_READ;
case DMA_FROM_DEVICE:
return prot | IOMMU_WRITE;
default:
return 0;
}
}
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
size_t size, u64 dma_limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long shift, iova_len, iova;
if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
cookie->msi_iova += size;
return cookie->msi_iova - size;
}
shift = iova_shift(iovad);
iova_len = size >> shift;
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/*
* Try to use all the 32-bit PCI addresses first. The original SAC vs.
* DAC reasoning loses relevance with PCIe, but enough hardware and
* firmware bugs are still lurking out there that it's safest not to
* venture into the 64-bit space until necessary.
*
* If your device goes wrong after seeing the notice then likely either
* its driver is not setting DMA masks accurately, the hardware has
* some inherent bug in handling >32-bit addresses, or not all the
* expected address bits are wired up between the device and the IOMMU.
*/
if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
if (iova)
goto done;
dev->iommu->pci_32bit_workaround = false;
dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
}
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
done:
return (dma_addr_t)iova << shift;
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
struct iova_domain *iovad = &cookie->iovad;
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else if (gather && gather->queued)
queue_iova(cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
&gather->freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
size_t size)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
struct iommu_iotlb_gather iotlb_gather;
size_t unmapped;
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
iommu_iotlb_gather_init(&iotlb_gather);
iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
WARN_ON(unmapped != size);
if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
__free_page(pages[count]);
kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
unsigned int count, unsigned long order_mask, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
order_mask &= GENMASK(MAX_ORDER, 0);
if (!order_mask)
return NULL;
pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
/* IOMMU can map any pages, so highmem can also be used here */
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
while (count) {
struct page *page = NULL;
unsigned int order_size;
/*
* Higher-order allocations are a convenience rather
* than a necessity, hence using __GFP_NORETRY until
* falling back to minimum-order allocations.
*/
for (order_mask &= GENMASK(__fls(count), 0);
order_mask; order_mask &= ~order_size) {
unsigned int order = __fls(order_mask);
gfp_t alloc_flags = gfp;
order_size = 1U << order;
if (order_mask > order_size)
alloc_flags |= __GFP_NORETRY;
page = alloc_pages_node(nid, alloc_flags, order);
if (!page)
continue;
if (order)
split_page(page, order);
break;
}
if (!page) {
__iommu_dma_free_pages(pages, i);
return NULL;
}
count -= order_size;
while (order_size--)
pages[i++] = page++;
}
return pages;
}
/*
* If size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
*/
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
alloc_sizes |= PAGE_SIZE;
} else {
size = ALIGN(size, min_size);
}
if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
alloc_sizes = min_size;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
gfp);
if (!pages)
return NULL;
size = iova_align(iovad, size);
iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
/*
* Remove the zone/policy flags from the GFP - these are applied to the
* __iommu_dma_alloc_pages() but are not used for the supporting
* internal allocations that follow.
*/
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
goto out_free_iova;
if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
int i;
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
gfp);
if (ret < 0 || ret < size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
sgt->sgl->dma_length = size;
return pages;
out_free_sg:
sg_free_table(sgt);
out_free_iova:
iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
}
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct page **pages;
struct sg_table sgt;
void *vaddr;
pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
attrs);
if (!pages)
return NULL;
*dma_handle = sgt.sgl->dma_address;
sg_free_table(&sgt);
vaddr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!vaddr)
goto out_unmap;
return vaddr;
out_unmap:
__iommu_dma_unmap(dev, *dma_handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
return NULL;
}
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs)
{
struct dma_sgt_handle *sh;
sh = kmalloc(sizeof(*sh), gfp);
if (!sh)
return NULL;
sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
PAGE_KERNEL, attrs);
if (!sh->pages) {
kfree(sh);
return NULL;
}
return &sh->sgt;
}
static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct dma_sgt_handle *sh = sgt_handle(sgt);
__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
kfree(sh);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
if (sg_dma_is_swiotlb(sgl))
for_each_sg(sgl, sg, nelems, i)
iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
sg->length, dir);
else if (!dev_is_dma_coherent(dev))
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
if (sg_dma_is_swiotlb(sgl))
for_each_sg(sgl, sg, nelems, i)
iommu_dma_sync_single_for_device(dev,
sg_dma_address(sg),
sg->length, dir);
else if (!dev_is_dma_coherent(dev))
for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
dma_addr_t iova, dma_mask = dma_get_mask(dev);
/*
* If both the physical buffer start address and size are
* page aligned, we don't need to use a bounce page.
*/
if (dev_use_swiotlb(dev, size, dir) &&
iova_offset(iovad, phys | size)) {
void *padding_start;
size_t padding_size, aligned_size;
if (!is_swiotlb_active(dev)) {
dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
return DMA_MAPPING_ERROR;
}
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);
if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
/* Cleanup the padding area. */
padding_start = phys_to_virt(phys);
padding_size = aligned_size;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
padding_start += size;
padding_size -= size;
}
memset(padding_start, 0, padding_size);
}
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);
iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
return iova;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
phys_addr_t phys;
phys = iommu_iova_to_phys(domain, dma_handle);
if (WARN_ON(!phys))
return;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);
__iommu_dma_unmap(dev, dma_handle, size);
if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
/*
* Prepare a successfully-mapped scatterlist to give back to the caller.
*
* At this point the segments are already laid out by iommu_dma_map_sg() to
* avoid individually crossing any boundaries, so we merely need to check a
* segment's start address to avoid concatenating across one.
*/
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_addr_t dma_addr)
{
struct scatterlist *s, *cur = sg;
unsigned long seg_mask = dma_get_seg_boundary(dev);
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
dma_addr_t s_dma_addr = sg_dma_address(s);
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
if (sg_dma_is_bus_address(s)) {
if (i > 0)
cur = sg_next(cur);
sg_dma_unmark_bus_address(s);
sg_dma_address(cur) = s_dma_addr;
sg_dma_len(cur) = s_length;
sg_dma_mark_bus_address(cur);
count++;
cur_len = 0;
continue;
}
s->offset += s_iova_off;
s->length = s_length;
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
* - and this segment starts on an IOVA page boundary
* - but doesn't fall at a segment boundary
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
(max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
/* Otherwise start the next output segment */
if (i > 0)
cur = sg_next(cur);
cur_len = s_length;
count++;
sg_dma_address(cur) = dma_addr + s_iova_off;
}
sg_dma_len(cur) = cur_len;
dma_addr += s_iova_len;
if (s_length + s_iova_off < s_iova_len)
cur_len = 0;
}
return count;
}
/*
* If mapping failed, then just restore the original list,
* but making sure the DMA fields are invalidated.
*/
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
if (sg_dma_is_bus_address(s)) {
sg_dma_unmark_bus_address(s);
} else {
if (sg_dma_address(s) != DMA_MAPPING_ERROR)
s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
}
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
}
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i)
iommu_dma_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
sg_dma_mark_swiotlb(sg);
for_each_sg(sg, s, nents, i) {
sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
s->offset, s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
}
return nents;
out_unmap:
iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
return -EIO;
}
/*
* The DMA API client is passing in a scatterlist which could describe
* any old buffer layout, but the IOMMU API requires everything to be
* aligned to IOMMU pages. Hence the need for this complicated bit of
* impedance-matching, to be able to hand off a suitably-aligned list,
* but still preserve the original offsets and sizes for the caller.
*/
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
struct pci_p2pdma_map_state p2pdma_state = {};
enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
ssize_t ret;
int i;
if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
ret = iommu_deferred_attach(dev, domain);
if (ret)
goto out;
}
if (dev_use_sg_swiotlb(dev, sg, nents, dir))
return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
* trickery we can modify the list in-place, but reversibly, by
* stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
size_t s_iova_off = iova_offset(iovad, s->offset);
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
if (is_pci_p2pdma_page(sg_page(s))) {
map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
switch (map) {
case PCI_P2PDMA_MAP_BUS_ADDR:
/*
* iommu_map_sg() will skip this segment as
* it is marked as a bus address,
* __finalise_sg() will copy the dma address
* into the output segment.
*/
continue;
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
/*
* Mapping through host bridge should be
* mapped with regular IOVAs, thus we
* do nothing here and continue below.
*/
break;
default:
ret = -EREMOTEIO;
goto out_restore_sg;
}
}
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
s->offset -= s_iova_off;
s_length = iova_align(iovad, s_length + s_iova_off);
s->length = s_length;
/*
* Due to the alignment of our single IOVA allocation, we can
* depend on these assumptions about the segment boundary mask:
* - If mask size >= IOVA size, then the IOVA range cannot
* possibly fall across a boundary, so we don't care.
* - If mask size < IOVA size, then the IOVA range must start
* exactly on a boundary, therefore we can lay things out
* based purely on segment lengths without needing to know
* the actual addresses beforehand.
* - The mask must be a power of 2, so pad_len == 0 if
* iova_len == 0, thus we cannot dereference prev the first
* time through here (i.e. before it has a meaningful value).
*/
if (pad_len && pad_len < s_length - 1) {
prev->length += pad_len;
iova_len += pad_len;
}
iova_len += s_length;
prev = s;
}
if (!iova_len)
return __finalise_sg(dev, sg, nents, 0);
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
goto out_restore_sg;
}
/*
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
if (ret < 0 || ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
out:
if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t end = 0, start;
struct scatterlist *tmp;
int i;
if (sg_dma_is_swiotlb(sg)) {
iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
return;
}
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, the start and end points
* just have to be determined.
*/
for_each_sg(sg, tmp, nents, i) {
if (sg_dma_is_bus_address(tmp)) {
sg_dma_unmark_bus_address(tmp);
continue;
}
if (sg_dma_len(tmp) == 0)
break;
start = sg_dma_address(tmp);
break;
}
nents -= i;
for_each_sg(tmp, tmp, nents, i) {
if (sg_dma_is_bus_address(tmp)) {
sg_dma_unmark_bus_address(tmp);
continue;
}
if (sg_dma_len(tmp) == 0)
break;
end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
if (end)
__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
size_t alloc_size = PAGE_ALIGN(size);
int count = alloc_size >> PAGE_SHIFT;
struct page *page = NULL, **pages = NULL;
/* Non-coherent atomic allocation? Easy */
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_free_from_pool(dev, cpu_addr, alloc_size))
return;
if (is_vmalloc_addr(cpu_addr)) {
/*
* If the address is remapped, then it's either non-coherent
* or highmem CMA, or an iommu_dma_alloc_remap() construction.
*/
pages = dma_common_find_pages(cpu_addr);
if (!pages)
page = vmalloc_to_page(cpu_addr);
dma_common_free_remap(cpu_addr, alloc_size);
} else {
/* Lowmem means a coherent atomic or CMA allocation */
page = virt_to_page(cpu_addr);
}
if (pages)
__iommu_dma_free_pages(pages, count);
if (page)
dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
struct page **pagep, gfp_t gfp, unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size);
int node = dev_to_node(dev);
struct page *page = NULL;
void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (!page)
page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (!page)
return NULL;
if (!coherent || PageHighMem(page)) {
pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
prot, __builtin_return_address(0));
if (!cpu_addr)
goto out_free_pages;
if (!coherent)
arch_dma_prep_coherent(page, size);
} else {
cpu_addr = page_address(page);
}
*pagep = page;
memset(cpu_addr, 0, alloc_size);
return cpu_addr;
out_free_pages:
dma_free_contiguous(dev, page, alloc_size);
return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
struct page *page = NULL;
void *cpu_addr;
gfp |= __GFP_ZERO;
if (gfpflags_allow_blocking(gfp) &&
!(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
return iommu_dma_alloc_remap(dev, size, handle, gfp,
dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
}
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
gfp, NULL);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
return NULL;
*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
}
return cpu_addr;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn, off = vma->vm_pgoff;
int ret;
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
return -ENXIO;
if (is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);
if (pages)
return vm_map_pages(vma, pages, nr_pages);
pfn = vmalloc_to_pfn(cpu_addr);
} else {
pfn = page_to_pfn(virt_to_page(cpu_addr));
}
return remap_pfn_range(vma, vma->vm_start, pfn + off,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
struct page *page;
int ret;
if (is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);
if (pages) {
return sg_alloc_table_from_pages(sgt, pages,
PAGE_ALIGN(size) >> PAGE_SHIFT,
0, size, GFP_KERNEL);
}
page = vmalloc_to_page(cpu_addr);
} else {
page = virt_to_page(cpu_addr);
}
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (!ret)
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static size_t iommu_dma_opt_mapping_size(void)
{
return iova_rcache_range();
}
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
.free_noncontiguous = iommu_dma_free_noncontiguous,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
.unmap_page = iommu_dma_unmap_page,
.map_sg = iommu_dma_map_sg,
.unmap_sg = iommu_dma_unmap_sg,
.sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
.sync_single_for_device = iommu_dma_sync_single_for_device,
.sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
.sync_sg_for_device = iommu_dma_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
};
/*
* The IOMMU core code allocates the default DMA domain, which the underlying
* IOMMU driver needs to support via the dma-iommu layer.
*/
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain)
goto out_err;
if (iommu_is_dma_domain(domain)) {
if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
goto out_err;
dev->dma_ops = &iommu_dma_ops;
}
return;
out_err:
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
size_t size = cookie_msi_granule(cookie);
msi_addr &= ~(phys_addr_t)(size - 1);
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
goto out_free_page;
if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
out_free_iova:
iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
}
/**
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
* @desc: MSI descriptor, will store the MSI page
* @msi_addr: MSI target address to be mapped
*
* Return: 0 on success or negative error code if the mapping failed.
*/
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_msi_page *msi_page;
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
return 0;
}
/*
* In fact the whole prepare operation should already be serialised by
* irq_domain_mutex further up the callchain, but that's pretty subtle
* on its own, so consider this locking as failsafe documentation...
*/
mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
mutex_unlock(&msi_prepare_lock);
msi_desc_set_iommu_cookie(desc, msi_page);
if (!msi_page)
return -ENOMEM;
return 0;
}
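/*
 * Illustrative caller (assumption: an MSI irqchip driver). Before
 * composing a message targeting a doorbell behind the IOMMU, the
 * irqchip's allocation path calls roughly:
 *
 *	err = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (err)
 *		return err;
 *
 * where doorbell_phys (hypothetical name) is the physical address of
 * the MSI target register.
 */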
/**
* iommu_dma_compose_msi_msg() - Apply translation to an MSI message
* @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
* @msg: MSI message containing target physical address
*/
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
const struct iommu_dma_msi_page *msi_page;
msi_page = msi_desc_get_iommu_cookie(desc);
if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
return;
msg->address_hi = upper_32_bits(msi_page->iova);
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
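/*
 * Worked example of the rewrite above, assuming a 4K MSI granule: for
 * a doorbell at phys 0x08020040 whose page was mapped at IOVA
 * 0x0fff0000, address_lo keeps the in-page offset 0x040 and becomes
 * 0x0fff0040, while address_hi takes upper_32_bits(iova), here 0.
 */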
static int iommu_dma_init(void)
{
if (is_kdump_kernel())
static_branch_enable(&iommu_deferred_attach_enabled);
return iova_cache_get();
}
arch_initcall(iommu_dma_init);
| linux-master | drivers/iommu/dma-iommu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Author: Stepan Moskovchenko <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;
struct msm_priv {
struct list_head list_attached;
struct iommu_domain domain;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
struct device *dev;
spinlock_t pgtlock; /* pagetable lock */
};
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
return container_of(dom, struct msm_priv, domain);
}
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
if (iommu->clk) {
ret = clk_enable(iommu->clk);
if (ret)
clk_disable(iommu->pclk);
}
fail:
return ret;
}
static void __disable_clocks(struct msm_iommu_dev *iommu)
{
if (iommu->clk)
clk_disable(iommu->clk);
clk_disable(iommu->pclk);
}
static void msm_iommu_reset(void __iomem *base, int ncb)
{
int ctx;
SET_RPUE(base, 0);
SET_RPUEIE(base, 0);
SET_ESRRESTORE(base, 0);
SET_TBE(base, 0);
SET_CR(base, 0);
SET_SPDMBE(base, 0);
SET_TESTBUSCR(base, 0);
SET_TLBRSW(base, 0);
SET_GLOBAL_TLBIALL(base, 0);
SET_RPU_ACR(base, 0);
SET_TLBLKCRWE(base, 1);
for (ctx = 0; ctx < ncb; ctx++) {
SET_BPRCOSH(base, ctx, 0);
SET_BPRCISH(base, ctx, 0);
SET_BPRCNSH(base, ctx, 0);
SET_BPSHCFG(base, ctx, 0);
SET_BPMTCFG(base, ctx, 0);
SET_ACTLR(base, ctx, 0);
SET_SCTLR(base, ctx, 0);
SET_FSRRESTORE(base, ctx, 0);
SET_TTBR0(base, ctx, 0);
SET_TTBR1(base, ctx, 0);
SET_TTBCR(base, ctx, 0);
SET_BFBCR(base, ctx, 0);
SET_PAR(base, ctx, 0);
SET_FAR(base, ctx, 0);
SET_CTX_TLBIALL(base, ctx, 0);
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
SET_CONTEXTIDR(base, ctx, 0);
}
}
static void __flush_iotlb(void *cookie)
{
struct msm_priv *priv = cookie;
struct msm_iommu_dev *iommu = NULL;
struct msm_iommu_ctx_dev *master;
int ret = 0;
list_for_each_entry(iommu, &priv->list_attached, dom_node) {
ret = __enable_clocks(iommu);
if (ret)
goto fail;
list_for_each_entry(master, &iommu->ctx_list, list)
SET_CTX_TLBIALL(iommu->base, master->num, 0);
__disable_clocks(iommu);
}
fail:
return;
}
static void __flush_iotlb_range(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
struct msm_priv *priv = cookie;
struct msm_iommu_dev *iommu = NULL;
struct msm_iommu_ctx_dev *master;
int ret = 0;
int temp_size;
list_for_each_entry(iommu, &priv->list_attached, dom_node) {
ret = __enable_clocks(iommu);
if (ret)
goto fail;
list_for_each_entry(master, &iommu->ctx_list, list) {
temp_size = size;
do {
iova &= TLBIVA_VA;
iova |= GET_CONTEXTIDR_ASID(iommu->base,
master->num);
SET_TLBIVA(iommu->base, master->num, iova);
iova += granule;
} while (temp_size -= granule);
}
__disable_clocks(iommu);
}
fail:
return;
}
static void __flush_iotlb_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
__flush_iotlb_range(iova, size, granule, false, cookie);
}
static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie)
{
__flush_iotlb_range(iova, granule, granule, true, cookie);
}
static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_all = __flush_iotlb,
.tlb_flush_walk = __flush_iotlb_walk,
.tlb_add_page = __flush_iotlb_page,
};
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
int idx;
do {
idx = find_next_zero_bit(map, end, start);
if (idx == end)
return -ENOSPC;
} while (test_and_set_bit(idx, map));
return idx;
}
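/*
 * Example (illustrative): msm_iommu_attach_dev() below claims a context
 * bank per master with:
 *
 *	master->num = msm_iommu_alloc_ctx(iommu->context_map, 0, iommu->ncb);
 *
 * The test_and_set_bit() retry loop makes the claim atomic, so two
 * racing callers can never be handed the same context index.
 */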
static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
clear_bit(idx, map);
}
static void config_mids(struct msm_iommu_dev *iommu,
struct msm_iommu_ctx_dev *master)
{
int mid, ctx, i;
for (i = 0; i < master->num_mids; i++) {
mid = master->mids[i];
ctx = master->num;
SET_M2VCBR_N(iommu->base, mid, 0);
SET_CBACR_N(iommu->base, ctx, 0);
/* Set VMID = 0 */
SET_VMID(iommu->base, mid, 0);
/* Set the context number for that MID to this context */
SET_CBNDX(iommu->base, mid, ctx);
/* Set MID associated with this context bank to 0 */
SET_CBVMID(iommu->base, ctx, 0);
/* Set the ASID for TLB tagging for this context */
SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
/* Set security bit override to be Non-secure */
SET_NSCFG(iommu->base, mid, 3);
}
}
static void __reset_context(void __iomem *base, int ctx)
{
SET_BPRCOSH(base, ctx, 0);
SET_BPRCISH(base, ctx, 0);
SET_BPRCNSH(base, ctx, 0);
SET_BPSHCFG(base, ctx, 0);
SET_BPMTCFG(base, ctx, 0);
SET_ACTLR(base, ctx, 0);
SET_SCTLR(base, ctx, 0);
SET_FSRRESTORE(base, ctx, 0);
SET_TTBR0(base, ctx, 0);
SET_TTBR1(base, ctx, 0);
SET_TTBCR(base, ctx, 0);
SET_BFBCR(base, ctx, 0);
SET_PAR(base, ctx, 0);
SET_FAR(base, ctx, 0);
SET_CTX_TLBIALL(base, ctx, 0);
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
}
static void __program_context(void __iomem *base, int ctx,
struct msm_priv *priv)
{
__reset_context(base, ctx);
/* Turn on TEX Remap */
SET_TRE(base, ctx, 1);
SET_AFE(base, ctx, 1);
/* Set up HTW mode */
/* TLB miss configuration: perform HTW on miss */
SET_TLBMCFG(base, ctx, 0x3);
/* V2P configuration: HTW for access */
SET_V2PCFG(base, ctx, 0x3);
SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
SET_TTBR1(base, ctx, 0);
/* Set prrr and nmrr */
SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);
/* Invalidate the TLB for this context */
SET_CTX_TLBIALL(base, ctx, 0);
/* Set interrupt number to "secure" interrupt */
SET_IRPTNDX(base, ctx, 0);
/* Enable context fault interrupt */
SET_CFEIE(base, ctx, 1);
/* Stall access on a context fault and let the handler deal with it */
SET_CFCFG(base, ctx, 1);
/* Redirect all cacheable requests to L2 slave port. */
SET_RCISH(base, ctx, 1);
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
/* Enable the MMU */
SET_M(base, ctx, 1);
}
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
struct msm_priv *priv;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail_nomem;
INIT_LIST_HEAD(&priv->list_attached);
priv->domain.geometry.aperture_start = 0;
priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
priv->domain.geometry.force_aperture = true;
return &priv->domain;
fail_nomem:
kfree(priv);
return NULL;
}
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
kfree(priv);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
static int msm_iommu_domain_config(struct msm_priv *priv)
{
spin_lock_init(&priv->pgtlock);
priv->cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
.ias = 32,
.oas = 32,
.tlb = &msm_iommu_flush_ops,
.iommu_dev = priv->dev,
};
priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
if (!priv->iop) {
dev_err(priv->dev, "Failed to allocate pgtable\n");
return -EINVAL;
}
msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
return 0;
}
/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
struct msm_iommu_dev *iommu, *ret = NULL;
struct msm_iommu_ctx_dev *master;
list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
master = list_first_entry(&iommu->ctx_list,
struct msm_iommu_ctx_dev,
list);
if (master->of_node == dev->of_node) {
ret = iommu;
break;
}
}
return ret;
}
static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
struct msm_iommu_dev *iommu;
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
if (!iommu)
return ERR_PTR(-ENODEV);
return &iommu->iommu;
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
struct msm_iommu_dev *iommu;
struct msm_priv *priv = to_msm_priv(domain);
struct msm_iommu_ctx_dev *master;
priv->dev = dev;
msm_iommu_domain_config(priv);
spin_lock_irqsave(&msm_iommu_lock, flags);
list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
master = list_first_entry(&iommu->ctx_list,
struct msm_iommu_ctx_dev,
list);
if (master->of_node == dev->of_node) {
ret = __enable_clocks(iommu);
if (ret)
goto fail;
list_for_each_entry(master, &iommu->ctx_list, list) {
if (master->num) {
dev_err(dev, "domain already attached");
ret = -EEXIST;
goto fail;
}
master->num =
msm_iommu_alloc_ctx(iommu->context_map,
0, iommu->ncb);
if (IS_ERR_VALUE(master->num)) {
ret = -ENODEV;
goto fail;
}
config_mids(iommu, master);
__program_context(iommu->base, master->num,
priv);
}
__disable_clocks(iommu);
list_add(&iommu->dom_node, &priv->list_attached);
}
}
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
static void msm_iommu_set_platform_dma(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
struct msm_iommu_dev *iommu;
struct msm_iommu_ctx_dev *master;
int ret;
free_io_pgtable_ops(priv->iop);
spin_lock_irqsave(&msm_iommu_lock, flags);
list_for_each_entry(iommu, &priv->list_attached, dom_node) {
ret = __enable_clocks(iommu);
if (ret)
goto fail;
list_for_each_entry(master, &iommu->ctx_list, list) {
msm_iommu_free_ctx(iommu->context_map, master->num);
__reset_context(iommu->base, master->num);
}
__disable_clocks(iommu);
}
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
GFP_ATOMIC, mapped);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
}
static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
struct msm_priv *priv = to_msm_priv(domain);
__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
size_t ret;
spin_lock_irqsave(&priv->pgtlock, flags);
ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
struct msm_iommu_dev *iommu;
struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
phys_addr_t ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
iommu = list_first_entry(&priv->list_attached,
struct msm_iommu_dev, dom_node);
if (list_empty(&iommu->ctx_list))
goto fail;
master = list_first_entry(&iommu->ctx_list,
struct msm_iommu_ctx_dev, list);
if (!master)
goto fail;
ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
SET_CTX_TLBIALL(iommu->base, master->num, 0);
SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
if (GET_FAULT(iommu->base, master->num))
ret = 0;
__disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
static void print_ctx_regs(void __iomem *base, int ctx)
{
unsigned int fsr = GET_FSR(base, ctx);
pr_err("FAR = %08x PAR = %08x\n",
GET_FAR(base, ctx), GET_PAR(base, ctx));
pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
(fsr & 0x02) ? "TF " : "",
(fsr & 0x04) ? "AFF " : "",
(fsr & 0x08) ? "APF " : "",
(fsr & 0x10) ? "TLBMF " : "",
(fsr & 0x20) ? "HTWDEEF " : "",
(fsr & 0x40) ? "HTWSEEF " : "",
(fsr & 0x80) ? "MHF " : "",
(fsr & 0x10000) ? "SL " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "");
pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
pr_err("TTBR0 = %08x TTBR1 = %08x\n",
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
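/*
 * Worked example: an FSR of 0x00000012 decodes to "TF TLBMF " per the
 * masks above, i.e. a translation fault together with a TLB miss.
 */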
static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
if (!master) {
dev_err(dev, "Failed to allocate iommu_master\n");
return -ENOMEM;
}
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
dev_iommu_priv_set(dev, master);
}
for (sid = 0; sid < master->num_mids; sid++)
if (master->mids[sid] == spec->args[0]) {
dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
sid);
return 0;
}
master->mids[master->num_mids++] = spec->args[0];
return 0;
}
static int qcom_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
if (iter->dev->of_node == spec->np) {
iommu = iter;
break;
}
}
if (!iommu) {
ret = -ENODEV;
goto fail;
}
ret = insert_iommu_master(dev, &iommu, spec);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
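/*
 * Illustrative binding shape (assumption): a master references this
 * IOMMU with a one-cell specifier, e.g. "iommus = <&iommu_node 0x10>",
 * so spec->args[0] above carries the master/stream ID (0x10 is a
 * hypothetical value).
 */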
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
pr_err("Unexpected IOMMU page fault!\n");
pr_err("base = %08x\n", (unsigned int)iommu->base);
ret = __enable_clocks(iommu);
if (ret)
goto fail;
for (i = 0; i < iommu->ncb; i++) {
fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
print_ctx_regs(iommu->base, i);
SET_FSR(iommu->base, i, 0x4000000F);
}
}
__disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
.device_group = generic_device_group,
.set_platform_dma_ops = msm_iommu_set_platform_dma,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
.map_pages = msm_iommu_map,
.unmap_pages = msm_iommu_unmap,
/*
 * Nothing is needed here; the barrier that guarantees
 * completion of the TLB sync operation is implicitly
 * taken care of when the IOMMU client does a writel
 * before kick-starting the other master.
 */
.iotlb_sync = NULL,
.iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.free = msm_iommu_domain_free,
}
};
static int msm_iommu_probe(struct platform_device *pdev)
{
struct resource *r;
resource_size_t ioaddr;
struct msm_iommu_dev *iommu;
int ret, par, val;
iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENODEV;
iommu->dev = &pdev->dev;
INIT_LIST_HEAD(&iommu->ctx_list);
iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
if (IS_ERR(iommu->pclk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
"could not get smmu_pclk\n");
ret = clk_prepare(iommu->pclk);
if (ret)
return dev_err_probe(iommu->dev, ret,
"could not prepare smmu_pclk\n");
iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
if (IS_ERR(iommu->clk)) {
clk_unprepare(iommu->pclk);
return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
"could not get iommu_clk\n");
}
ret = clk_prepare(iommu->clk);
if (ret) {
clk_unprepare(iommu->pclk);
return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
goto fail;
}
ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
ret = -ENODEV;
goto fail;
}
ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
if (ret) {
dev_err(iommu->dev, "could not get ncb\n");
goto fail;
}
iommu->ncb = val;
msm_iommu_reset(iommu->base, iommu->ncb);
SET_M(iommu->base, 0, 1);
SET_PAR(iommu->base, 0, 0);
SET_V2PCFG(iommu->base, 0, 1);
SET_V2PPR(iommu->base, 0, 0);
par = GET_PAR(iommu->base, 0);
SET_V2PCFG(iommu->base, 0, 0);
SET_M(iommu->base, 0, 0);
if (!par) {
pr_err("Invalid PAR value detected\n");
ret = -ENODEV;
goto fail;
}
ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
msm_iommu_fault_handler,
IRQF_ONESHOT | IRQF_SHARED,
"msm_iommu_secure_irpt_handler",
iommu);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
goto fail;
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
"msm-smmu.%pa", &ioaddr);
if (ret) {
pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
goto fail;
}
ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
goto fail;
}
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
return ret;
fail:
clk_unprepare(iommu->clk);
clk_unprepare(iommu->pclk);
return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
{ .compatible = "qcom,apq8064-iommu" },
{}
};
static void msm_iommu_remove(struct platform_device *pdev)
{
struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
clk_unprepare(iommu->clk);
clk_unprepare(iommu->pclk);
}
static struct platform_driver msm_iommu_driver = {
.driver = {
.name = "msm_iommu",
.of_match_table = msm_iommu_dt_match,
},
.probe = msm_iommu_probe,
.remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);
| linux-master | drivers/iommu/msm_iommu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* iommu trace points
*
* Copyright (C) 2013 Shuah Khan <[email protected]>
*
*/
#include <linux/string.h>
#include <linux/types.h>
#define CREATE_TRACE_POINTS
#include <trace/events/iommu.h>
/* iommu_group_event */
EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
/* iommu_device_event */
EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
/* iommu_map_unmap */
EXPORT_TRACEPOINT_SYMBOL_GPL(map);
EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
/* iommu_error */
EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
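/*
 * Usage note (illustrative): once exported, these events are visible in
 * tracefs; e.g. writing 1 to /sys/kernel/tracing/events/iommu/map/enable
 * makes the iommu core's trace_map() calls appear in the trace buffer.
 */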
| linux-master | drivers/iommu/iommu-traces.c |