Dataset columns: python_code (string, 0 to 1.8M chars), repo_name (string, 7 classes), file_path (string, 5 to 99 chars).
// SPDX-License-Identifier: GPL-2.0 /* * Sensirion SPS30 particulate matter sensor serial driver * * Copyright (c) 2021 Tomasz Duszynski <[email protected]> */ #include <linux/completion.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/iio/iio.h> #include <linux/minmax.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/serdev.h> #include <linux/types.h> #include "sps30.h" #define SPS30_SERIAL_DEV_NAME "sps30" #define SPS30_SERIAL_SOF_EOF 0x7e #define SPS30_SERIAL_TIMEOUT msecs_to_jiffies(20) #define SPS30_SERIAL_MAX_BUF_SIZE 263 #define SPS30_SERIAL_ESCAPE_CHAR 0x7d #define SPS30_SERIAL_FRAME_MIN_SIZE 7 #define SPS30_SERIAL_FRAME_ADR_OFFSET 1 #define SPS30_SERIAL_FRAME_CMD_OFFSET 2 #define SPS30_SERIAL_FRAME_MOSI_LEN_OFFSET 3 #define SPS30_SERIAL_FRAME_MISO_STATE_OFFSET 3 #define SPS30_SERIAL_FRAME_MISO_LEN_OFFSET 4 #define SPS30_SERIAL_FRAME_MISO_DATA_OFFSET 5 #define SPS30_SERIAL_START_MEAS 0x00 #define SPS30_SERIAL_STOP_MEAS 0x01 #define SPS30_SERIAL_READ_MEAS 0x03 #define SPS30_SERIAL_RESET 0xd3 #define SPS30_SERIAL_CLEAN_FAN 0x56 #define SPS30_SERIAL_PERIOD 0x80 #define SPS30_SERIAL_DEV_INFO 0xd0 #define SPS30_SERIAL_READ_VERSION 0xd1 struct sps30_serial_priv { struct completion new_frame; unsigned char buf[SPS30_SERIAL_MAX_BUF_SIZE]; size_t num; bool escaped; bool done; }; static int sps30_serial_xfer(struct sps30_state *state, const unsigned char *buf, size_t size) { struct serdev_device *serdev = to_serdev_device(state->dev); struct sps30_serial_priv *priv = state->priv; int ret; priv->num = 0; priv->escaped = false; priv->done = false; ret = serdev_device_write(serdev, buf, size, SPS30_SERIAL_TIMEOUT); if (ret < 0) return ret; if (ret != size) return -EIO; ret = wait_for_completion_interruptible_timeout(&priv->new_frame, SPS30_SERIAL_TIMEOUT); if (ret < 0) return ret; if (!ret) return -ETIMEDOUT; return 0; } static const struct { unsigned char byte; unsigned char byte2; } sps30_serial_bytes[] = { { 0x11, 0x31 }, { 0x13, 0x33 }, { 0x7e, 0x5e }, { 0x7d, 0x5d }, }; static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte) { int i; for (i = 0; i < ARRAY_SIZE(sps30_serial_bytes); i++) { if (sps30_serial_bytes[i].byte != byte) continue; buf[0] = SPS30_SERIAL_ESCAPE_CHAR; buf[1] = sps30_serial_bytes[i].byte2; return 2; } buf[0] = byte; return 1; } static char sps30_serial_get_byte(bool escaped, unsigned char byte2) { int i; if (!escaped) return byte2; for (i = 0; i < ARRAY_SIZE(sps30_serial_bytes); i++) { if (sps30_serial_bytes[i].byte2 != byte2) continue; return sps30_serial_bytes[i].byte; } return 0; } static unsigned char sps30_serial_calc_chksum(const unsigned char *buf, size_t num) { unsigned int chksum = 0; size_t i; for (i = 0; i < num; i++) chksum += buf[i]; return ~chksum; } static int sps30_serial_prep_frame(unsigned char *buf, unsigned char cmd, const unsigned char *arg, size_t arg_size) { unsigned char chksum; int num = 0; size_t i; buf[num++] = SPS30_SERIAL_SOF_EOF; buf[num++] = 0; num += sps30_serial_put_byte(buf + num, cmd); num += sps30_serial_put_byte(buf + num, arg_size); for (i = 0; i < arg_size; i++) num += sps30_serial_put_byte(buf + num, arg[i]); /* SOF isn't checksummed */ chksum = sps30_serial_calc_chksum(buf + 1, num - 1); num += sps30_serial_put_byte(buf + num, chksum); buf[num++] = SPS30_SERIAL_SOF_EOF; return num; } static bool sps30_serial_frame_valid(struct sps30_state *state, const unsigned char *buf) { struct sps30_serial_priv *priv = state->priv; unsigned char chksum; if ((priv->num < 
SPS30_SERIAL_FRAME_MIN_SIZE) || (priv->num != SPS30_SERIAL_FRAME_MIN_SIZE + priv->buf[SPS30_SERIAL_FRAME_MISO_LEN_OFFSET])) { dev_err(state->dev, "frame has invalid number of bytes\n"); return false; } if ((priv->buf[SPS30_SERIAL_FRAME_ADR_OFFSET] != buf[SPS30_SERIAL_FRAME_ADR_OFFSET]) || (priv->buf[SPS30_SERIAL_FRAME_CMD_OFFSET] != buf[SPS30_SERIAL_FRAME_CMD_OFFSET])) { dev_err(state->dev, "frame has wrong ADR and CMD bytes\n"); return false; } if (priv->buf[SPS30_SERIAL_FRAME_MISO_STATE_OFFSET]) { dev_err(state->dev, "frame with non-zero state received (0x%02x)\n", priv->buf[SPS30_SERIAL_FRAME_MISO_STATE_OFFSET]); return false; } /* SOF, checksum and EOF are not checksummed */ chksum = sps30_serial_calc_chksum(priv->buf + 1, priv->num - 3); if (priv->buf[priv->num - 2] != chksum) { dev_err(state->dev, "frame integrity check failed\n"); return false; } return true; } static int sps30_serial_command(struct sps30_state *state, unsigned char cmd, const void *arg, size_t arg_size, void *rsp, size_t rsp_size) { struct sps30_serial_priv *priv = state->priv; unsigned char buf[SPS30_SERIAL_MAX_BUF_SIZE]; int ret, size; size = sps30_serial_prep_frame(buf, cmd, arg, arg_size); ret = sps30_serial_xfer(state, buf, size); if (ret) return ret; if (!sps30_serial_frame_valid(state, buf)) return -EIO; if (rsp) { rsp_size = min_t(size_t, priv->buf[SPS30_SERIAL_FRAME_MISO_LEN_OFFSET], rsp_size); memcpy(rsp, &priv->buf[SPS30_SERIAL_FRAME_MISO_DATA_OFFSET], rsp_size); } return rsp_size; } static int sps30_serial_receive_buf(struct serdev_device *serdev, const unsigned char *buf, size_t size) { struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev); struct sps30_serial_priv *priv; struct sps30_state *state; unsigned char byte; size_t i; if (!indio_dev) return 0; state = iio_priv(indio_dev); priv = state->priv; /* just in case device put some unexpected data on the bus */ if (priv->done) return size; /* wait for the start of frame */ if (!priv->num && size && buf[0] != SPS30_SERIAL_SOF_EOF) return 1; if (priv->num + size >= ARRAY_SIZE(priv->buf)) size = ARRAY_SIZE(priv->buf) - priv->num; for (i = 0; i < size; i++) { byte = buf[i]; /* remove stuffed bytes on-the-fly */ if (byte == SPS30_SERIAL_ESCAPE_CHAR) { priv->escaped = true; continue; } byte = sps30_serial_get_byte(priv->escaped, byte); if (priv->escaped && !byte) dev_warn(state->dev, "unrecognized escaped char (0x%02x)\n", byte); priv->buf[priv->num++] = byte; /* EOF received */ if (!priv->escaped && byte == SPS30_SERIAL_SOF_EOF) { if (priv->num < SPS30_SERIAL_FRAME_MIN_SIZE) continue; priv->done = true; complete(&priv->new_frame); i++; break; } priv->escaped = false; } return i; } static const struct serdev_device_ops sps30_serial_device_ops = { .receive_buf = sps30_serial_receive_buf, .write_wakeup = serdev_device_write_wakeup, }; static int sps30_serial_start_meas(struct sps30_state *state) { /* request BE IEEE754 formatted data */ unsigned char buf[] = { 0x01, 0x03 }; return sps30_serial_command(state, SPS30_SERIAL_START_MEAS, buf, sizeof(buf), NULL, 0); } static int sps30_serial_stop_meas(struct sps30_state *state) { return sps30_serial_command(state, SPS30_SERIAL_STOP_MEAS, NULL, 0, NULL, 0); } static int sps30_serial_reset(struct sps30_state *state) { int ret; ret = sps30_serial_command(state, SPS30_SERIAL_RESET, NULL, 0, NULL, 0); msleep(500); return ret; } static int sps30_serial_read_meas(struct sps30_state *state, __be32 *meas, size_t num) { int ret; /* measurements are ready within a second */ if (msleep_interruptible(1000)) return -EINTR; 
ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num)); if (ret < 0) return ret; /* if measurements aren't ready sensor returns empty frame */ if (ret == SPS30_SERIAL_FRAME_MIN_SIZE) return -ETIMEDOUT; if (ret != num * sizeof(*meas)) return -EIO; return 0; } static int sps30_serial_clean_fan(struct sps30_state *state) { return sps30_serial_command(state, SPS30_SERIAL_CLEAN_FAN, NULL, 0, NULL, 0); } static int sps30_serial_read_cleaning_period(struct sps30_state *state, __be32 *period) { unsigned char buf[] = { 0x00 }; int ret; ret = sps30_serial_command(state, SPS30_SERIAL_PERIOD, buf, sizeof(buf), period, sizeof(*period)); if (ret < 0) return ret; if (ret != sizeof(*period)) return -EIO; return 0; } static int sps30_serial_write_cleaning_period(struct sps30_state *state, __be32 period) { unsigned char buf[5] = { 0x00 }; memcpy(buf + 1, &period, sizeof(period)); return sps30_serial_command(state, SPS30_SERIAL_PERIOD, buf, sizeof(buf), NULL, 0); } static int sps30_serial_show_info(struct sps30_state *state) { /* * tell device do return serial number and add extra nul byte just in case * serial number isn't a valid string */ unsigned char buf[32 + 1] = { 0x03 }; struct device *dev = state->dev; int ret; ret = sps30_serial_command(state, SPS30_SERIAL_DEV_INFO, buf, 1, buf, sizeof(buf) - 1); if (ret < 0) return ret; if (ret != sizeof(buf) - 1) return -EIO; dev_info(dev, "serial number: %s\n", buf); ret = sps30_serial_command(state, SPS30_SERIAL_READ_VERSION, NULL, 0, buf, sizeof(buf) - 1); if (ret < 0) return ret; if (ret < 2) return -EIO; dev_info(dev, "fw version: %u.%u\n", buf[0], buf[1]); return 0; } static const struct sps30_ops sps30_serial_ops = { .start_meas = sps30_serial_start_meas, .stop_meas = sps30_serial_stop_meas, .read_meas = sps30_serial_read_meas, .reset = sps30_serial_reset, .clean_fan = sps30_serial_clean_fan, .read_cleaning_period = sps30_serial_read_cleaning_period, .write_cleaning_period = sps30_serial_write_cleaning_period, .show_info = sps30_serial_show_info, }; static int sps30_serial_probe(struct serdev_device *serdev) { struct device *dev = &serdev->dev; struct sps30_serial_priv *priv; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; init_completion(&priv->new_frame); serdev_device_set_client_ops(serdev, &sps30_serial_device_ops); ret = devm_serdev_device_open(dev, serdev); if (ret) return ret; serdev_device_set_baudrate(serdev, 115200); serdev_device_set_flow_control(serdev, false); ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); if (ret) return ret; return sps30_probe(dev, SPS30_SERIAL_DEV_NAME, priv, &sps30_serial_ops); } static const struct of_device_id sps30_serial_of_match[] = { { .compatible = "sensirion,sps30" }, { } }; MODULE_DEVICE_TABLE(of, sps30_serial_of_match); static struct serdev_device_driver sps30_serial_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = sps30_serial_of_match, }, .probe = sps30_serial_probe, }; module_serdev_device_driver(sps30_serial_driver); MODULE_AUTHOR("Tomasz Duszynski <[email protected]>"); MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor serial driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(IIO_SPS30);
linux-master
drivers/iio/chemical/sps30_serial.c
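For reference, a minimal userspace sketch of the SHDLC-style framing the driver above speaks. It assumes only what the file itself shows: the checksum is the inverted 8-bit sum of the unstuffed ADR/CMD/LEN/DATA bytes (matching the receive-side check in sps30_serial_frame_valid()), and the reserved bytes 0x7e/0x7d/0x11/0x13 are stuffed as 0x7d followed by the byte XOR 0x20 (the sps30_serial_bytes table). Everything else here is illustrative, not part of the file.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Inverted 8-bit sum, mirroring sps30_serial_calc_chksum() */
static uint8_t shdlc_chksum(const uint8_t *buf, size_t n)
{
	unsigned int sum = 0;
	size_t i;

	for (i = 0; i < n; i++)
		sum += buf[i];
	return (uint8_t)~sum;
}

/* Byte stuffing, mirroring sps30_serial_put_byte(); returns bytes written */
static int shdlc_put_byte(uint8_t *out, uint8_t byte)
{
	switch (byte) {
	case 0x7e: case 0x7d: case 0x11: case 0x13:
		out[0] = 0x7d;
		out[1] = byte ^ 0x20;	/* e.g. 0x7e -> 0x5e */
		return 2;
	default:
		out[0] = byte;
		return 1;
	}
}

int main(void)
{
	/* ADR=0, CMD=0x00 (start measurement), LEN=2, args { 0x01, 0x03 } */
	const uint8_t body[] = { 0x00, 0x00, 0x02, 0x01, 0x03 };
	uint8_t frame[16];
	size_t i;
	int n = 0;

	frame[n++] = 0x7e;	/* SOF */
	for (i = 0; i < sizeof(body); i++)
		n += shdlc_put_byte(frame + n, body[i]);
	n += shdlc_put_byte(frame + n, shdlc_chksum(body, sizeof(body)));
	frame[n++] = 0x7e;	/* EOF */

	for (i = 0; i < (size_t)n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}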
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012 Analog Devices, Inc. * Author: Lars-Peter Clausen <[email protected]> */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/buffer_impl.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/trigger_consumer.h> /** * iio_triggered_buffer_setup_ext() - Setup triggered buffer and pollfunc * @indio_dev: IIO device structure * @h: Function which will be used as pollfunc top half * @thread: Function which will be used as pollfunc bottom half * @direction: Direction of the data stream (in/out). * @setup_ops: Buffer setup functions to use for this device. * If NULL the default setup functions for triggered * buffers will be used. * @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer * * This function combines some common tasks which will normally be performed * when setting up a triggered buffer. It will allocate the buffer and the * pollfunc. * * Before calling this function the indio_dev structure should already be * completely initialized, but not yet registered. In practice this means that * this function should be called right before iio_device_register(). * * To free the resources allocated by this function call * iio_triggered_buffer_cleanup(). */ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *setup_ops, const struct iio_dev_attr **buffer_attrs) { struct iio_buffer *buffer; int ret; buffer = iio_kfifo_allocate(); if (!buffer) { ret = -ENOMEM; goto error_ret; } indio_dev->pollfunc = iio_alloc_pollfunc(h, thread, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, iio_device_id(indio_dev)); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_kfifo_free; } /* Ring buffer functions - here trigger setup related */ indio_dev->setup_ops = setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; buffer->direction = direction; buffer->attrs = buffer_attrs; ret = iio_device_attach_buffer(indio_dev, buffer); if (ret < 0) goto error_dealloc_pollfunc; return 0; error_dealloc_pollfunc: iio_dealloc_pollfunc(indio_dev->pollfunc); error_kfifo_free: iio_kfifo_free(buffer); error_ret: return ret; } EXPORT_SYMBOL(iio_triggered_buffer_setup_ext); /** * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup_ext() * @indio_dev: IIO device structure */ void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev) { iio_dealloc_pollfunc(indio_dev->pollfunc); iio_kfifo_free(indio_dev->buffer); } EXPORT_SYMBOL(iio_triggered_buffer_cleanup); static void devm_iio_triggered_buffer_clean(void *indio_dev) { iio_triggered_buffer_cleanup(indio_dev); } int devm_iio_triggered_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *ops, const struct iio_dev_attr **buffer_attrs) { int ret; ret = iio_triggered_buffer_setup_ext(indio_dev, h, thread, direction, ops, buffer_attrs); if (ret) return ret; return devm_add_action_or_reset(dev, devm_iio_triggered_buffer_clean, indio_dev); } EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup_ext); MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>"); 
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers"); MODULE_LICENSE("GPL");
linux-master
drivers/iio/buffer/industrialio-triggered-buffer.c
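A hedged sketch of how a device driver typically consumes the helper above: allocate the triggered buffer right before registering the device, with iio_pollfunc_store_time as the top half. my_trigger_handler, my_probe and the scan layout are hypothetical names, not part of the file.

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	s16 samples[4];	/* hypothetical scan buffer */

	/* ... read the device, fill samples according to the scan mask ... */
	iio_push_to_buffers(indio_dev, samples);
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, struct iio_dev *indio_dev)
{
	int ret;

	/* Same arguments the iio_triggered_buffer_setup() wrapper fills in */
	ret = devm_iio_triggered_buffer_setup_ext(dev, indio_dev,
						  iio_pollfunc_store_time,
						  my_trigger_handler,
						  IIO_BUFFER_DIRECTION_IN,
						  NULL, NULL);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}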
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2017 Analog Devices Inc. * Author: Lars-Peter Clausen <[email protected]> */ #include <linux/err.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/consumer.h> #include <linux/iio/hw-consumer.h> #include <linux/iio/buffer_impl.h> /** * struct iio_hw_consumer - IIO hw consumer block * @buffers: hardware buffers list head. * @channels: IIO provider channels. */ struct iio_hw_consumer { struct list_head buffers; struct iio_channel *channels; }; struct hw_consumer_buffer { struct list_head head; struct iio_dev *indio_dev; struct iio_buffer buffer; long scan_mask[]; }; static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer( struct iio_buffer *buffer) { return container_of(buffer, struct hw_consumer_buffer, buffer); } static void iio_hw_buf_release(struct iio_buffer *buffer) { struct hw_consumer_buffer *hw_buf = iio_buffer_to_hw_consumer_buffer(buffer); kfree(hw_buf); } static const struct iio_buffer_access_funcs iio_hw_buf_access = { .release = &iio_hw_buf_release, .modes = INDIO_BUFFER_HARDWARE, }; static struct hw_consumer_buffer *iio_hw_consumer_get_buffer( struct iio_hw_consumer *hwc, struct iio_dev *indio_dev) { struct hw_consumer_buffer *buf; list_for_each_entry(buf, &hwc->buffers, head) { if (buf->indio_dev == indio_dev) return buf; } buf = kzalloc(struct_size(buf, scan_mask, BITS_TO_LONGS(indio_dev->masklength)), GFP_KERNEL); if (!buf) return NULL; buf->buffer.access = &iio_hw_buf_access; buf->indio_dev = indio_dev; buf->buffer.scan_mask = buf->scan_mask; iio_buffer_init(&buf->buffer); list_add_tail(&buf->head, &hwc->buffers); return buf; } /** * iio_hw_consumer_alloc() - Allocate IIO hardware consumer * @dev: Pointer to consumer device. * * Returns a valid iio_hw_consumer on success or a ERR_PTR() on failure. */ struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev) { struct hw_consumer_buffer *buf; struct iio_hw_consumer *hwc; struct iio_channel *chan; int ret; hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); if (!hwc) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&hwc->buffers); hwc->channels = iio_channel_get_all(dev); if (IS_ERR(hwc->channels)) { ret = PTR_ERR(hwc->channels); goto err_free_hwc; } chan = &hwc->channels[0]; while (chan->indio_dev) { buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev); if (!buf) { ret = -ENOMEM; goto err_put_buffers; } set_bit(chan->channel->scan_index, buf->buffer.scan_mask); chan++; } return hwc; err_put_buffers: list_for_each_entry(buf, &hwc->buffers, head) iio_buffer_put(&buf->buffer); iio_channel_release_all(hwc->channels); err_free_hwc: kfree(hwc); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc); /** * iio_hw_consumer_free() - Free IIO hardware consumer * @hwc: hw consumer to free. */ void iio_hw_consumer_free(struct iio_hw_consumer *hwc) { struct hw_consumer_buffer *buf, *n; iio_channel_release_all(hwc->channels); list_for_each_entry_safe(buf, n, &hwc->buffers, head) iio_buffer_put(&buf->buffer); kfree(hwc); } EXPORT_SYMBOL_GPL(iio_hw_consumer_free); static void devm_iio_hw_consumer_release(void *iio_hwc) { iio_hw_consumer_free(iio_hwc); } /** * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc() * @dev: Pointer to consumer device. * * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function * is automatically freed on driver detach. * * returns pointer to allocated iio_hw_consumer on success, NULL on failure. 
*/ struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev) { struct iio_hw_consumer *iio_hwc; int ret; iio_hwc = iio_hw_consumer_alloc(dev); if (IS_ERR(iio_hwc)) return iio_hwc; ret = devm_add_action_or_reset(dev, devm_iio_hw_consumer_release, iio_hwc); if (ret) return ERR_PTR(ret); return iio_hwc; } EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc); /** * iio_hw_consumer_enable() - Enable IIO hardware consumer * @hwc: iio_hw_consumer to enable. * * Returns 0 on success. */ int iio_hw_consumer_enable(struct iio_hw_consumer *hwc) { struct hw_consumer_buffer *buf; int ret; list_for_each_entry(buf, &hwc->buffers, head) { ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL); if (ret) goto err_disable_buffers; } return 0; err_disable_buffers: list_for_each_entry_continue_reverse(buf, &hwc->buffers, head) iio_update_buffers(buf->indio_dev, NULL, &buf->buffer); return ret; } EXPORT_SYMBOL_GPL(iio_hw_consumer_enable); /** * iio_hw_consumer_disable() - Disable IIO hardware consumer * @hwc: iio_hw_consumer to disable. */ void iio_hw_consumer_disable(struct iio_hw_consumer *hwc) { struct hw_consumer_buffer *buf; list_for_each_entry(buf, &hwc->buffers, head) iio_update_buffers(buf->indio_dev, NULL, &buf->buffer); } EXPORT_SYMBOL_GPL(iio_hw_consumer_disable); MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>"); MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework"); MODULE_LICENSE("GPL v2");
linux-master
drivers/iio/buffer/industrialio-hw-consumer.c
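A hedged usage sketch of the hw-consumer API above, comparable to how a consumer such as stm32-dfsdm-adc drives it; my_enable_path and the surrounding flow are hypothetical.

static int my_enable_path(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = devm_iio_hw_consumer_alloc(dev);	/* binds all channels mapped to "dev" */
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);	/* attaches a hw buffer on each provider */
	if (ret)
		return ret;

	/* ... the provider-to-consumer hardware path is now streaming ... */

	iio_hw_consumer_disable(hwc);		/* detaches the buffers again */
	return 0;
}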
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2013-2015 Analog Devices Inc. * Author: Lars-Peter Clausen <[email protected]> */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/poll.h> #include <linux/iio/buffer_impl.h> #include <linux/iio/buffer-dma.h> #include <linux/dma-mapping.h> #include <linux/sizes.h> /* * For DMA buffers the storage is sub-divided into so called blocks. Each block * has its own memory buffer. The size of the block is the granularity at which * memory is exchanged between the hardware and the application. Increasing the * basic unit of data exchange from one sample to one block decreases the * management overhead that is associated with each sample. E.g. if we say the * management overhead for one exchange is x and the unit of exchange is one * sample the overhead will be x for each sample. Whereas when using a block * which contains n samples the overhead per sample is reduced to x/n. This * allows to achieve much higher samplerates than what can be sustained with * the one sample approach. * * Blocks are exchanged between the DMA controller and the application via the * means of two queues. The incoming queue and the outgoing queue. Blocks on the * incoming queue are waiting for the DMA controller to pick them up and fill * them with data. Block on the outgoing queue have been filled with data and * are waiting for the application to dequeue them and read the data. * * A block can be in one of the following states: * * Owned by the application. In this state the application can read data from * the block. * * On the incoming list: Blocks on the incoming list are queued up to be * processed by the DMA controller. * * Owned by the DMA controller: The DMA controller is processing the block * and filling it with data. * * On the outgoing list: Blocks on the outgoing list have been successfully * processed by the DMA controller and contain data. They can be dequeued by * the application. * * Dead: A block that is dead has been marked as to be freed. It might still * be owned by either the application or the DMA controller at the moment. * But once they are done processing it instead of going to either the * incoming or outgoing queue the block will be freed. * * In addition to this blocks are reference counted and the memory associated * with both the block structure as well as the storage memory for the block * will be freed when the last reference to the block is dropped. This means a * block must not be accessed without holding a reference. * * The iio_dma_buffer implementation provides a generic infrastructure for * managing the blocks. * * A driver for a specific piece of hardware that has DMA capabilities need to * implement the submit() callback from the iio_dma_buffer_ops structure. This * callback is supposed to initiate the DMA transfer copying data from the * converter to the memory region of the block. Once the DMA transfer has been * completed the driver must call iio_dma_buffer_block_done() for the completed * block. * * Prior to this it must set the bytes_used field of the block contains * the actual number of bytes in the buffer. Typically this will be equal to the * size of the block, but if the DMA hardware has certain alignment requirements * for the transfer length it might choose to use less than the full size. 
In * either case it is expected that bytes_used is a multiple of the bytes per * datum, i.e. the block must not contain partial samples. * * The driver must call iio_dma_buffer_block_done() for each block it has * received through its submit_block() callback, even if it does not actually * perform a DMA transfer for the block, e.g. because the buffer was disabled * before the block transfer was started. In this case it should set bytes_used * to 0. * * In addition it is recommended that a driver implements the abort() callback. * It will be called when the buffer is disabled and can be used to cancel * pending and stop active transfers. * * The specific driver implementation should use the default callback * implementations provided by this module for the iio_buffer_access_funcs * struct. It may overload some callbacks with custom variants if the hardware * has special requirements that are not handled by the generic functions. If a * driver chooses to overload a callback it has to ensure that the generic * callback is called from within the custom callback. */ static void iio_buffer_block_release(struct kref *kref) { struct iio_dma_buffer_block *block = container_of(kref, struct iio_dma_buffer_block, kref); WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), block->vaddr, block->phys_addr); iio_buffer_put(&block->queue->buffer); kfree(block); } static void iio_buffer_block_get(struct iio_dma_buffer_block *block) { kref_get(&block->kref); } static void iio_buffer_block_put(struct iio_dma_buffer_block *block) { kref_put(&block->kref, iio_buffer_block_release); } /* * dma_free_coherent can sleep, hence we need to take some special care to be * able to drop a reference from an atomic context. */ static LIST_HEAD(iio_dma_buffer_dead_blocks); static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock); static void iio_dma_buffer_cleanup_worker(struct work_struct *work) { struct iio_dma_buffer_block *block, *_block; LIST_HEAD(block_list); spin_lock_irq(&iio_dma_buffer_dead_blocks_lock); list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list); spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock); list_for_each_entry_safe(block, _block, &block_list, head) iio_buffer_block_release(&block->kref); } static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker); static void iio_buffer_block_release_atomic(struct kref *kref) { struct iio_dma_buffer_block *block; unsigned long flags; block = container_of(kref, struct iio_dma_buffer_block, kref); spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags); list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags); schedule_work(&iio_dma_buffer_cleanup_work); } /* * Version of iio_buffer_block_put() that can be called from atomic context */ static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) { kref_put(&block->kref, iio_buffer_block_release_atomic); } static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf) { return container_of(buf, struct iio_dma_buffer_queue, buffer); } static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block( struct iio_dma_buffer_queue *queue, size_t size) { struct iio_dma_buffer_block *block; block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) return NULL; block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), &block->phys_addr, GFP_KERNEL); if (!block->vaddr) { kfree(block); return NULL; } block->size = size; block->state = 
IIO_BLOCK_STATE_DEQUEUED; block->queue = queue; INIT_LIST_HEAD(&block->head); kref_init(&block->kref); iio_buffer_get(&queue->buffer); return block; } static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) { struct iio_dma_buffer_queue *queue = block->queue; /* * The buffer has already been freed by the application, just drop the * reference. */ if (block->state != IIO_BLOCK_STATE_DEAD) { block->state = IIO_BLOCK_STATE_DONE; list_add_tail(&block->head, &queue->outgoing); } } /** * iio_dma_buffer_block_done() - Indicate that a block has been completed * @block: The completed block * * Should be called when the DMA controller has finished handling the block to * pass back ownership of the block to the queue. */ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) { struct iio_dma_buffer_queue *queue = block->queue; unsigned long flags; spin_lock_irqsave(&queue->list_lock, flags); _iio_dma_buffer_block_done(block); spin_unlock_irqrestore(&queue->list_lock, flags); iio_buffer_block_put_atomic(block); wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); /** * iio_dma_buffer_block_list_abort() - Indicate that a list block has been * aborted * @queue: Queue for which to complete blocks. * @list: List of aborted blocks. All blocks in this list must be from @queue. * * Typically called from the abort() callback after the DMA controller has been * stopped. This will set bytes_used to 0 for each block in the list and then * hand the blocks back to the queue. */ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, struct list_head *list) { struct iio_dma_buffer_block *block, *_block; unsigned long flags; spin_lock_irqsave(&queue->list_lock, flags); list_for_each_entry_safe(block, _block, list, head) { list_del(&block->head); block->bytes_used = 0; _iio_dma_buffer_block_done(block); iio_buffer_block_put_atomic(block); } spin_unlock_irqrestore(&queue->list_lock, flags); wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) { /* * If the core owns the block it can be re-used. This should be the * default case when enabling the buffer, unless the DMA controller does * not support abort and has not given back the block yet. */ switch (block->state) { case IIO_BLOCK_STATE_DEQUEUED: case IIO_BLOCK_STATE_QUEUED: case IIO_BLOCK_STATE_DONE: return true; default: return false; } } /** * iio_dma_buffer_request_update() - DMA buffer request_update callback * @buffer: The buffer which to request an update * * Should be used as the iio_dma_buffer_request_update() callback for * iio_buffer_access_ops struct for DMA buffers. */ int iio_dma_buffer_request_update(struct iio_buffer *buffer) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); struct iio_dma_buffer_block *block; bool try_reuse = false; size_t size; int ret = 0; int i; /* * Split the buffer into two even parts. This is used as a double * buffering scheme with usually one block at a time being used by the * DMA and the other one by the application. 
*/ size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * queue->buffer.length, 2); mutex_lock(&queue->lock); /* Allocations are page aligned */ if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) try_reuse = true; queue->fileio.block_size = size; queue->fileio.active_block = NULL; spin_lock_irq(&queue->list_lock); for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { block = queue->fileio.blocks[i]; /* If we can't re-use it free it */ if (block && (!iio_dma_block_reusable(block) || !try_reuse)) block->state = IIO_BLOCK_STATE_DEAD; } /* * At this point all blocks are either owned by the core or marked as * dead. This means we can reset the lists without having to fear * corrution. */ INIT_LIST_HEAD(&queue->outgoing); spin_unlock_irq(&queue->list_lock); INIT_LIST_HEAD(&queue->incoming); for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { if (queue->fileio.blocks[i]) { block = queue->fileio.blocks[i]; if (block->state == IIO_BLOCK_STATE_DEAD) { /* Could not reuse it */ iio_buffer_block_put(block); block = NULL; } else { block->size = size; } } else { block = NULL; } if (!block) { block = iio_dma_buffer_alloc_block(queue, size); if (!block) { ret = -ENOMEM; goto out_unlock; } queue->fileio.blocks[i] = block; } block->state = IIO_BLOCK_STATE_QUEUED; list_add_tail(&block->head, &queue->incoming); } out_unlock: mutex_unlock(&queue->lock); return ret; } EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update); static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) { int ret; /* * If the hardware has already been removed we put the block into * limbo. It will neither be on the incoming nor outgoing list, nor will * it ever complete. It will just wait to be freed eventually. */ if (!queue->ops) return; block->state = IIO_BLOCK_STATE_ACTIVE; iio_buffer_block_get(block); ret = queue->ops->submit(queue, block); if (ret) { /* * This is a bit of a problem and there is not much we can do * other then wait for the buffer to be disabled and re-enabled * and try again. But it should not really happen unless we run * out of memory or something similar. * * TODO: Implement support in the IIO core to allow buffers to * notify consumers that something went wrong and the buffer * should be disabled. */ iio_buffer_block_put(block); } } /** * iio_dma_buffer_enable() - Enable DMA buffer * @buffer: IIO buffer to enable * @indio_dev: IIO device the buffer is attached to * * Needs to be called when the device that the buffer is attached to starts * sampling. Typically should be the iio_buffer_access_ops enable callback. * * This will allocate the DMA buffers and start the DMA transfers. */ int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); struct iio_dma_buffer_block *block, *_block; mutex_lock(&queue->lock); queue->active = true; list_for_each_entry_safe(block, _block, &queue->incoming, head) { list_del(&block->head); iio_dma_buffer_submit_block(queue, block); } mutex_unlock(&queue->lock); return 0; } EXPORT_SYMBOL_GPL(iio_dma_buffer_enable); /** * iio_dma_buffer_disable() - Disable DMA buffer * @buffer: IIO DMA buffer to disable * @indio_dev: IIO device the buffer is attached to * * Needs to be called when the device that the buffer is attached to stops * sampling. Typically should be the iio_buffer_access_ops disable callback. 
*/ int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); mutex_lock(&queue->lock); queue->active = false; if (queue->ops && queue->ops->abort) queue->ops->abort(queue); mutex_unlock(&queue->lock); return 0; } EXPORT_SYMBOL_GPL(iio_dma_buffer_disable); static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) { if (block->state == IIO_BLOCK_STATE_DEAD) { iio_buffer_block_put(block); } else if (queue->active) { iio_dma_buffer_submit_block(queue, block); } else { block->state = IIO_BLOCK_STATE_QUEUED; list_add_tail(&block->head, &queue->incoming); } } static struct iio_dma_buffer_block *iio_dma_buffer_dequeue( struct iio_dma_buffer_queue *queue) { struct iio_dma_buffer_block *block; spin_lock_irq(&queue->list_lock); block = list_first_entry_or_null(&queue->outgoing, struct iio_dma_buffer_block, head); if (block != NULL) { list_del(&block->head); block->state = IIO_BLOCK_STATE_DEQUEUED; } spin_unlock_irq(&queue->list_lock); return block; } /** * iio_dma_buffer_read() - DMA buffer read callback * @buffer: Buffer to read form * @n: Number of bytes to read * @user_buffer: Userspace buffer to copy the data to * * Should be used as the read callback for iio_buffer_access_ops * struct for DMA buffers. */ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); struct iio_dma_buffer_block *block; int ret; if (n < buffer->bytes_per_datum) return -EINVAL; mutex_lock(&queue->lock); if (!queue->fileio.active_block) { block = iio_dma_buffer_dequeue(queue); if (block == NULL) { ret = 0; goto out_unlock; } queue->fileio.pos = 0; queue->fileio.active_block = block; } else { block = queue->fileio.active_block; } n = rounddown(n, buffer->bytes_per_datum); if (n > block->bytes_used - queue->fileio.pos) n = block->bytes_used - queue->fileio.pos; if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { ret = -EFAULT; goto out_unlock; } queue->fileio.pos += n; if (queue->fileio.pos == block->bytes_used) { queue->fileio.active_block = NULL; iio_dma_buffer_enqueue(queue, block); } ret = n; out_unlock: mutex_unlock(&queue->lock); return ret; } EXPORT_SYMBOL_GPL(iio_dma_buffer_read); /** * iio_dma_buffer_data_available() - DMA buffer data_available callback * @buf: Buffer to check for data availability * * Should be used as the data_available callback for iio_buffer_access_ops * struct for DMA buffers. */ size_t iio_dma_buffer_data_available(struct iio_buffer *buf) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); struct iio_dma_buffer_block *block; size_t data_available = 0; /* * For counting the available bytes we'll use the size of the block not * the number of actual bytes available in the block. Otherwise it is * possible that we end up with a value that is lower than the watermark * but won't increase since all blocks are in use. 
*/ mutex_lock(&queue->lock); if (queue->fileio.active_block) data_available += queue->fileio.active_block->size; spin_lock_irq(&queue->list_lock); list_for_each_entry(block, &queue->outgoing, head) data_available += block->size; spin_unlock_irq(&queue->list_lock); mutex_unlock(&queue->lock); return data_available; } EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available); /** * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback * @buffer: Buffer to set the bytes-per-datum for * @bpd: The new bytes-per-datum value * * Should be used as the set_bytes_per_datum callback for iio_buffer_access_ops * struct for DMA buffers. */ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd) { buffer->bytes_per_datum = bpd; return 0; } EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); /** * iio_dma_buffer_set_length - DMA buffer set_length callback * @buffer: Buffer to set the length for * @length: The new buffer length * * Should be used as the set_length callback for iio_buffer_access_ops * struct for DMA buffers. */ int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) { /* Avoid an invalid state */ if (length < 2) length = 2; buffer->length = length; buffer->watermark = length / 2; return 0; } EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length); /** * iio_dma_buffer_init() - Initialize DMA buffer queue * @queue: Buffer to initialize * @dev: DMA device * @ops: DMA buffer queue callback operations * * The DMA device will be used by the queue to do DMA memory allocations. So it * should refer to the device that will perform the DMA to ensure that * allocations are done from a memory region that can be accessed by the device. */ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dev, const struct iio_dma_buffer_ops *ops) { iio_buffer_init(&queue->buffer); queue->buffer.length = PAGE_SIZE; queue->buffer.watermark = queue->buffer.length / 2; queue->dev = dev; queue->ops = ops; INIT_LIST_HEAD(&queue->incoming); INIT_LIST_HEAD(&queue->outgoing); mutex_init(&queue->lock); spin_lock_init(&queue->list_lock); return 0; } EXPORT_SYMBOL_GPL(iio_dma_buffer_init); /** * iio_dma_buffer_exit() - Cleanup DMA buffer queue * @queue: Buffer to cleanup * * After this function has completed it is safe to free any resources that are * associated with the buffer and are accessed inside the callback operations. */ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) { unsigned int i; mutex_lock(&queue->lock); spin_lock_irq(&queue->list_lock); for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { if (!queue->fileio.blocks[i]) continue; queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; } INIT_LIST_HEAD(&queue->outgoing); spin_unlock_irq(&queue->list_lock); INIT_LIST_HEAD(&queue->incoming); for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { if (!queue->fileio.blocks[i]) continue; iio_buffer_block_put(queue->fileio.blocks[i]); queue->fileio.blocks[i] = NULL; } queue->fileio.active_block = NULL; queue->ops = NULL; mutex_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(iio_dma_buffer_exit); /** * iio_dma_buffer_release() - Release final buffer resources * @queue: Buffer to release * * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be * called in the buffers release callback implementation right before freeing * the memory associated with the buffer. 
*/ void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) { mutex_destroy(&queue->lock); } EXPORT_SYMBOL_GPL(iio_dma_buffer_release); MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>"); MODULE_DESCRIPTION("DMA buffer for the IIO framework"); MODULE_LICENSE("GPL v2");
linux-master
drivers/iio/buffer/industrialio-buffer-dma.c
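A minimal sketch of the driver-side contract the header comment above describes: implement submit() (and preferably abort()), and call iio_dma_buffer_block_done() once the hardware has filled a block. It assumes <linux/iio/buffer-dma.h>; the my_* names are placeholders, and the controller programming itself is elided.

static int my_submit(struct iio_dma_buffer_queue *queue,
		     struct iio_dma_buffer_block *block)
{
	/*
	 * Program the controller to fill block->vaddr (CPU view) or
	 * block->phys_addr (device view) with up to block->size bytes.
	 * (controller-specific code elided)
	 */
	return 0;
}

/* Completion path, e.g. called from the controller's IRQ handler */
static void my_transfer_complete(struct iio_dma_buffer_block *block,
				 unsigned int bytes)
{
	/* Must be a multiple of bytes_per_datum; 0 if the transfer never ran */
	block->bytes_used = bytes;
	iio_dma_buffer_block_done(block);
}

static const struct iio_dma_buffer_ops my_dma_ops = {
	.submit = my_submit,
	/* .abort is recommended; see iio_dma_buffer_block_list_abort() */
};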
// SPDX-License-Identifier: GPL-2.0-only #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/kfifo.h> #include <linux/mutex.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/buffer_impl.h> #include <linux/sched.h> #include <linux/poll.h> struct iio_kfifo { struct iio_buffer buffer; struct kfifo kf; struct mutex user_lock; int update_needed; }; #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, size_t bytes_per_datum, unsigned int length) { if ((length == 0) || (bytes_per_datum == 0)) return -EINVAL; /* * Make sure we don't overflow an unsigned int after kfifo rounds up to * the next power of 2. */ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) return -EINVAL; return __kfifo_alloc((struct __kfifo *)&buf->kf, length, bytes_per_datum, GFP_KERNEL); } static int iio_request_update_kfifo(struct iio_buffer *r) { int ret = 0; struct iio_kfifo *buf = iio_to_kfifo(r); mutex_lock(&buf->user_lock); if (buf->update_needed) { kfifo_free(&buf->kf); ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum, buf->buffer.length); if (ret >= 0) buf->update_needed = false; } else { kfifo_reset_out(&buf->kf); } mutex_unlock(&buf->user_lock); return ret; } static int iio_mark_update_needed_kfifo(struct iio_buffer *r) { struct iio_kfifo *kf = iio_to_kfifo(r); kf->update_needed = true; return 0; } static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) { if (r->bytes_per_datum != bpd) { r->bytes_per_datum = bpd; iio_mark_update_needed_kfifo(r); } return 0; } static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) { /* Avoid an invalid state */ if (length < 2) length = 2; if (r->length != length) { r->length = length; iio_mark_update_needed_kfifo(r); } return 0; } static int iio_store_to_kfifo(struct iio_buffer *r, const void *data) { int ret; struct iio_kfifo *kf = iio_to_kfifo(r); ret = kfifo_in(&kf->kf, data, 1); if (ret != 1) return -EBUSY; return 0; } static int iio_read_kfifo(struct iio_buffer *r, size_t n, char __user *buf) { int ret, copied; struct iio_kfifo *kf = iio_to_kfifo(r); if (mutex_lock_interruptible(&kf->user_lock)) return -ERESTARTSYS; if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf)) ret = -EINVAL; else ret = kfifo_to_user(&kf->kf, buf, n, &copied); mutex_unlock(&kf->user_lock); if (ret < 0) return ret; return copied; } static size_t iio_kfifo_buf_data_available(struct iio_buffer *r) { struct iio_kfifo *kf = iio_to_kfifo(r); size_t samples; mutex_lock(&kf->user_lock); samples = kfifo_len(&kf->kf); mutex_unlock(&kf->user_lock); return samples; } static void iio_kfifo_buffer_release(struct iio_buffer *buffer) { struct iio_kfifo *kf = iio_to_kfifo(buffer); mutex_destroy(&kf->user_lock); kfifo_free(&kf->kf); kfree(kf); } static size_t iio_kfifo_buf_space_available(struct iio_buffer *r) { struct iio_kfifo *kf = iio_to_kfifo(r); size_t avail; mutex_lock(&kf->user_lock); avail = kfifo_avail(&kf->kf); mutex_unlock(&kf->user_lock); return avail; } static int iio_kfifo_remove_from(struct iio_buffer *r, void *data) { int ret; struct iio_kfifo *kf = iio_to_kfifo(r); if (kfifo_size(&kf->kf) < 1) return -EBUSY; ret = kfifo_out(&kf->kf, data, 1); if (ret != 1) return -EBUSY; wake_up_interruptible_poll(&r->pollq, EPOLLOUT | EPOLLWRNORM); return 0; } static int iio_kfifo_write(struct iio_buffer *r, size_t n, const 
char __user *buf) { struct iio_kfifo *kf = iio_to_kfifo(r); int ret, copied; mutex_lock(&kf->user_lock); if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf)) ret = -EINVAL; else ret = kfifo_from_user(&kf->kf, buf, n, &copied); mutex_unlock(&kf->user_lock); if (ret) return ret; return copied; } static const struct iio_buffer_access_funcs kfifo_access_funcs = { .store_to = &iio_store_to_kfifo, .read = &iio_read_kfifo, .data_available = iio_kfifo_buf_data_available, .remove_from = &iio_kfifo_remove_from, .write = &iio_kfifo_write, .space_available = &iio_kfifo_buf_space_available, .request_update = &iio_request_update_kfifo, .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo, .set_length = &iio_set_length_kfifo, .release = &iio_kfifo_buffer_release, .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED, }; struct iio_buffer *iio_kfifo_allocate(void) { struct iio_kfifo *kf; kf = kzalloc(sizeof(*kf), GFP_KERNEL); if (!kf) return NULL; kf->update_needed = true; iio_buffer_init(&kf->buffer); kf->buffer.access = &kfifo_access_funcs; kf->buffer.length = 2; mutex_init(&kf->user_lock); return &kf->buffer; } EXPORT_SYMBOL(iio_kfifo_allocate); void iio_kfifo_free(struct iio_buffer *r) { iio_buffer_put(r); } EXPORT_SYMBOL(iio_kfifo_free); static void devm_iio_kfifo_release(struct device *dev, void *res) { iio_kfifo_free(*(struct iio_buffer **)res); } /** * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate() * @dev: Device to allocate kfifo buffer for * * RETURNS: * Pointer to allocated iio_buffer on success, NULL on failure. */ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) { struct iio_buffer **ptr, *r; ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; r = iio_kfifo_allocate(); if (r) { *ptr = r; devres_add(dev, ptr); } else { devres_free(ptr); } return r; } /** * devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device * @dev: Device object to which to attach the life-time of this kfifo buffer * @indio_dev: The device the buffer should be attached to * @setup_ops: The setup_ops required to configure the HW part of the buffer (optional) * @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer * * This function allocates a kfifo buffer via devm_iio_kfifo_allocate() and * attaches it to the IIO device via iio_device_attach_buffer(). * This is meant to be a bit of a short-hand/helper function as there are a few * drivers that seem to do this. */ int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, const struct iio_buffer_setup_ops *setup_ops, const struct iio_dev_attr **buffer_attrs) { struct iio_buffer *buffer; buffer = devm_iio_kfifo_allocate(dev); if (!buffer) return -ENOMEM; indio_dev->modes |= INDIO_BUFFER_SOFTWARE; indio_dev->setup_ops = setup_ops; buffer->attrs = buffer_attrs; return iio_device_attach_buffer(indio_dev, buffer); } EXPORT_SYMBOL_GPL(devm_iio_kfifo_buffer_setup_ext); MODULE_LICENSE("GPL");
linux-master
drivers/iio/buffer/kfifo_buf.c
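A hedged sketch of the short-hand the last helper above provides: one call attaches a software kfifo buffer to the device. my_postenable and my_probe are hypothetical; the setup_ops argument may also simply be NULL.

static int my_postenable(struct iio_dev *indio_dev)
{
	/* start the hardware pushing samples (hypothetical hook) */
	return 0;
}

static const struct iio_buffer_setup_ops my_setup_ops = {
	.postenable = my_postenable,
};

static int my_probe(struct device *dev, struct iio_dev *indio_dev)
{
	int ret;

	ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev,
					      &my_setup_ops, NULL);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}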
// SPDX-License-Identifier: GPL-2.0-only /* The industrial I/O callback buffer */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <linux/iio/iio.h> #include <linux/iio/buffer_impl.h> #include <linux/iio/consumer.h> struct iio_cb_buffer { struct iio_buffer buffer; int (*cb)(const void *data, void *private); void *private; struct iio_channel *channels; struct iio_dev *indio_dev; }; static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer) { return container_of(buffer, struct iio_cb_buffer, buffer); } static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data) { struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer); return cb_buff->cb(data, cb_buff->private); } static void iio_buffer_cb_release(struct iio_buffer *buffer) { struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer); bitmap_free(cb_buff->buffer.scan_mask); kfree(cb_buff); } static const struct iio_buffer_access_funcs iio_cb_access = { .store_to = &iio_buffer_cb_store_to, .release = &iio_buffer_cb_release, .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED, }; struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, int (*cb)(const void *data, void *private), void *private) { int ret; struct iio_cb_buffer *cb_buff; struct iio_channel *chan; if (!cb) { dev_err(dev, "Invalid arguments: A callback must be provided!\n"); return ERR_PTR(-EINVAL); } cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL); if (cb_buff == NULL) return ERR_PTR(-ENOMEM); iio_buffer_init(&cb_buff->buffer); cb_buff->private = private; cb_buff->cb = cb; cb_buff->buffer.access = &iio_cb_access; INIT_LIST_HEAD(&cb_buff->buffer.demux_list); cb_buff->channels = iio_channel_get_all(dev); if (IS_ERR(cb_buff->channels)) { ret = PTR_ERR(cb_buff->channels); goto error_free_cb_buff; } cb_buff->indio_dev = cb_buff->channels[0].indio_dev; cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength, GFP_KERNEL); if (cb_buff->buffer.scan_mask == NULL) { ret = -ENOMEM; goto error_release_channels; } chan = &cb_buff->channels[0]; while (chan->indio_dev) { if (chan->indio_dev != cb_buff->indio_dev) { ret = -EINVAL; goto error_free_scan_mask; } set_bit(chan->channel->scan_index, cb_buff->buffer.scan_mask); chan++; } return cb_buff; error_free_scan_mask: bitmap_free(cb_buff->buffer.scan_mask); error_release_channels: iio_channel_release_all(cb_buff->channels); error_free_cb_buff: kfree(cb_buff); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(iio_channel_get_all_cb); int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff, size_t watermark) { if (!watermark) return -EINVAL; cb_buff->buffer.watermark = watermark; return 0; } EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark); int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff) { return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer, NULL); } EXPORT_SYMBOL_GPL(iio_channel_start_all_cb); void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff) { iio_update_buffers(cb_buff->indio_dev, NULL, &cb_buff->buffer); } EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb); void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff) { iio_channel_release_all(cb_buff->channels); iio_buffer_put(&cb_buff->buffer); } EXPORT_SYMBOL_GPL(iio_channel_release_all_cb); struct iio_channel *iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer) { return cb_buffer->channels; } EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels); struct iio_dev *iio_channel_cb_get_iio_dev(const 
struct iio_cb_buffer *cb_buffer) { return cb_buffer->indio_dev; } EXPORT_SYMBOL_GPL(iio_channel_cb_get_iio_dev); MODULE_AUTHOR("Jonathan Cameron <[email protected]>"); MODULE_DESCRIPTION("Industrial I/O callback buffer"); MODULE_LICENSE("GPL");
linux-master
drivers/iio/buffer/industrialio-buffer-cb.c
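A hedged consumer-side sketch of the callback-buffer API above, built only from the exported functions; my_cb and my_start are hypothetical names.

static int my_cb(const void *data, void *private)
{
	/* "data" points at one scan worth of demuxed samples */
	return 0;
}

static int my_start(struct device *dev)
{
	struct iio_cb_buffer *cb_buff;
	int ret;

	cb_buff = iio_channel_get_all_cb(dev, my_cb, NULL);
	if (IS_ERR(cb_buff))
		return PTR_ERR(cb_buff);

	ret = iio_channel_start_all_cb(cb_buff);
	if (ret) {
		iio_channel_release_all_cb(cb_buff);
		return ret;
	}

	/* ... later: iio_channel_stop_all_cb(), then iio_channel_release_all_cb() ... */
	return 0;
}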
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2014-2015 Analog Devices Inc. * Author: Lars-Peter Clausen <[email protected]> */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/buffer_impl.h> #include <linux/iio/buffer-dma.h> #include <linux/iio/buffer-dmaengine.h> /* * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure * with the DMAengine framework. The generic IIO DMA buffer infrastructure is * used to manage the buffer memory and implement the IIO buffer operations * while the DMAengine framework is used to perform the DMA transfers. Combined * this results in a device independent fully functional DMA buffer * implementation that can be used by device drivers for peripherals which are * connected to a DMA controller which has a DMAengine driver implementation. */ struct dmaengine_buffer { struct iio_dma_buffer_queue queue; struct dma_chan *chan; struct list_head active; size_t align; size_t max_size; }; static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer( struct iio_buffer *buffer) { return container_of(buffer, struct dmaengine_buffer, queue.buffer); } static void iio_dmaengine_buffer_block_done(void *data, const struct dmaengine_result *result) { struct iio_dma_buffer_block *block = data; unsigned long flags; spin_lock_irqsave(&block->queue->list_lock, flags); list_del(&block->head); spin_unlock_irqrestore(&block->queue->list_lock, flags); block->bytes_used -= result->residue; iio_dma_buffer_block_done(block); } static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) { struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(&queue->buffer); struct dma_async_tx_descriptor *desc; dma_cookie_t cookie; block->bytes_used = min(block->size, dmaengine_buffer->max_size); block->bytes_used = round_down(block->bytes_used, dmaengine_buffer->align); desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!desc) return -ENOMEM; desc->callback_result = iio_dmaengine_buffer_block_done; desc->callback_param = block; cookie = dmaengine_submit(desc); if (dma_submit_error(cookie)) return dma_submit_error(cookie); spin_lock_irq(&dmaengine_buffer->queue.list_lock); list_add_tail(&block->head, &dmaengine_buffer->active); spin_unlock_irq(&dmaengine_buffer->queue.list_lock); dma_async_issue_pending(dmaengine_buffer->chan); return 0; } static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) { struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(&queue->buffer); dmaengine_terminate_sync(dmaengine_buffer->chan); iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active); } static void iio_dmaengine_buffer_release(struct iio_buffer *buf) { struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(buf); iio_dma_buffer_release(&dmaengine_buffer->queue); kfree(dmaengine_buffer); } static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = { .read = iio_dma_buffer_read, .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum, .set_length = iio_dma_buffer_set_length, .request_update = iio_dma_buffer_request_update, .enable = iio_dma_buffer_enable, .disable = iio_dma_buffer_disable, 
.data_available = iio_dma_buffer_data_available, .release = iio_dmaengine_buffer_release, .modes = INDIO_BUFFER_HARDWARE, .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK, }; static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = { .submit = iio_dmaengine_buffer_submit_block, .abort = iio_dmaengine_buffer_abort, }; static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(buffer); return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align); } static IIO_DEVICE_ATTR(length_align_bytes, 0444, iio_dmaengine_buffer_get_length_align, NULL, 0); static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = { &iio_dev_attr_length_align_bytes, NULL, }; /** * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine * @dev: Parent device for the buffer * @channel: DMA channel name, typically "rx". * * This allocates a new IIO buffer which internally uses the DMAengine framework * to perform its transfers. The parent device will be used to request the DMA * channel. * * Once done using the buffer iio_dmaengine_buffer_free() should be used to * release it. */ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, const char *channel) { struct dmaengine_buffer *dmaengine_buffer; unsigned int width, src_width, dest_width; struct dma_slave_caps caps; struct dma_chan *chan; int ret; dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL); if (!dmaengine_buffer) return ERR_PTR(-ENOMEM); chan = dma_request_chan(dev, channel); if (IS_ERR(chan)) { ret = PTR_ERR(chan); goto err_free; } ret = dma_get_slave_caps(chan, &caps); if (ret < 0) goto err_free; /* Needs to be aligned to the maximum of the minimums */ if (caps.src_addr_widths) src_width = __ffs(caps.src_addr_widths); else src_width = 1; if (caps.dst_addr_widths) dest_width = __ffs(caps.dst_addr_widths); else dest_width = 1; width = max(src_width, dest_width); INIT_LIST_HEAD(&dmaengine_buffer->active); dmaengine_buffer->chan = chan; dmaengine_buffer->align = width; dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev); iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev, &iio_dmaengine_default_ops); dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs; dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops; return &dmaengine_buffer->queue.buffer; err_free: kfree(dmaengine_buffer); return ERR_PTR(ret); } /** * iio_dmaengine_buffer_free() - Free dmaengine buffer * @buffer: Buffer to free * * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc(). */ static void iio_dmaengine_buffer_free(struct iio_buffer *buffer) { struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(buffer); iio_dma_buffer_exit(&dmaengine_buffer->queue); dma_release_channel(dmaengine_buffer->chan); iio_buffer_put(buffer); } static void __devm_iio_dmaengine_buffer_free(void *buffer) { iio_dmaengine_buffer_free(buffer); } /** * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc() * @dev: Parent device for the buffer * @channel: DMA channel name, typically "rx". * * This allocates a new IIO buffer which internally uses the DMAengine framework * to perform its transfers. The parent device will be used to request the DMA * channel. * * The buffer will be automatically de-allocated once the device gets destroyed. 
*/ static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, const char *channel) { struct iio_buffer *buffer; int ret; buffer = iio_dmaengine_buffer_alloc(dev, channel); if (IS_ERR(buffer)) return buffer; ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free, buffer); if (ret) return ERR_PTR(ret); return buffer; } /** * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device * @dev: Parent device for the buffer * @indio_dev: IIO device to which to attach this buffer. * @channel: DMA channel name, typically "rx". * * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc() * and attaches it to an IIO device with iio_device_attach_buffer(). * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the * IIO device. */ int devm_iio_dmaengine_buffer_setup(struct device *dev, struct iio_dev *indio_dev, const char *channel) { struct iio_buffer *buffer; buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent, channel); if (IS_ERR(buffer)) return PTR_ERR(buffer); indio_dev->modes |= INDIO_BUFFER_HARDWARE; return iio_device_attach_buffer(indio_dev, buffer); } EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup); MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>"); MODULE_DESCRIPTION("DMA buffer for the IIO framework"); MODULE_LICENSE("GPL");
linux-master
drivers/iio/buffer/industrialio-buffer-dmaengine.c
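The file above exposes devm_iio_dmaengine_buffer_setup() as the one call a peripheral driver needs. A minimal consumer sketch, assuming a hypothetical "foo_adc" platform driver and a DMA channel named "rx" (both invented for illustration; error handling kept minimal):

#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	/* Requests the "rx" DMA channel from the parent device, attaches
	 * the buffer and marks INDIO_BUFFER_HARDWARE as supported. */
	ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev, "rx");
	if (ret)
		return ret;

	/* A real driver would fill in channels and indio_dev->info here. */
	return devm_iio_device_register(&pdev->dev, indio_dev);
}

Everything is managed, so there is no explicit remove path: the devm action registered by the setup helper frees the buffer and releases the DMA channel when the device goes away.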
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2021 Intel Corporation #include <linux/bug.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/peci.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/unaligned.h> #include "internal.h" #define PECI_GET_DIB_CMD 0xf7 #define PECI_GET_DIB_WR_LEN 1 #define PECI_GET_DIB_RD_LEN 8 #define PECI_GET_TEMP_CMD 0x01 #define PECI_GET_TEMP_WR_LEN 1 #define PECI_GET_TEMP_RD_LEN 2 #define PECI_RDPKGCFG_CMD 0xa1 #define PECI_RDPKGCFG_WR_LEN 5 #define PECI_RDPKGCFG_RD_LEN_BASE 1 #define PECI_WRPKGCFG_CMD 0xa5 #define PECI_WRPKGCFG_WR_LEN_BASE 6 #define PECI_WRPKGCFG_RD_LEN 1 #define PECI_RDIAMSR_CMD 0xb1 #define PECI_RDIAMSR_WR_LEN 5 #define PECI_RDIAMSR_RD_LEN 9 #define PECI_WRIAMSR_CMD 0xb5 #define PECI_RDIAMSREX_CMD 0xd1 #define PECI_RDIAMSREX_WR_LEN 6 #define PECI_RDIAMSREX_RD_LEN 9 #define PECI_RDPCICFG_CMD 0x61 #define PECI_RDPCICFG_WR_LEN 6 #define PECI_RDPCICFG_RD_LEN 5 #define PECI_RDPCICFG_RD_LEN_MAX 24 #define PECI_WRPCICFG_CMD 0x65 #define PECI_RDPCICFGLOCAL_CMD 0xe1 #define PECI_RDPCICFGLOCAL_WR_LEN 5 #define PECI_RDPCICFGLOCAL_RD_LEN_BASE 1 #define PECI_WRPCICFGLOCAL_CMD 0xe5 #define PECI_WRPCICFGLOCAL_WR_LEN_BASE 6 #define PECI_WRPCICFGLOCAL_RD_LEN 1 #define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03 #define PECI_ENDPTCFG_TYPE_PCI 0x04 #define PECI_ENDPTCFG_TYPE_MMIO 0x05 #define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04 #define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05 #define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06 #define PECI_RDENDPTCFG_CMD 0xc1 #define PECI_RDENDPTCFG_PCI_WR_LEN 12 #define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE 10 #define PECI_RDENDPTCFG_MMIO_D_WR_LEN 14 #define PECI_RDENDPTCFG_MMIO_Q_WR_LEN 18 #define PECI_RDENDPTCFG_RD_LEN_BASE 1 #define PECI_WRENDPTCFG_CMD 0xc5 #define PECI_WRENDPTCFG_PCI_WR_LEN_BASE 13 #define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE 15 #define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE 19 #define PECI_WRENDPTCFG_RD_LEN 1 /* Device Specific Completion Code (CC) Definition */ #define PECI_CC_SUCCESS 0x40 #define PECI_CC_NEED_RETRY 0x80 #define PECI_CC_OUT_OF_RESOURCE 0x81 #define PECI_CC_UNAVAIL_RESOURCE 0x82 #define PECI_CC_INVALID_REQ 0x90 #define PECI_CC_MCA_ERROR 0x91 #define PECI_CC_CATASTROPHIC_MCA_ERROR 0x93 #define PECI_CC_FATAL_MCA_ERROR 0x94 #define PECI_CC_PARITY_ERR_GPSB_OR_PMSB 0x98 #define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR 0x9B #define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA 0x9C #define PECI_RETRY_BIT BIT(0) #define PECI_RETRY_TIMEOUT msecs_to_jiffies(700) #define PECI_RETRY_INTERVAL_MIN msecs_to_jiffies(1) #define PECI_RETRY_INTERVAL_MAX msecs_to_jiffies(128) static u8 peci_request_data_cc(struct peci_request *req) { return req->rx.buf[0]; } /** * peci_request_status() - return -errno based on PECI completion code * @req: the PECI request that contains response data with completion code * * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we * don't expect completion code in the response. 
* * Return: -errno */ int peci_request_status(struct peci_request *req) { u8 cc = peci_request_data_cc(req); if (cc != PECI_CC_SUCCESS) dev_dbg(&req->device->dev, "ret: %#02x\n", cc); switch (cc) { case PECI_CC_SUCCESS: return 0; case PECI_CC_NEED_RETRY: case PECI_CC_OUT_OF_RESOURCE: case PECI_CC_UNAVAIL_RESOURCE: return -EAGAIN; case PECI_CC_INVALID_REQ: return -EINVAL; case PECI_CC_MCA_ERROR: case PECI_CC_CATASTROPHIC_MCA_ERROR: case PECI_CC_FATAL_MCA_ERROR: case PECI_CC_PARITY_ERR_GPSB_OR_PMSB: case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR: case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA: return -EIO; } WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc); return -EIO; } EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI); static int peci_request_xfer(struct peci_request *req) { struct peci_device *device = req->device; struct peci_controller *controller = to_peci_controller(device->dev.parent); int ret; mutex_lock(&controller->bus_lock); ret = controller->ops->xfer(controller, device->addr, req); mutex_unlock(&controller->bus_lock); return ret; } static int peci_request_xfer_retry(struct peci_request *req) { long wait_interval = PECI_RETRY_INTERVAL_MIN; struct peci_device *device = req->device; struct peci_controller *controller = to_peci_controller(device->dev.parent); unsigned long start = jiffies; int ret; /* Don't try to use it for ping */ if (WARN_ON(req->tx.len == 0)) return 0; do { ret = peci_request_xfer(req); if (ret) { dev_dbg(&controller->dev, "xfer error: %d\n", ret); return ret; } if (peci_request_status(req) != -EAGAIN) return 0; /* Set the retry bit to indicate a retry attempt */ req->tx.buf[1] |= PECI_RETRY_BIT; if (schedule_timeout_interruptible(wait_interval)) return -ERESTARTSYS; wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX); } while (time_before(jiffies, start + PECI_RETRY_TIMEOUT)); dev_dbg(&controller->dev, "request timed out\n"); return -ETIMEDOUT; } /** * peci_request_alloc() - allocate &struct peci_requests * @device: PECI device to which request is going to be sent * @tx_len: TX length * @rx_len: RX length * * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise. */ struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len) { struct peci_request *req; /* * TX and RX buffers are fixed length members of peci_request, this is * just a warn for developers to make sure to expand the buffers (or * change the allocation method) if we go over the current limit. */ if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE)) return NULL; /* * PECI controllers that we are using now don't support DMA, this * should be converted to DMA API once support for controllers that do * allow it is added to avoid an extra copy. 
*/ req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return NULL; req->device = device; req->tx.len = tx_len; req->rx.len = rx_len; return req; } EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI); /** * peci_request_free() - free peci_request * @req: the PECI request to be freed */ void peci_request_free(struct peci_request *req) { kfree(req); } EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI); struct peci_request *peci_xfer_get_dib(struct peci_device *device) { struct peci_request *req; int ret; req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN); if (!req) return ERR_PTR(-ENOMEM); req->tx.buf[0] = PECI_GET_DIB_CMD; ret = peci_request_xfer(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI); struct peci_request *peci_xfer_get_temp(struct peci_device *device) { struct peci_request *req; int ret; req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN); if (!req) return ERR_PTR(-ENOMEM); req->tx.buf[0] = PECI_GET_TEMP_CMD; ret = peci_request_xfer(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI); static struct peci_request * __pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len) { struct peci_request *req; int ret; req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len); if (!req) return ERR_PTR(-ENOMEM); req->tx.buf[0] = PECI_RDPKGCFG_CMD; req->tx.buf[1] = 0; req->tx.buf[2] = index; put_unaligned_le16(param, &req->tx.buf[3]); ret = peci_request_xfer_retry(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg) { return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12; } static struct peci_request * __pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len) { struct peci_request *req; u32 pci_addr; int ret; req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN, PECI_RDPCICFGLOCAL_RD_LEN_BASE + len); if (!req) return ERR_PTR(-ENOMEM); pci_addr = __get_pci_addr(bus, dev, func, reg); req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD; req->tx.buf[1] = 0; put_unaligned_le24(pci_addr, &req->tx.buf[2]); ret = peci_request_xfer_retry(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } static struct peci_request * __ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg, u8 bus, u8 dev, u8 func, u16 reg, u8 len) { struct peci_request *req; u32 pci_addr; int ret; req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN, PECI_RDENDPTCFG_RD_LEN_BASE + len); if (!req) return ERR_PTR(-ENOMEM); pci_addr = __get_pci_addr(bus, dev, func, reg); req->tx.buf[0] = PECI_RDENDPTCFG_CMD; req->tx.buf[1] = 0; req->tx.buf[2] = msg_type; req->tx.buf[3] = 0; req->tx.buf[4] = 0; req->tx.buf[5] = 0; req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; req->tx.buf[7] = seg; /* PCI Segment */ put_unaligned_le32(pci_addr, &req->tx.buf[8]); ret = peci_request_xfer_retry(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } static struct peci_request * __ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg, u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len) { struct peci_request *req; int ret; req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len); if (!req) return ERR_PTR(-ENOMEM); req->tx.buf[0] = PECI_RDENDPTCFG_CMD; req->tx.buf[1] = 0; req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO; 
req->tx.buf[3] = 0; /* Endpoint ID */ req->tx.buf[4] = 0; /* Reserved */ req->tx.buf[5] = bar; req->tx.buf[6] = addr_type; req->tx.buf[7] = seg; /* PCI Segment */ req->tx.buf[8] = PCI_DEVFN(dev, func); req->tx.buf[9] = bus; /* PCI Bus */ if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D) put_unaligned_le32(offset, &req->tx.buf[10]); else put_unaligned_le64(offset, &req->tx.buf[10]); ret = peci_request_xfer_retry(req); if (ret) { peci_request_free(req); return ERR_PTR(ret); } return req; } u8 peci_request_data_readb(struct peci_request *req) { return req->rx.buf[1]; } EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI); u16 peci_request_data_readw(struct peci_request *req) { return get_unaligned_le16(&req->rx.buf[1]); } EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI); u32 peci_request_data_readl(struct peci_request *req) { return get_unaligned_le32(&req->rx.buf[1]); } EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI); u64 peci_request_data_readq(struct peci_request *req) { return get_unaligned_le64(&req->rx.buf[1]); } EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI); u64 peci_request_dib_read(struct peci_request *req) { return get_unaligned_le64(&req->rx.buf[0]); } EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI); s16 peci_request_temp_read(struct peci_request *req) { return get_unaligned_le16(&req->rx.buf[0]); } EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI); #define __read_pkg_config(x, type) \ struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \ { \ return __pkg_cfg_read(device, index, param, sizeof(type)); \ } \ EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI) __read_pkg_config(readb, u8); __read_pkg_config(readw, u16); __read_pkg_config(readl, u32); __read_pkg_config(readq, u64); #define __read_pci_config_local(x, type) \ struct peci_request * \ peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \ { \ return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \ } \ EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI) __read_pci_config_local(readb, u8); __read_pci_config_local(readw, u16); __read_pci_config_local(readl, u32); #define __read_ep_pci_config(x, msg_type, type) \ struct peci_request * \ peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \ { \ return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \ } \ EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI) __read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8); __read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16); __read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32); __read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8); __read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16); __read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32); #define __read_ep_mmio(x, y, addr_type, type1, type2) \ struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \ u8 bus, u8 dev, u8 func, u64 offset) \ { \ return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \ offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \ sizeof(type2)); \ } \ EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI) __read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32); __read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);
linux-master
drivers/peci/request.c
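The exported transfer helpers hand back a struct peci_request whose completion code still has to be checked by the caller. A sketch of the expected calling pattern (example_read_pkg_cfg() is a hypothetical wrapper; the retry with PECI_RETRY_BIT and exponential backoff has already happened inside peci_request_xfer_retry() by the time the request is returned):

static int example_read_pkg_cfg(struct peci_device *device, u8 index,
				u16 param, u32 *val)
{
	struct peci_request *req;
	int ret;

	req = peci_xfer_pkg_cfg_readl(device, index, param);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Maps the device completion code (0x40 = success, 0x8x = retry
	 * category, 0x9x = error category) to a -errno value. */
	ret = peci_request_status(req);
	if (!ret)
		*val = peci_request_data_readl(req);

	peci_request_free(req);
	return ret;
}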
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2021 Intel Corporation #include <linux/device.h> #include <linux/kernel.h> #include <linux/peci.h> #include "internal.h" static int rescan_controller(struct device *dev, void *data) { if (dev->type != &peci_controller_type) return 0; return peci_controller_scan_devices(to_peci_controller(dev)); } static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count) { bool res; int ret; ret = kstrtobool(buf, &res); if (ret) return ret; if (!res) return count; ret = bus_for_each_dev(&peci_bus_type, NULL, NULL, rescan_controller); if (ret) return ret; return count; } static BUS_ATTR_WO(rescan); static struct attribute *peci_bus_attrs[] = { &bus_attr_rescan.attr, NULL }; static const struct attribute_group peci_bus_group = { .attrs = peci_bus_attrs, }; const struct attribute_group *peci_bus_groups[] = { &peci_bus_group, NULL }; static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct peci_device *device = to_peci_device(dev); bool res; int ret; ret = kstrtobool(buf, &res); if (ret) return ret; if (res && device_remove_file_self(dev, attr)) peci_device_destroy(device); return count; } static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0200, NULL, remove_store); static struct attribute *peci_device_attrs[] = { &dev_attr_remove.attr, NULL }; static const struct attribute_group peci_device_group = { .attrs = peci_device_attrs, }; const struct attribute_group *peci_device_groups[] = { &peci_device_group, NULL };
linux-master
drivers/peci/sysfs.c
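Both attributes are boolean triggers parsed with kstrtobool(). A short usage note, kept as a comment since the interaction happens from shell; the device name "0-30" assumes controller 0 and PECI address 0x30, following the "%d-%02x" naming used by the PECI core:

/*
 *   echo 1 > /sys/bus/peci/rescan                # rescan all controllers
 *   echo 1 > /sys/bus/peci/devices/0-30/remove   # detach a single device
 *
 * A value kstrtobool() rejects yields -EINVAL; a false value is accepted
 * but intentionally does nothing. remove_store() goes through
 * device_remove_file_self() so the attribute can safely delete its own
 * device.
 */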
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2021 Intel Corporation #include <linux/auxiliary_bus.h> #include <linux/module.h> #include <linux/peci.h> #include <linux/peci-cpu.h> #include <linux/slab.h> #include "internal.h" /** * peci_temp_read() - read the maximum die temperature from PECI target device * @device: PECI device to which request is going to be sent * @temp_raw: where to store the read temperature * * It uses GetTemp PECI command. * * Return: 0 if succeeded, other values in case errors. */ int peci_temp_read(struct peci_device *device, s16 *temp_raw) { struct peci_request *req; req = peci_xfer_get_temp(device); if (IS_ERR(req)) return PTR_ERR(req); *temp_raw = peci_request_temp_read(req); peci_request_free(req); return 0; } EXPORT_SYMBOL_NS_GPL(peci_temp_read, PECI_CPU); /** * peci_pcs_read() - read PCS register * @device: PECI device to which request is going to be sent * @index: PCS index * @param: PCS parameter * @data: where to store the read data * * It uses RdPkgConfig PECI command. * * Return: 0 if succeeded, other values in case errors. */ int peci_pcs_read(struct peci_device *device, u8 index, u16 param, u32 *data) { struct peci_request *req; int ret; req = peci_xfer_pkg_cfg_readl(device, index, param); if (IS_ERR(req)) return PTR_ERR(req); ret = peci_request_status(req); if (ret) goto out_req_free; *data = peci_request_data_readl(req); out_req_free: peci_request_free(req); return ret; } EXPORT_SYMBOL_NS_GPL(peci_pcs_read, PECI_CPU); /** * peci_pci_local_read() - read 32-bit memory location using raw address * @device: PECI device to which request is going to be sent * @bus: bus * @dev: device * @func: function * @reg: register * @data: where to store the read data * * It uses RdPCIConfigLocal PECI command. * * Return: 0 if succeeded, other values in case errors. */ int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u32 *data) { struct peci_request *req; int ret; req = peci_xfer_pci_cfg_local_readl(device, bus, dev, func, reg); if (IS_ERR(req)) return PTR_ERR(req); ret = peci_request_status(req); if (ret) goto out_req_free; *data = peci_request_data_readl(req); out_req_free: peci_request_free(req); return ret; } EXPORT_SYMBOL_NS_GPL(peci_pci_local_read, PECI_CPU); /** * peci_ep_pci_local_read() - read 32-bit memory location using raw address * @device: PECI device to which request is going to be sent * @seg: PCI segment * @bus: bus * @dev: device * @func: function * @reg: register * @data: where to store the read data * * Like &peci_pci_local_read, but it uses RdEndpointConfig PECI command. * * Return: 0 if succeeded, other values in case errors. */ int peci_ep_pci_local_read(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg, u32 *data) { struct peci_request *req; int ret; req = peci_xfer_ep_pci_cfg_local_readl(device, seg, bus, dev, func, reg); if (IS_ERR(req)) return PTR_ERR(req); ret = peci_request_status(req); if (ret) goto out_req_free; *data = peci_request_data_readl(req); out_req_free: peci_request_free(req); return ret; } EXPORT_SYMBOL_NS_GPL(peci_ep_pci_local_read, PECI_CPU); /** * peci_mmio_read() - read 32-bit memory location using 64-bit bar offset address * @device: PECI device to which request is going to be sent * @bar: PCI bar * @seg: PCI segment * @bus: bus * @dev: device * @func: function * @address: 64-bit MMIO address * @data: where to store the read data * * It uses RdEndpointConfig PECI command. * * Return: 0 if succeeded, other values in case errors. 
*/ int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg, u8 bus, u8 dev, u8 func, u64 address, u32 *data) { struct peci_request *req; int ret; req = peci_xfer_ep_mmio64_readl(device, bar, seg, bus, dev, func, address); if (IS_ERR(req)) return PTR_ERR(req); ret = peci_request_status(req); if (ret) goto out_req_free; *data = peci_request_data_readl(req); out_req_free: peci_request_free(req); return ret; } EXPORT_SYMBOL_NS_GPL(peci_mmio_read, PECI_CPU); static const char * const peci_adev_types[] = { "cputemp", "dimmtemp", }; struct peci_cpu { struct peci_device *device; const struct peci_device_id *id; }; static void adev_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); kfree(adev->name); kfree(adev); } static struct auxiliary_device *adev_alloc(struct peci_cpu *priv, int idx) { struct peci_controller *controller = to_peci_controller(priv->device->dev.parent); struct auxiliary_device *adev; const char *name; int ret; adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return ERR_PTR(-ENOMEM); name = kasprintf(GFP_KERNEL, "%s.%s", peci_adev_types[idx], (const char *)priv->id->data); if (!name) { ret = -ENOMEM; goto free_adev; } adev->name = name; adev->dev.parent = &priv->device->dev; adev->dev.release = adev_release; adev->id = (controller->id << 16) | (priv->device->addr); ret = auxiliary_device_init(adev); if (ret) goto free_name; return adev; free_name: kfree(name); free_adev: kfree(adev); return ERR_PTR(ret); } static void unregister_adev(void *_adev) { struct auxiliary_device *adev = _adev; auxiliary_device_delete(adev); auxiliary_device_uninit(adev); } static int devm_adev_add(struct device *dev, int idx) { struct peci_cpu *priv = dev_get_drvdata(dev); struct auxiliary_device *adev; int ret; adev = adev_alloc(priv, idx); if (IS_ERR(adev)) return PTR_ERR(adev); ret = auxiliary_device_add(adev); if (ret) { auxiliary_device_uninit(adev); return ret; } ret = devm_add_action_or_reset(&priv->device->dev, unregister_adev, adev); if (ret) return ret; return 0; } static void peci_cpu_add_adevices(struct peci_cpu *priv) { struct device *dev = &priv->device->dev; int ret, i; for (i = 0; i < ARRAY_SIZE(peci_adev_types); i++) { ret = devm_adev_add(dev, i); if (ret) { dev_warn(dev, "Failed to register PECI auxiliary: %s, ret = %d\n", peci_adev_types[i], ret); continue; } } } static int peci_cpu_probe(struct peci_device *device, const struct peci_device_id *id) { struct device *dev = &device->dev; struct peci_cpu *priv; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev_set_drvdata(dev, priv); priv->device = device; priv->id = id; peci_cpu_add_adevices(priv); return 0; } static const struct peci_device_id peci_cpu_device_ids[] = { { /* Haswell Xeon */ .family = 6, .model = INTEL_FAM6_HASWELL_X, .data = "hsx", }, { /* Broadwell Xeon */ .family = 6, .model = INTEL_FAM6_BROADWELL_X, .data = "bdx", }, { /* Broadwell Xeon D */ .family = 6, .model = INTEL_FAM6_BROADWELL_D, .data = "bdxd", }, { /* Skylake Xeon */ .family = 6, .model = INTEL_FAM6_SKYLAKE_X, .data = "skx", }, { /* Icelake Xeon */ .family = 6, .model = INTEL_FAM6_ICELAKE_X, .data = "icx", }, { /* Icelake Xeon D */ .family = 6, .model = INTEL_FAM6_ICELAKE_D, .data = "icxd", }, { /* Sapphire Rapids Xeon */ .family = 6, .model = INTEL_FAM6_SAPPHIRERAPIDS_X, .data = "spr", }, { } }; MODULE_DEVICE_TABLE(peci, peci_cpu_device_ids); static struct peci_driver peci_cpu_driver = { .probe = peci_cpu_probe, .id_table = peci_cpu_device_ids, .driver = { .name = "peci-cpu", }, }; 
module_peci_driver(peci_cpu_driver); MODULE_AUTHOR("Iwona Winiarska <[email protected]>"); MODULE_DESCRIPTION("PECI CPU driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(PECI);
linux-master
drivers/peci/cpu.c
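The auxiliary devices created above are what PECI sensor drivers bind to. A hedged sketch of such a consumer: the device names are "<type>.<platform suffix>" (e.g. "cputemp.hsx"), and the auxiliary bus matches against "<module name>.<device name>", so the id strings below assume the conventional "peci_cpu" module prefix; the driver itself is hypothetical:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int example_cputemp_probe(struct auxiliary_device *adev,
				 const struct auxiliary_device_id *id)
{
	/* adev->id packs (controller->id << 16) | device->addr, so every
	 * socket on every controller is a distinct instance. */
	return 0;
}

static const struct auxiliary_device_id example_cputemp_ids[] = {
	{ .name = "peci_cpu.cputemp.hsx" },
	{ .name = "peci_cpu.cputemp.icx" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_cputemp_ids);

static struct auxiliary_driver example_cputemp_driver = {
	.probe = example_cputemp_probe,
	.id_table = example_cputemp_ids,
};
module_auxiliary_driver(example_cputemp_driver);

MODULE_LICENSE("GPL");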
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2018-2021 Intel Corporation #include <linux/bug.h> #include <linux/device.h> #include <linux/export.h> #include <linux/idr.h> #include <linux/module.h> #include <linux/of.h> #include <linux/peci.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/slab.h> #include "internal.h" static DEFINE_IDA(peci_controller_ida); static void peci_controller_dev_release(struct device *dev) { struct peci_controller *controller = to_peci_controller(dev); mutex_destroy(&controller->bus_lock); ida_free(&peci_controller_ida, controller->id); kfree(controller); } struct device_type peci_controller_type = { .release = peci_controller_dev_release, }; int peci_controller_scan_devices(struct peci_controller *controller) { int ret; u8 addr; for (addr = PECI_BASE_ADDR; addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX; addr++) { ret = peci_device_create(controller, addr); if (ret) return ret; } return 0; } static struct peci_controller *peci_controller_alloc(struct device *dev, const struct peci_controller_ops *ops) { struct peci_controller *controller; int ret; if (!ops->xfer) return ERR_PTR(-EINVAL); controller = kzalloc(sizeof(*controller), GFP_KERNEL); if (!controller) return ERR_PTR(-ENOMEM); ret = ida_alloc_max(&peci_controller_ida, U8_MAX, GFP_KERNEL); if (ret < 0) goto err; controller->id = ret; controller->ops = ops; controller->dev.parent = dev; controller->dev.bus = &peci_bus_type; controller->dev.type = &peci_controller_type; device_initialize(&controller->dev); mutex_init(&controller->bus_lock); return controller; err: kfree(controller); return ERR_PTR(ret); } static int unregister_child(struct device *dev, void *dummy) { peci_device_destroy(to_peci_device(dev)); return 0; } static void unregister_controller(void *_controller) { struct peci_controller *controller = _controller; /* * Detach any active PECI devices. This can't fail, thus we do not * check the returned value. */ device_for_each_child_reverse(&controller->dev, NULL, unregister_child); device_unregister(&controller->dev); fwnode_handle_put(controller->dev.fwnode); pm_runtime_disable(&controller->dev); } /** * devm_peci_controller_add() - add PECI controller * @dev: device for devm operations * @ops: pointer to controller specific methods * * In final stage of its probe(), peci_controller driver calls * devm_peci_controller_add() to register itself with the PECI bus. * * Return: Pointer to the newly allocated controller or ERR_PTR() in case of failure. */ struct peci_controller *devm_peci_controller_add(struct device *dev, const struct peci_controller_ops *ops) { struct peci_controller *controller; int ret; controller = peci_controller_alloc(dev, ops); if (IS_ERR(controller)) return controller; ret = dev_set_name(&controller->dev, "peci-%d", controller->id); if (ret) goto err_put; pm_runtime_no_callbacks(&controller->dev); pm_suspend_ignore_children(&controller->dev, true); pm_runtime_enable(&controller->dev); device_set_node(&controller->dev, fwnode_handle_get(dev_fwnode(dev))); ret = device_add(&controller->dev); if (ret) goto err_fwnode; ret = devm_add_action_or_reset(dev, unregister_controller, controller); if (ret) return ERR_PTR(ret); /* * Ignoring retval since failures during scan are non-critical for * controller itself. 
*/ peci_controller_scan_devices(controller); return controller; err_fwnode: fwnode_handle_put(controller->dev.fwnode); pm_runtime_disable(&controller->dev); err_put: put_device(&controller->dev); return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(devm_peci_controller_add, PECI); static const struct peci_device_id * peci_bus_match_device_id(const struct peci_device_id *id, struct peci_device *device) { while (id->family != 0) { if (id->family == device->info.family && id->model == device->info.model) return id; id++; } return NULL; } static int peci_bus_device_match(struct device *dev, struct device_driver *drv) { struct peci_device *device = to_peci_device(dev); struct peci_driver *peci_drv = to_peci_driver(drv); if (dev->type != &peci_device_type) return 0; return !!peci_bus_match_device_id(peci_drv->id_table, device); } static int peci_bus_device_probe(struct device *dev) { struct peci_device *device = to_peci_device(dev); struct peci_driver *driver = to_peci_driver(dev->driver); return driver->probe(device, peci_bus_match_device_id(driver->id_table, device)); } static void peci_bus_device_remove(struct device *dev) { struct peci_device *device = to_peci_device(dev); struct peci_driver *driver = to_peci_driver(dev->driver); if (driver->remove) driver->remove(device); } struct bus_type peci_bus_type = { .name = "peci", .match = peci_bus_device_match, .probe = peci_bus_device_probe, .remove = peci_bus_device_remove, .bus_groups = peci_bus_groups, }; static int __init peci_init(void) { int ret; ret = bus_register(&peci_bus_type); if (ret < 0) { pr_err("peci: failed to register PECI bus type!\n"); return ret; } return 0; } module_init(peci_init); static void __exit peci_exit(void) { bus_unregister(&peci_bus_type); } module_exit(peci_exit); MODULE_AUTHOR("Jason M Bills <[email protected]>"); MODULE_AUTHOR("Jae Hyun Yoo <[email protected]>"); MODULE_AUTHOR("Iwona Winiarska <[email protected]>"); MODULE_DESCRIPTION("PECI bus core module"); MODULE_LICENSE("GPL");
linux-master
drivers/peci/core.c
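devm_peci_controller_add() is the whole registration surface for a controller driver; the core serializes calls into ->xfer() with bus_lock, so the callback only has to run one transaction. A minimal, hypothetical skeleton (names invented; a real implementation programs hardware where the stub returns -EIO), echoed by the real controller drivers further below:

#include <linux/err.h>
#include <linux/peci.h>
#include <linux/platform_device.h>

static int example_xfer(struct peci_controller *controller, u8 addr,
			struct peci_request *req)
{
	/* Send req->tx.len bytes from req->tx.buf to the target at addr,
	 * then fill req->rx.buf with req->rx.len bytes. Return -errno on
	 * failure; tx.len == rx.len == 0 is a Ping(). */
	return -EIO;
}

static const struct peci_controller_ops example_ops = {
	.xfer = example_xfer,
};

static int example_peci_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;

	controller = devm_peci_controller_add(&pdev->dev, &example_ops);

	/* Registration already kicked off a device scan; scan failures
	 * are non-fatal for the controller itself. */
	return PTR_ERR_OR_ZERO(controller);
}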
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2018-2021 Intel Corporation #include <linux/bitfield.h> #include <linux/peci.h> #include <linux/peci-cpu.h> #include <linux/slab.h> #include "internal.h" /* * PECI device can be removed using sysfs, but the removal can also happen as * a result of controller being removed. * Mutex is used to protect PECI device from being double-deleted. */ static DEFINE_MUTEX(peci_device_del_lock); #define REVISION_NUM_MASK GENMASK(15, 8) static int peci_get_revision(struct peci_device *device, u8 *revision) { struct peci_request *req; u64 dib; req = peci_xfer_get_dib(device); if (IS_ERR(req)) return PTR_ERR(req); /* * PECI device may be in a state where it is unable to return a proper * DIB, in which case it returns 0 as DIB value. * Let's treat this as an error to avoid carrying on with the detection * using invalid revision. */ dib = peci_request_dib_read(req); if (dib == 0) { peci_request_free(req); return -EIO; } *revision = FIELD_GET(REVISION_NUM_MASK, dib); peci_request_free(req); return 0; } static int peci_get_cpu_id(struct peci_device *device, u32 *cpu_id) { struct peci_request *req; int ret; req = peci_xfer_pkg_cfg_readl(device, PECI_PCS_PKG_ID, PECI_PKG_ID_CPU_ID); if (IS_ERR(req)) return PTR_ERR(req); ret = peci_request_status(req); if (ret) goto out_req_free; *cpu_id = peci_request_data_readl(req); out_req_free: peci_request_free(req); return ret; } static unsigned int peci_x86_cpu_family(unsigned int sig) { unsigned int x86; x86 = (sig >> 8) & 0xf; if (x86 == 0xf) x86 += (sig >> 20) & 0xff; return x86; } static unsigned int peci_x86_cpu_model(unsigned int sig) { unsigned int fam, model; fam = peci_x86_cpu_family(sig); model = (sig >> 4) & 0xf; if (fam >= 0x6) model += ((sig >> 16) & 0xf) << 4; return model; } static int peci_device_info_init(struct peci_device *device) { u8 revision; u32 cpu_id; int ret; ret = peci_get_cpu_id(device, &cpu_id); if (ret) return ret; device->info.family = peci_x86_cpu_family(cpu_id); device->info.model = peci_x86_cpu_model(cpu_id); ret = peci_get_revision(device, &revision); if (ret) return ret; device->info.peci_revision = revision; device->info.socket_id = device->addr - PECI_BASE_ADDR; return 0; } static int peci_detect(struct peci_controller *controller, u8 addr) { /* * PECI Ping is a command encoded by tx_len = 0, rx_len = 0. * We expect correct Write FCS if the device at the target address * is able to respond. */ struct peci_request req = { 0 }; int ret; mutex_lock(&controller->bus_lock); ret = controller->ops->xfer(controller, addr, &req); mutex_unlock(&controller->bus_lock); return ret; } static bool peci_addr_valid(u8 addr) { return addr >= PECI_BASE_ADDR && addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX; } static int peci_dev_exists(struct device *dev, void *data) { struct peci_device *device = to_peci_device(dev); u8 *addr = data; if (device->addr == *addr) return -EBUSY; return 0; } int peci_device_create(struct peci_controller *controller, u8 addr) { struct peci_device *device; int ret; if (!peci_addr_valid(addr)) return -EINVAL; /* Check if we have already detected this device before. */ ret = device_for_each_child(&controller->dev, &addr, peci_dev_exists); if (ret) return 0; ret = peci_detect(controller, addr); if (ret) { /* * Device not present or host state doesn't allow successful * detection at this time. 
*/ if (ret == -EIO || ret == -ETIMEDOUT) return 0; return ret; } device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; device_initialize(&device->dev); device->addr = addr; device->dev.parent = &controller->dev; device->dev.bus = &peci_bus_type; device->dev.type = &peci_device_type; ret = peci_device_info_init(device); if (ret) goto err_put; ret = dev_set_name(&device->dev, "%d-%02x", controller->id, device->addr); if (ret) goto err_put; ret = device_add(&device->dev); if (ret) goto err_put; return 0; err_put: put_device(&device->dev); return ret; } void peci_device_destroy(struct peci_device *device) { mutex_lock(&peci_device_del_lock); if (!device->deleted) { device_unregister(&device->dev); device->deleted = true; } mutex_unlock(&peci_device_del_lock); } int __peci_driver_register(struct peci_driver *driver, struct module *owner, const char *mod_name) { driver->driver.bus = &peci_bus_type; driver->driver.owner = owner; driver->driver.mod_name = mod_name; if (!driver->probe) { pr_err("peci: trying to register driver without probe callback\n"); return -EINVAL; } if (!driver->id_table) { pr_err("peci: trying to register driver without device id table\n"); return -EINVAL; } return driver_register(&driver->driver); } EXPORT_SYMBOL_NS_GPL(__peci_driver_register, PECI); void peci_driver_unregister(struct peci_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_NS_GPL(peci_driver_unregister, PECI); static void peci_device_release(struct device *dev) { struct peci_device *device = to_peci_device(dev); kfree(device); } struct device_type peci_device_type = { .groups = peci_device_groups, .release = peci_device_release, };
linux-master
drivers/peci/device.c
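The family/model decode above follows the standard x86 CPUID signature layout. A worked example; the concrete signature value is chosen for illustration:

/*
 * For sig = 0x000606a6 (an Icelake Xeon style signature):
 *
 *   family = (sig >> 8) & 0xf = 0x6; family != 0xf, so no extended add
 *   model  = (sig >> 4) & 0xf = 0xa
 *   family >= 6, so model += ((sig >> 16) & 0xf) << 4 = 0x60
 *   model  = 0x6a, matching INTEL_FAM6_ICELAKE_X in peci-cpu's id table
 *
 * The socket id then falls straight out of addressing: PECI address 0x30
 * is socket 0, 0x31 is socket 1, and so on.
 */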
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Nuvoton Technology corporation #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/of.h> #include <linux/peci.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> /* NPCM GCR module */ #define NPCM_INTCR3_OFFSET 0x9C #define NPCM_INTCR3_PECIVSEL BIT(19) /* NPCM PECI Registers */ #define NPCM_PECI_CTL_STS 0x00 #define NPCM_PECI_RD_LENGTH 0x04 #define NPCM_PECI_ADDR 0x08 #define NPCM_PECI_CMD 0x0C #define NPCM_PECI_CTL2 0x10 #define NPCM_PECI_WR_LENGTH 0x1C #define NPCM_PECI_PDDR 0x2C #define NPCM_PECI_DAT_INOUT(n) (0x100 + ((n) * 4)) #define NPCM_PECI_MAX_REG 0x200 /* NPCM_PECI_CTL_STS - 0x00 : Control Register */ #define NPCM_PECI_CTRL_DONE_INT_EN BIT(6) #define NPCM_PECI_CTRL_ABRT_ERR BIT(4) #define NPCM_PECI_CTRL_CRC_ERR BIT(3) #define NPCM_PECI_CTRL_DONE BIT(1) #define NPCM_PECI_CTRL_START_BUSY BIT(0) /* NPCM_PECI_RD_LENGTH - 0x04 : Command Register */ #define NPCM_PECI_RD_LEN_MASK GENMASK(6, 0) /* NPCM_PECI_CMD - 0x10 : Command Register */ #define NPCM_PECI_CTL2_MASK GENMASK(7, 6) /* NPCM_PECI_WR_LENGTH - 0x1C : Command Register */ #define NPCM_PECI_WR_LEN_MASK GENMASK(6, 0) /* NPCM_PECI_PDDR - 0x2C : Command Register */ #define NPCM_PECI_PDDR_MASK GENMASK(4, 0) #define NPCM_PECI_INT_MASK (NPCM_PECI_CTRL_ABRT_ERR | \ NPCM_PECI_CTRL_CRC_ERR | \ NPCM_PECI_CTRL_DONE) #define NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC (50 * USEC_PER_MSEC) #define NPCM_PECI_IDLE_CHECK_INTERVAL_USEC (10 * USEC_PER_MSEC) #define NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT 1000 #define NPCM_PECI_CMD_TIMEOUT_MS_MAX 60000 #define NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT 15 #define NPCM_PECI_PULL_DOWN_DEFAULT 0 struct npcm_peci { u32 cmd_timeout_ms; struct completion xfer_complete; struct regmap *regmap; u32 status; spinlock_t lock; /* to sync completion status handling */ struct peci_controller *controller; struct device *dev; struct clk *clk; int irq; }; static int npcm_peci_xfer(struct peci_controller *controller, u8 addr, struct peci_request *req) { struct npcm_peci *priv = dev_get_drvdata(controller->dev.parent); unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms); unsigned int msg_rd; u32 cmd_sts; int i, ret; /* Check command sts and bus idle state */ ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts, !(cmd_sts & NPCM_PECI_CTRL_START_BUSY), NPCM_PECI_IDLE_CHECK_INTERVAL_USEC, NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC); if (ret) return ret; /* -ETIMEDOUT */ spin_lock_irq(&priv->lock); reinit_completion(&priv->xfer_complete); regmap_write(priv->regmap, NPCM_PECI_ADDR, addr); regmap_write(priv->regmap, NPCM_PECI_RD_LENGTH, NPCM_PECI_WR_LEN_MASK & req->rx.len); regmap_write(priv->regmap, NPCM_PECI_WR_LENGTH, NPCM_PECI_WR_LEN_MASK & req->tx.len); if (req->tx.len) { regmap_write(priv->regmap, NPCM_PECI_CMD, req->tx.buf[0]); for (i = 0; i < (req->tx.len - 1); i++) regmap_write(priv->regmap, NPCM_PECI_DAT_INOUT(i), req->tx.buf[i + 1]); } #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) dev_dbg(priv->dev, "addr : %#02x, tx.len : %#02x, rx.len : %#02x\n", addr, req->tx.len, req->rx.len); print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len); #endif priv->status = 0; regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_CTRL_START_BUSY, NPCM_PECI_CTRL_START_BUSY); spin_unlock_irq(&priv->lock); ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout); if (ret < 0) return ret; if (ret == 0) { 
dev_dbg(priv->dev, "timeout waiting for a response\n"); return -ETIMEDOUT; } spin_lock_irq(&priv->lock); if (priv->status != NPCM_PECI_CTRL_DONE) { spin_unlock_irq(&priv->lock); dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status); return -EIO; } regmap_write(priv->regmap, NPCM_PECI_CMD, 0); for (i = 0; i < req->rx.len; i++) { regmap_read(priv->regmap, NPCM_PECI_DAT_INOUT(i), &msg_rd); req->rx.buf[i] = (u8)msg_rd; } spin_unlock_irq(&priv->lock); #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len); #endif return 0; } static irqreturn_t npcm_peci_irq_handler(int irq, void *arg) { struct npcm_peci *priv = arg; u32 status_ack = 0; u32 status; spin_lock(&priv->lock); regmap_read(priv->regmap, NPCM_PECI_CTL_STS, &status); priv->status |= (status & NPCM_PECI_INT_MASK); if (status & NPCM_PECI_CTRL_CRC_ERR) status_ack |= NPCM_PECI_CTRL_CRC_ERR; if (status & NPCM_PECI_CTRL_ABRT_ERR) status_ack |= NPCM_PECI_CTRL_ABRT_ERR; /* * All commands should be ended up with a NPCM_PECI_CTRL_DONE * bit set even in an error case. */ if (status & NPCM_PECI_CTRL_DONE) { status_ack |= NPCM_PECI_CTRL_DONE; complete(&priv->xfer_complete); } regmap_write_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_INT_MASK, status_ack); spin_unlock(&priv->lock); return IRQ_HANDLED; } static int npcm_peci_init_ctrl(struct npcm_peci *priv) { u32 cmd_sts; int ret; priv->clk = devm_clk_get_enabled(priv->dev, NULL); if (IS_ERR(priv->clk)) { dev_err(priv->dev, "failed to get ref clock\n"); return PTR_ERR(priv->clk); } ret = device_property_read_u32(priv->dev, "cmd-timeout-ms", &priv->cmd_timeout_ms); if (ret) { priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT; } else if (priv->cmd_timeout_ms > NPCM_PECI_CMD_TIMEOUT_MS_MAX || priv->cmd_timeout_ms == 0) { dev_warn(priv->dev, "invalid cmd-timeout-ms: %u, falling back to: %u\n", priv->cmd_timeout_ms, NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT); priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT; } regmap_update_bits(priv->regmap, NPCM_PECI_CTL2, NPCM_PECI_CTL2_MASK, NPCM_PECI_PULL_DOWN_DEFAULT << 6); regmap_update_bits(priv->regmap, NPCM_PECI_PDDR, NPCM_PECI_PDDR_MASK, NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT); ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts, !(cmd_sts & NPCM_PECI_CTRL_START_BUSY), NPCM_PECI_IDLE_CHECK_INTERVAL_USEC, NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC); if (ret) return ret; /* -ETIMEDOUT */ /* PECI interrupt enable */ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_CTRL_DONE_INT_EN, NPCM_PECI_CTRL_DONE_INT_EN); return 0; } static const struct regmap_config npcm_peci_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = NPCM_PECI_MAX_REG, .fast_io = true, }; static struct peci_controller_ops npcm_ops = { .xfer = npcm_peci_xfer, }; static int npcm_peci_probe(struct platform_device *pdev) { struct peci_controller *controller; struct npcm_peci *priv; void __iomem *base; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, priv); base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &npcm_peci_regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); priv->irq = platform_get_irq(pdev, 0); if (priv->irq < 0) return priv->irq; ret = devm_request_irq(&pdev->dev, priv->irq, npcm_peci_irq_handler, 0, "peci-npcm-irq", priv); if (ret) return ret; 
init_completion(&priv->xfer_complete); spin_lock_init(&priv->lock); ret = npcm_peci_init_ctrl(priv); if (ret) return ret; controller = devm_peci_controller_add(priv->dev, &npcm_ops); if (IS_ERR(controller)) return dev_err_probe(priv->dev, PTR_ERR(controller), "failed to add npcm peci controller\n"); priv->controller = controller; return 0; } static const struct of_device_id npcm_peci_of_table[] = { { .compatible = "nuvoton,npcm750-peci", }, { .compatible = "nuvoton,npcm845-peci", }, { } }; MODULE_DEVICE_TABLE(of, npcm_peci_of_table); static struct platform_driver npcm_peci_driver = { .probe = npcm_peci_probe, .driver = { .name = KBUILD_MODNAME, .of_match_table = npcm_peci_of_table, }, }; module_platform_driver(npcm_peci_driver); MODULE_AUTHOR("Tomer Maimon <[email protected]>"); MODULE_DESCRIPTION("NPCM PECI driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(PECI);
linux-master
drivers/peci/controller/peci-npcm.c
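Beyond the standard resources, the driver reads only "cmd-timeout-ms". An illustrative devicetree node, kept as a comment; everything except the compatible string, the 0x200 register window and the cmd-timeout-ms property is an assumption:

/*
 *   peci: peci-controller@f0100000 {
 *           compatible = "nuvoton,npcm750-peci";
 *           reg = <0xf0100000 0x200>;
 *           interrupts = <...>;        // SoC specific
 *           clocks = <...>;            // APB reference clock
 *           cmd-timeout-ms = <1000>;
 *   };
 *
 * A cmd-timeout-ms of 0 or above 60000 is rejected with a warning and
 * falls back to the 1000 ms default, as implemented in
 * npcm_peci_init_ctrl() above.
 */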
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2012-2017 ASPEED Technology Inc. // Copyright (c) 2018-2021 Intel Corporation #include <asm/unaligned.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/math.h> #include <linux/module.h> #include <linux/of.h> #include <linux/peci.h> #include <linux/platform_device.h> #include <linux/reset.h> /* ASPEED PECI Registers */ /* Control Register */ #define ASPEED_PECI_CTRL 0x00 #define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16) #define ASPEED_PECI_CTRL_RD_MODE_MASK GENMASK(13, 12) #define ASPEED_PECI_CTRL_RD_MODE_DBG BIT(13) #define ASPEED_PECI_CTRL_RD_MODE_COUNT BIT(12) #define ASPEED_PECI_CTRL_CLK_SRC_HCLK BIT(11) #define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8) #define ASPEED_PECI_CTRL_INVERT_OUT BIT(7) #define ASPEED_PECI_CTRL_INVERT_IN BIT(6) #define ASPEED_PECI_CTRL_BUS_CONTENTION_EN BIT(5) #define ASPEED_PECI_CTRL_PECI_EN BIT(4) #define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0) /* Timing Negotiation Register */ #define ASPEED_PECI_TIMING_NEGOTIATION 0x04 #define ASPEED_PECI_T_NEGO_MSG_MASK GENMASK(15, 8) #define ASPEED_PECI_T_NEGO_ADDR_MASK GENMASK(7, 0) /* Command Register */ #define ASPEED_PECI_CMD 0x08 #define ASPEED_PECI_CMD_PIN_MONITORING BIT(31) #define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24) #define ASPEED_PECI_CMD_STS_ADDR_T_NEGO 0x3 #define ASPEED_PECI_CMD_IDLE_MASK \ (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING) #define ASPEED_PECI_CMD_FIRE BIT(0) /* Read/Write Length Register */ #define ASPEED_PECI_RW_LENGTH 0x0c #define ASPEED_PECI_AW_FCS_EN BIT(31) #define ASPEED_PECI_RD_LEN_MASK GENMASK(23, 16) #define ASPEED_PECI_WR_LEN_MASK GENMASK(15, 8) #define ASPEED_PECI_TARGET_ADDR_MASK GENMASK(7, 0) /* Expected FCS Data Register */ #define ASPEED_PECI_EXPECTED_FCS 0x10 #define ASPEED_PECI_EXPECTED_RD_FCS_MASK GENMASK(23, 16) #define ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK GENMASK(15, 8) #define ASPEED_PECI_EXPECTED_WR_FCS_MASK GENMASK(7, 0) /* Captured FCS Data Register */ #define ASPEED_PECI_CAPTURED_FCS 0x14 #define ASPEED_PECI_CAPTURED_RD_FCS_MASK GENMASK(23, 16) #define ASPEED_PECI_CAPTURED_WR_FCS_MASK GENMASK(7, 0) /* Interrupt Register */ #define ASPEED_PECI_INT_CTRL 0x18 #define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30) #define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0 #define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1 #define ASPEED_PECI_MESSAGE_NEGO 2 #define ASPEED_PECI_INT_MASK GENMASK(4, 0) #define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4) #define ASPEED_PECI_INT_BUS_CONTENTION BIT(3) #define ASPEED_PECI_INT_WR_FCS_BAD BIT(2) #define ASPEED_PECI_INT_WR_FCS_ABORT BIT(1) #define ASPEED_PECI_INT_CMD_DONE BIT(0) /* Interrupt Status Register */ #define ASPEED_PECI_INT_STS 0x1c #define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16) /* bits[4..0]: Same bit fields in the 'Interrupt Register' */ /* Rx/Tx Data Buffer Registers */ #define ASPEED_PECI_WR_DATA0 0x20 #define ASPEED_PECI_WR_DATA1 0x24 #define ASPEED_PECI_WR_DATA2 0x28 #define ASPEED_PECI_WR_DATA3 0x2c #define ASPEED_PECI_RD_DATA0 0x30 #define ASPEED_PECI_RD_DATA1 0x34 #define ASPEED_PECI_RD_DATA2 0x38 #define ASPEED_PECI_RD_DATA3 0x3c #define ASPEED_PECI_WR_DATA4 0x40 #define ASPEED_PECI_WR_DATA5 0x44 #define ASPEED_PECI_WR_DATA6 0x48 #define ASPEED_PECI_WR_DATA7 0x4c #define ASPEED_PECI_RD_DATA4 0x50 #define ASPEED_PECI_RD_DATA5 0x54 #define 
ASPEED_PECI_RD_DATA6 0x58 #define ASPEED_PECI_RD_DATA7 0x5c #define ASPEED_PECI_DATA_BUF_SIZE_MAX 32 /* Timing Negotiation */ #define ASPEED_PECI_CLK_FREQUENCY_MIN 2000 #define ASPEED_PECI_CLK_FREQUENCY_DEFAULT 1000000 #define ASPEED_PECI_CLK_FREQUENCY_MAX 2000000 #define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8 /* Timeout */ #define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US (50 * USEC_PER_MSEC) #define ASPEED_PECI_IDLE_CHECK_INTERVAL_US (10 * USEC_PER_MSEC) #define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000 #define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 1000 #define ASPEED_PECI_CLK_DIV1(msg_timing) (4 * (msg_timing) + 1) #define ASPEED_PECI_CLK_DIV2(clk_div_exp) BIT(clk_div_exp) #define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \ (4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp)) struct aspeed_peci { struct peci_controller *controller; struct device *dev; void __iomem *base; struct reset_control *rst; int irq; spinlock_t lock; /* to sync completion status handling */ struct completion xfer_complete; struct clk *clk; u32 clk_frequency; u32 status; u32 cmd_timeout_ms; }; struct clk_aspeed_peci { struct clk_hw hw; struct aspeed_peci *aspeed_peci; }; static void aspeed_peci_controller_enable(struct aspeed_peci *priv) { u32 val = readl(priv->base + ASPEED_PECI_CTRL); val |= ASPEED_PECI_CTRL_PECI_CLK_EN; val |= ASPEED_PECI_CTRL_PECI_EN; writel(val, priv->base + ASPEED_PECI_CTRL); } static void aspeed_peci_init_regs(struct aspeed_peci *priv) { u32 val; /* Clear interrupts */ writel(ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_STS); /* Set timing negotiation mode and enable interrupts */ val = FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO); val |= ASPEED_PECI_INT_MASK; writel(val, priv->base + ASPEED_PECI_INT_CTRL); val = FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT); writel(val, priv->base + ASPEED_PECI_CTRL); } static int aspeed_peci_check_idle(struct aspeed_peci *priv) { u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD); int ret; /* * Under normal circumstances, we expect to be idle here. * In case there were any errors/timeouts that led to the situation * where the hardware is not in idle state - we need to reset and * reinitialize it to avoid potential controller hang. 
*/ if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) { ret = reset_control_assert(priv->rst); if (ret) { dev_err(priv->dev, "cannot assert reset control\n"); return ret; } ret = reset_control_deassert(priv->rst); if (ret) { dev_err(priv->dev, "cannot deassert reset control\n"); return ret; } aspeed_peci_init_regs(priv); ret = clk_set_rate(priv->clk, priv->clk_frequency); if (ret < 0) { dev_err(priv->dev, "cannot set clock frequency\n"); return ret; } aspeed_peci_controller_enable(priv); } return readl_poll_timeout(priv->base + ASPEED_PECI_CMD, cmd_sts, !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK), ASPEED_PECI_IDLE_CHECK_INTERVAL_US, ASPEED_PECI_IDLE_CHECK_TIMEOUT_US); } static int aspeed_peci_xfer(struct peci_controller *controller, u8 addr, struct peci_request *req) { struct aspeed_peci *priv = dev_get_drvdata(controller->dev.parent); unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms); u32 peci_head; int ret, i; if (req->tx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX || req->rx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX) return -EINVAL; /* Check command sts and bus idle state */ ret = aspeed_peci_check_idle(priv); if (ret) return ret; /* -ETIMEDOUT */ spin_lock_irq(&priv->lock); reinit_completion(&priv->xfer_complete); peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, addr) | FIELD_PREP(ASPEED_PECI_WR_LEN_MASK, req->tx.len) | FIELD_PREP(ASPEED_PECI_RD_LEN_MASK, req->rx.len); writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH); for (i = 0; i < req->tx.len; i += 4) { u32 reg = (i < 16 ? ASPEED_PECI_WR_DATA0 : ASPEED_PECI_WR_DATA4) + i % 16; writel(get_unaligned_le32(&req->tx.buf[i]), priv->base + reg); } #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) dev_dbg(priv->dev, "HEAD : %#08x\n", peci_head); print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len); #endif priv->status = 0; writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD); spin_unlock_irq(&priv->lock); ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout); if (ret < 0) return ret; if (ret == 0) { dev_dbg(priv->dev, "timeout waiting for a response\n"); return -ETIMEDOUT; } spin_lock_irq(&priv->lock); if (priv->status != ASPEED_PECI_INT_CMD_DONE) { spin_unlock_irq(&priv->lock); dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status); return -EIO; } spin_unlock_irq(&priv->lock); /* * We need to use dword reads for register access, make sure that the * buffer size is multiple of 4-bytes. */ BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE % 4); for (i = 0; i < req->rx.len; i += 4) { u32 reg = (i < 16 ? ASPEED_PECI_RD_DATA0 : ASPEED_PECI_RD_DATA4) + i % 16; u32 rx_data = readl(priv->base + reg); put_unaligned_le32(rx_data, &req->rx.buf[i]); } #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len); #endif return 0; } static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg) { struct aspeed_peci *priv = arg; u32 status; spin_lock(&priv->lock); status = readl(priv->base + ASPEED_PECI_INT_STS); writel(status, priv->base + ASPEED_PECI_INT_STS); priv->status |= (status & ASPEED_PECI_INT_MASK); /* * All commands should be ended up with a ASPEED_PECI_INT_CMD_DONE bit * set even in an error case. 
*/ if (status & ASPEED_PECI_INT_CMD_DONE) complete(&priv->xfer_complete); writel(0, priv->base + ASPEED_PECI_CMD); spin_unlock(&priv->lock); return IRQ_HANDLED; } static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp) { unsigned long best_diff = ~0ul, diff; int msg_timing_temp, clk_div_exp_temp, i, j; for (i = 1; i <= 255; i++) for (j = 0; j < 8; j++) { diff = abs(rate - ASPEED_PECI_CLK_DIV1(i) * ASPEED_PECI_CLK_DIV2(j)); if (diff < best_diff) { msg_timing_temp = i; clk_div_exp_temp = j; best_diff = diff; } } *msg_timing = msg_timing_temp; *clk_div_exp = clk_div_exp_temp; } static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate) { unsigned long this_rate = *prate / (4 * rate); int msg_timing, clk_div_exp; clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp); return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp); } static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) { struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw); struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci; unsigned long this_rate = prate / (4 * rate); int clk_div_exp, msg_timing; u32 val; clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp); val = readl(aspeed_peci->base + ASPEED_PECI_CTRL); val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp); writel(val, aspeed_peci->base + ASPEED_PECI_CTRL); val = FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK, msg_timing); val |= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK, msg_timing); writel(val, aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION); return 0; } static long clk_aspeed_peci_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { int div = clk_aspeed_peci_get_div(rate, prate); return DIV_ROUND_UP_ULL(*prate, div); } static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw *hw, unsigned long prate) { struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw); struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci; int div, msg_timing, addr_timing, clk_div_exp; u32 reg; reg = readl(aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION); msg_timing = FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK, reg); addr_timing = FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK, reg); if (msg_timing != addr_timing) return 0; reg = readl(aspeed_peci->base + ASPEED_PECI_CTRL); clk_div_exp = FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK, reg); div = ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp); return DIV_ROUND_UP_ULL(prate, div); } static const struct clk_ops clk_aspeed_peci_ops = { .set_rate = clk_aspeed_peci_set_rate, .round_rate = clk_aspeed_peci_round_rate, .recalc_rate = clk_aspeed_peci_recalc_rate, }; /* * PECI HW contains a clock divider which is a combination of: * div0: 4 (fixed divider) * div1: x + 1 * div2: 1 << y * In other words, out_clk = in_clk / (div0 * div1 * div2) * The resulting frequency is used by PECI Controller to drive the PECI bus to * negotiate optimal transfer rate. 
*/ static struct clk *devm_aspeed_peci_register_clk_div(struct device *dev, struct clk *parent, struct aspeed_peci *priv) { struct clk_aspeed_peci *peci_clk; struct clk_init_data init; const char *parent_name; char name[32]; int ret; snprintf(name, sizeof(name), "%s_div", dev_name(dev)); parent_name = __clk_get_name(parent); init.ops = &clk_aspeed_peci_ops; init.name = name; init.parent_names = (const char* []) { parent_name }; init.num_parents = 1; init.flags = 0; peci_clk = devm_kzalloc(dev, sizeof(struct clk_aspeed_peci), GFP_KERNEL); if (!peci_clk) return ERR_PTR(-ENOMEM); peci_clk->hw.init = &init; peci_clk->aspeed_peci = priv; ret = devm_clk_hw_register(dev, &peci_clk->hw); if (ret) return ERR_PTR(ret); return peci_clk->hw.clk; } static void aspeed_peci_property_sanitize(struct device *dev, const char *propname, u32 min, u32 max, u32 default_val, u32 *propval) { u32 val; int ret; ret = device_property_read_u32(dev, propname, &val); if (ret) { val = default_val; } else if (val > max || val < min) { dev_warn(dev, "invalid %s: %u, falling back to: %u\n", propname, val, default_val); val = default_val; } *propval = val; } static void aspeed_peci_property_setup(struct aspeed_peci *priv) { aspeed_peci_property_sanitize(priv->dev, "clock-frequency", ASPEED_PECI_CLK_FREQUENCY_MIN, ASPEED_PECI_CLK_FREQUENCY_MAX, ASPEED_PECI_CLK_FREQUENCY_DEFAULT, &priv->clk_frequency); aspeed_peci_property_sanitize(priv->dev, "cmd-timeout-ms", 1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX, ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms); } static const struct peci_controller_ops aspeed_ops = { .xfer = aspeed_peci_xfer, }; static void aspeed_peci_reset_control_release(void *data) { reset_control_assert(data); } static int devm_aspeed_peci_reset_control_deassert(struct device *dev, struct reset_control *rst) { int ret; ret = reset_control_deassert(rst); if (ret) return ret; return devm_add_action_or_reset(dev, aspeed_peci_reset_control_release, rst); } static void aspeed_peci_clk_release(void *data) { clk_disable_unprepare(data); } static int devm_aspeed_peci_clk_enable(struct device *dev, struct clk *clk) { int ret; ret = clk_prepare_enable(clk); if (ret) return ret; return devm_add_action_or_reset(dev, aspeed_peci_clk_release, clk); } static int aspeed_peci_probe(struct platform_device *pdev) { struct peci_controller *controller; struct aspeed_peci *priv; struct clk *ref_clk; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = &pdev->dev; dev_set_drvdata(priv->dev, priv); priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->irq = platform_get_irq(pdev, 0); if (priv->irq < 0) return priv->irq; ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler, 0, "peci-aspeed", priv); if (ret) return ret; init_completion(&priv->xfer_complete); spin_lock_init(&priv->lock); priv->rst = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(priv->rst)) return dev_err_probe(priv->dev, PTR_ERR(priv->rst), "failed to get reset control\n"); ret = devm_aspeed_peci_reset_control_deassert(priv->dev, priv->rst); if (ret) return dev_err_probe(priv->dev, ret, "cannot deassert reset control\n"); aspeed_peci_property_setup(priv); aspeed_peci_init_regs(priv); ref_clk = devm_clk_get(priv->dev, NULL); if (IS_ERR(ref_clk)) return dev_err_probe(priv->dev, PTR_ERR(ref_clk), "failed to get ref clock\n"); priv->clk = devm_aspeed_peci_register_clk_div(priv->dev, ref_clk, priv); if (IS_ERR(priv->clk)) return 
		dev_err_probe(priv->dev, PTR_ERR(priv->clk),
			      "cannot register clock\n");

	ret = clk_set_rate(priv->clk, priv->clk_frequency);
	if (ret < 0)
		return dev_err_probe(priv->dev, ret,
				     "cannot set clock frequency\n");

	ret = devm_aspeed_peci_clk_enable(priv->dev, priv->clk);
	if (ret)
		return dev_err_probe(priv->dev, ret, "failed to enable clock\n");

	aspeed_peci_controller_enable(priv);

	controller = devm_peci_controller_add(priv->dev, &aspeed_ops);
	if (IS_ERR(controller))
		return dev_err_probe(priv->dev, PTR_ERR(controller),
				     "failed to add aspeed peci controller\n");

	priv->controller = controller;

	return 0;
}

static const struct of_device_id aspeed_peci_of_table[] = {
	{ .compatible = "aspeed,ast2400-peci", },
	{ .compatible = "aspeed,ast2500-peci", },
	{ .compatible = "aspeed,ast2600-peci", },
	{ }
};
MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);

static struct platform_driver aspeed_peci_driver = {
	.probe = aspeed_peci_probe,
	.driver = {
		.name = "peci-aspeed",
		.of_match_table = aspeed_peci_of_table,
	},
};
module_platform_driver(aspeed_peci_driver);

MODULE_AUTHOR("Ryan Chen <[email protected]>");
MODULE_AUTHOR("Jae Hyun Yoo <[email protected]>");
MODULE_DESCRIPTION("ASPEED PECI driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI);
linux-master
drivers/peci/controller/peci-aspeed.c
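The block comment near the end of peci-aspeed.c gives the divider model: out_clk = in_clk / (div0 * div1 * div2), with div0 = 4 fixed, div1 = x + 1 and div2 = 1 << y. Below is a minimal standalone sketch (plain userspace C, not kernel code) of the same brute-force search performed by clk_aspeed_peci_find_div_values(). The DIV1/DIV2 expansions here are assumptions taken from that comment; the real ASPEED_PECI_CLK_DIV1/DIV2 macros live in the driver and are not reproduced in this dump.

#include <stdio.h>

/* Assumed expansions, per the driver comment: div1 = x + 1, div2 = 1 << y */
#define DIV1(x)	((unsigned long)(x) + 1)
#define DIV2(y)	(1ul << (y))

static void find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
{
	unsigned long best_diff = ~0ul, diff, cand;
	int i, j;

	/* Exhaustive search over 255 * 8 = 2040 candidates, as in the driver */
	for (i = 1; i <= 255; i++) {
		for (j = 0; j < 8; j++) {
			cand = DIV1(i) * DIV2(j);
			diff = rate > cand ? rate - cand : cand - rate;
			if (diff < best_diff) {
				*msg_timing = i;
				*clk_div_exp = j;
				best_diff = diff;
			}
		}
	}
}

int main(void)
{
	unsigned long prate = 24000000;	/* 24 MHz reference clock, illustrative */
	unsigned long rate = 1000000;	/* 1 MHz target PECI bus clock */
	/* Mirrors clk_aspeed_peci_get_div(): fold out the fixed pre-divider of 4 */
	unsigned long this_rate = prate / (4 * rate);
	int msg_timing, clk_div_exp;

	find_div_values(this_rate, &msg_timing, &clk_div_exp);
	printf("msg_timing=%d clk_div_exp=%d total divider=%lu\n",
	       msg_timing, clk_div_exp,
	       4 * DIV1(msg_timing) * DIV2(clk_div_exp));
	return 0;
}

The search space is small enough (2040 candidates) that the driver can afford the exhaustive scan on every set_rate call rather than deriving x and y analytically.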
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HSI clients registration interface
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Carlos Chinea <[email protected]>
 */
#include <linux/hsi/hsi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "hsi_core.h"

/*
 * hsi_board_list is only used internally by the HSI framework.
 * No one else is allowed to make use of it.
 */
LIST_HEAD(hsi_board_list);
EXPORT_SYMBOL_GPL(hsi_board_list);

/**
 * hsi_register_board_info - Register HSI clients information
 * @info: Array of HSI clients on the board
 * @len: Length of the array
 *
 * HSI clients are statically declared and registered on board files.
 *
 * HSI clients will be automatically registered to the HSI bus once the
 * controller and the port where the client wishes to attach are registered
 * to it.
 *
 * Return -errno on failure, 0 on success.
 */
int __init hsi_register_board_info(struct hsi_board_info const *info,
							unsigned int len)
{
	struct hsi_cl_info *cl_info;

	cl_info = kcalloc(len, sizeof(*cl_info), GFP_KERNEL);
	if (!cl_info)
		return -ENOMEM;

	for (; len; len--, info++, cl_info++) {
		cl_info->info = *info;
		list_add_tail(&cl_info->list, &hsi_board_list);
	}

	return 0;
}
linux-master
drivers/hsi/hsi_boardinfo.c
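hsi_register_board_info() is meant to be called from board files before the matching controller probes. A hypothetical registration sketch follows; the struct hsi_board_info field names (.name, .hsi_id, .port, .tx_cfg, .rx_cfg) follow what hsi_new_client() and hsi_scan_board_info() read in hsi_core.c below, while every concrete value is purely illustrative.

#include <linux/hsi/hsi.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Purely illustrative values; a real board picks ids, ports and configs
 * to match its HSI controller and peer device. */
static struct hsi_board_info example_hsi_devices[] __initdata = {
	{
		.name	= "hsi_char",	/* client driver to bind, see hsi_char.c below */
		.hsi_id	= 0,		/* controller id, matched by hsi_scan_board_info() */
		.port	= 0,		/* port number on that controller */
		.tx_cfg	= {
			.mode	= HSI_MODE_FRAME,
			.speed	= 96000,	/* kbps, illustrative */
		},
		.rx_cfg	= {
			.mode	= HSI_MODE_FRAME,
			.flow	= HSI_FLOW_SYNC,
		},
	},
};

static int __init example_board_hsi_init(void)
{
	/* Entries queued here become HSI clients automatically once the
	 * matching controller and port register with the HSI core. */
	return hsi_register_board_info(example_hsi_devices,
				       ARRAY_SIZE(example_hsi_devices));
}
arch_initcall(example_board_hsi_init);

Note that hsi_register_board_info() copies each hsi_board_info into its own allocation, so the __initdata array itself may be discarded after init; only pointed-to data such as channel arrays must stay live.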
// SPDX-License-Identifier: GPL-2.0-only /* * HSI core. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Carlos Chinea <[email protected]> */ #include <linux/hsi/hsi.h> #include <linux/compiler.h> #include <linux/list.h> #include <linux/kobject.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_device.h> #include "hsi_core.h" static ssize_t modalias_show(struct device *dev, struct device_attribute *a __maybe_unused, char *buf) { return sprintf(buf, "hsi:%s\n", dev_name(dev)); } static DEVICE_ATTR_RO(modalias); static struct attribute *hsi_bus_dev_attrs[] = { &dev_attr_modalias.attr, NULL, }; ATTRIBUTE_GROUPS(hsi_bus_dev); static int hsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) { add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); return 0; } static int hsi_bus_match(struct device *dev, struct device_driver *driver) { if (of_driver_match_device(dev, driver)) return true; if (strcmp(dev_name(dev), driver->name) == 0) return true; return false; } static struct bus_type hsi_bus_type = { .name = "hsi", .dev_groups = hsi_bus_dev_groups, .match = hsi_bus_match, .uevent = hsi_bus_uevent, }; static void hsi_client_release(struct device *dev) { struct hsi_client *cl = to_hsi_client(dev); kfree(cl->tx_cfg.channels); kfree(cl->rx_cfg.channels); kfree(cl); } struct hsi_client *hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) { struct hsi_client *cl; size_t size; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) goto err; cl->tx_cfg = info->tx_cfg; if (cl->tx_cfg.channels) { size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size, GFP_KERNEL); if (!cl->tx_cfg.channels) goto err_tx; } cl->rx_cfg = info->rx_cfg; if (cl->rx_cfg.channels) { size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size, GFP_KERNEL); if (!cl->rx_cfg.channels) goto err_rx; } cl->device.bus = &hsi_bus_type; cl->device.parent = &port->device; cl->device.release = hsi_client_release; dev_set_name(&cl->device, "%s", info->name); cl->device.platform_data = info->platform_data; if (info->archdata) cl->device.archdata = *info->archdata; if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); goto err; } return cl; err_rx: kfree(cl->tx_cfg.channels); err_tx: kfree(cl); err: return NULL; } EXPORT_SYMBOL_GPL(hsi_new_client); static void hsi_scan_board_info(struct hsi_controller *hsi) { struct hsi_cl_info *cl_info; struct hsi_port *p; list_for_each_entry(cl_info, &hsi_board_list, list) if (cl_info->info.hsi_id == hsi->id) { p = hsi_find_port_num(hsi, cl_info->info.port); if (!p) continue; hsi_new_client(p, &cl_info->info); } } #ifdef CONFIG_OF static struct hsi_board_info hsi_char_dev_info = { .name = "hsi_char", }; static int hsi_of_property_parse_mode(struct device_node *client, char *name, unsigned int *result) { const char *mode; int err; err = of_property_read_string(client, name, &mode); if (err < 0) return err; if (strcmp(mode, "stream") == 0) *result = HSI_MODE_STREAM; else if (strcmp(mode, "frame") == 0) *result = HSI_MODE_FRAME; else return -EINVAL; return 0; } static int hsi_of_property_parse_flow(struct device_node *client, char *name, unsigned int *result) { const char *flow; int err; err = of_property_read_string(client, name, &flow); if (err < 0) return err; if (strcmp(flow, "synchronized") == 
0) *result = HSI_FLOW_SYNC; else if (strcmp(flow, "pipeline") == 0) *result = HSI_FLOW_PIPE; else return -EINVAL; return 0; } static int hsi_of_property_parse_arb_mode(struct device_node *client, char *name, unsigned int *result) { const char *arb_mode; int err; err = of_property_read_string(client, name, &arb_mode); if (err < 0) return err; if (strcmp(arb_mode, "round-robin") == 0) *result = HSI_ARB_RR; else if (strcmp(arb_mode, "priority") == 0) *result = HSI_ARB_PRIO; else return -EINVAL; return 0; } static void hsi_add_client_from_dt(struct hsi_port *port, struct device_node *client) { struct hsi_client *cl; struct hsi_channel channel; struct property *prop; char name[32]; int length, cells, err, i, max_chan, mode; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) return; err = of_alias_from_compatible(client, name, sizeof(name)); if (err) goto err; err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); if (err) { err = hsi_of_property_parse_mode(client, "hsi-rx-mode", &cl->rx_cfg.mode); if (err) goto err; err = hsi_of_property_parse_mode(client, "hsi-tx-mode", &cl->tx_cfg.mode); if (err) goto err; } else { cl->rx_cfg.mode = mode; cl->tx_cfg.mode = mode; } err = of_property_read_u32(client, "hsi-speed-kbps", &cl->tx_cfg.speed); if (err) goto err; cl->rx_cfg.speed = cl->tx_cfg.speed; err = hsi_of_property_parse_flow(client, "hsi-flow", &cl->rx_cfg.flow); if (err) goto err; err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode", &cl->rx_cfg.arb_mode); if (err) goto err; prop = of_find_property(client, "hsi-channel-ids", &length); if (!prop) { err = -EINVAL; goto err; } cells = length / sizeof(u32); cl->rx_cfg.num_channels = cells; cl->tx_cfg.num_channels = cells; cl->rx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL); if (!cl->rx_cfg.channels) { err = -ENOMEM; goto err; } cl->tx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL); if (!cl->tx_cfg.channels) { err = -ENOMEM; goto err2; } max_chan = 0; for (i = 0; i < cells; i++) { err = of_property_read_u32_index(client, "hsi-channel-ids", i, &channel.id); if (err) goto err3; err = of_property_read_string_index(client, "hsi-channel-names", i, &channel.name); if (err) channel.name = NULL; if (channel.id > max_chan) max_chan = channel.id; cl->rx_cfg.channels[i] = channel; cl->tx_cfg.channels[i] = channel; } cl->rx_cfg.num_hw_channels = max_chan + 1; cl->tx_cfg.num_hw_channels = max_chan + 1; cl->device.bus = &hsi_bus_type; cl->device.parent = &port->device; cl->device.release = hsi_client_release; cl->device.of_node = client; dev_set_name(&cl->device, "%s", name); if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", name); put_device(&cl->device); } return; err3: kfree(cl->tx_cfg.channels); err2: kfree(cl->rx_cfg.channels); err: kfree(cl); pr_err("hsi client: missing or incorrect of property: err=%d\n", err); } void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) { struct device_node *child; /* register hsi-char device */ hsi_new_client(port, &hsi_char_dev_info); for_each_available_child_of_node(clients, child) hsi_add_client_from_dt(port, child); } EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt); #endif int hsi_remove_client(struct device *dev, void *data __maybe_unused) { device_unregister(dev); return 0; } EXPORT_SYMBOL_GPL(hsi_remove_client); static int hsi_remove_port(struct device *dev, void *data __maybe_unused) { device_for_each_child(dev, NULL, hsi_remove_client); device_unregister(dev); return 0; } static void hsi_controller_release(struct device 
*dev) { struct hsi_controller *hsi = to_hsi_controller(dev); kfree(hsi->port); kfree(hsi); } static void hsi_port_release(struct device *dev) { kfree(to_hsi_port(dev)); } /** * hsi_port_unregister_clients - Unregister an HSI port * @port: The HSI port to unregister */ void hsi_port_unregister_clients(struct hsi_port *port) { device_for_each_child(&port->device, NULL, hsi_remove_client); } EXPORT_SYMBOL_GPL(hsi_port_unregister_clients); /** * hsi_unregister_controller - Unregister an HSI controller * @hsi: The HSI controller to register */ void hsi_unregister_controller(struct hsi_controller *hsi) { device_for_each_child(&hsi->device, NULL, hsi_remove_port); device_unregister(&hsi->device); } EXPORT_SYMBOL_GPL(hsi_unregister_controller); /** * hsi_register_controller - Register an HSI controller and its ports * @hsi: The HSI controller to register * * Returns -errno on failure, 0 on success. */ int hsi_register_controller(struct hsi_controller *hsi) { unsigned int i; int err; err = device_add(&hsi->device); if (err < 0) return err; for (i = 0; i < hsi->num_ports; i++) { hsi->port[i]->device.parent = &hsi->device; err = device_add(&hsi->port[i]->device); if (err < 0) goto out; } /* Populate HSI bus with HSI clients */ hsi_scan_board_info(hsi); return 0; out: while (i-- > 0) device_del(&hsi->port[i]->device); device_del(&hsi->device); return err; } EXPORT_SYMBOL_GPL(hsi_register_controller); /** * hsi_register_client_driver - Register an HSI client to the HSI bus * @drv: HSI client driver to register * * Returns -errno on failure, 0 on success. */ int hsi_register_client_driver(struct hsi_client_driver *drv) { drv->driver.bus = &hsi_bus_type; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(hsi_register_client_driver); static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) { return 0; } static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) { return 0; } /** * hsi_put_controller - Free an HSI controller * * @hsi: Pointer to the HSI controller to freed * * HSI controller drivers should only use this function if they need * to free their allocated hsi_controller structures before a successful * call to hsi_register_controller. Other use is not allowed. */ void hsi_put_controller(struct hsi_controller *hsi) { unsigned int i; if (!hsi) return; for (i = 0; i < hsi->num_ports; i++) if (hsi->port && hsi->port[i]) put_device(&hsi->port[i]->device); put_device(&hsi->device); } EXPORT_SYMBOL_GPL(hsi_put_controller); /** * hsi_alloc_controller - Allocate an HSI controller and its ports * @n_ports: Number of ports on the HSI controller * @flags: Kernel allocation flags * * Return NULL on failure or a pointer to an hsi_controller on success. 
*/ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) { struct hsi_controller *hsi; struct hsi_port **port; unsigned int i; if (!n_ports) return NULL; hsi = kzalloc(sizeof(*hsi), flags); if (!hsi) return NULL; port = kcalloc(n_ports, sizeof(*port), flags); if (!port) { kfree(hsi); return NULL; } hsi->num_ports = n_ports; hsi->port = port; hsi->device.release = hsi_controller_release; device_initialize(&hsi->device); for (i = 0; i < n_ports; i++) { port[i] = kzalloc(sizeof(**port), flags); if (port[i] == NULL) goto out; port[i]->num = i; port[i]->async = hsi_dummy_msg; port[i]->setup = hsi_dummy_cl; port[i]->flush = hsi_dummy_cl; port[i]->start_tx = hsi_dummy_cl; port[i]->stop_tx = hsi_dummy_cl; port[i]->release = hsi_dummy_cl; mutex_init(&port[i]->lock); BLOCKING_INIT_NOTIFIER_HEAD(&port[i]->n_head); dev_set_name(&port[i]->device, "port%d", i); hsi->port[i]->device.release = hsi_port_release; device_initialize(&hsi->port[i]->device); } return hsi; out: hsi_put_controller(hsi); return NULL; } EXPORT_SYMBOL_GPL(hsi_alloc_controller); /** * hsi_free_msg - Free an HSI message * @msg: Pointer to the HSI message * * Client is responsible to free the buffers pointed by the scatterlists. */ void hsi_free_msg(struct hsi_msg *msg) { if (!msg) return; sg_free_table(&msg->sgt); kfree(msg); } EXPORT_SYMBOL_GPL(hsi_free_msg); /** * hsi_alloc_msg - Allocate an HSI message * @nents: Number of memory entries * @flags: Kernel allocation flags * * nents can be 0. This mainly makes sense for read transfer. * In that case, HSI drivers will call the complete callback when * there is data to be read without consuming it. * * Return NULL on failure or a pointer to an hsi_msg on success. */ struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags) { struct hsi_msg *msg; int err; msg = kzalloc(sizeof(*msg), flags); if (!msg) return NULL; if (!nents) return msg; err = sg_alloc_table(&msg->sgt, nents, flags); if (unlikely(err)) { kfree(msg); msg = NULL; } return msg; } EXPORT_SYMBOL_GPL(hsi_alloc_msg); /** * hsi_async - Submit an HSI transfer to the controller * @cl: HSI client sending the transfer * @msg: The HSI transfer passed to controller * * The HSI message must have the channel, ttype, complete and destructor * fields set beforehand. If nents > 0 then the client has to initialize * also the scatterlists to point to the buffers to write to or read from. * * HSI controllers relay on pre-allocated buffers from their clients and they * do not allocate buffers on their own. * * Once the HSI message transfer finishes, the HSI controller calls the * complete callback with the status and actual_len fields of the HSI message * updated. The complete callback can be called before returning from * hsi_async. * * Returns -errno on failure or 0 on success */ int hsi_async(struct hsi_client *cl, struct hsi_msg *msg) { struct hsi_port *port = hsi_get_port(cl); if (!hsi_port_claimed(cl)) return -EACCES; WARN_ON_ONCE(!msg->destructor || !msg->complete); msg->cl = cl; return port->async(msg); } EXPORT_SYMBOL_GPL(hsi_async); /** * hsi_claim_port - Claim the HSI client's port * @cl: HSI client that wants to claim its port * @share: Flag to indicate if the client wants to share the port or not. * * Returns -errno on failure, 0 on success. 
*/ int hsi_claim_port(struct hsi_client *cl, unsigned int share) { struct hsi_port *port = hsi_get_port(cl); int err = 0; mutex_lock(&port->lock); if ((port->claimed) && (!port->shared || !share)) { err = -EBUSY; goto out; } if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) { err = -ENODEV; goto out; } port->claimed++; port->shared = !!share; cl->pclaimed = 1; out: mutex_unlock(&port->lock); return err; } EXPORT_SYMBOL_GPL(hsi_claim_port); /** * hsi_release_port - Release the HSI client's port * @cl: HSI client which previously claimed its port */ void hsi_release_port(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); mutex_lock(&port->lock); /* Allow HW driver to do some cleanup */ port->release(cl); if (cl->pclaimed) port->claimed--; BUG_ON(port->claimed < 0); cl->pclaimed = 0; if (!port->claimed) port->shared = 0; module_put(to_hsi_controller(port->device.parent)->owner); mutex_unlock(&port->lock); } EXPORT_SYMBOL_GPL(hsi_release_port); static int hsi_event_notifier_call(struct notifier_block *nb, unsigned long event, void *data __maybe_unused) { struct hsi_client *cl = container_of(nb, struct hsi_client, nb); (*cl->ehandler)(cl, event); return 0; } /** * hsi_register_port_event - Register a client to receive port events * @cl: HSI client that wants to receive port events * @handler: Event handler callback * * Clients should register a callback to be able to receive * events from the ports. Registration should happen after * claiming the port. * The handler can be called in interrupt context. * * Returns -errno on error, or 0 on success. */ int hsi_register_port_event(struct hsi_client *cl, void (*handler)(struct hsi_client *, unsigned long)) { struct hsi_port *port = hsi_get_port(cl); if (!handler || cl->ehandler) return -EINVAL; if (!hsi_port_claimed(cl)) return -EACCES; cl->ehandler = handler; cl->nb.notifier_call = hsi_event_notifier_call; return blocking_notifier_chain_register(&port->n_head, &cl->nb); } EXPORT_SYMBOL_GPL(hsi_register_port_event); /** * hsi_unregister_port_event - Stop receiving port events for a client * @cl: HSI client that wants to stop receiving port events * * Clients should call this function before releasing their associated * port. * * Returns -errno on error, or 0 on success. */ int hsi_unregister_port_event(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); int err; WARN_ON(!hsi_port_claimed(cl)); err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb); if (!err) cl->ehandler = NULL; return err; } EXPORT_SYMBOL_GPL(hsi_unregister_port_event); /** * hsi_event - Notifies clients about port events * @port: Port where the event occurred * @event: The event type * * Clients should not be concerned about wake line behavior. However, due * to a race condition in HSI HW protocol, clients need to be notified * about wake line changes, so they can implement a workaround for it. * * Events: * HSI_EVENT_START_RX - Incoming wake line high * HSI_EVENT_STOP_RX - Incoming wake line down * * Returns -errno on error, or 0 on success. */ int hsi_event(struct hsi_port *port, unsigned long event) { return blocking_notifier_call_chain(&port->n_head, event, NULL); } EXPORT_SYMBOL_GPL(hsi_event); /** * hsi_get_channel_id_by_name - acquire channel id by channel name * @cl: HSI client, which uses the channel * @name: name the channel is known under * * Clients can call this function to get the hsi channel ids similar to * requesting IRQs or GPIOs by name. 
This function assumes the same
 * channel configuration is used for RX and TX.
 *
 * Returns -errno on error or channel id on success.
 */
int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name)
{
	int i;

	if (!cl->rx_cfg.channels)
		return -ENOENT;

	for (i = 0; i < cl->rx_cfg.num_channels; i++)
		if (!strcmp(cl->rx_cfg.channels[i].name, name))
			return cl->rx_cfg.channels[i].id;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name);

static int __init hsi_init(void)
{
	return bus_register(&hsi_bus_type);
}
postcore_initcall(hsi_init);

static void __exit hsi_exit(void)
{
	bus_unregister(&hsi_bus_type);
}
module_exit(hsi_exit);

MODULE_AUTHOR("Carlos Chinea <[email protected]>");
MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
MODULE_LICENSE("GPL v2");
linux-master
drivers/hsi/hsi_core.c
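The interfaces exported by hsi_core.c compose into a small client life cycle: register a struct hsi_client_driver, claim the port in probe, optionally register a port-event handler, then submit transfers with hsi_async(). A minimal hypothetical client sketch using only the symbols exported above could look like this; error paths are abbreviated and all example_* names are invented.

#include <linux/hsi/hsi.h>
#include <linux/module.h>

static void example_event_handler(struct hsi_client *cl, unsigned long event)
{
	/* Runs from the port's blocking notifier chain, see hsi_event() */
	dev_dbg(&cl->device, "port event %lu\n", event);
}

static void example_read_done(struct hsi_msg *msg)
{
	/* Used as both completion and destructor in this sketch */
	hsi_free_msg(msg);
}

static int example_probe(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct hsi_msg *msg;
	int ret;

	ret = hsi_claim_port(cl, 0);	/* exclusive claim */
	if (ret < 0)
		return ret;

	ret = hsi_register_port_event(cl, example_event_handler);
	if (ret < 0)
		goto release;

	msg = hsi_alloc_msg(0, GFP_KERNEL);	/* nents == 0: peek-style read */
	if (!msg) {
		ret = -ENOMEM;
		goto unregister;
	}
	msg->ttype = HSI_MSG_READ;
	msg->complete = example_read_done;
	msg->destructor = example_read_done;	/* hsi_async() WARNs if unset */

	ret = hsi_async(cl, msg);	/* relayed to the controller's port->async() */
	if (ret < 0) {
		hsi_free_msg(msg);
		goto unregister;
	}
	return 0;

unregister:
	hsi_unregister_port_event(cl);
release:
	hsi_release_port(cl);
	return ret;
}

static int example_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);

	hsi_unregister_port_event(cl);
	hsi_release_port(cl);
	return 0;
}

static struct hsi_client_driver example_driver = {
	.driver = {
		.name	= "example_hsi_client",
		.owner	= THIS_MODULE,
		.probe	= example_probe,
		.remove	= example_remove,
	},
};

static int __init example_init(void)
{
	return hsi_register_client_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	hsi_unregister_client_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

The probe/remove hooks hang off the embedded struct device_driver, mirroring how hsi_char.c wires up hsc_probe()/hsc_remove() below.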
// SPDX-License-Identifier: GPL-2.0-only /* * HSI character device driver, implements the character device * interface. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Andras Domokos <[email protected]> */ #include <linux/errno.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/kmemleak.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/scatterlist.h> #include <linux/stat.h> #include <linux/hsi/hsi.h> #include <linux/hsi/hsi_char.h> #define HSC_DEVS 16 /* Num of channels */ #define HSC_MSGS 4 #define HSC_RXBREAK 0 #define HSC_ID_BITS 6 #define HSC_PORT_ID_BITS 4 #define HSC_ID_MASK 3 #define HSC_PORT_ID_MASK 3 #define HSC_CH_MASK 0xf /* * We support up to 4 controllers that can have up to 4 * ports, which should currently be more than enough. */ #define HSC_BASEMINOR(id, port_id) \ ((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \ (((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS)) enum { HSC_CH_OPEN, HSC_CH_READ, HSC_CH_WRITE, HSC_CH_WLINE, }; enum { HSC_RX, HSC_TX, }; struct hsc_client_data; /** * struct hsc_channel - hsi_char internal channel data * @ch: channel number * @flags: Keeps state of the channel (open/close, reading, writing) * @free_msgs_list: List of free HSI messages/requests * @rx_msgs_queue: List of pending RX requests * @tx_msgs_queue: List of pending TX requests * @lock: Serialize access to the lists * @cl: reference to the associated hsi_client * @cl_data: reference to the client data that this channels belongs to * @rx_wait: RX requests wait queue * @tx_wait: TX requests wait queue */ struct hsc_channel { unsigned int ch; unsigned long flags; struct list_head free_msgs_list; struct list_head rx_msgs_queue; struct list_head tx_msgs_queue; spinlock_t lock; struct hsi_client *cl; struct hsc_client_data *cl_data; wait_queue_head_t rx_wait; wait_queue_head_t tx_wait; }; /** * struct hsc_client_data - hsi_char internal client data * @cdev: Characther device associated to the hsi_client * @lock: Lock to serialize open/close access * @flags: Keeps track of port state (rx hwbreak armed) * @usecnt: Use count for claiming the HSI port (mutex protected) * @cl: Referece to the HSI client * @channels: Array of channels accessible by the client */ struct hsc_client_data { struct cdev cdev; struct mutex lock; unsigned long flags; unsigned int usecnt; struct hsi_client *cl; struct hsc_channel channels[HSC_DEVS]; }; /* Stores the major number dynamically allocated for hsi_char */ static unsigned int hsc_major; /* Maximum buffer size that hsi_char will accept from userspace */ static unsigned int max_data_size = 0x1000; module_param(max_data_size, uint, 0); MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, struct list_head *queue) { unsigned long flags; spin_lock_irqsave(&channel->lock, flags); list_add_tail(&msg->link, queue); spin_unlock_irqrestore(&channel->lock, flags); } static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel, struct list_head *queue) { struct hsi_msg *msg = NULL; unsigned long flags; spin_lock_irqsave(&channel->lock, flags); if (list_empty(queue)) goto out; msg = list_first_entry(queue, struct hsi_msg, link); 
list_del(&msg->link); out: spin_unlock_irqrestore(&channel->lock, flags); return msg; } static inline void hsc_msg_free(struct hsi_msg *msg) { kfree(sg_virt(msg->sgt.sgl)); hsi_free_msg(msg); } static void hsc_free_list(struct list_head *list) { struct hsi_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, list, link) { list_del(&msg->link); hsc_msg_free(msg); } } static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l) { unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&channel->lock, flags); list_splice_init(l, &list); spin_unlock_irqrestore(&channel->lock, flags); hsc_free_list(&list); } static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size) { struct hsi_msg *msg; void *buf; msg = hsi_alloc_msg(1, GFP_KERNEL); if (!msg) goto out; buf = kmalloc(alloc_size, GFP_KERNEL); if (!buf) { hsi_free_msg(msg); goto out; } sg_init_one(msg->sgt.sgl, buf, alloc_size); /* Ignore false positive, due to sg pointer handling */ kmemleak_ignore(buf); return msg; out: return NULL; } static inline int hsc_msgs_alloc(struct hsc_channel *channel) { struct hsi_msg *msg; int i; for (i = 0; i < HSC_MSGS; i++) { msg = hsc_msg_alloc(max_data_size); if (!msg) goto out; msg->channel = channel->ch; list_add_tail(&msg->link, &channel->free_msgs_list); } return 0; out: hsc_free_list(&channel->free_msgs_list); return -ENOMEM; } static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg) { return msg->sgt.sgl->length; } static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len) { msg->sgt.sgl->length = len; } static void hsc_rx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_READ, &channel->flags)) { hsc_add_tail(channel, msg, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_rx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_rx_completed(msg); } static void hsc_tx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_WRITE, &channel->flags)) { hsc_add_tail(channel, msg, &channel->tx_msgs_queue); wake_up(&channel->tx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_tx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_tx_completed(msg); } static void hsc_break_req_destructor(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsi_free_msg(msg); clear_bit(HSC_RXBREAK, &cl_data->flags); } static void hsc_break_received(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels; int i, ret; /* Broadcast HWBREAK on all channels */ for (i = 0; i < HSC_DEVS; i++, channel++) { struct hsi_msg *msg2; if (!test_bit(HSC_CH_READ, &channel->flags)) continue; msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg2) continue; clear_bit(HSC_CH_READ, &channel->flags); hsc_msg_len_set(msg2, 0); msg2->status = HSI_STATUS_COMPLETED; hsc_add_tail(channel, msg2, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } hsi_flush(msg->cl); ret = hsi_async_read(msg->cl, msg); if (ret < 0) hsc_break_req_destructor(msg); } static int hsc_break_request(struct hsi_client *cl) { struct 
hsc_client_data *cl_data = hsi_client_drvdata(cl); struct hsi_msg *msg; int ret; if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags)) return -EBUSY; msg = hsi_alloc_msg(0, GFP_KERNEL); if (!msg) { clear_bit(HSC_RXBREAK, &cl_data->flags); return -ENOMEM; } msg->break_frame = 1; msg->complete = hsc_break_received; msg->destructor = hsc_break_req_destructor; ret = hsi_async_read(cl, msg); if (ret < 0) hsc_break_req_destructor(msg); return ret; } static int hsc_break_send(struct hsi_client *cl) { struct hsi_msg *msg; int ret; msg = hsi_alloc_msg(0, GFP_ATOMIC); if (!msg) return -ENOMEM; msg->break_frame = 1; msg->complete = hsi_free_msg; msg->destructor = hsi_free_msg; ret = hsi_async_write(cl, msg); if (ret < 0) hsi_free_msg(msg); return ret; } static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) { struct hsi_config tmp; int ret; if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS)) return -EINVAL; if (rxc->channels & (rxc->channels - 1)) return -EINVAL; if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE)) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; cl->rx_cfg.num_hw_channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { cl->rx_cfg = tmp; return ret; } if (rxc->mode == HSI_MODE_FRAME) hsc_break_request(cl); return ret; } static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; rxc->channels = cl->rx_cfg.num_hw_channels; rxc->flow = cl->rx_cfg.flow; } static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) { struct hsi_config tmp; int ret; if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((txc->channels == 0) || (txc->channels > HSC_DEVS)) return -EINVAL; if (txc->channels & (txc->channels - 1)) return -EINVAL; if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO)) return -EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; cl->tx_cfg.num_hw_channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); if (ret < 0) { cl->tx_cfg = tmp; return ret; } return ret; } static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; txc->channels = cl->tx_cfg.num_hw_channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = file->private_data; struct hsi_msg *msg; ssize_t ret; if (len == 0) return 0; if (!IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->rx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { ret = -ENOSPC; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_rx_completed; msg->destructor = hsc_rx_msg_destructor; ret = hsi_async_read(channel->cl, msg); if (ret < 0) { hsc_add_tail(channel, msg, &channel->free_msgs_list); goto out; } ret = wait_event_interruptible(channel->rx_wait, !list_empty(&channel->rx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_READ, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue); if (msg) { if (msg->status != HSI_STATUS_ERROR) { ret = 
copy_to_user((void __user *)buf, sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg)); if (ret) ret = -EFAULT; else ret = hsc_msg_len_get(msg); } else { ret = -EIO; } hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_READ, &channel->flags); return ret; } static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = file->private_data; struct hsi_msg *msg; ssize_t ret; if ((len == 0) || !IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->tx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { clear_bit(HSC_CH_WRITE, &channel->flags); return -ENOSPC; } if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) { ret = -EFAULT; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_tx_completed; msg->destructor = hsc_tx_msg_destructor; ret = hsi_async_write(channel->cl, msg); if (ret < 0) goto out; ret = wait_event_interruptible(channel->tx_wait, !list_empty(&channel->tx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_WRITE, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue); if (msg) { if (msg->status == HSI_STATUS_ERROR) ret = -EIO; else ret = hsc_msg_len_get(msg); hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_WRITE, &channel->flags); return ret; } static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hsc_channel *channel = file->private_data; unsigned int state; struct hsc_rx_config rxc; struct hsc_tx_config txc; long ret = 0; switch (cmd) { case HSC_RESET: hsi_flush(channel->cl); break; case HSC_SET_PM: if (copy_from_user(&state, (void __user *)arg, sizeof(state))) return -EFAULT; if (state == HSC_PM_DISABLE) { if (test_and_set_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_start_tx(channel->cl); } else if (state == HSC_PM_ENABLE) { if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_stop_tx(channel->cl); } else { ret = -EINVAL; } break; case HSC_SEND_BREAK: return hsc_break_send(channel->cl); case HSC_SET_RX: if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc))) return -EFAULT; return hsc_rx_set(channel->cl, &rxc); case HSC_GET_RX: hsc_rx_get(channel->cl, &rxc); if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc))) return -EFAULT; break; case HSC_SET_TX: if (copy_from_user(&txc, (void __user *)arg, sizeof(txc))) return -EFAULT; return hsc_tx_set(channel->cl, &txc); case HSC_GET_TX: hsc_tx_get(channel->cl, &txc); if (copy_to_user((void __user *)arg, &txc, sizeof(txc))) return -EFAULT; break; default: return -ENOIOCTLCMD; } return ret; } static inline void __hsc_port_release(struct hsc_client_data *cl_data) { BUG_ON(cl_data->usecnt == 0); if (--cl_data->usecnt == 0) { hsi_flush(cl_data->cl); hsi_release_port(cl_data->cl); } } static int hsc_open(struct inode *inode, struct file *file) { struct hsc_client_data *cl_data; struct hsc_channel *channel; int ret = 0; pr_debug("open, minor = %d\n", iminor(inode)); cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev); mutex_lock(&cl_data->lock); channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK); if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) { ret = -EBUSY; goto out; } /* * Check if we have already claimed the port associated to the HSI 
* client. If not then try to claim it, else increase its refcount */ if (cl_data->usecnt == 0) { ret = hsi_claim_port(cl_data->cl, 0); if (ret < 0) goto out; hsi_setup(cl_data->cl); } cl_data->usecnt++; ret = hsc_msgs_alloc(channel); if (ret < 0) { __hsc_port_release(cl_data); goto out; } file->private_data = channel; mutex_unlock(&cl_data->lock); return ret; out: mutex_unlock(&cl_data->lock); return ret; } static int hsc_release(struct inode *inode __maybe_unused, struct file *file) { struct hsc_channel *channel = file->private_data; struct hsc_client_data *cl_data = channel->cl_data; mutex_lock(&cl_data->lock); file->private_data = NULL; if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) hsi_stop_tx(channel->cl); __hsc_port_release(cl_data); hsc_reset_list(channel, &channel->rx_msgs_queue); hsc_reset_list(channel, &channel->tx_msgs_queue); hsc_reset_list(channel, &channel->free_msgs_list); clear_bit(HSC_CH_READ, &channel->flags); clear_bit(HSC_CH_WRITE, &channel->flags); clear_bit(HSC_CH_OPEN, &channel->flags); wake_up(&channel->rx_wait); wake_up(&channel->tx_wait); mutex_unlock(&cl_data->lock); return 0; } static const struct file_operations hsc_fops = { .owner = THIS_MODULE, .read = hsc_read, .write = hsc_write, .unlocked_ioctl = hsc_ioctl, .open = hsc_open, .release = hsc_release, }; static void hsc_channel_init(struct hsc_channel *channel) { init_waitqueue_head(&channel->rx_wait); init_waitqueue_head(&channel->tx_wait); spin_lock_init(&channel->lock); INIT_LIST_HEAD(&channel->free_msgs_list); INIT_LIST_HEAD(&channel->rx_msgs_queue); INIT_LIST_HEAD(&channel->tx_msgs_queue); } static int hsc_probe(struct device *dev) { const char devname[] = "hsi_char"; struct hsc_client_data *cl_data; struct hsc_channel *channel; struct hsi_client *cl = to_hsi_client(dev); unsigned int hsc_baseminor; dev_t hsc_dev; int ret; int i; cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL); if (!cl_data) return -ENOMEM; hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl)); if (!hsc_major) { ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor, HSC_DEVS, devname); if (ret == 0) hsc_major = MAJOR(hsc_dev); } else { hsc_dev = MKDEV(hsc_major, hsc_baseminor); ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname); } if (ret < 0) { dev_err(dev, "Device %s allocation failed %d\n", hsc_major ? 
"minor" : "major", ret); goto out1; } mutex_init(&cl_data->lock); hsi_client_set_drvdata(cl, cl_data); cdev_init(&cl_data->cdev, &hsc_fops); cl_data->cdev.owner = THIS_MODULE; cl_data->cl = cl; for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) { hsc_channel_init(channel); channel->ch = i; channel->cl = cl; channel->cl_data = cl_data; } /* 1 hsi client -> N char devices (one for each channel) */ ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS); if (ret) { dev_err(dev, "Could not add char device %d\n", ret); goto out2; } return 0; out2: unregister_chrdev_region(hsc_dev, HSC_DEVS); out1: kfree(cl_data); return ret; } static int hsc_remove(struct device *dev) { struct hsi_client *cl = to_hsi_client(dev); struct hsc_client_data *cl_data = hsi_client_drvdata(cl); dev_t hsc_dev = cl_data->cdev.dev; cdev_del(&cl_data->cdev); unregister_chrdev_region(hsc_dev, HSC_DEVS); hsi_client_set_drvdata(cl, NULL); kfree(cl_data); return 0; } static struct hsi_client_driver hsc_driver = { .driver = { .name = "hsi_char", .owner = THIS_MODULE, .probe = hsc_probe, .remove = hsc_remove, }, }; static int __init hsc_init(void) { int ret; if ((max_data_size < 4) || (max_data_size > 0x10000) || (max_data_size & (max_data_size - 1))) { pr_err("Invalid max read/write data size\n"); return -EINVAL; } ret = hsi_register_client_driver(&hsc_driver); if (ret) { pr_err("Error while registering HSI/SSI driver %d\n", ret); return ret; } pr_info("HSI/SSI char device loaded\n"); return 0; } module_init(hsc_init); static void __exit hsc_exit(void) { hsi_unregister_client_driver(&hsc_driver); pr_info("HSI char device removed\n"); } module_exit(hsc_exit); MODULE_AUTHOR("Andras Domokos <[email protected]>"); MODULE_ALIAS("hsi:hsi_char"); MODULE_DESCRIPTION("HSI character device"); MODULE_LICENSE("GPL v2");
linux-master
drivers/hsi/clients/hsi_char.c
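From userspace, each minor of the hsi_char region maps to one HSI channel. A hypothetical usage sketch follows; the /dev/hsi_char0 path is an assumption (the driver only registers the chrdev region named "hsi_char", so node creation and naming are left to udev or the admin), and the hsc_tx_config field names mirror what hsc_tx_get() fills in. Transfer lengths must be multiples of 4 bytes, as hsc_read()/hsc_write() enforce with IS_ALIGNED(len, sizeof(u32)).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hsi/hsi_char.h>

int main(void)
{
	struct hsc_tx_config txc;
	unsigned int buf[16];	/* 64 bytes, 32-bit aligned as required */
	int fd;

	fd = open("/dev/hsi_char0", O_RDWR);	/* hypothetical node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Query the current TX configuration (mode/channels/speed/arb_mode) */
	if (ioctl(fd, HSC_GET_TX, &txc) == 0)
		printf("tx: mode=%u channels=%u speed=%u arb=%u\n",
		       txc.mode, txc.channels, txc.speed, txc.arb_mode);

	memset(buf, 0xaa, sizeof(buf));
	if (write(fd, buf, sizeof(buf)) < 0)	/* blocks until the transfer completes */
		perror("write");

	if (read(fd, buf, sizeof(buf)) < 0)	/* blocks until data arrives on the channel */
		perror("read");

	/* HSC_RESET flushes pending transfers via hsi_flush() */
	ioctl(fd, HSC_RESET);
	close(fd);
	return 0;
}

A read or write on a channel whose index is outside the configured num_hw_channels fails with -ECHRNG, and concurrent readers or writers on the same channel get -EBUSY, matching the test_and_set_bit() guards in hsc_read()/hsc_write().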
// SPDX-License-Identifier: GPL-2.0-only /* * cmt_speech.c - HSI CMT speech driver * * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved. * * Contact: Kai Vehmanen <[email protected]> * Original author: Peter Ujfalusi <[email protected]> */ #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/sched/signal.h> #include <linux/ioctl.h> #include <linux/uaccess.h> #include <linux/pm_qos.h> #include <linux/hsi/hsi.h> #include <linux/hsi/ssi_protocol.h> #include <linux/hsi/cs-protocol.h> #define CS_MMAP_SIZE PAGE_SIZE struct char_queue { struct list_head list; u32 msg; }; struct cs_char { unsigned int opened; struct hsi_client *cl; struct cs_hsi_iface *hi; struct list_head chardev_queue; struct list_head dataind_queue; int dataind_pending; /* mmap things */ unsigned long mmap_base; unsigned long mmap_size; spinlock_t lock; struct fasync_struct *async_queue; wait_queue_head_t wait; /* hsi channel ids */ int channel_id_cmd; int channel_id_data; }; #define SSI_CHANNEL_STATE_READING 1 #define SSI_CHANNEL_STATE_WRITING (1 << 1) #define SSI_CHANNEL_STATE_POLL (1 << 2) #define SSI_CHANNEL_STATE_ERROR (1 << 3) #define TARGET_MASK 0xf000000 #define TARGET_REMOTE (1 << CS_DOMAIN_SHIFT) #define TARGET_LOCAL 0 /* Number of pre-allocated commands buffers */ #define CS_MAX_CMDS 4 /* * During data transfers, transactions must be handled * within 20ms (fixed value in cmtspeech HSI protocol) */ #define CS_QOS_LATENCY_FOR_DATA_USEC 20000 /* Timeout to wait for pending HSI transfers to complete */ #define CS_HSI_TRANSFER_TIMEOUT_MS 500 #define RX_PTR_BOUNDARY_SHIFT 8 #define RX_PTR_MAX_SHIFT (RX_PTR_BOUNDARY_SHIFT + \ CS_MAX_BUFFERS_SHIFT) struct cs_hsi_iface { struct hsi_client *cl; struct hsi_client *master; unsigned int iface_state; unsigned int wakeline_state; unsigned int control_state; unsigned int data_state; /* state exposed to application */ struct cs_mmap_config_block *mmap_cfg; unsigned long mmap_base; unsigned long mmap_size; unsigned int rx_slot; unsigned int tx_slot; /* note: for security reasons, we do not trust the contents of * mmap_cfg, but instead duplicate the variables here */ unsigned int buf_size; unsigned int rx_bufs; unsigned int tx_bufs; unsigned int rx_ptr_boundary; unsigned int rx_offsets[CS_MAX_BUFFERS]; unsigned int tx_offsets[CS_MAX_BUFFERS]; /* size of aligned memory blocks */ unsigned int slot_size; unsigned int flags; struct list_head cmdqueue; struct hsi_msg *data_rx_msg; struct hsi_msg *data_tx_msg; wait_queue_head_t datawait; struct pm_qos_request pm_qos_req; spinlock_t lock; }; static struct cs_char cs_char_data; static void cs_hsi_read_on_control(struct cs_hsi_iface *hi); static void cs_hsi_read_on_data(struct cs_hsi_iface *hi); static inline void rx_ptr_shift_too_big(void) { BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX); } static void cs_notify(u32 message, struct list_head *head) { struct char_queue *entry; spin_lock(&cs_char_data.lock); if (!cs_char_data.opened) { spin_unlock(&cs_char_data.lock); goto out; } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { dev_err(&cs_char_data.cl->device, "Can't allocate new entry for the queue.\n"); spin_unlock(&cs_char_data.lock); goto out; } entry->msg = message; list_add_tail(&entry->list, head); spin_unlock(&cs_char_data.lock); wake_up_interruptible(&cs_char_data.wait); 
kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN); out: return; } static u32 cs_pop_entry(struct list_head *head) { struct char_queue *entry; u32 data; entry = list_entry(head->next, struct char_queue, list); data = entry->msg; list_del(&entry->list); kfree(entry); return data; } static void cs_notify_control(u32 message) { cs_notify(message, &cs_char_data.chardev_queue); } static void cs_notify_data(u32 message, int maxlength) { cs_notify(message, &cs_char_data.dataind_queue); spin_lock(&cs_char_data.lock); cs_char_data.dataind_pending++; while (cs_char_data.dataind_pending > maxlength && !list_empty(&cs_char_data.dataind_queue)) { dev_dbg(&cs_char_data.cl->device, "data notification " "queue overrun (%u entries)\n", cs_char_data.dataind_pending); cs_pop_entry(&cs_char_data.dataind_queue); cs_char_data.dataind_pending--; } spin_unlock(&cs_char_data.lock); } static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd) { u32 *data = sg_virt(msg->sgt.sgl); *data = cmd; } static inline u32 cs_get_cmd(struct hsi_msg *msg) { u32 *data = sg_virt(msg->sgt.sgl); return *data; } static void cs_release_cmd(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; list_add_tail(&msg->link, &hi->cmdqueue); } static void cs_cmd_destructor(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; spin_lock(&hi->lock); dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n"); if (hi->iface_state != CS_STATE_CLOSED) dev_err(&hi->cl->device, "Cmd flushed while driver active\n"); if (msg->ttype == HSI_MSG_READ) hi->control_state &= ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING); else if (msg->ttype == HSI_MSG_WRITE && hi->control_state & SSI_CHANNEL_STATE_WRITING) hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; cs_release_cmd(msg); spin_unlock(&hi->lock); } static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi) { struct hsi_msg *msg; BUG_ON(list_empty(&ssi->cmdqueue)); msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); list_del(&msg->link); msg->destructor = cs_cmd_destructor; return msg; } static void cs_free_cmds(struct cs_hsi_iface *ssi) { struct hsi_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { list_del(&msg->link); msg->destructor = NULL; kfree(sg_virt(msg->sgt.sgl)); hsi_free_msg(msg); } } static int cs_alloc_cmds(struct cs_hsi_iface *hi) { struct hsi_msg *msg; u32 *buf; unsigned int i; INIT_LIST_HEAD(&hi->cmdqueue); for (i = 0; i < CS_MAX_CMDS; i++) { msg = hsi_alloc_msg(1, GFP_KERNEL); if (!msg) goto out; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { hsi_free_msg(msg); goto out; } sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); msg->channel = cs_char_data.channel_id_cmd; msg->context = hi; list_add_tail(&msg->link, &hi->cmdqueue); } return 0; out: cs_free_cmds(hi); return -ENOMEM; } static void cs_hsi_data_destructor(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; const char *dir = (msg->ttype == HSI_MSG_READ) ? 
"TX" : "RX"; dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir); spin_lock(&hi->lock); if (hi->iface_state != CS_STATE_CLOSED) dev_err(&cs_char_data.cl->device, "Data %s flush while device active\n", dir); if (msg->ttype == HSI_MSG_READ) hi->data_state &= ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING); else hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; msg->status = HSI_STATUS_COMPLETED; if (unlikely(waitqueue_active(&hi->datawait))) wake_up_interruptible(&hi->datawait); spin_unlock(&hi->lock); } static int cs_hsi_alloc_data(struct cs_hsi_iface *hi) { struct hsi_msg *txmsg, *rxmsg; int res = 0; rxmsg = hsi_alloc_msg(1, GFP_KERNEL); if (!rxmsg) { res = -ENOMEM; goto out1; } rxmsg->channel = cs_char_data.channel_id_data; rxmsg->destructor = cs_hsi_data_destructor; rxmsg->context = hi; txmsg = hsi_alloc_msg(1, GFP_KERNEL); if (!txmsg) { res = -ENOMEM; goto out2; } txmsg->channel = cs_char_data.channel_id_data; txmsg->destructor = cs_hsi_data_destructor; txmsg->context = hi; hi->data_rx_msg = rxmsg; hi->data_tx_msg = txmsg; return 0; out2: hsi_free_msg(rxmsg); out1: return res; } static void cs_hsi_free_data_msg(struct hsi_msg *msg) { WARN_ON(msg->status != HSI_STATUS_COMPLETED && msg->status != HSI_STATUS_ERROR); hsi_free_msg(msg); } static void cs_hsi_free_data(struct cs_hsi_iface *hi) { cs_hsi_free_data_msg(hi->data_rx_msg); cs_hsi_free_data_msg(hi->data_tx_msg); } static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi, struct hsi_msg *msg, const char *info, unsigned int *state) { spin_lock(&hi->lock); dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n", info, msg->status, *state); } static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi) { spin_unlock(&hi->lock); } static inline void __cs_hsi_error_read_bits(unsigned int *state) { *state |= SSI_CHANNEL_STATE_ERROR; *state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL); } static inline void __cs_hsi_error_write_bits(unsigned int *state) { *state |= SSI_CHANNEL_STATE_ERROR; *state &= ~SSI_CHANNEL_STATE_WRITING; } static void cs_hsi_control_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) { __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state); cs_release_cmd(msg); __cs_hsi_error_read_bits(&hi->control_state); __cs_hsi_error_post(hi); } static void cs_hsi_control_write_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) { __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state); cs_release_cmd(msg); __cs_hsi_error_write_bits(&hi->control_state); __cs_hsi_error_post(hi); } static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) { __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state); __cs_hsi_error_read_bits(&hi->data_state); __cs_hsi_error_post(hi); } static void cs_hsi_data_write_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) { __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state); __cs_hsi_error_write_bits(&hi->data_state); __cs_hsi_error_post(hi); } static void cs_hsi_read_on_control_complete(struct hsi_msg *msg) { u32 cmd = cs_get_cmd(msg); struct cs_hsi_iface *hi = msg->context; spin_lock(&hi->lock); hi->control_state &= ~SSI_CHANNEL_STATE_READING; if (msg->status == HSI_STATUS_ERROR) { dev_err(&hi->cl->device, "Control RX error detected\n"); spin_unlock(&hi->lock); cs_hsi_control_read_error(hi, msg); goto out; } dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); cs_release_cmd(msg); if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) { struct timespec64 tspec; struct cs_timestamp *tstamp = &hi->mmap_cfg->tstamp_rx_ctrl; 
ktime_get_ts64(&tspec); tstamp->tv_sec = (__u32) tspec.tv_sec; tstamp->tv_nsec = (__u32) tspec.tv_nsec; } spin_unlock(&hi->lock); cs_notify_control(cmd); out: cs_hsi_read_on_control(hi); } static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; int ret; if (msg->status == HSI_STATUS_ERROR) { dev_err(&hi->cl->device, "Control peek RX error detected\n"); cs_hsi_control_read_error(hi, msg); return; } WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING)); dev_dbg(&hi->cl->device, "Peek on control complete, reading\n"); msg->sgt.nents = 1; msg->complete = cs_hsi_read_on_control_complete; ret = hsi_async_read(hi->cl, msg); if (ret) cs_hsi_control_read_error(hi, msg); } static void cs_hsi_read_on_control(struct cs_hsi_iface *hi) { struct hsi_msg *msg; int ret; spin_lock(&hi->lock); if (hi->control_state & SSI_CHANNEL_STATE_READING) { dev_err(&hi->cl->device, "Control read already pending (%d)\n", hi->control_state); spin_unlock(&hi->lock); return; } if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { dev_err(&hi->cl->device, "Control read error (%d)\n", hi->control_state); spin_unlock(&hi->lock); return; } hi->control_state |= SSI_CHANNEL_STATE_READING; dev_dbg(&hi->cl->device, "Issuing RX on control\n"); msg = cs_claim_cmd(hi); spin_unlock(&hi->lock); msg->sgt.nents = 0; msg->complete = cs_hsi_peek_on_control_complete; ret = hsi_async_read(hi->cl, msg); if (ret) cs_hsi_control_read_error(hi, msg); } static void cs_hsi_write_on_control_complete(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; if (msg->status == HSI_STATUS_COMPLETED) { spin_lock(&hi->lock); hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; cs_release_cmd(msg); spin_unlock(&hi->lock); } else if (msg->status == HSI_STATUS_ERROR) { cs_hsi_control_write_error(hi, msg); } else { dev_err(&hi->cl->device, "unexpected status in control write callback %d\n", msg->status); } } static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message) { struct hsi_msg *msg; int ret; spin_lock(&hi->lock); if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { spin_unlock(&hi->lock); return -EIO; } if (hi->control_state & SSI_CHANNEL_STATE_WRITING) { dev_err(&hi->cl->device, "Write still pending on control channel.\n"); spin_unlock(&hi->lock); return -EBUSY; } hi->control_state |= SSI_CHANNEL_STATE_WRITING; msg = cs_claim_cmd(hi); spin_unlock(&hi->lock); cs_set_cmd(msg, message); msg->sgt.nents = 1; msg->complete = cs_hsi_write_on_control_complete; dev_dbg(&hi->cl->device, "Sending control message %08X\n", message); ret = hsi_async_write(hi->cl, msg); if (ret) { dev_err(&hi->cl->device, "async_write failed with %d\n", ret); cs_hsi_control_write_error(hi, msg); } /* * Make sure control read is always pending when issuing * new control writes. This is needed as the controller * may flush our messages if e.g. the peer device reboots * unexpectedly (and we cannot directly resubmit a new read from * the message destructor; see cs_cmd_destructor()). 
*/ if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) { dev_err(&hi->cl->device, "Restarting control reads\n"); cs_hsi_read_on_control(hi); } return 0; } static void cs_hsi_read_on_data_complete(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; u32 payload; if (unlikely(msg->status == HSI_STATUS_ERROR)) { cs_hsi_data_read_error(hi, msg); return; } spin_lock(&hi->lock); WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING)); hi->data_state &= ~SSI_CHANNEL_STATE_READING; payload = CS_RX_DATA_RECEIVED; payload |= hi->rx_slot; hi->rx_slot++; hi->rx_slot %= hi->rx_ptr_boundary; /* expose current rx ptr in mmap area */ hi->mmap_cfg->rx_ptr = hi->rx_slot; if (unlikely(waitqueue_active(&hi->datawait))) wake_up_interruptible(&hi->datawait); spin_unlock(&hi->lock); cs_notify_data(payload, hi->rx_bufs); cs_hsi_read_on_data(hi); } static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; u32 *address; int ret; if (unlikely(msg->status == HSI_STATUS_ERROR)) { cs_hsi_data_read_error(hi, msg); return; } if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) { dev_err(&hi->cl->device, "Data received in invalid state\n"); cs_hsi_data_read_error(hi, msg); return; } spin_lock(&hi->lock); WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL)); hi->data_state &= ~SSI_CHANNEL_STATE_POLL; hi->data_state |= SSI_CHANNEL_STATE_READING; spin_unlock(&hi->lock); address = (u32 *)(hi->mmap_base + hi->rx_offsets[hi->rx_slot % hi->rx_bufs]); sg_init_one(msg->sgt.sgl, address, hi->buf_size); msg->sgt.nents = 1; msg->complete = cs_hsi_read_on_data_complete; ret = hsi_async_read(hi->cl, msg); if (ret) cs_hsi_data_read_error(hi, msg); } /* * Read/write transaction is ongoing. Returns false if in * SSI_CHANNEL_STATE_POLL state. 
*/ static inline int cs_state_xfer_active(unsigned int state) { return (state & SSI_CHANNEL_STATE_WRITING) || (state & SSI_CHANNEL_STATE_READING); } /* * No pending read/writes */ static inline int cs_state_idle(unsigned int state) { return !(state & ~SSI_CHANNEL_STATE_ERROR); } static void cs_hsi_read_on_data(struct cs_hsi_iface *hi) { struct hsi_msg *rxmsg; int ret; spin_lock(&hi->lock); if (hi->data_state & (SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) { dev_dbg(&hi->cl->device, "Data read already pending (%u)\n", hi->data_state); spin_unlock(&hi->lock); return; } hi->data_state |= SSI_CHANNEL_STATE_POLL; spin_unlock(&hi->lock); rxmsg = hi->data_rx_msg; sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0); rxmsg->sgt.nents = 0; rxmsg->complete = cs_hsi_peek_on_data_complete; ret = hsi_async_read(hi->cl, rxmsg); if (ret) cs_hsi_data_read_error(hi, rxmsg); } static void cs_hsi_write_on_data_complete(struct hsi_msg *msg) { struct cs_hsi_iface *hi = msg->context; if (msg->status == HSI_STATUS_COMPLETED) { spin_lock(&hi->lock); hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; if (unlikely(waitqueue_active(&hi->datawait))) wake_up_interruptible(&hi->datawait); spin_unlock(&hi->lock); } else { cs_hsi_data_write_error(hi, msg); } } static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot) { u32 *address; struct hsi_msg *txmsg; int ret; spin_lock(&hi->lock); if (hi->iface_state != CS_STATE_CONFIGURED) { dev_err(&hi->cl->device, "Not configured, aborting\n"); ret = -EINVAL; goto error; } if (hi->data_state & SSI_CHANNEL_STATE_ERROR) { dev_err(&hi->cl->device, "HSI error, aborting\n"); ret = -EIO; goto error; } if (hi->data_state & SSI_CHANNEL_STATE_WRITING) { dev_err(&hi->cl->device, "Write pending on data channel.\n"); ret = -EBUSY; goto error; } hi->data_state |= SSI_CHANNEL_STATE_WRITING; spin_unlock(&hi->lock); hi->tx_slot = slot; address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]); txmsg = hi->data_tx_msg; sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); txmsg->complete = cs_hsi_write_on_data_complete; ret = hsi_async_write(hi->cl, txmsg); if (ret) cs_hsi_data_write_error(hi, txmsg); return ret; error: spin_unlock(&hi->lock); if (ret == -EIO) cs_hsi_data_write_error(hi, hi->data_tx_msg); return ret; } static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi) { return hi->iface_state; } static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd) { int ret = 0; local_bh_disable(); switch (cmd & TARGET_MASK) { case TARGET_REMOTE: ret = cs_hsi_write_on_control(hi, cmd); break; case TARGET_LOCAL: if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY) ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK); else ret = -EINVAL; break; default: ret = -EINVAL; break; } local_bh_enable(); return ret; } static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state) { int change = 0; spin_lock_bh(&hi->lock); if (hi->wakeline_state != new_state) { hi->wakeline_state = new_state; change = 1; dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n", new_state, hi->cl); } spin_unlock_bh(&hi->lock); if (change) { if (new_state) ssip_slave_start_tx(hi->master); else ssip_slave_stop_tx(hi->master); } dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n", new_state, hi->cl); } static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs) { hi->rx_bufs = rx_bufs; hi->tx_bufs = tx_bufs; hi->mmap_cfg->rx_bufs = rx_bufs; hi->mmap_cfg->tx_bufs = tx_bufs; if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) { /* * For more robust overrun detection, let the rx * pointer 
run in range 0..'boundary-1'. Boundary * is a multiple of rx_bufs, and limited in max size * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff * calculation. */ hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT); hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary; } else { hi->rx_ptr_boundary = hi->rx_bufs; } } static int check_buf_params(struct cs_hsi_iface *hi, const struct cs_buffer_config *buf_cfg) { size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) * (buf_cfg->rx_bufs + buf_cfg->tx_bufs); size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); int r = 0; if (buf_cfg->rx_bufs > CS_MAX_BUFFERS || buf_cfg->tx_bufs > CS_MAX_BUFFERS) { r = -EINVAL; } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) { dev_err(&hi->cl->device, "No space for the requested buffer " "configuration\n"); r = -ENOBUFS; } return r; } /* * Block until pending data transfers have completed. */ static int cs_hsi_data_sync(struct cs_hsi_iface *hi) { int r = 0; spin_lock_bh(&hi->lock); if (!cs_state_xfer_active(hi->data_state)) { dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n"); goto out; } for (;;) { int s; DEFINE_WAIT(wait); if (!cs_state_xfer_active(hi->data_state)) goto out; if (signal_pending(current)) { r = -ERESTARTSYS; goto out; } /* * prepare_to_wait must be called with hi->lock held * so that callbacks can check for waitqueue_active() */ prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE); spin_unlock_bh(&hi->lock); s = schedule_timeout( msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS)); spin_lock_bh(&hi->lock); finish_wait(&hi->datawait, &wait); if (!s) { dev_dbg(&hi->cl->device, "hsi_data_sync timeout after %d ms\n", CS_HSI_TRANSFER_TIMEOUT_MS); r = -EIO; goto out; } } out: spin_unlock_bh(&hi->lock); dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r); return r; } static void cs_hsi_data_enable(struct cs_hsi_iface *hi, struct cs_buffer_config *buf_cfg) { unsigned int data_start, i; BUG_ON(hi->buf_size == 0); set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs); hi->slot_size = L1_CACHE_ALIGN(hi->buf_size); dev_dbg(&hi->cl->device, "setting slot size to %u, buf size %u, align %u\n", hi->slot_size, hi->buf_size, L1_CACHE_BYTES); data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); dev_dbg(&hi->cl->device, "setting data start at %u, cfg block %u, align %u\n", data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES); for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) { hi->rx_offsets[i] = data_start + i * hi->slot_size; hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i]; dev_dbg(&hi->cl->device, "DL buf #%u at %u\n", i, hi->rx_offsets[i]); } for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) { hi->tx_offsets[i] = data_start + (i + hi->mmap_cfg->rx_bufs) * hi->slot_size; hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i]; dev_dbg(&hi->cl->device, "UL buf #%u at %u\n", i, hi->rx_offsets[i]); } hi->iface_state = CS_STATE_CONFIGURED; } static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state) { if (old_state == CS_STATE_CONFIGURED) { dev_dbg(&hi->cl->device, "closing data channel with slot size 0\n"); hi->iface_state = CS_STATE_OPENED; } } static int cs_hsi_buf_config(struct cs_hsi_iface *hi, struct cs_buffer_config *buf_cfg) { int r = 0; unsigned int old_state = hi->iface_state; spin_lock_bh(&hi->lock); /* Prevent new transactions during buffer reconfig */ if (old_state == CS_STATE_CONFIGURED) hi->iface_state = CS_STATE_OPENED; spin_unlock_bh(&hi->lock); /* * make sure that no non-zero data reads are ongoing before * proceeding to change the buffer layout */ r = 
cs_hsi_data_sync(hi); if (r < 0) return r; WARN_ON(cs_state_xfer_active(hi->data_state)); spin_lock_bh(&hi->lock); r = check_buf_params(hi, buf_cfg); if (r < 0) goto error; hi->buf_size = buf_cfg->buf_size; hi->mmap_cfg->buf_size = hi->buf_size; hi->flags = buf_cfg->flags; hi->rx_slot = 0; hi->tx_slot = 0; hi->slot_size = 0; if (hi->buf_size) cs_hsi_data_enable(hi, buf_cfg); else cs_hsi_data_disable(hi, old_state); spin_unlock_bh(&hi->lock); if (old_state != hi->iface_state) { if (hi->iface_state == CS_STATE_CONFIGURED) { cpu_latency_qos_add_request(&hi->pm_qos_req, CS_QOS_LATENCY_FOR_DATA_USEC); local_bh_disable(); cs_hsi_read_on_data(hi); local_bh_enable(); } else if (old_state == CS_STATE_CONFIGURED) { cpu_latency_qos_remove_request(&hi->pm_qos_req); } } return r; error: spin_unlock_bh(&hi->lock); return r; } static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl, unsigned long mmap_base, unsigned long mmap_size) { int err = 0; struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL); dev_dbg(&cl->device, "cs_hsi_start\n"); if (!hsi_if) { err = -ENOMEM; goto leave0; } spin_lock_init(&hsi_if->lock); hsi_if->cl = cl; hsi_if->iface_state = CS_STATE_CLOSED; hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base; hsi_if->mmap_base = mmap_base; hsi_if->mmap_size = mmap_size; memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg)); init_waitqueue_head(&hsi_if->datawait); err = cs_alloc_cmds(hsi_if); if (err < 0) { dev_err(&cl->device, "Unable to alloc HSI messages\n"); goto leave1; } err = cs_hsi_alloc_data(hsi_if); if (err < 0) { dev_err(&cl->device, "Unable to alloc HSI messages for data\n"); goto leave2; } err = hsi_claim_port(cl, 1); if (err < 0) { dev_err(&cl->device, "Could not open, HSI port already claimed\n"); goto leave3; } hsi_if->master = ssip_slave_get_master(cl); if (IS_ERR(hsi_if->master)) { err = PTR_ERR(hsi_if->master); dev_err(&cl->device, "Could not get HSI master client\n"); goto leave4; } if (!ssip_slave_running(hsi_if->master)) { err = -ENODEV; dev_err(&cl->device, "HSI port not initialized\n"); goto leave4; } hsi_if->iface_state = CS_STATE_OPENED; local_bh_disable(); cs_hsi_read_on_control(hsi_if); local_bh_enable(); dev_dbg(&cl->device, "cs_hsi_start...done\n"); BUG_ON(!hi); *hi = hsi_if; return 0; leave4: hsi_release_port(cl); leave3: cs_hsi_free_data(hsi_if); leave2: cs_free_cmds(hsi_if); leave1: kfree(hsi_if); leave0: dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n"); return err; } static void cs_hsi_stop(struct cs_hsi_iface *hi) { dev_dbg(&hi->cl->device, "cs_hsi_stop\n"); cs_hsi_set_wakeline(hi, 0); ssip_slave_put_master(hi->master); /* hsi_release_port() needs to be called with CS_STATE_CLOSED */ hi->iface_state = CS_STATE_CLOSED; hsi_release_port(hi->cl); /* * hsi_release_port() should flush out all the pending * messages, so cs_state_idle() should be true for both * control and data channels. 
*/ WARN_ON(!cs_state_idle(hi->control_state)); WARN_ON(!cs_state_idle(hi->data_state)); if (cpu_latency_qos_request_active(&hi->pm_qos_req)) cpu_latency_qos_remove_request(&hi->pm_qos_req); spin_lock_bh(&hi->lock); cs_hsi_free_data(hi); cs_free_cmds(hi); spin_unlock_bh(&hi->lock); kfree(hi); } static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf) { struct cs_char *csdata = vmf->vma->vm_private_data; struct page *page; page = virt_to_page((void *)csdata->mmap_base); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct cs_char_vm_ops = { .fault = cs_char_vma_fault, }; static int cs_char_fasync(int fd, struct file *file, int on) { struct cs_char *csdata = file->private_data; if (fasync_helper(fd, file, on, &csdata->async_queue) < 0) return -EIO; return 0; } static __poll_t cs_char_poll(struct file *file, poll_table *wait) { struct cs_char *csdata = file->private_data; __poll_t ret = 0; poll_wait(file, &cs_char_data.wait, wait); spin_lock_bh(&csdata->lock); if (!list_empty(&csdata->chardev_queue)) ret = EPOLLIN | EPOLLRDNORM; else if (!list_empty(&csdata->dataind_queue)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock_bh(&csdata->lock); return ret; } static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count, loff_t *unused) { struct cs_char *csdata = file->private_data; u32 data; ssize_t retval; if (count < sizeof(data)) return -EINVAL; for (;;) { DEFINE_WAIT(wait); spin_lock_bh(&csdata->lock); if (!list_empty(&csdata->chardev_queue)) { data = cs_pop_entry(&csdata->chardev_queue); } else if (!list_empty(&csdata->dataind_queue)) { data = cs_pop_entry(&csdata->dataind_queue); csdata->dataind_pending--; } else { data = 0; } spin_unlock_bh(&csdata->lock); if (data) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } else if (signal_pending(current)) { retval = -ERESTARTSYS; goto out; } prepare_to_wait_exclusive(&csdata->wait, &wait, TASK_INTERRUPTIBLE); schedule(); finish_wait(&csdata->wait, &wait); } retval = put_user(data, (u32 __user *)buf); if (!retval) retval = sizeof(data); out: return retval; } static ssize_t cs_char_write(struct file *file, const char __user *buf, size_t count, loff_t *unused) { struct cs_char *csdata = file->private_data; u32 data; int err; ssize_t retval; if (count < sizeof(data)) return -EINVAL; /* do not issue a command word that was never copied in */ if (get_user(data, (u32 __user *)buf)) return -EFAULT; retval = count; err = cs_hsi_command(csdata->hi, data); if (err < 0) retval = err; return retval; } static long cs_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct cs_char *csdata = file->private_data; int r = 0; switch (cmd) { case CS_GET_STATE: { unsigned int state; state = cs_hsi_get_state(csdata->hi); if (copy_to_user((void __user *)arg, &state, sizeof(state))) r = -EFAULT; break; } case CS_SET_WAKELINE: { unsigned int state; if (copy_from_user(&state, (void __user *)arg, sizeof(state))) { r = -EFAULT; break; } if (state > 1) { r = -EINVAL; break; } cs_hsi_set_wakeline(csdata->hi, !!state); break; } case CS_GET_IF_VERSION: { unsigned int ifver = CS_IF_VERSION; if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver))) r = -EFAULT; break; } case CS_CONFIG_BUFS: { struct cs_buffer_config buf_cfg; if (copy_from_user(&buf_cfg, (void __user *)arg, sizeof(buf_cfg))) r = -EFAULT; else r = cs_hsi_buf_config(csdata->hi, &buf_cfg); break; } default: r = -ENOTTY; break; } return r; } static int cs_char_mmap(struct file *file, struct vm_area_struct *vma) { if (vma->vm_end < vma->vm_start) return -EINVAL; if (vma_pages(vma) != 1)
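/* the shared config block is a single page (CS_MMAP_SIZE); reject any larger mapping */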
return -EINVAL; vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND); vma->vm_ops = &cs_char_vm_ops; vma->vm_private_data = file->private_data; return 0; } static int cs_char_open(struct inode *unused, struct file *file) { int ret = 0; unsigned long p; spin_lock_bh(&cs_char_data.lock); if (cs_char_data.opened) { ret = -EBUSY; spin_unlock_bh(&cs_char_data.lock); goto out1; } cs_char_data.opened = 1; cs_char_data.dataind_pending = 0; spin_unlock_bh(&cs_char_data.lock); p = get_zeroed_page(GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out2; } ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE); if (ret) { dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n"); goto out3; } /* these are only used in release so lock not needed */ cs_char_data.mmap_base = p; cs_char_data.mmap_size = CS_MMAP_SIZE; file->private_data = &cs_char_data; return 0; out3: free_page(p); out2: spin_lock_bh(&cs_char_data.lock); cs_char_data.opened = 0; spin_unlock_bh(&cs_char_data.lock); out1: return ret; } static void cs_free_char_queue(struct list_head *head) { struct char_queue *entry; struct list_head *cursor, *next; if (!list_empty(head)) { list_for_each_safe(cursor, next, head) { entry = list_entry(cursor, struct char_queue, list); list_del(&entry->list); kfree(entry); } } } static int cs_char_release(struct inode *unused, struct file *file) { struct cs_char *csdata = file->private_data; cs_hsi_stop(csdata->hi); spin_lock_bh(&csdata->lock); csdata->hi = NULL; free_page(csdata->mmap_base); cs_free_char_queue(&csdata->chardev_queue); cs_free_char_queue(&csdata->dataind_queue); csdata->opened = 0; spin_unlock_bh(&csdata->lock); return 0; } static const struct file_operations cs_char_fops = { .owner = THIS_MODULE, .read = cs_char_read, .write = cs_char_write, .poll = cs_char_poll, .unlocked_ioctl = cs_char_ioctl, .mmap = cs_char_mmap, .open = cs_char_open, .release = cs_char_release, .fasync = cs_char_fasync, }; static struct miscdevice cs_char_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "cmt_speech", .fops = &cs_char_fops }; static int cs_hsi_client_probe(struct device *dev) { int err = 0; struct hsi_client *cl = to_hsi_client(dev); dev_dbg(dev, "hsi_client_probe\n"); init_waitqueue_head(&cs_char_data.wait); spin_lock_init(&cs_char_data.lock); cs_char_data.opened = 0; cs_char_data.cl = cl; cs_char_data.hi = NULL; INIT_LIST_HEAD(&cs_char_data.chardev_queue); INIT_LIST_HEAD(&cs_char_data.dataind_queue); cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl, "speech-control"); if (cs_char_data.channel_id_cmd < 0) { err = cs_char_data.channel_id_cmd; dev_err(dev, "Could not get cmd channel (%d)\n", err); return err; } cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl, "speech-data"); if (cs_char_data.channel_id_data < 0) { err = cs_char_data.channel_id_data; dev_err(dev, "Could not get data channel (%d)\n", err); return err; } err = misc_register(&cs_char_miscdev); if (err) dev_err(dev, "Failed to register: %d\n", err); return err; } static int cs_hsi_client_remove(struct device *dev) { struct cs_hsi_iface *hi; dev_dbg(dev, "hsi_client_remove\n"); misc_deregister(&cs_char_miscdev); spin_lock_bh(&cs_char_data.lock); hi = cs_char_data.hi; cs_char_data.hi = NULL; spin_unlock_bh(&cs_char_data.lock); if (hi) cs_hsi_stop(hi); return 0; } static struct hsi_client_driver cs_hsi_driver = { .driver = { .name = "cmt-speech", .owner = THIS_MODULE, .probe = cs_hsi_client_probe, .remove = cs_hsi_client_remove, }, }; static int __init cs_char_init(void) { pr_info("CMT speech driver 
added\n"); return hsi_register_client_driver(&cs_hsi_driver); } module_init(cs_char_init); static void __exit cs_char_exit(void) { hsi_unregister_client_driver(&cs_hsi_driver); pr_info("CMT speech driver removed\n"); } module_exit(cs_char_exit); MODULE_ALIAS("hsi:cmt-speech"); MODULE_AUTHOR("Kai Vehmanen <[email protected]>"); MODULE_AUTHOR("Peter Ujfalusi <[email protected]>"); MODULE_DESCRIPTION("CMT speech driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/hsi/clients/cmt_speech.c
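/*
 * Illustrative userspace sketch (not part of the kernel sources): the
 * rough open/configure/poll sequence a cmt_speech client such as
 * libcmtspeechdata would go through against the char device above.
 * Ioctl names and struct cs_buffer_config come from the uapi header
 * <linux/hsi/cs-protocol.h>; the buffer counts and sizes below are
 * made-up example values, not a reference configuration.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/hsi/cs-protocol.h>

int main(void)
{
	struct cs_buffer_config cfg = {
		.rx_bufs = 2,
		.tx_bufs = 2,
		.buf_size = 320,	/* assumed payload size */
	};
	unsigned int ver;
	uint32_t cmd;
	void *mem;
	int fd;

	fd = open("/dev/cmt_speech", O_RDWR);	/* single-open device */
	if (fd < 0)
		return 1;
	if (ioctl(fd, CS_GET_IF_VERSION, &ver) < 0)
		goto out;
	/* the driver backs the mapping with one zeroed page (the config block) */
	mem = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		goto out;
	if (ioctl(fd, CS_CONFIG_BUFS, &cfg) < 0)
		goto out;
	/* each read() yields one 32-bit command/notification word */
	while (read(fd, &cmd, sizeof(cmd)) == sizeof(cmd))
		printf("event 0x%08x (if version %u)\n", cmd, ver);
out:
	close(fd);
	return 0;
}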
// SPDX-License-Identifier: GPL-2.0-only /* * ssi_protocol.c * * Implementation of the SSI McSAAB improved protocol. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * Copyright (C) 2013 Sebastian Reichel <[email protected]> * * Contact: Carlos Chinea <[email protected]> */ #include <linux/atomic.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/if_phonet.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/hsi/hsi.h> #include <linux/hsi/ssi_protocol.h> #define SSIP_TXQUEUE_LEN 100 #define SSIP_MAX_MTU 65535 #define SSIP_DEFAULT_MTU 4000 #define PN_MEDIA_SOS 21 #define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ #define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ #define SSIP_KATOUT 15 /* 15 msecs */ #define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */ #define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) #define SSIP_CMT_LOADER_SYNC 0x11223344 /* * SSI protocol command definitions */ #define SSIP_COMMAND(data) ((data) >> 28) #define SSIP_PAYLOAD(data) ((data) & 0xfffffff) /* Commands */ #define SSIP_SW_BREAK 0 #define SSIP_BOOTINFO_REQ 1 #define SSIP_BOOTINFO_RESP 2 #define SSIP_WAKETEST_RESULT 3 #define SSIP_START_TRANS 4 #define SSIP_READY 5 /* Payloads */ #define SSIP_DATA_VERSION(data) ((data) & 0xff) #define SSIP_LOCAL_VERID 1 #define SSIP_WAKETEST_OK 0 #define SSIP_WAKETEST_FAILED 1 #define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) #define SSIP_MSG_ID(data) ((data) & 0xff) /* Generic Command */ #define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff)) /* Commands for the control channel */ #define SSIP_BOOTINFO_REQ_CMD(ver) \ SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver)) #define SSIP_BOOTINFO_RESP_CMD(ver) \ SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver)) #define SSIP_START_TRANS_CMD(pdulen, id) \ SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id))) #define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) #define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) #define SSIP_WAKETEST_FLAG 0 /* Main state machine states */ enum { INIT, HANDSHAKE, ACTIVE, }; /* Send state machine states */ enum { SEND_IDLE, WAIT4READY, SEND_READY, SENDING, SENDING_SWBREAK, }; /* Receive state machine states */ enum { RECV_IDLE, RECV_READY, RECEIVING, }; /** * struct ssi_protocol - SSI protocol (McSAAB) data * @main_state: Main state machine * @send_state: TX state machine * @recv_state: RX state machine * @flags: Flags, currently only used to follow wake line test * @rxid: RX data id * @txid: TX data id * @txqueue_len: TX queue length * @tx_wd: TX watchdog * @rx_wd: RX watchdog * @keep_alive: Workaround for SSI HW bug * @lock: To serialize access to this struct * @netdev: Phonet network device * @txqueue: TX data queue * @cmdqueue: Queue of free commands * @cl: HSI client own reference * @link: Link for ssip_list * @tx_usecount: Refcount to keep track the slaves that use the wake line * @channel_id_cmd: HSI channel id for command stream * @channel_id_data: HSI channel id for data stream */ struct ssi_protocol { unsigned int main_state; unsigned int send_state; unsigned int recv_state; unsigned long flags; u8 rxid; u8 txid; unsigned int txqueue_len; struct timer_list tx_wd; 
struct timer_list rx_wd; struct timer_list keep_alive; /* wake-up workaround */ spinlock_t lock; struct net_device *netdev; struct list_head txqueue; struct list_head cmdqueue; struct work_struct work; struct hsi_client *cl; struct list_head link; atomic_t tx_usecnt; int channel_id_cmd; int channel_id_data; }; /* List of ssi protocol instances */ static LIST_HEAD(ssip_list); static void ssip_rxcmd_complete(struct hsi_msg *msg); static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) { u32 *data; data = sg_virt(msg->sgt.sgl); *data = cmd; } static inline u32 ssip_get_cmd(struct hsi_msg *msg) { u32 *data; data = sg_virt(msg->sgt.sgl); return *data; } static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) { skb_frag_t *frag; struct scatterlist *sg; int i; BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); sg = msg->sgt.sgl; sg_set_buf(sg, skb->data, skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { sg = sg_next(sg); BUG_ON(!sg); frag = &skb_shinfo(skb)->frags[i]; sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag), skb_frag_off(frag)); } } static void ssip_free_data(struct hsi_msg *msg) { struct sk_buff *skb; skb = msg->context; pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, skb); msg->destructor = NULL; dev_kfree_skb(skb); hsi_free_msg(msg); } static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi, struct sk_buff *skb, gfp_t flags) { struct hsi_msg *msg; msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); if (!msg) return NULL; ssip_skb_to_msg(skb, msg); msg->destructor = ssip_free_data; msg->channel = ssi->channel_id_data; msg->context = skb; return msg; } static inline void ssip_release_cmd(struct hsi_msg *msg) { struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); spin_lock_bh(&ssi->lock); list_add_tail(&msg->link, &ssi->cmdqueue); spin_unlock_bh(&ssi->lock); } static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi) { struct hsi_msg *msg; BUG_ON(list_empty(&ssi->cmdqueue)); spin_lock_bh(&ssi->lock); msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); list_del(&msg->link); spin_unlock_bh(&ssi->lock); msg->destructor = ssip_release_cmd; return msg; } static void ssip_free_cmds(struct ssi_protocol *ssi) { struct hsi_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { list_del(&msg->link); msg->destructor = NULL; kfree(sg_virt(msg->sgt.sgl)); hsi_free_msg(msg); } } static int ssip_alloc_cmds(struct ssi_protocol *ssi) { struct hsi_msg *msg; u32 *buf; unsigned int i; for (i = 0; i < SSIP_MAX_CMDS; i++) { msg = hsi_alloc_msg(1, GFP_KERNEL); if (!msg) goto out; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { hsi_free_msg(msg); goto out; } sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); msg->channel = ssi->channel_id_cmd; list_add_tail(&msg->link, &ssi->cmdqueue); } return 0; out: ssip_free_cmds(ssi); return -ENOMEM; } static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) { ssi->recv_state = state; switch (state) { case RECV_IDLE: del_timer(&ssi->rx_wd); if (ssi->send_state == SEND_IDLE) del_timer(&ssi->keep_alive); break; case RECV_READY: /* CMT speech workaround */ if (atomic_read(&ssi->tx_usecnt)) break; fallthrough; case RECEIVING: mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); break; default: break; } } static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned 
int state) { ssi->send_state = state; switch (state) { case SEND_IDLE: case SEND_READY: del_timer(&ssi->tx_wd); if (ssi->recv_state == RECV_IDLE) del_timer(&ssi->keep_alive); break; case WAIT4READY: case SENDING: case SENDING_SWBREAK: mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); break; default: break; } } struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) { struct hsi_client *master = ERR_PTR(-ENODEV); struct ssi_protocol *ssi; list_for_each_entry(ssi, &ssip_list, link) if (slave->device.parent == ssi->cl->device.parent) { master = ssi->cl; break; } return master; } EXPORT_SYMBOL_GPL(ssip_slave_get_master); int ssip_slave_start_tx(struct hsi_client *master) { struct ssi_protocol *ssi = hsi_client_drvdata(master); dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); spin_lock_bh(&ssi->lock); if (ssi->send_state == SEND_IDLE) { ssip_set_txstate(ssi, WAIT4READY); hsi_start_tx(master); } spin_unlock_bh(&ssi->lock); atomic_inc(&ssi->tx_usecnt); return 0; } EXPORT_SYMBOL_GPL(ssip_slave_start_tx); int ssip_slave_stop_tx(struct hsi_client *master) { struct ssi_protocol *ssi = hsi_client_drvdata(master); WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); if (atomic_dec_and_test(&ssi->tx_usecnt)) { spin_lock_bh(&ssi->lock); if ((ssi->send_state == SEND_READY) || (ssi->send_state == WAIT4READY)) { ssip_set_txstate(ssi, SEND_IDLE); hsi_stop_tx(master); } spin_unlock_bh(&ssi->lock); } dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); return 0; } EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); int ssip_slave_running(struct hsi_client *master) { struct ssi_protocol *ssi = hsi_client_drvdata(master); return netif_running(ssi->netdev); } EXPORT_SYMBOL_GPL(ssip_slave_running); static void ssip_reset(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct list_head *head, *tmp; struct hsi_msg *msg; if (netif_running(ssi->netdev)) netif_carrier_off(ssi->netdev); hsi_flush(cl); spin_lock_bh(&ssi->lock); if (ssi->send_state != SEND_IDLE) hsi_stop_tx(cl); spin_unlock_bh(&ssi->lock); if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 0); /* FIXME: To be removed */ spin_lock_bh(&ssi->lock); del_timer(&ssi->rx_wd); del_timer(&ssi->tx_wd); del_timer(&ssi->keep_alive); ssi->main_state = 0; ssi->send_state = 0; ssi->recv_state = 0; ssi->flags = 0; ssi->rxid = 0; ssi->txid = 0; list_for_each_safe(head, tmp, &ssi->txqueue) { msg = list_entry(head, struct hsi_msg, link); dev_dbg(&cl->device, "Pending TX data\n"); list_del(head); ssip_free_data(msg); } ssi->txqueue_len = 0; spin_unlock_bh(&ssi->lock); } static void ssip_dump_state(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; spin_lock_bh(&ssi->lock); dev_err(&cl->device, "Main state: %d\n", ssi->main_state); dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); dev_err(&cl->device, "Send state: %d\n", ssi->send_state); dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? 
"Online" : "Offline"); dev_err(&cl->device, "Wake test %d\n", test_bit(SSIP_WAKETEST_FLAG, &ssi->flags)); dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); list_for_each_entry(msg, &ssi->txqueue, link) dev_err(&cl->device, "pending TX data (%p)\n", msg); spin_unlock_bh(&ssi->lock); } static void ssip_error(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; ssip_dump_state(cl); ssip_reset(cl); msg = ssip_claim_cmd(ssi); msg->complete = ssip_rxcmd_complete; hsi_async_read(cl, msg); } static void ssip_keep_alive(struct timer_list *t) { struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive); struct hsi_client *cl = ssi->cl; dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", ssi->main_state, ssi->recv_state, ssi->send_state); spin_lock(&ssi->lock); if (ssi->recv_state == RECV_IDLE) switch (ssi->send_state) { case SEND_READY: if (atomic_read(&ssi->tx_usecnt) == 0) break; fallthrough; /* * Workaround for cmt-speech in that case * we relay on audio timers. */ case SEND_IDLE: spin_unlock(&ssi->lock); return; } mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); spin_unlock(&ssi->lock); } static void ssip_rx_wd(struct timer_list *t) { struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd); struct hsi_client *cl = ssi->cl; dev_err(&cl->device, "Watchdog triggered\n"); ssip_error(cl); } static void ssip_tx_wd(struct timer_list *t) { struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd); struct hsi_client *cl = ssi->cl; dev_err(&cl->device, "Watchdog triggered\n"); ssip_error(cl); } static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); msg = ssip_claim_cmd(ssi); ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); msg->complete = ssip_release_cmd; hsi_async_write(cl, msg); dev_dbg(&cl->device, "Issuing RX command\n"); msg = ssip_claim_cmd(ssi); msg->complete = ssip_rxcmd_complete; hsi_async_read(cl, msg); } static void ssip_start_rx(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, ssi->recv_state); spin_lock_bh(&ssi->lock); /* * We can have two UP events in a row due to a short low * high transition. Therefore we need to ignore the sencond UP event. 
*/ if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECV_READY); spin_unlock_bh(&ssi->lock); msg = ssip_claim_cmd(ssi); ssip_set_cmd(msg, SSIP_READY_CMD); msg->complete = ssip_release_cmd; dev_dbg(&cl->device, "Send READY\n"); hsi_async_write(cl, msg); } static void ssip_stop_rx(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); spin_lock_bh(&ssi->lock); if (likely(ssi->main_state == ACTIVE)) ssip_set_rxstate(ssi, RECV_IDLE); spin_unlock_bh(&ssi->lock); } static void ssip_free_strans(struct hsi_msg *msg) { ssip_free_data(msg->context); ssip_release_cmd(msg); } static void ssip_strans_complete(struct hsi_msg *msg) { struct hsi_client *cl = msg->cl; struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *data; data = msg->context; ssip_release_cmd(msg); spin_lock_bh(&ssi->lock); ssip_set_txstate(ssi, SENDING); spin_unlock_bh(&ssi->lock); hsi_async_write(cl, data); } static int ssip_xmit(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg, *dmsg; struct sk_buff *skb; spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { spin_unlock_bh(&ssi->lock); return 0; } dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); list_del(&dmsg->link); ssi->txqueue_len--; spin_unlock_bh(&ssi->lock); msg = ssip_claim_cmd(ssi); skb = dmsg->context; msg->context = dmsg; msg->complete = ssip_strans_complete; msg->destructor = ssip_free_strans; spin_lock_bh(&ssi->lock); ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), ssi->txid)); ssi->txid++; ssip_set_txstate(ssi, SENDING); spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Send STRANS (%d frames)\n", SSIP_BYTES_TO_FRAMES(skb->len)); return hsi_async_write(cl, msg); } /* In soft IRQ context */ static void ssip_pn_rx(struct sk_buff *skb) { struct net_device *dev = skb->dev; if (unlikely(!netif_running(dev))) { dev_dbg(&dev->dev, "Drop RX packet\n"); dev->stats.rx_dropped++; dev_kfree_skb(skb); return; } if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { dev_dbg(&dev->dev, "Error drop RX packet\n"); dev->stats.rx_errors++; dev->stats.rx_length_errors++; dev_kfree_skb(skb); return; } dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* length field is exchanged in network byte order */ ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); skb->protocol = htons(ETH_P_PHONET); skb_reset_mac_header(skb); __skb_pull(skb, 1); netif_rx(skb); } static void ssip_rx_data_complete(struct hsi_msg *msg) { struct hsi_client *cl = msg->cl; struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct sk_buff *skb; if (msg->status == HSI_STATUS_ERROR) { dev_err(&cl->device, "RX data error\n"); ssip_free_data(msg); ssip_error(cl); return; } del_timer(&ssi->rx_wd); /* FIXME: Revisit */ skb = msg->context; ssip_pn_rx(skb); hsi_free_msg(msg); } static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; /* Workaround: Ignore CMT Loader message leftover */ if (cmd == SSIP_CMT_LOADER_SYNC) return; switch (ssi->main_state) { case ACTIVE: dev_err(&cl->device, "Boot info req on active state\n"); ssip_error(cl); fallthrough; case INIT: case HANDSHAKE: spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; spin_unlock_bh(&ssi->lock); if 
(!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ spin_lock_bh(&ssi->lock); /* Start boot handshake watchdog */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info req verid mismatch\n"); msg = ssip_claim_cmd(ssi); ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); msg->complete = ssip_release_cmd; hsi_async_write(cl, msg); break; default: dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); break; } } static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info resp verid mismatch\n"); spin_lock_bh(&ssi->lock); if (ssi->main_state != ACTIVE) /* Use tx_wd as a boot watchdog in non ACTIVE state */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); else dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", ssi->main_state); spin_unlock_bh(&ssi->lock); } static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); unsigned int wkres = SSIP_PAYLOAD(cmd); spin_lock_bh(&ssi->lock); if (ssi->main_state != HANDSHAKE) { dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", ssi->main_state); spin_unlock_bh(&ssi->lock); return; } spin_unlock_bh(&ssi->lock); if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 0); /* FIXME: To be removed */ spin_lock_bh(&ssi->lock); ssi->main_state = ACTIVE; del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ spin_unlock_bh(&ssi->lock); dev_notice(&cl->device, "WAKELINES TEST %s\n", wkres & SSIP_WAKETEST_FAILED ? 
"FAILED" : "OK"); if (wkres & SSIP_WAKETEST_FAILED) { ssip_error(cl); return; } dev_dbg(&cl->device, "CMT is ONLINE\n"); netif_wake_queue(ssi->netdev); netif_carrier_on(ssi->netdev); } static void ssip_rx_ready(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); spin_unlock_bh(&ssi->lock); return; } if (ssi->send_state != WAIT4READY) { dev_dbg(&cl->device, "Ignore spurious READY command\n"); spin_unlock_bh(&ssi->lock); return; } ssip_set_txstate(ssi, SEND_READY); spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct sk_buff *skb; struct hsi_msg *msg; int len = SSIP_PDU_LENGTH(cmd); dev_dbg(&cl->device, "RX strans: %d frames\n", len); spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECEIVING); if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { dev_err(&cl->device, "START TRANS id %d expected %d\n", SSIP_MSG_ID(cmd), ssi->rxid); spin_unlock_bh(&ssi->lock); goto out1; } ssi->rxid++; spin_unlock_bh(&ssi->lock); skb = netdev_alloc_skb(ssi->netdev, len * 4); if (unlikely(!skb)) { dev_err(&cl->device, "No memory for rx skb\n"); goto out1; } skb_put(skb, len * 4); msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); if (unlikely(!msg)) { dev_err(&cl->device, "No memory for RX data msg\n"); goto out2; } msg->complete = ssip_rx_data_complete; hsi_async_read(cl, msg); return; out2: dev_kfree_skb(skb); out1: ssip_error(cl); } static void ssip_rxcmd_complete(struct hsi_msg *msg) { struct hsi_client *cl = msg->cl; u32 cmd = ssip_get_cmd(msg); unsigned int cmdid = SSIP_COMMAND(cmd); if (msg->status == HSI_STATUS_ERROR) { dev_err(&cl->device, "RX error detected\n"); ssip_release_cmd(msg); ssip_error(cl); return; } hsi_async_read(cl, msg); dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); switch (cmdid) { case SSIP_SW_BREAK: /* Ignored */ break; case SSIP_BOOTINFO_REQ: ssip_rx_bootinforeq(cl, cmd); break; case SSIP_BOOTINFO_RESP: ssip_rx_bootinforesp(cl, cmd); break; case SSIP_WAKETEST_RESULT: ssip_rx_waketest(cl, cmd); break; case SSIP_START_TRANS: ssip_rx_strans(cl, cmd); break; case SSIP_READY: ssip_rx_ready(cl); break; default: dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); break; } } static void ssip_swbreak_complete(struct hsi_msg *msg) { struct hsi_client *cl = msg->cl; struct ssi_protocol *ssi = hsi_client_drvdata(cl); ssip_release_cmd(msg); spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { if (atomic_read(&ssi->tx_usecnt)) { ssip_set_txstate(ssi, SEND_READY); } else { ssip_set_txstate(ssi, SEND_IDLE); hsi_stop_tx(cl); } spin_unlock_bh(&ssi->lock); } else { spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } netif_wake_queue(ssi->netdev); } static void ssip_tx_data_complete(struct hsi_msg *msg) { struct hsi_client *cl = msg->cl; struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *cmsg; if (msg->status == HSI_STATUS_ERROR) { dev_err(&cl->device, "TX data error\n"); ssip_error(cl); goto out; } spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { ssip_set_txstate(ssi, SENDING_SWBREAK); spin_unlock_bh(&ssi->lock); cmsg = ssip_claim_cmd(ssi); ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); cmsg->complete = 
ssip_swbreak_complete; dev_dbg(&cl->device, "Send SWBREAK\n"); hsi_async_write(cl, cmsg); } else { spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } out: ssip_free_data(msg); } static void ssip_port_event(struct hsi_client *cl, unsigned long event) { switch (event) { case HSI_EVENT_START_RX: ssip_start_rx(cl); break; case HSI_EVENT_STOP_RX: ssip_stop_rx(cl); break; default: return; } } static int ssip_pn_open(struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); struct ssi_protocol *ssi = hsi_client_drvdata(cl); int err; err = hsi_claim_port(cl, 1); if (err < 0) { dev_err(&cl->device, "SSI port already claimed\n"); return err; } err = hsi_register_port_event(cl, ssip_port_event); if (err < 0) { dev_err(&cl->device, "Register HSI port event failed (%d)\n", err); hsi_release_port(cl); return err; } dev_dbg(&cl->device, "Configuring SSI port\n"); hsi_setup(cl); if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; spin_unlock_bh(&ssi->lock); ssip_send_bootinfo_req_cmd(cl); return 0; } static int ssip_pn_stop(struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); ssip_reset(cl); hsi_unregister_port_event(cl); hsi_release_port(cl); return 0; } static void ssip_xmit_work(struct work_struct *work) { struct ssi_protocol *ssi = container_of(work, struct ssi_protocol, work); struct hsi_client *cl = ssi->cl; ssip_xmit(cl); } static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); struct ssi_protocol *ssi = hsi_client_drvdata(cl); struct hsi_msg *msg; if ((skb->protocol != htons(ETH_P_PHONET)) || (skb->len < SSIP_MIN_PN_HDR)) goto drop; /* Pad to 32-bits - FIXME: Revisit*/ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) goto inc_dropped; /* * Modem sends Phonet messages over SSI with its own endianness. * Assume that modem has the same endianness as we do. 
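* The 16-bit Phonet length word is the one exception: it is exchanged * in network byte order and therefore byte-swapped with htons() just below.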
*/ if (skb_cow_head(skb, 0)) goto drop; /* length field is exchanged in network byte order */ ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); if (!msg) { dev_dbg(&cl->device, "Dropping tx data: No memory\n"); goto drop; } msg->complete = ssip_tx_data_complete; spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); goto drop2; } list_add_tail(&msg->link, &ssi->txqueue); ssi->txqueue_len++; if (dev->tx_queue_len < ssi->txqueue_len) { dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); netif_stop_queue(dev); } if (ssi->send_state == SEND_IDLE) { ssip_set_txstate(ssi, WAIT4READY); spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); hsi_start_tx(cl); } else if (ssi->send_state == SEND_READY) { /* Needed for cmt-speech workaround */ dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", ssi->txqueue_len); spin_unlock_bh(&ssi->lock); schedule_work(&ssi->work); } else { spin_unlock_bh(&ssi->lock); } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; return NETDEV_TX_OK; drop2: hsi_free_msg(msg); drop: dev_kfree_skb(skb); inc_dropped: dev->stats.tx_dropped++; return NETDEV_TX_OK; } /* CMT reset event handler */ void ssip_reset_event(struct hsi_client *master) { struct ssi_protocol *ssi = hsi_client_drvdata(master); dev_err(&ssi->cl->device, "CMT reset detected!\n"); ssip_error(ssi->cl); } EXPORT_SYMBOL_GPL(ssip_reset_event); static const struct net_device_ops ssip_pn_ops = { .ndo_open = ssip_pn_open, .ndo_stop = ssip_pn_stop, .ndo_start_xmit = ssip_pn_xmit, }; static void ssip_pn_setup(struct net_device *dev) { static const u8 addr = PN_MEDIA_SOS; dev->features = 0; dev->netdev_ops = &ssip_pn_ops; dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = SSIP_DEFAULT_MTU; dev->hard_header_len = 1; dev->addr_len = 1; dev_addr_set(dev, &addr); dev->tx_queue_len = SSIP_TXQUEUE_LEN; dev->needs_free_netdev = true; dev->header_ops = &phonet_header_ops; } static int ssi_protocol_probe(struct device *dev) { static const char ifname[] = "phonet%d"; struct hsi_client *cl = to_hsi_client(dev); struct ssi_protocol *ssi; int err; ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); if (!ssi) return -ENOMEM; spin_lock_init(&ssi->lock); timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE); timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE); timer_setup(&ssi->keep_alive, ssip_keep_alive, 0); INIT_LIST_HEAD(&ssi->txqueue); INIT_LIST_HEAD(&ssi->cmdqueue); atomic_set(&ssi->tx_usecnt, 0); hsi_client_set_drvdata(cl, ssi); ssi->cl = cl; INIT_WORK(&ssi->work, ssip_xmit_work); ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); if (ssi->channel_id_cmd < 0) { err = ssi->channel_id_cmd; dev_err(dev, "Could not get cmd channel (%d)\n", err); goto out; } ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); if (ssi->channel_id_data < 0) { err = ssi->channel_id_data; dev_err(dev, "Could not get data channel (%d)\n", err); goto out; } err = ssip_alloc_cmds(ssi); if (err < 0) { dev_err(dev, "No memory for commands\n"); goto out; } ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup); if (!ssi->netdev) { dev_err(dev, "No memory for netdev\n"); err = -ENOMEM; goto out1; } /* MTU range: 6 - 65535 */ ssi->netdev->min_mtu = PHONET_MIN_MTU; ssi->netdev->max_mtu = SSIP_MAX_MTU; SET_NETDEV_DEV(ssi->netdev, dev); netif_carrier_off(ssi->netdev); err = 
register_netdev(ssi->netdev); if (err < 0) { dev_err(dev, "Register netdev failed (%d)\n", err); goto out2; } list_add(&ssi->link, &ssip_list); dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n", ssi->channel_id_cmd, ssi->channel_id_data); return 0; out2: free_netdev(ssi->netdev); out1: ssip_free_cmds(ssi); out: kfree(ssi); return err; } static int ssi_protocol_remove(struct device *dev) { struct hsi_client *cl = to_hsi_client(dev); struct ssi_protocol *ssi = hsi_client_drvdata(cl); list_del(&ssi->link); unregister_netdev(ssi->netdev); ssip_free_cmds(ssi); hsi_client_set_drvdata(cl, NULL); kfree(ssi); return 0; } static struct hsi_client_driver ssip_driver = { .driver = { .name = "ssi-protocol", .owner = THIS_MODULE, .probe = ssi_protocol_probe, .remove = ssi_protocol_remove, }, }; static int __init ssip_init(void) { pr_info("SSI protocol aka McSAAB added\n"); return hsi_register_client_driver(&ssip_driver); } module_init(ssip_init); static void __exit ssip_exit(void) { hsi_unregister_client_driver(&ssip_driver); pr_info("SSI protocol driver removed\n"); } module_exit(ssip_exit); MODULE_ALIAS("hsi:ssi-protocol"); MODULE_AUTHOR("Carlos Chinea <[email protected]>"); MODULE_AUTHOR("Remi Denis-Courmont <[email protected]>"); MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); MODULE_LICENSE("GPL");
linux-master
drivers/hsi/clients/ssi_protocol.c
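/*
 * Standalone illustration (not kernel code) of the McSAAB command
 * encoding used above: one 32-bit word carries a 4-bit command in the
 * top nibble and a 28-bit payload. The macros are copied verbatim from
 * ssi_protocol.c; the sample packet length and message id are made up.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
#define SSIP_START_TRANS	4
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
#define SSIP_START_TRANS_CMD(pdulen, id) \
	SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_BYTES_TO_FRAMES(x)	((((x) - 1) >> 2) + 1)

int main(void)
{
	/* a 1500-byte Phonet packet, padded up to 32-bit frames */
	uint32_t frames = SSIP_BYTES_TO_FRAMES(1500);	/* -> 375 */
	uint32_t cmd = SSIP_START_TRANS_CMD(frames, 42);

	/* the decode macros recover exactly what was packed */
	assert(SSIP_COMMAND(cmd) == SSIP_START_TRANS);
	assert(SSIP_PDU_LENGTH(cmd) == frames);
	assert(SSIP_MSG_ID(cmd) == 42);
	printf("START_TRANS word: 0x%08x (%u frames, id %u)\n",
	       cmd, frames, SSIP_MSG_ID(cmd));
	return 0;
}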
// SPDX-License-Identifier: GPL-2.0-only /* * nokia-modem.c * * HSI client driver for Nokia N900 modem. * * Copyright (C) 2014 Sebastian Reichel <[email protected]> */ #include <linux/gpio/consumer.h> #include <linux/hsi/hsi.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/hsi/ssi_protocol.h> static unsigned int pm = 1; module_param(pm, int, 0400); MODULE_PARM_DESC(pm, "Enable power management (0=disabled, 1=userland based [default])"); struct nokia_modem_gpio { struct gpio_desc *gpio; const char *name; }; struct nokia_modem_device { struct tasklet_struct nokia_modem_rst_ind_tasklet; int nokia_modem_rst_ind_irq; struct device *device; struct nokia_modem_gpio *gpios; int gpio_amount; struct hsi_client *ssi_protocol; struct hsi_client *cmt_speech; }; static void do_nokia_modem_rst_ind_tasklet(unsigned long data) { struct nokia_modem_device *modem = (struct nokia_modem_device *)data; if (!modem) return; dev_info(modem->device, "CMT rst line change detected\n"); if (modem->ssi_protocol) ssip_reset_event(modem->ssi_protocol); } static irqreturn_t nokia_modem_rst_ind_isr(int irq, void *data) { struct nokia_modem_device *modem = (struct nokia_modem_device *)data; tasklet_schedule(&modem->nokia_modem_rst_ind_tasklet); return IRQ_HANDLED; } static void nokia_modem_gpio_unexport(struct device *dev) { struct nokia_modem_device *modem = dev_get_drvdata(dev); int i; for (i = 0; i < modem->gpio_amount; i++) { sysfs_remove_link(&dev->kobj, modem->gpios[i].name); gpiod_unexport(modem->gpios[i].gpio); } } static int nokia_modem_gpio_probe(struct device *dev) { struct device_node *np = dev->of_node; struct nokia_modem_device *modem = dev_get_drvdata(dev); int gpio_count, gpio_name_count, i, err; gpio_count = gpiod_count(dev, NULL); if (gpio_count < 0) { dev_err(dev, "missing gpios: %d\n", gpio_count); return gpio_count; } gpio_name_count = of_property_count_strings(np, "gpio-names"); if (gpio_count != gpio_name_count) { dev_err(dev, "number of gpios does not equal number of gpio names\n"); return -EINVAL; } modem->gpios = devm_kcalloc(dev, gpio_count, sizeof(*modem->gpios), GFP_KERNEL); if (!modem->gpios) return -ENOMEM; modem->gpio_amount = gpio_count; for (i = 0; i < gpio_count; i++) { modem->gpios[i].gpio = devm_gpiod_get_index(dev, NULL, i, GPIOD_OUT_LOW); if (IS_ERR(modem->gpios[i].gpio)) { dev_err(dev, "Could not get gpio %d\n", i); return PTR_ERR(modem->gpios[i].gpio); } err = of_property_read_string_index(np, "gpio-names", i, &(modem->gpios[i].name)); if (err) { dev_err(dev, "Could not get gpio name %d\n", i); return err; } err = gpiod_export(modem->gpios[i].gpio, 0); if (err) return err; err = gpiod_export_link(dev, modem->gpios[i].name, modem->gpios[i].gpio); if (err) return err; } return 0; } static int nokia_modem_probe(struct device *dev) { struct device_node *np; struct nokia_modem_device *modem; struct hsi_client *cl = to_hsi_client(dev); struct hsi_port *port = hsi_get_port(cl); int irq, pflags, err; struct hsi_board_info ssip; struct hsi_board_info cmtspeech; np = dev->of_node; if (!np) { dev_err(dev, "device tree node not found\n"); return -ENXIO; } modem = devm_kzalloc(dev, sizeof(*modem), GFP_KERNEL); if (!modem) return -ENOMEM; dev_set_drvdata(dev, modem); modem->device = dev; irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq); return -EINVAL; } modem->nokia_modem_rst_ind_irq = irq; pflags = irq_get_trigger_type(irq); tasklet_init(&modem->nokia_modem_rst_ind_tasklet, 
do_nokia_modem_rst_ind_tasklet, (unsigned long)modem); err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr, pflags, "modem_rst_ind", modem); if (err < 0) { dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n", irq, pflags); return err; } enable_irq_wake(irq); if (pm) { err = nokia_modem_gpio_probe(dev); if (err < 0) { dev_err(dev, "Could not probe GPIOs\n"); goto error1; } } ssip.name = "ssi-protocol"; ssip.tx_cfg = cl->tx_cfg; ssip.rx_cfg = cl->rx_cfg; ssip.platform_data = NULL; ssip.archdata = NULL; modem->ssi_protocol = hsi_new_client(port, &ssip); if (!modem->ssi_protocol) { dev_err(dev, "Could not register ssi-protocol device\n"); err = -ENOMEM; goto error2; } err = device_attach(&modem->ssi_protocol->device); if (err == 0) { dev_dbg(dev, "Missing ssi-protocol driver\n"); err = -EPROBE_DEFER; goto error3; } else if (err < 0) { dev_err(dev, "Could not load ssi-protocol driver (%d)\n", err); goto error3; } cmtspeech.name = "cmt-speech"; cmtspeech.tx_cfg = cl->tx_cfg; cmtspeech.rx_cfg = cl->rx_cfg; cmtspeech.platform_data = NULL; cmtspeech.archdata = NULL; modem->cmt_speech = hsi_new_client(port, &cmtspeech); if (!modem->cmt_speech) { dev_err(dev, "Could not register cmt-speech device\n"); err = -ENOMEM; goto error3; } err = device_attach(&modem->cmt_speech->device); if (err == 0) { dev_dbg(dev, "Missing cmt-speech driver\n"); err = -EPROBE_DEFER; goto error4; } else if (err < 0) { dev_err(dev, "Could not load cmt-speech driver (%d)\n", err); goto error4; } dev_info(dev, "Registered Nokia HSI modem\n"); return 0; error4: hsi_remove_client(&modem->cmt_speech->device, NULL); error3: hsi_remove_client(&modem->ssi_protocol->device, NULL); error2: nokia_modem_gpio_unexport(dev); error1: disable_irq_wake(modem->nokia_modem_rst_ind_irq); tasklet_kill(&modem->nokia_modem_rst_ind_tasklet); return err; } static int nokia_modem_remove(struct device *dev) { struct nokia_modem_device *modem = dev_get_drvdata(dev); if (!modem) return 0; if (modem->cmt_speech) { hsi_remove_client(&modem->cmt_speech->device, NULL); modem->cmt_speech = NULL; } if (modem->ssi_protocol) { hsi_remove_client(&modem->ssi_protocol->device, NULL); modem->ssi_protocol = NULL; } nokia_modem_gpio_unexport(dev); dev_set_drvdata(dev, NULL); disable_irq_wake(modem->nokia_modem_rst_ind_irq); tasklet_kill(&modem->nokia_modem_rst_ind_tasklet); return 0; } #ifdef CONFIG_OF static const struct of_device_id nokia_modem_of_match[] = { { .compatible = "nokia,n900-modem", }, { .compatible = "nokia,n950-modem", }, { .compatible = "nokia,n9-modem", }, {}, }; MODULE_DEVICE_TABLE(of, nokia_modem_of_match); #endif static struct hsi_client_driver nokia_modem_driver = { .driver = { .name = "nokia-modem", .owner = THIS_MODULE, .probe = nokia_modem_probe, .remove = nokia_modem_remove, .of_match_table = of_match_ptr(nokia_modem_of_match), }, }; static int __init nokia_modem_init(void) { return hsi_register_client_driver(&nokia_modem_driver); } module_init(nokia_modem_init); static void __exit nokia_modem_exit(void) { hsi_unregister_client_driver(&nokia_modem_driver); } module_exit(nokia_modem_exit); MODULE_ALIAS("hsi:nokia-modem"); MODULE_AUTHOR("Sebastian Reichel <[email protected]>"); MODULE_DESCRIPTION("HSI driver module for Nokia N900 Modem"); MODULE_LICENSE("GPL");
linux-master
drivers/hsi/clients/nokia-modem.c
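/*
 * For orientation, a device-tree fragment of the shape this driver
 * expects. The GPIO controller phandles and pin numbers below are
 * placeholders, not values copied from a real board file; what matters
 * is the pairing rule enforced by nokia_modem_gpio_probe(): one
 * "gpio-names" string per "gpios" entry, in the same order.
 *
 *	modem: hsi-client {
 *		compatible = "nokia,n900-modem";
 *		interrupts-extended = <&gpio3 8 IRQ_TYPE_EDGE_RISING>;	// rst_ind
 *		gpios = <&gpio3 6 GPIO_ACTIVE_HIGH>,	// cmt_apeslpx
 *			<&gpio3 10 GPIO_ACTIVE_HIGH>,	// cmt_rst_rq
 *			<&gpio2 9 GPIO_ACTIVE_HIGH>,	// cmt_en
 *			<&gpio2 10 GPIO_ACTIVE_HIGH>,	// cmt_rst
 *			<&gpio5 29 GPIO_ACTIVE_HIGH>;	// cmt_bsi
 *		gpio-names = "cmt_apeslpx", "cmt_rst_rq", "cmt_en",
 *			     "cmt_rst", "cmt_bsi";
 *	};
 */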
// SPDX-License-Identifier: GPL-2.0-only /* OMAP SSI driver. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * Copyright (C) 2014 Sebastian Reichel <[email protected]> * * Contact: Carlos Chinea <[email protected]> */ #include <linux/compiler.h> #include <linux/err.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/hsi/ssi_protocol.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/hsi/hsi.h> #include <linux/idr.h> #include "omap_ssi_regs.h" #include "omap_ssi.h" /* For automatically allocated device IDs */ static DEFINE_IDA(platform_omap_ssi_ida); #ifdef CONFIG_DEBUG_FS static int ssi_regs_show(struct seq_file *m, void *p __maybe_unused) { struct hsi_controller *ssi = m->private; struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *sys = omap_ssi->sys; pm_runtime_get_sync(ssi->device.parent); seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); pm_runtime_put(ssi->device.parent); return 0; } static int ssi_gdd_regs_show(struct seq_file *m, void *p __maybe_unused) { struct hsi_controller *ssi = m->private; struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *gdd = omap_ssi->gdd; void __iomem *sys = omap_ssi->sys; int lch; pm_runtime_get_sync(ssi->device.parent); seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG)); seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", readl(sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); seq_printf(m, "HW_ID\t\t: 0x%08x\n", readl(gdd + SSI_GDD_HW_ID_REG)); seq_printf(m, "PPORT_ID\t: 0x%08x\n", readl(gdd + SSI_GDD_PPORT_ID_REG)); seq_printf(m, "MPORT_ID\t: 0x%08x\n", readl(gdd + SSI_GDD_MPORT_ID_REG)); seq_printf(m, "TEST\t\t: 0x%08x\n", readl(gdd + SSI_GDD_TEST_REG)); seq_printf(m, "GCR\t\t: 0x%08x\n", readl(gdd + SSI_GDD_GCR_REG)); for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { seq_printf(m, "\nGDD LCH %d\n=========\n", lch); seq_printf(m, "CSDP\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CSDP_REG(lch))); seq_printf(m, "CCR\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CCR_REG(lch))); seq_printf(m, "CICR\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CICR_REG(lch))); seq_printf(m, "CSR\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CSR_REG(lch))); seq_printf(m, "CSSA\t\t: 0x%08x\n", readl(gdd + SSI_GDD_CSSA_REG(lch))); seq_printf(m, "CDSA\t\t: 0x%08x\n", readl(gdd + SSI_GDD_CDSA_REG(lch))); seq_printf(m, "CEN\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CEN_REG(lch))); seq_printf(m, "CSAC\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CSAC_REG(lch))); seq_printf(m, "CDAC\t\t: 0x%04x\n", readw(gdd + SSI_GDD_CDAC_REG(lch))); seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); } pm_runtime_put(ssi->device.parent); return 0; } DEFINE_SHOW_ATTRIBUTE(ssi_regs); DEFINE_SHOW_ATTRIBUTE(ssi_gdd_regs); static int ssi_debug_add_ctrl(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct dentry *dir; /* SSI controller */ omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); if 
(!omap_ssi->dir) return -ENOMEM; debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, &ssi_regs_fops); /* SSI GDD (DMA) */ dir = debugfs_create_dir("gdd", omap_ssi->dir); if (!dir) goto rback; debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); return 0; rback: debugfs_remove_recursive(omap_ssi->dir); return -ENOMEM; } static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); debugfs_remove_recursive(omap_ssi->dir); } #endif /* CONFIG_DEBUG_FS */ /* * FIXME: Horrible HACK needed until we remove the useless wakeline test * in the CMT. To be removed !!!! */ void ssi_waketest(struct hsi_client *cl, unsigned int enable) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); omap_port->wktest = !!enable; if (omap_port->wktest) { pm_runtime_get_sync(ssi->device.parent); writel_relaxed(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); } else { writel_relaxed(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); pm_runtime_put(ssi->device.parent); } } EXPORT_SYMBOL_GPL(ssi_waketest); static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; struct hsi_port *port = to_hsi_port(msg->cl->device.parent); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); unsigned int dir; u32 csr; u32 val; spin_lock(&omap_ssi->lock); val = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); val &= ~SSI_GDD_LCH(lch); writel_relaxed(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); if (msg->ttype == HSI_MSG_READ) { dir = DMA_FROM_DEVICE; val = SSI_DATAAVAILABLE(msg->channel); pm_runtime_put(omap_port->pdev); } else { dir = DMA_TO_DEVICE; val = SSI_DATAACCEPT(msg->channel); /* Keep clocks reference for write pio event */ } dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); csr = readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", msg->channel, msg->ttype); spin_unlock(&omap_ssi->lock); if (csr & SSI_CSR_TOUR) { /* Timeout error */ msg->status = HSI_STATUS_ERROR; msg->actual_len = 0; spin_lock(&omap_port->lock); list_del(&msg->link); /* Dequeue msg */ spin_unlock(&omap_port->lock); list_add_tail(&msg->link, &omap_port->errqueue); schedule_delayed_work(&omap_port->errqueue_work, 0); return; } spin_lock(&omap_port->lock); val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); spin_unlock(&omap_port->lock); msg->status = HSI_STATUS_COMPLETED; msg->actual_len = sg_dma_len(msg->sgt.sgl); } static void ssi_gdd_tasklet(unsigned long dev) { struct hsi_controller *ssi = (struct hsi_controller *)dev; struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *sys = omap_ssi->sys; unsigned int lch; u32 status_reg; pm_runtime_get(ssi->device.parent); if (!pm_runtime_active(ssi->device.parent)) { dev_warn(ssi->device.parent, "ssi_gdd_tasklet called without runtime PM!\n"); pm_runtime_put(ssi->device.parent); return; } status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { if (status_reg & SSI_GDD_LCH(lch)) ssi_gdd_complete(ssi, lch); } 
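/* ack the channels we just serviced; if new completions raced in before the ack, run the tasklet again, otherwise re-enable the GDD interrupt */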
writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); pm_runtime_put(ssi->device.parent); if (status_reg) tasklet_hi_schedule(&omap_ssi->gdd_tasklet); else enable_irq(omap_ssi->gdd_irq); } static irqreturn_t ssi_gdd_isr(int irq, void *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); tasklet_hi_schedule(&omap_ssi->gdd_tasklet); disable_irq_nosync(irq); return IRQ_HANDLED; } static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); unsigned long rate = clk_get_rate(omap_ssi->fck); return rate; } static int ssi_clk_event(struct notifier_block *nb, unsigned long event, void *data) { struct omap_ssi_controller *omap_ssi = container_of(nb, struct omap_ssi_controller, fck_nb); struct hsi_controller *ssi = to_hsi_controller(omap_ssi->dev); struct clk_notifier_data *clk_data = data; struct omap_ssi_port *omap_port; int i; switch (event) { case PRE_RATE_CHANGE: dev_dbg(&ssi->device, "pre rate change\n"); for (i = 0; i < ssi->num_ports; i++) { omap_port = omap_ssi->port[i]; if (!omap_port) continue; /* Workaround for SWBREAK + CAwake down race in CMT */ disable_irq(omap_port->wake_irq); /* stop all ssi communication */ pinctrl_pm_select_idle_state(omap_port->pdev); udelay(1); /* wait for racing frames */ } break; case ABORT_RATE_CHANGE: dev_dbg(&ssi->device, "abort rate change\n"); fallthrough; case POST_RATE_CHANGE: dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n", clk_data->old_rate, clk_data->new_rate); omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* kHz */ for (i = 0; i < ssi->num_ports; i++) { omap_port = omap_ssi->port[i]; if (!omap_port) continue; omap_ssi_port_update_fclk(ssi, omap_port); /* resume ssi communication */ pinctrl_pm_select_default_state(omap_port->pdev); enable_irq(omap_port->wake_irq); } break; default: break; } return NOTIFY_DONE; } static int ssi_get_iomem(struct platform_device *pd, const char *name, void __iomem **pbase, dma_addr_t *phy) { struct resource *mem; void __iomem *base; struct hsi_controller *ssi = platform_get_drvdata(pd); mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); base = devm_ioremap_resource(&ssi->device, mem); if (IS_ERR(base)) return PTR_ERR(base); *pbase = base; if (phy) *phy = mem->start; return 0; } static int ssi_add_controller(struct hsi_controller *ssi, struct platform_device *pd) { struct omap_ssi_controller *omap_ssi; int err; omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL); if (!omap_ssi) return -ENOMEM; err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); if (err < 0) return err; ssi->id = err; ssi->owner = THIS_MODULE; ssi->device.parent = &pd->dev; dev_set_name(&ssi->device, "ssi%d", ssi->id); hsi_controller_set_drvdata(ssi, omap_ssi); omap_ssi->dev = &ssi->device; err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL); if (err < 0) goto out_err; err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL); if (err < 0) goto out_err; err = platform_get_irq_byname(pd, "gdd_mpu"); if (err < 0) goto out_err; omap_ssi->gdd_irq = err; tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet, (unsigned long)ssi); err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr, 0, "gdd_mpu", ssi); if (err < 0) { dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)", omap_ssi->gdd_irq, err); goto out_err; } omap_ssi->port = devm_kcalloc(&ssi->device, ssi->num_ports, sizeof(*omap_ssi->port), GFP_KERNEL); if 
(!omap_ssi->port) { err = -ENOMEM; goto out_err; } omap_ssi->fck = devm_clk_get(&ssi->device, "ssi_ssr_fck"); if (IS_ERR(omap_ssi->fck)) { dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n", PTR_ERR(omap_ssi->fck)); err = -ENODEV; goto out_err; } omap_ssi->fck_nb.notifier_call = ssi_clk_event; omap_ssi->fck_nb.priority = INT_MAX; clk_notifier_register(omap_ssi->fck, &omap_ssi->fck_nb); /* TODO: find register, which can be used to detect context loss */ omap_ssi->get_loss = NULL; omap_ssi->max_speed = UINT_MAX; spin_lock_init(&omap_ssi->lock); err = hsi_register_controller(ssi); if (err < 0) goto out_err; return 0; out_err: ida_simple_remove(&platform_omap_ssi_ida, ssi->id); return err; } static int ssi_hw_init(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); int err; err = pm_runtime_resume_and_get(ssi->device.parent); if (err < 0) { dev_err(&ssi->device, "runtime PM failed %d\n", err); return err; } /* Resetting GDD */ writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); /* Get FCK rate in kHz */ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate); writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; pm_runtime_put_sync(ssi->device.parent); return 0; } static void ssi_remove_controller(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); int id = ssi->id; tasklet_kill(&omap_ssi->gdd_tasklet); hsi_unregister_controller(ssi); clk_notifier_unregister(omap_ssi->fck, &omap_ssi->fck_nb); ida_simple_remove(&platform_omap_ssi_ida, id); } static inline int ssi_of_get_available_ports_count(const struct device_node *np) { struct device_node *child; int num = 0; for_each_available_child_of_node(np, child) if (of_device_is_compatible(child, "ti,omap3-ssi-port")) num++; return num; } static int ssi_remove_ports(struct device *dev, void *c) { struct platform_device *pdev = to_platform_device(dev); if (!dev->of_node) return 0; of_node_clear_flag(dev->of_node, OF_POPULATED); of_device_unregister(pdev); return 0; } static int ssi_probe(struct platform_device *pd) { struct platform_device *childpdev; struct device_node *np = pd->dev.of_node; struct device_node *child; struct hsi_controller *ssi; int err; int num_ports; if (!np) { dev_err(&pd->dev, "missing device tree data\n"); return -EINVAL; } num_ports = ssi_of_get_available_ports_count(np); ssi = hsi_alloc_controller(num_ports, GFP_KERNEL); if (!ssi) { dev_err(&pd->dev, "No memory for controller\n"); return -ENOMEM; } platform_set_drvdata(pd, ssi); err = ssi_add_controller(ssi, pd); if (err < 0) { hsi_put_controller(ssi); goto out1; } pm_runtime_enable(&pd->dev); err = ssi_hw_init(ssi); if (err < 0) goto out2; #ifdef CONFIG_DEBUG_FS err = ssi_debug_add_ctrl(ssi); if (err < 0) goto out2; #endif for_each_available_child_of_node(np, child) { if (!of_device_is_compatible(child, "ti,omap3-ssi-port")) continue; childpdev = of_platform_device_create(child, NULL, &pd->dev); if (!childpdev) { err = -ENODEV; dev_err(&pd->dev, "failed to create ssi controller port\n"); of_node_put(child); goto out3; } } dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n", ssi->id, num_ports); return err; out3: device_for_each_child(&pd->dev, NULL, ssi_remove_ports); out2: ssi_remove_controller(ssi); pm_runtime_disable(&pd->dev); out1: platform_set_drvdata(pd, NULL); return err; } static int 
ssi_remove(struct platform_device *pd) { struct hsi_controller *ssi = platform_get_drvdata(pd); /* cleanup of of_platform_populate() call */ device_for_each_child(&pd->dev, NULL, ssi_remove_ports); #ifdef CONFIG_DEBUG_FS ssi_debug_remove_ctrl(ssi); #endif ssi_remove_controller(ssi); platform_set_drvdata(pd, NULL); pm_runtime_disable(&pd->dev); return 0; } #ifdef CONFIG_PM static int omap_ssi_runtime_suspend(struct device *dev) { struct hsi_controller *ssi = dev_get_drvdata(dev); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "runtime suspend!\n"); if (omap_ssi->get_loss) omap_ssi->loss_count = omap_ssi->get_loss(ssi->device.parent); return 0; } static int omap_ssi_runtime_resume(struct device *dev) { struct hsi_controller *ssi = dev_get_drvdata(dev); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "runtime resume!\n"); if ((omap_ssi->get_loss) && (omap_ssi->loss_count == omap_ssi->get_loss(ssi->device.parent))) return 0; writel_relaxed(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); return 0; } static const struct dev_pm_ops omap_ssi_pm_ops = { SET_RUNTIME_PM_OPS(omap_ssi_runtime_suspend, omap_ssi_runtime_resume, NULL) }; #define DEV_PM_OPS (&omap_ssi_pm_ops) #else #define DEV_PM_OPS NULL #endif #ifdef CONFIG_OF static const struct of_device_id omap_ssi_of_match[] = { { .compatible = "ti,omap3-ssi", }, {}, }; MODULE_DEVICE_TABLE(of, omap_ssi_of_match); #else #define omap_ssi_of_match NULL #endif static struct platform_driver ssi_pdriver = { .probe = ssi_probe, .remove = ssi_remove, .driver = { .name = "omap_ssi", .pm = DEV_PM_OPS, .of_match_table = omap_ssi_of_match, }, }; static int __init ssi_init(void) { int ret; ret = platform_driver_register(&ssi_pdriver); if (ret) return ret; ret = platform_driver_register(&ssi_port_pdriver); if (ret) { platform_driver_unregister(&ssi_pdriver); return ret; } return 0; } module_init(ssi_init); static void __exit ssi_exit(void) { platform_driver_unregister(&ssi_port_pdriver); platform_driver_unregister(&ssi_pdriver); } module_exit(ssi_exit); MODULE_ALIAS("platform:omap_ssi"); MODULE_AUTHOR("Carlos Chinea <[email protected]>"); MODULE_AUTHOR("Sebastian Reichel <[email protected]>"); MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); MODULE_LICENSE("GPL v2");
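The ssi_gdd_isr()/ssi_gdd_tasklet() pair above is a classic top-half/bottom-half split: the hard IRQ handler only masks the line with disable_irq_nosync() and schedules the tasklet, and the tasklet acknowledges the latched channel bits, re-reads the status register, and either reschedules itself (work still pending) or re-enables the IRQ. Below is a minimal standalone C sketch of that handoff; it simulates the hardware with a plain variable, and names such as fake_isr/fake_tasklet are illustrative, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static unsigned int status_reg;      /* stands in for SSI_GDD_MPU_IRQ_STATUS_REG */
static bool irq_enabled = true;
static bool tasklet_scheduled;

static void fake_tasklet(void)
{
	tasklet_scheduled = false;
	printf("tasklet: acking status 0x%x\n", status_reg);
	status_reg = 0;                  /* ack the bits we just handled */
	if (status_reg)                  /* did new work latch meanwhile? */
		tasklet_scheduled = true;/* tasklet_hi_schedule() */
	else
		irq_enabled = true;      /* enable_irq() */
}

static void fake_isr(void)
{
	if (!irq_enabled)
		return;                  /* line is masked, nothing to do */
	tasklet_scheduled = true;        /* tasklet_hi_schedule() */
	irq_enabled = false;             /* disable_irq_nosync() */
}

int main(void)
{
	status_reg = 0x5;                /* pretend two DMA channels completed */
	fake_isr();
	while (tasklet_scheduled)
		fake_tasklet();
	printf("irq re-enabled: %s\n", irq_enabled ? "yes" : "no");
	return 0;
}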
linux-master
drivers/hsi/controllers/omap_ssi_core.c
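The other notable control-flow detail in ssi_clk_event() above: on PRE_RATE_CHANGE every port is quiesced (wake IRQ masked, pins idled) before the functional clock moves, and ABORT_RATE_CHANGE deliberately falls through to POST_RATE_CHANGE because an aborted change needs exactly the same resume path as a completed one. A small standalone sketch of that dispatch shape, with hypothetical event codes and stub handlers in place of the kernel's clk notifier machinery:

#include <stdio.h>

enum { PRE_CHANGE, ABORT_CHANGE, POST_CHANGE };  /* stand-ins for the clk events */

static void quiesce_ports(void) { printf("ports idled, traffic stopped\n"); }
static void resume_ports(void)  { printf("divisors updated, traffic resumed\n"); }

static void clk_event(int event)
{
	switch (event) {
	case PRE_CHANGE:
		quiesce_ports();
		break;
	case ABORT_CHANGE:
		/* nothing extra to undo: fall through to the resume path */
	case POST_CHANGE:
		resume_ports();
		break;
	}
}

int main(void)
{
	clk_event(PRE_CHANGE);
	clk_event(ABORT_CHANGE);  /* same recovery as a completed change */
	return 0;
}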
// SPDX-License-Identifier: GPL-2.0-only /* OMAP SSI port driver. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * Copyright (C) 2014 Sebastian Reichel <[email protected]> * * Contact: Carlos Chinea <[email protected]> */ #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/debugfs.h> #include "omap_ssi_regs.h" #include "omap_ssi.h" static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) { return 0; } static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) { return 0; } static inline unsigned int ssi_wakein(struct hsi_port *port) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); return gpiod_get_value(omap_port->wake_gpio); } #ifdef CONFIG_DEBUG_FS static void ssi_debug_remove_port(struct hsi_port *port) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); debugfs_remove_recursive(omap_port->dir); } static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused) { struct hsi_port *port = m->private; struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *base = omap_ssi->sys; unsigned int ch; pm_runtime_get_sync(omap_port->pdev); if (omap_port->wake_irq > 0) seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); seq_printf(m, "WAKE\t\t: 0x%08x\n", readl(base + SSI_WAKE_REG(port->num))); seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, readl(base + SSI_MPU_ENABLE_REG(port->num, 0))); seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, readl(base + SSI_MPU_STATUS_REG(port->num, 0))); /* SST */ base = omap_port->sst_base; seq_puts(m, "\nSST\n===\n"); seq_printf(m, "ID SST\t\t: 0x%08x\n", readl(base + SSI_SST_ID_REG)); seq_printf(m, "MODE\t\t: 0x%08x\n", readl(base + SSI_SST_MODE_REG)); seq_printf(m, "FRAMESIZE\t: 0x%08x\n", readl(base + SSI_SST_FRAMESIZE_REG)); seq_printf(m, "DIVISOR\t\t: 0x%08x\n", readl(base + SSI_SST_DIVISOR_REG)); seq_printf(m, "CHANNELS\t: 0x%08x\n", readl(base + SSI_SST_CHANNELS_REG)); seq_printf(m, "ARBMODE\t\t: 0x%08x\n", readl(base + SSI_SST_ARBMODE_REG)); seq_printf(m, "TXSTATE\t\t: 0x%08x\n", readl(base + SSI_SST_TXSTATE_REG)); seq_printf(m, "BUFSTATE\t: 0x%08x\n", readl(base + SSI_SST_BUFSTATE_REG)); seq_printf(m, "BREAK\t\t: 0x%08x\n", readl(base + SSI_SST_BREAK_REG)); for (ch = 0; ch < omap_port->channels; ch++) { seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, readl(base + SSI_SST_BUFFER_CH_REG(ch))); } /* SSR */ base = omap_port->ssr_base; seq_puts(m, "\nSSR\n===\n"); seq_printf(m, "ID SSR\t\t: 0x%08x\n", readl(base + SSI_SSR_ID_REG)); seq_printf(m, "MODE\t\t: 0x%08x\n", readl(base + SSI_SSR_MODE_REG)); seq_printf(m, "FRAMESIZE\t: 0x%08x\n", readl(base + SSI_SSR_FRAMESIZE_REG)); seq_printf(m, "CHANNELS\t: 0x%08x\n", readl(base + SSI_SSR_CHANNELS_REG)); seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", readl(base + SSI_SSR_TIMEOUT_REG)); seq_printf(m, "RXSTATE\t\t: 0x%08x\n", readl(base + SSI_SSR_RXSTATE_REG)); seq_printf(m, "BUFSTATE\t: 0x%08x\n", readl(base + SSI_SSR_BUFSTATE_REG)); seq_printf(m, "BREAK\t\t: 0x%08x\n", readl(base + SSI_SSR_BREAK_REG)); seq_printf(m, "ERROR\t\t: 0x%08x\n", readl(base + SSI_SSR_ERROR_REG)); seq_printf(m, "ERRORACK\t: 0x%08x\n", readl(base + SSI_SSR_ERRORACK_REG)); for (ch = 0; ch < omap_port->channels; ch++) { seq_printf(m, "BUFFER_CH%d\t: 
0x%08x\n", ch, readl(base + SSI_SSR_BUFFER_CH_REG(ch))); } pm_runtime_put_autosuspend(omap_port->pdev); return 0; } DEFINE_SHOW_ATTRIBUTE(ssi_port_regs); static int ssi_div_get(void *data, u64 *val) { struct hsi_port *port = data; struct omap_ssi_port *omap_port = hsi_port_drvdata(port); pm_runtime_get_sync(omap_port->pdev); *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); pm_runtime_put_autosuspend(omap_port->pdev); return 0; } static int ssi_div_set(void *data, u64 val) { struct hsi_port *port = data; struct omap_ssi_port *omap_port = hsi_port_drvdata(port); if (val > 127) return -EINVAL; pm_runtime_get_sync(omap_port->pdev); writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); omap_port->sst.divisor = val; pm_runtime_put_autosuspend(omap_port->pdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); static void ssi_debug_add_port(struct omap_ssi_port *omap_port, struct dentry *dir) { struct hsi_port *port = to_hsi_port(omap_port->dev); dir = debugfs_create_dir(dev_name(omap_port->dev), dir); omap_port->dir = dir; debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); dir = debugfs_create_dir("sst", dir); debugfs_create_file_unsafe("divisor", 0644, dir, port, &ssi_sst_div_fops); } #endif static void ssi_process_errqueue(struct work_struct *work) { struct omap_ssi_port *omap_port; struct list_head *head, *tmp; struct hsi_msg *msg; omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); list_for_each_safe(head, tmp, &omap_port->errqueue) { msg = list_entry(head, struct hsi_msg, link); msg->complete(msg); list_del(head); } } static int ssi_claim_lch(struct hsi_msg *msg) { struct hsi_port *port = hsi_get_port(msg->cl); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); int lch; for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) if (!omap_ssi->gdd_trn[lch].msg) { omap_ssi->gdd_trn[lch].msg = msg; omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; return lch; } return -EBUSY; } static int ssi_start_dma(struct hsi_msg *msg, int lch) { struct hsi_port *port = hsi_get_port(msg->cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *gdd = omap_ssi->gdd; int err; u16 csdp; u16 ccr; u32 s_addr; u32 d_addr; u32 tmp; /* Hold clocks during the transfer */ pm_runtime_get(omap_port->pdev); if (!pm_runtime_active(omap_port->pdev)) { dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n"); pm_runtime_put_autosuspend(omap_port->pdev); return -EREMOTEIO; } if (msg->ttype == HSI_MSG_READ) { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_FROM_DEVICE); if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); return -EIO; } csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | SSI_DATA_TYPE_S32; ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | SSI_CCR_ENABLE; s_addr = omap_port->ssr_dma + SSI_SSR_BUFFER_CH_REG(msg->channel); d_addr = sg_dma_address(msg->sgt.sgl); } else { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_TO_DEVICE); if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); return -EIO; } csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT 
| SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | SSI_DATA_TYPE_S32; ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | SSI_CCR_ENABLE; s_addr = sg_dma_address(msg->sgt.sgl); d_addr = omap_port->sst_dma + SSI_SST_BUFFER_CH_REG(msg->channel); } dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr); writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), gdd + SSI_GDD_CEN_REG(lch)); spin_lock_bh(&omap_ssi->lock); tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); tmp |= SSI_GDD_LCH(lch); writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); spin_unlock_bh(&omap_ssi->lock); writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); msg->status = HSI_STATUS_PROCEEDING; return 0; } static int ssi_start_pio(struct hsi_msg *msg) { struct hsi_port *port = hsi_get_port(msg->cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); u32 val; pm_runtime_get(omap_port->pdev); if (!pm_runtime_active(omap_port->pdev)) { dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n"); pm_runtime_put_autosuspend(omap_port->pdev); return -EREMOTEIO; } if (msg->ttype == HSI_MSG_WRITE) { val = SSI_DATAACCEPT(msg->channel); /* Hold clocks for pio writes */ pm_runtime_get(omap_port->pdev); } else { val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; } dev_dbg(&port->device, "Single %s transfer\n", msg->ttype ? 
"write" : "read"); val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); pm_runtime_put_autosuspend(omap_port->pdev); msg->actual_len = 0; msg->status = HSI_STATUS_PROCEEDING; return 0; } static int ssi_start_transfer(struct list_head *queue) { struct hsi_msg *msg; int lch = -1; if (list_empty(queue)) return 0; msg = list_first_entry(queue, struct hsi_msg, link); if (msg->status != HSI_STATUS_QUEUED) return 0; if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) lch = ssi_claim_lch(msg); if (lch >= 0) return ssi_start_dma(msg, lch); else return ssi_start_pio(msg); } static int ssi_async_break(struct hsi_msg *msg) { struct hsi_port *port = hsi_get_port(msg->cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); int err = 0; u32 tmp; pm_runtime_get_sync(omap_port->pdev); if (msg->ttype == HSI_MSG_WRITE) { if (omap_port->sst.mode != SSI_MODE_FRAME) { err = -EINVAL; goto out; } writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); msg->status = HSI_STATUS_COMPLETED; msg->complete(msg); } else { if (omap_port->ssr.mode != SSI_MODE_FRAME) { err = -EINVAL; goto out; } spin_lock_bh(&omap_port->lock); tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel(tmp | SSI_BREAKDETECTED, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); msg->status = HSI_STATUS_PROCEEDING; list_add_tail(&msg->link, &omap_port->brkqueue); spin_unlock_bh(&omap_port->lock); } out: pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); return err; } static int ssi_async(struct hsi_msg *msg) { struct hsi_port *port = hsi_get_port(msg->cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct list_head *queue; int err = 0; BUG_ON(!msg); if (msg->sgt.nents > 1) return -ENOSYS; /* TODO: Add sg support */ if (msg->break_frame) return ssi_async_break(msg); if (msg->ttype) { BUG_ON(msg->channel >= omap_port->sst.channels); queue = &omap_port->txqueue[msg->channel]; } else { BUG_ON(msg->channel >= omap_port->ssr.channels); queue = &omap_port->rxqueue[msg->channel]; } msg->status = HSI_STATUS_QUEUED; pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); list_add_tail(&msg->link, queue); err = ssi_start_transfer(queue); if (err < 0) { list_del(&msg->link); msg->status = HSI_STATUS_ERROR; } spin_unlock_bh(&omap_port->lock); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", msg->status, msg->ttype, msg->channel); return err; } static u32 ssi_calculate_div(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); u32 tx_fckrate = (u32) omap_ssi->fck_rate; /* / 2 : SSI TX clock is always half of the SSI functional clock */ tx_fckrate >>= 1; /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ tx_fckrate--; dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, omap_ssi->max_speed); return tx_fckrate / omap_ssi->max_speed; } static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) { struct list_head *node, *tmp; struct hsi_msg *msg; list_for_each_safe(node, tmp, queue) { msg = list_entry(node, struct hsi_msg, link); if ((cl) && (cl != msg->cl)) continue; list_del(node); pr_debug("flush queue: ch %d, msg %p len %d 
type %d ctxt %p\n", msg->channel, msg, msg->sgt.sgl->length, msg->ttype, msg->context); if (msg->destructor) msg->destructor(msg); else hsi_free_msg(msg); } } static int ssi_setup(struct hsi_client *cl) { struct hsi_port *port = to_hsi_port(cl->device.parent); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *sst = omap_port->sst_base; void __iomem *ssr = omap_port->ssr_base; u32 div; u32 val; int err = 0; pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); if (cl->tx_cfg.speed) omap_ssi->max_speed = cl->tx_cfg.speed; div = ssi_calculate_div(ssi); if (div > SSI_MAX_DIVISOR) { dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", cl->tx_cfg.speed, div); err = -EINVAL; goto out; } /* Set TX/RX module to sleep to stop TX/RX during cfg update */ writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); /* Flush posted write */ val = readl(ssr + SSI_SSR_MODE_REG); /* TX */ writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG); writel_relaxed(div, sst + SSI_SST_DIVISOR_REG); writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG); writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); /* RX */ writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG); writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG); writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG); /* Cleanup the break queue if we leave FRAME mode */ if ((omap_port->ssr.mode == SSI_MODE_FRAME) && (cl->rx_cfg.mode != SSI_MODE_FRAME)) ssi_flush_queue(&omap_port->brkqueue, cl); writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); omap_port->channels = max(cl->rx_cfg.num_hw_channels, cl->tx_cfg.num_hw_channels); /* Shadow registering for OFF mode */ /* SST */ omap_port->sst.divisor = div; omap_port->sst.frame_size = 31; omap_port->sst.channels = cl->tx_cfg.num_hw_channels; omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; omap_port->sst.mode = cl->tx_cfg.mode; /* SSR */ omap_port->ssr.frame_size = 31; omap_port->ssr.timeout = 0; omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; omap_port->ssr.mode = cl->rx_cfg.mode; out: spin_unlock_bh(&omap_port->lock); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); return err; } static int ssi_flush(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct hsi_msg *msg; void __iomem *sst = omap_port->sst_base; void __iomem *ssr = omap_port->ssr_base; unsigned int i; u32 err; pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); /* stop all ssi communication */ pinctrl_pm_select_idle_state(omap_port->pdev); udelay(1); /* wait for racing frames */ /* Stop all DMA transfers */ for (i = 0; i < SSI_MAX_GDD_LCH; i++) { msg = omap_ssi->gdd_trn[i].msg; if (!msg || (port != hsi_get_port(msg->cl))) continue; writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); if (msg->ttype == HSI_MSG_READ) pm_runtime_put_autosuspend(omap_port->pdev); omap_ssi->gdd_trn[i].msg = NULL; } /* Flush all SST buffers */ writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG); writel_relaxed(0, sst + SSI_SST_TXSTATE_REG); /* Flush all SSR buffers */ writel_relaxed(0, ssr 
+ SSI_SSR_RXSTATE_REG); writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG); /* Flush all errors */ err = readl(ssr + SSI_SSR_ERROR_REG); writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG); /* Flush break */ writel_relaxed(0, ssr + SSI_SSR_BREAK_REG); /* Clear interrupts */ writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(0xffffff00, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); /* Dequeue all pending requests */ for (i = 0; i < omap_port->channels; i++) { /* Release write clocks */ if (!list_empty(&omap_port->txqueue[i])) pm_runtime_put_autosuspend(omap_port->pdev); ssi_flush_queue(&omap_port->txqueue[i], NULL); ssi_flush_queue(&omap_port->rxqueue[i], NULL); } ssi_flush_queue(&omap_port->brkqueue, NULL); /* Resume SSI communication */ pinctrl_pm_select_default_state(omap_port->pdev); spin_unlock_bh(&omap_port->lock); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); return 0; } static void start_tx_work(struct work_struct *work) { struct omap_ssi_port *omap_port = container_of(work, struct omap_ssi_port, work); struct hsi_port *port = to_hsi_port(omap_port->dev); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); } static int ssi_start_tx(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); spin_lock_bh(&omap_port->wk_lock); if (omap_port->wk_refcount++) { spin_unlock_bh(&omap_port->wk_lock); return 0; } spin_unlock_bh(&omap_port->wk_lock); schedule_work(&omap_port->work); return 0; } static int ssi_stop_tx(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); spin_lock_bh(&omap_port->wk_lock); BUG_ON(!omap_port->wk_refcount); if (--omap_port->wk_refcount) { spin_unlock_bh(&omap_port->wk_lock); return 0; } writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); spin_unlock_bh(&omap_port->wk_lock); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ return 0; } static void ssi_transfer(struct omap_ssi_port *omap_port, struct list_head *queue) { struct hsi_msg *msg; int err = -1; pm_runtime_get(omap_port->pdev); spin_lock_bh(&omap_port->lock); while (err < 0) { err = ssi_start_transfer(queue); if (err < 0) { msg = list_first_entry(queue, struct hsi_msg, link); msg->status = HSI_STATUS_ERROR; msg->actual_len = 0; list_del(&msg->link); spin_unlock_bh(&omap_port->lock); msg->complete(msg); spin_lock_bh(&omap_port->lock); } } spin_unlock_bh(&omap_port->lock); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); } static void ssi_cleanup_queues(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = 
hsi_controller_drvdata(ssi); struct hsi_msg *msg; unsigned int i; u32 rxbufstate = 0; u32 txbufstate = 0; u32 status = SSI_ERROROCCURED; u32 tmp; ssi_flush_queue(&omap_port->brkqueue, cl); if (list_empty(&omap_port->brkqueue)) status |= SSI_BREAKDETECTED; for (i = 0; i < omap_port->channels; i++) { if (list_empty(&omap_port->txqueue[i])) continue; msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, link); if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { txbufstate |= (1 << i); status |= SSI_DATAACCEPT(i); /* Release the clocks writes, also GDD ones */ pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); } ssi_flush_queue(&omap_port->txqueue[i], cl); } for (i = 0; i < omap_port->channels; i++) { if (list_empty(&omap_port->rxqueue[i])) continue; msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, link); if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { rxbufstate |= (1 << i); status |= SSI_DATAAVAILABLE(i); } ssi_flush_queue(&omap_port->rxqueue[i], cl); /* Check if we keep the error detection interrupt armed */ if (!list_empty(&omap_port->rxqueue[i])) status &= ~SSI_ERROROCCURED; } /* Cleanup write buffers */ tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); tmp &= ~txbufstate; writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); /* Cleanup read buffers */ tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); tmp &= ~rxbufstate; writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); /* Disarm and ack pending interrupts */ tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); tmp &= ~status; writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); } static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_msg *msg; unsigned int i; u32 val = 0; u32 tmp; for (i = 0; i < SSI_MAX_GDD_LCH; i++) { msg = omap_ssi->gdd_trn[i].msg; if ((!msg) || (msg->cl != cl)) continue; writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); val |= (1 << i); /* * Clock references for write will be handled in * ssi_cleanup_queues */ if (msg->ttype == HSI_MSG_READ) { pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); } omap_ssi->gdd_trn[i].msg = NULL; } tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); tmp &= ~val; writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); } static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) { writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); /* OCP barrier */ mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); return 0; } static int ssi_release(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); /* Stop all the pending DMA requests for that client */ ssi_cleanup_gdd(ssi, cl); /* Now cleanup all the queues */ ssi_cleanup_queues(cl); /* If it is the last client of the port, do extra checks and cleanup */ if (port->claimed <= 1) { /* * Drop the clock reference for the 
incoming wake line * if it is still kept high by the other side. */ if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_put_sync(omap_port->pdev); pm_runtime_get(omap_port->pdev); /* Stop any SSI TX/RX without a client */ ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); omap_port->sst.mode = SSI_MODE_SLEEP; omap_port->ssr.mode = SSI_MODE_SLEEP; pm_runtime_put(omap_port->pdev); WARN_ON(omap_port->wk_refcount != 0); } spin_unlock_bh(&omap_port->lock); pm_runtime_put_sync(omap_port->pdev); return 0; } static void ssi_error(struct hsi_port *port) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct hsi_msg *msg; unsigned int i; u32 err; u32 val; u32 tmp; /* ACK error */ err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); dev_err(&port->device, "SSI error: 0x%02x\n", err); if (!err) { dev_dbg(&port->device, "spurious SSI error ignored!\n"); return; } spin_lock(&omap_ssi->lock); /* Cancel all GDD read transfers */ for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { msg = omap_ssi->gdd_trn[i].msg; if ((msg) && (msg->ttype == HSI_MSG_READ)) { writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); val |= (1 << i); omap_ssi->gdd_trn[i].msg = NULL; } } tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); tmp &= ~val; writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); spin_unlock(&omap_ssi->lock); /* Cancel all PIO read transfers */ spin_lock(&omap_port->lock); tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); /* ACK error */ writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); writel_relaxed(SSI_ERROROCCURED, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); /* Signal the error all current pending read requests */ for (i = 0; i < omap_port->channels; i++) { if (list_empty(&omap_port->rxqueue[i])) continue; msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, link); list_del(&msg->link); msg->status = HSI_STATUS_ERROR; spin_unlock(&omap_port->lock); msg->complete(msg); /* Now restart queued reads if any */ ssi_transfer(omap_port, &omap_port->rxqueue[i]); spin_lock(&omap_port->lock); } spin_unlock(&omap_port->lock); } static void ssi_break_complete(struct hsi_port *port) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct hsi_msg *msg; struct hsi_msg *tmp; u32 val; dev_dbg(&port->device, "HWBREAK received\n"); spin_lock(&omap_port->lock); val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); val &= ~SSI_BREAKDETECTED; writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); writel(SSI_BREAKDETECTED, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); spin_unlock(&omap_port->lock); list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { msg->status = HSI_STATUS_COMPLETED; spin_lock(&omap_port->lock); list_del(&msg->link); spin_unlock(&omap_port->lock); msg->complete(msg); } } static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) { struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct omap_ssi_port 
*omap_port = hsi_port_drvdata(port); struct hsi_msg *msg; u32 *buf; u32 reg; u32 val; spin_lock_bh(&omap_port->lock); msg = list_first_entry(queue, struct hsi_msg, link); if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { msg->actual_len = 0; msg->status = HSI_STATUS_PENDING; } if (msg->ttype == HSI_MSG_WRITE) val = SSI_DATAACCEPT(msg->channel); else val = SSI_DATAAVAILABLE(msg->channel); if (msg->status == HSI_STATUS_PROCEEDING) { buf = sg_virt(msg->sgt.sgl) + msg->actual_len; if (msg->ttype == HSI_MSG_WRITE) writel(*buf, omap_port->sst_base + SSI_SST_BUFFER_CH_REG(msg->channel)); else *buf = readl(omap_port->ssr_base + SSI_SSR_BUFFER_CH_REG(msg->channel)); dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, msg->ttype, *buf); msg->actual_len += sizeof(*buf); if (msg->actual_len >= msg->sgt.sgl->length) msg->status = HSI_STATUS_COMPLETED; /* * Wait for the last written frame to be really sent before * we call the complete callback */ if ((msg->status == HSI_STATUS_PROCEEDING) || ((msg->status == HSI_STATUS_COMPLETED) && (msg->ttype == HSI_MSG_WRITE))) { writel(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); spin_unlock_bh(&omap_port->lock); return; } } /* Transfer completed at this point */ reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); if (msg->ttype == HSI_MSG_WRITE) { /* Release clocks for write transfer */ pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); } reg &= ~val; writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); list_del(&msg->link); spin_unlock_bh(&omap_port->lock); msg->complete(msg); ssi_transfer(omap_port, queue); } static irqreturn_t ssi_pio_thread(int irq, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *sys = omap_ssi->sys; unsigned int ch; u32 status_reg; pm_runtime_get_sync(omap_port->pdev); do { status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); for (ch = 0; ch < omap_port->channels; ch++) { if (status_reg & SSI_DATAACCEPT(ch)) ssi_pio_complete(port, &omap_port->txqueue[ch]); if (status_reg & SSI_DATAAVAILABLE(ch)) ssi_pio_complete(port, &omap_port->rxqueue[ch]); } if (status_reg & SSI_BREAKDETECTED) ssi_break_complete(port); if (status_reg & SSI_ERROROCCURED) ssi_error(port); status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); /* TODO: sleep if we retry? */ } while (status_reg); pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); return IRQ_HANDLED; } static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); if (ssi_wakein(port)) { /** * We can have a quick High-Low-High transition in the line. * In such a case, if we have long interrupt latencies, * we can miss the low event or get a high event twice. * This workaround will avoid breaking the clock reference * count when such a situation occurs. 
*/ if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_get_sync(omap_port->pdev); dev_dbg(&ssi->device, "Wake in high\n"); if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); } hsi_event(port, HSI_EVENT_START_RX); } else { dev_dbg(&ssi->device, "Wake in low\n"); if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); } hsi_event(port, HSI_EVENT_STOP_RX); if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { pm_runtime_mark_last_busy(omap_port->pdev); pm_runtime_put_autosuspend(omap_port->pdev); } } return IRQ_HANDLED; } static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int err; err = platform_get_irq(pd, 0); if (err < 0) return err; omap_port->irq = err; err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port); if (err < 0) dev_err(&port->device, "Request IRQ %d failed (%d)\n", omap_port->irq, err); return err; } static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int cawake_irq; int err; if (!omap_port->wake_gpio) { omap_port->wake_irq = -1; return 0; } cawake_irq = gpiod_to_irq(omap_port->wake_gpio); omap_port->wake_irq = cawake_irq; err = devm_request_threaded_irq(&port->device, cawake_irq, NULL, ssi_wake_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "SSI cawake", port); if (err < 0) dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", cawake_irq, err); err = enable_irq_wake(cawake_irq); if (err < 0) dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", cawake_irq, err); return err; } static void ssi_queues_init(struct omap_ssi_port *omap_port) { unsigned int ch; for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { INIT_LIST_HEAD(&omap_port->txqueue[ch]); INIT_LIST_HEAD(&omap_port->rxqueue[ch]); } INIT_LIST_HEAD(&omap_port->brkqueue); } static int ssi_port_get_iomem(struct platform_device *pd, const char *name, void __iomem **pbase, dma_addr_t *phy) { struct hsi_port *port = platform_get_drvdata(pd); struct resource *mem; struct resource *ioarea; void __iomem *base; mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); if (!mem) { dev_err(&pd->dev, "IO memory region missing (%s)\n", name); return -ENXIO; } ioarea = devm_request_mem_region(&port->device, mem->start, resource_size(mem), dev_name(&pd->dev)); if (!ioarea) { dev_err(&pd->dev, "%s IO memory region request failed\n", mem->name); return -ENXIO; } base = devm_ioremap(&port->device, mem->start, resource_size(mem)); if (!base) { dev_err(&pd->dev, "%s IO remap failed\n", mem->name); return -ENXIO; } *pbase = base; if (phy) *phy = mem->start; return 0; } static int ssi_port_probe(struct platform_device *pd) { struct device_node *np = pd->dev.of_node; struct hsi_port *port; struct omap_ssi_port *omap_port; struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); struct gpio_desc *cawake_gpio = NULL; u32 port_id; int err; dev_dbg(&pd->dev, "init ssi port...\n"); if (!ssi->port || !omap_ssi->port) { dev_err(&pd->dev, "ssi controller not initialized!\n"); err = -ENODEV; goto error; } /* get id of first uninitialized port in controller */ for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; port_id++) ; if (port_id 
>= ssi->num_ports) { dev_err(&pd->dev, "port id out of range!\n"); err = -ENODEV; goto error; } port = ssi->port[port_id]; if (!np) { dev_err(&pd->dev, "missing device tree data\n"); err = -EINVAL; goto error; } cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN); if (IS_ERR(cawake_gpio)) { err = PTR_ERR(cawake_gpio); dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err); goto error; } omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); if (!omap_port) { err = -ENOMEM; goto error; } omap_port->wake_gpio = cawake_gpio; omap_port->pdev = &pd->dev; omap_port->port_id = port_id; INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); INIT_WORK(&omap_port->work, start_tx_work); /* initialize HSI port */ port->async = ssi_async; port->setup = ssi_setup; port->flush = ssi_flush; port->start_tx = ssi_start_tx; port->stop_tx = ssi_stop_tx; port->release = ssi_release; hsi_port_set_drvdata(port, omap_port); omap_ssi->port[port_id] = omap_port; platform_set_drvdata(pd, port); err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, &omap_port->sst_dma); if (err < 0) goto error; err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, &omap_port->ssr_dma); if (err < 0) goto error; err = ssi_port_irq(port, pd); if (err < 0) goto error; err = ssi_wake_irq(port, pd); if (err < 0) goto error; ssi_queues_init(omap_port); spin_lock_init(&omap_port->lock); spin_lock_init(&omap_port->wk_lock); omap_port->dev = &port->device; pm_runtime_use_autosuspend(omap_port->pdev); pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); pm_runtime_enable(omap_port->pdev); #ifdef CONFIG_DEBUG_FS ssi_debug_add_port(omap_port, omap_ssi->dir); #endif hsi_add_clients_from_dt(port, np); dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id); return 0; error: return err; } static int ssi_port_remove(struct platform_device *pd) { struct hsi_port *port = platform_get_drvdata(pd); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); #ifdef CONFIG_DEBUG_FS ssi_debug_remove_port(port); #endif cancel_delayed_work_sync(&omap_port->errqueue_work); hsi_port_unregister_clients(port); port->async = hsi_dummy_msg; port->setup = hsi_dummy_cl; port->flush = hsi_dummy_cl; port->start_tx = hsi_dummy_cl; port->stop_tx = hsi_dummy_cl; port->release = hsi_dummy_cl; omap_ssi->port[omap_port->port_id] = NULL; platform_set_drvdata(pd, NULL); pm_runtime_dont_use_autosuspend(&pd->dev); pm_runtime_disable(&pd->dev); return 0; } static int ssi_restore_divisor(struct omap_ssi_port *omap_port) { writel_relaxed(omap_port->sst.divisor, omap_port->sst_base + SSI_SST_DIVISOR_REG); return 0; } void omap_ssi_port_update_fclk(struct hsi_controller *ssi, struct omap_ssi_port *omap_port) { /* update divisor */ u32 div = ssi_calculate_div(ssi); omap_port->sst.divisor = div; ssi_restore_divisor(omap_port); } #ifdef CONFIG_PM static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) { struct hsi_port *port = to_hsi_port(omap_port->dev); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); omap_port->sys_mpu_enable = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); return 0; } static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) { struct hsi_port *port = to_hsi_port(omap_port->dev); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *base; writel_relaxed(omap_port->sys_mpu_enable, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); /* SST context */ base = omap_port->sst_base; writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); /* SSR context */ base = omap_port->ssr_base; writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); return 0; } static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) { u32 mode; writel_relaxed(omap_port->sst.mode, omap_port->sst_base + SSI_SST_MODE_REG); writel_relaxed(omap_port->ssr.mode, omap_port->ssr_base + SSI_SSR_MODE_REG); /* OCP barrier */ mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); return 0; } static int omap_ssi_port_runtime_suspend(struct device *dev) { struct hsi_port *port = dev_get_drvdata(dev); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "port runtime suspend!\n"); ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); if (omap_ssi->get_loss) omap_port->loss_count = omap_ssi->get_loss(ssi->device.parent); ssi_save_port_ctx(omap_port); return 0; } static int omap_ssi_port_runtime_resume(struct device *dev) { struct hsi_port *port = dev_get_drvdata(dev); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "port runtime resume!\n"); if ((omap_ssi->get_loss) && (omap_port->loss_count == omap_ssi->get_loss(ssi->device.parent))) goto mode; /* We always need to restore the mode & TX divisor */ ssi_restore_port_ctx(omap_port); mode: ssi_restore_divisor(omap_port); ssi_restore_port_mode(omap_port); return 0; } static const struct dev_pm_ops omap_ssi_port_pm_ops = { SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend, omap_ssi_port_runtime_resume, NULL) }; #define DEV_PM_OPS (&omap_ssi_port_pm_ops) #else #define DEV_PM_OPS NULL #endif #ifdef CONFIG_OF static const struct of_device_id omap_ssi_port_of_match[] = { { .compatible = "ti,omap3-ssi-port", }, {}, }; MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match); #else #define omap_ssi_port_of_match NULL #endif struct platform_driver ssi_port_pdriver = { .probe = ssi_port_probe, .remove = ssi_port_remove, .driver = { .name = "omap_ssi_port", .of_match_table = omap_ssi_port_of_match, .pm = DEV_PM_OPS, }, };
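A worked example for ssi_calculate_div() above: the TX bit clock is fck/2, and the pre-division decrement makes the computed value ceil(tx_fck / max_speed) - 1, so an exact multiple rounds down and the resulting rate never exceeds the limit. The standalone check below assumes the hardware divides the half-rate clock by (divisor + 1), which is what the driver's rounding implies; the sample rates are illustrative.

#include <assert.h>
#include <stdio.h>

/* Mirrors ssi_calculate_div(): rates in kHz and kbit/s, as in the driver. */
static unsigned int calc_div(unsigned long fck_rate, unsigned int max_speed)
{
	unsigned int tx_fck = fck_rate >> 1;  /* TX clock is half of fck */
	return (tx_fck - 1) / max_speed;      /* round down on exact multiples */
}

int main(void)
{
	unsigned long rates[] = { 96000, 76800, 48000 };  /* sample fck in kHz */
	unsigned int speed = 24000;                       /* sample limit, kbit/s */

	for (int i = 0; i < 3; i++) {
		unsigned int div = calc_div(rates[i], speed);
		unsigned int tx_fck = rates[i] >> 1;
		/* effective rate with this divisor must not exceed the limit */
		assert(tx_fck / (div + 1) <= speed);
		printf("fck %lu kHz -> div %u (%u kbit/s)\n",
		       rates[i], div, tx_fck / (div + 1));
	}
	return 0;
}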
linux-master
drivers/hsi/controllers/omap_ssi_port.c
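One more pattern from the port driver worth isolating: ssi_start_tx()/ssi_stop_tx() refcount the shared wake line under wk_lock, so only the 0->1 transition raises the line (via the worker that also takes the runtime PM reference) and only the 1->0 transition drops it; overlapping clients therefore never glitch the line. A compact single-threaded sketch of that discipline (locking and the workqueue omitted; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned int wk_refcount;
static bool wake_line_high;

static void start_tx(void)
{
	/* only the first user actually raises the wake line */
	if (wk_refcount++ == 0) {
		wake_line_high = true;
		printf("wake line raised\n");
	}
}

static void stop_tx(void)
{
	/* only the last user drops it (callers must stay balanced) */
	if (--wk_refcount == 0) {
		wake_line_high = false;
		printf("wake line dropped\n");
	}
}

int main(void)
{
	start_tx();  /* client A: 0 -> 1, line goes high  */
	start_tx();  /* client B: 1 -> 2, no line change  */
	stop_tx();   /* client A: 2 -> 1, line stays high */
	stop_tx();   /* client B: 1 -> 0, line drops      */
	return 0;
}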
// SPDX-License-Identifier: GPL-2.0-only /* * ms_block.c - Sony MemoryStick (legacy) storage support * Copyright (C) 2013 Maxim Levitsky <[email protected]> * * Minor portions of the driver were copied from mspro_block.c which is * Copyright (C) 2007 Alex Dubov <[email protected]> */ #define DRIVER_NAME "ms_block" #define pr_fmt(fmt) DRIVER_NAME ": " fmt #include <linux/module.h> #include <linux/blk-mq.h> #include <linux/memstick.h> #include <linux/idr.h> #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/bitmap.h> #include <linux/scatterlist.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include "ms_block.h" static int debug; static int cache_flush_timeout = 1000; static bool verify_writes; /* * Copies a section of 'sg_from' starting at offset 'offset' and with length * 'len' to another scatterlist of to_nents entries */ static size_t msb_sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int to_nents, size_t offset, size_t len) { size_t copied = 0; while (offset > 0) { if (offset >= sg_from->length) { if (sg_is_last(sg_from)) return 0; offset -= sg_from->length; sg_from = sg_next(sg_from); continue; } copied = min(len, sg_from->length - offset); sg_set_page(sg_to, sg_page(sg_from), copied, sg_from->offset + offset); len -= copied; offset = 0; if (sg_is_last(sg_from) || !len) goto out; sg_to = sg_next(sg_to); to_nents--; sg_from = sg_next(sg_from); } while (len > sg_from->length && to_nents--) { len -= sg_from->length; copied += sg_from->length; sg_set_page(sg_to, sg_page(sg_from), sg_from->length, sg_from->offset); if (sg_is_last(sg_from) || !len) goto out; sg_from = sg_next(sg_from); sg_to = sg_next(sg_to); } if (len && to_nents) { sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset); copied += len; } out: sg_mark_end(sg_to); return copied; } /* * Compares a section of 'sg' starting at offset 'offset' and with length 'len' * to a linear buffer of length 'len' at address 'buffer' * Returns 0 if equal and -1 otherwise */ static int msb_sg_compare_to_buffer(struct scatterlist *sg, size_t offset, u8 *buffer, size_t len) { int retval = 0, cmplen; struct sg_mapping_iter miter; sg_miter_start(&miter, sg, sg_nents(sg), SG_MITER_ATOMIC | SG_MITER_FROM_SG); while (sg_miter_next(&miter) && len > 0) { if (offset >= miter.length) { offset -= miter.length; continue; } cmplen = min(miter.length - offset, len); retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0; if (retval) break; buffer += cmplen; len -= cmplen; offset = 0; } if (!retval && len) retval = -1; sg_miter_stop(&miter); return retval; } /* Get the zone in which the block with logical address 'lba' lives * Flash is broken into zones. * Each zone consists of 512 eraseblocks; 494 of them are usable in the * first zone and 496 in each following zone. * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc... */ static int msb_get_zone_from_lba(int lba) { if (lba < 494) return 0; return ((lba - 494) / 496) + 1; } /* Get zone of physical block. 
Trivial */ static int msb_get_zone_from_pba(int pba) { return pba / MS_BLOCKS_IN_ZONE; } /* Debug test to validate free block counts */ static int msb_validate_used_block_bitmap(struct msb_data *msb) { int total_free_blocks = 0; int i; if (!debug) return 0; for (i = 0; i < msb->zone_count; i++) total_free_blocks += msb->free_block_count[i]; if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap, msb->block_count) == total_free_blocks) return 0; pr_err("BUG: free block counts don't match the bitmap"); msb->read_only = true; return -EINVAL; } /* Mark physical block as used */ static void msb_mark_block_used(struct msb_data *msb, int pba) { int zone = msb_get_zone_from_pba(pba); if (test_bit(pba, msb->used_blocks_bitmap)) { pr_err( "BUG: attempt to mark already used pba %d as used", pba); msb->read_only = true; return; } if (msb_validate_used_block_bitmap(msb)) return; /* No races because all IO is single threaded */ __set_bit(pba, msb->used_blocks_bitmap); msb->free_block_count[zone]--; } /* Mark physical block as free */ static void msb_mark_block_unused(struct msb_data *msb, int pba) { int zone = msb_get_zone_from_pba(pba); if (!test_bit(pba, msb->used_blocks_bitmap)) { pr_err("BUG: attempt to mark already unused pba %d as unused" , pba); msb->read_only = true; return; } if (msb_validate_used_block_bitmap(msb)) return; /* No races because all IO is single threaded */ __clear_bit(pba, msb->used_blocks_bitmap); msb->free_block_count[zone]++; } /* Invalidate current register window */ static void msb_invalidate_reg_window(struct msb_data *msb) { msb->reg_addr.w_offset = offsetof(struct ms_register, id); msb->reg_addr.w_length = sizeof(struct ms_id_register); msb->reg_addr.r_offset = offsetof(struct ms_register, id); msb->reg_addr.r_length = sizeof(struct ms_id_register); msb->addr_valid = false; } /* Start a state machine */ static int msb_run_state_machine(struct msb_data *msb, int (*state_func) (struct memstick_dev *card, struct memstick_request **req)) { struct memstick_dev *card = msb->card; WARN_ON(msb->state != -1); msb->int_polling = false; msb->state = 0; msb->exit_error = 0; memset(&card->current_mrq, 0, sizeof(card->current_mrq)); card->next_request = state_func; memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); WARN_ON(msb->state != -1); return msb->exit_error; } /* State machines call that to exit */ static int msb_exit_state_machine(struct msb_data *msb, int error) { WARN_ON(msb->state == -1); msb->state = -1; msb->exit_error = error; msb->card->next_request = h_msb_default_bad; /* Invalidate reg window on errors */ if (error) msb_invalidate_reg_window(msb); complete(&msb->card->mrq_complete); return -ENXIO; } /* read INT register */ static int msb_read_int_reg(struct msb_data *msb, long timeout) { struct memstick_request *mrq = &msb->card->current_mrq; WARN_ON(msb->state == -1); if (!msb->int_polling) { msb->int_timeout = jiffies + msecs_to_jiffies(timeout == -1 ? 
500 : timeout); msb->int_polling = true; } else if (time_after(jiffies, msb->int_timeout)) { mrq->data[0] = MEMSTICK_INT_CMDNAK; return 0; } if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) && mrq->need_card_int && !mrq->error) { mrq->data[0] = mrq->int_reg; mrq->need_card_int = false; return 0; } else { memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1); return 1; } } /* Read a register */ static int msb_read_regs(struct msb_data *msb, int offset, int len) { struct memstick_request *req = &msb->card->current_mrq; if (msb->reg_addr.r_offset != offset || msb->reg_addr.r_length != len || !msb->addr_valid) { msb->reg_addr.r_offset = offset; msb->reg_addr.r_length = len; msb->addr_valid = true; memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS, &msb->reg_addr, sizeof(msb->reg_addr)); return 0; } memstick_init_req(req, MS_TPC_READ_REG, NULL, len); return 1; } /* Write a card register */ static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf) { struct memstick_request *req = &msb->card->current_mrq; if (msb->reg_addr.w_offset != offset || msb->reg_addr.w_length != len || !msb->addr_valid) { msb->reg_addr.w_offset = offset; msb->reg_addr.w_length = len; msb->addr_valid = true; memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS, &msb->reg_addr, sizeof(msb->reg_addr)); return 0; } memstick_init_req(req, MS_TPC_WRITE_REG, buf, len); return 1; } /* Handler for absence of IO */ static int h_msb_default_bad(struct memstick_dev *card, struct memstick_request **mrq) { return -ENXIO; } /* * This function is a handler for reads of one page from device. * Writes output to msb->current_sg, takes sector address from msb->regs.param * Can also be used to read extra data only. Set params accordingly. */ static int h_msb_read_page(struct memstick_dev *card, struct memstick_request **out_mrq) { struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; struct scatterlist sg[2]; u8 command, intreg; if (mrq->error) { dbg("read_page, unknown error"); return msb_exit_state_machine(msb, mrq->error); } again: switch (msb->state) { case MSB_RP_SEND_BLOCK_ADDRESS: /* msb_write_regs sometimes "fails" because it needs to update * the reg window, and thus it returns a request for that. * Then we stay in this state and retry */ if (!msb_write_regs(msb, offsetof(struct ms_register, param), sizeof(struct ms_param_register), (unsigned char *)&msb->regs.param)) return 0; msb->state = MSB_RP_SEND_READ_COMMAND; return 0; case MSB_RP_SEND_READ_COMMAND: command = MS_CMD_BLOCK_READ; memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1); msb->state = MSB_RP_SEND_INT_REQ; return 0; case MSB_RP_SEND_INT_REQ: msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT; /* If we don't actually need to send the int read request (only in * serial mode), then just fall through */ if (msb_read_int_reg(msb, -1)) return 0; fallthrough; case MSB_RP_RECEIVE_INT_REQ_RESULT: intreg = mrq->data[0]; msb->regs.status.interrupt = intreg; if (intreg & MEMSTICK_INT_CMDNAK) return msb_exit_state_machine(msb, -EIO); if (!(intreg & MEMSTICK_INT_CED)) { msb->state = MSB_RP_SEND_INT_REQ; goto again; } msb->int_polling = false; msb->state = (intreg & MEMSTICK_INT_ERR) ? 
MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ; goto again; case MSB_RP_SEND_READ_STATUS_REG: /* read the status register to understand the source of the INT_ERR */ if (!msb_read_regs(msb, offsetof(struct ms_register, status), sizeof(struct ms_status_register))) return 0; msb->state = MSB_RP_RECEIVE_STATUS_REG; return 0; case MSB_RP_RECEIVE_STATUS_REG: msb->regs.status = *(struct ms_status_register *)mrq->data; msb->state = MSB_RP_SEND_OOB_READ; fallthrough; case MSB_RP_SEND_OOB_READ: if (!msb_read_regs(msb, offsetof(struct ms_register, extra_data), sizeof(struct ms_extra_data_register))) return 0; msb->state = MSB_RP_RECEIVE_OOB_READ; return 0; case MSB_RP_RECEIVE_OOB_READ: msb->regs.extra_data = *(struct ms_extra_data_register *) mrq->data; msb->state = MSB_RP_SEND_READ_DATA; fallthrough; case MSB_RP_SEND_READ_DATA: /* Skip that state if we only read the oob */ if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) { msb->state = MSB_RP_RECEIVE_READ_DATA; goto again; } sg_init_table(sg, ARRAY_SIZE(sg)); msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), msb->current_sg_offset, msb->page_size); memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg); msb->state = MSB_RP_RECEIVE_READ_DATA; return 0; case MSB_RP_RECEIVE_READ_DATA: if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) { msb->current_sg_offset += msb->page_size; return msb_exit_state_machine(msb, 0); } if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) { dbg("read_page: uncorrectable error"); return msb_exit_state_machine(msb, -EBADMSG); } if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) { dbg("read_page: correctable error"); msb->current_sg_offset += msb->page_size; return msb_exit_state_machine(msb, -EUCLEAN); } else { dbg("read_page: INT error, but no status error bits"); return msb_exit_state_machine(msb, -EIO); } } BUG(); } /* * Handler of writes of exactly one block. * Takes address from msb->regs.param. * Writes the same extra data to the block, taken * from msb->regs.extra_data * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if * device refuses to take the command or something else */ static int h_msb_write_block(struct memstick_dev *card, struct memstick_request **out_mrq) { struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; struct scatterlist sg[2]; u8 intreg, command; if (mrq->error) return msb_exit_state_machine(msb, mrq->error); again: switch (msb->state) { /* HACK: JMicron handling of TPCs between 8 and * sizeof(memstick_request.data) is broken due to a hardware * bug in the PIO mode that is used for these TPCs. * Therefore split the write */ case MSB_WB_SEND_WRITE_PARAMS: if (!msb_write_regs(msb, offsetof(struct ms_register, param), sizeof(struct ms_param_register), &msb->regs.param)) return 0; msb->state = MSB_WB_SEND_WRITE_OOB; return 0; case MSB_WB_SEND_WRITE_OOB: if (!msb_write_regs(msb, offsetof(struct ms_register, extra_data), sizeof(struct ms_extra_data_register), &msb->regs.extra_data)) return 0; msb->state = MSB_WB_SEND_WRITE_COMMAND; return 0; case MSB_WB_SEND_WRITE_COMMAND: command = MS_CMD_BLOCK_WRITE; memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1); msb->state = MSB_WB_SEND_INT_REQ; return 0; case MSB_WB_SEND_INT_REQ: msb->state = MSB_WB_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; fallthrough; case MSB_WB_RECEIVE_INT_REQ: intreg = mrq->data[0]; msb->regs.status.interrupt = intreg; /* errors mean out of here, and fast... 
*/ if (intreg & (MEMSTICK_INT_CMDNAK)) return msb_exit_state_machine(msb, -EIO); if (intreg & MEMSTICK_INT_ERR) return msb_exit_state_machine(msb, -EBADMSG); /* for last page we need to poll CED */ if (msb->current_page == msb->pages_in_block) { if (intreg & MEMSTICK_INT_CED) return msb_exit_state_machine(msb, 0); msb->state = MSB_WB_SEND_INT_REQ; goto again; } /* for non-last page we need BREQ before writing next chunk */ if (!(intreg & MEMSTICK_INT_BREQ)) { msb->state = MSB_WB_SEND_INT_REQ; goto again; } msb->int_polling = false; msb->state = MSB_WB_SEND_WRITE_DATA; fallthrough; case MSB_WB_SEND_WRITE_DATA: sg_init_table(sg, ARRAY_SIZE(sg)); if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), msb->current_sg_offset, msb->page_size) < msb->page_size) return msb_exit_state_machine(msb, -EIO); memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg); mrq->need_card_int = 1; msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION; return 0; case MSB_WB_RECEIVE_WRITE_CONFIRMATION: msb->current_page++; msb->current_sg_offset += msb->page_size; msb->state = MSB_WB_SEND_INT_REQ; goto again; default: BUG(); } return 0; } /* * This function is used to send simple IO requests to device that consist * of register write + command */ static int h_msb_send_command(struct memstick_dev *card, struct memstick_request **out_mrq) { struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; u8 intreg; if (mrq->error) { dbg("send_command: unknown error"); return msb_exit_state_machine(msb, mrq->error); } again: switch (msb->state) { /* HACK: see h_msb_write_block */ case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/ if (!msb_write_regs(msb, offsetof(struct ms_register, param), sizeof(struct ms_param_register), &msb->regs.param)) return 0; msb->state = MSB_SC_SEND_WRITE_OOB; return 0; case MSB_SC_SEND_WRITE_OOB: if (!msb->command_need_oob) { msb->state = MSB_SC_SEND_COMMAND; goto again; } if (!msb_write_regs(msb, offsetof(struct ms_register, extra_data), sizeof(struct ms_extra_data_register), &msb->regs.extra_data)) return 0; msb->state = MSB_SC_SEND_COMMAND; return 0; case MSB_SC_SEND_COMMAND: memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1); msb->state = MSB_SC_SEND_INT_REQ; return 0; case MSB_SC_SEND_INT_REQ: msb->state = MSB_SC_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; fallthrough; case MSB_SC_RECEIVE_INT_REQ: intreg = mrq->data[0]; if (intreg & MEMSTICK_INT_CMDNAK) return msb_exit_state_machine(msb, -EIO); if (intreg & MEMSTICK_INT_ERR) return msb_exit_state_machine(msb, -EBADMSG); if (!(intreg & MEMSTICK_INT_CED)) { msb->state = MSB_SC_SEND_INT_REQ; goto again; } return msb_exit_state_machine(msb, 0); } BUG(); } /* Small handler for card reset */ static int h_msb_reset(struct memstick_dev *card, struct memstick_request **out_mrq) { u8 command = MS_CMD_RESET; struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; if (mrq->error) return msb_exit_state_machine(msb, mrq->error); switch (msb->state) { case MSB_RS_SEND: memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1); mrq->need_card_int = 0; msb->state = MSB_RS_CONFIRM; return 0; case MSB_RS_CONFIRM: return msb_exit_state_machine(msb, 0); } BUG(); } /* This handler is used to do serial->parallel switch */ static int h_msb_parallel_switch(struct memstick_dev *card, struct memstick_request **out_mrq) { struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; struct 
memstick_host *host = card->host; if (mrq->error) { dbg("parallel_switch: error"); msb->regs.param.system &= ~MEMSTICK_SYS_PAM; return msb_exit_state_machine(msb, mrq->error); } switch (msb->state) { case MSB_PS_SEND_SWITCH_COMMAND: /* Set the parallel interface on the memstick side */ msb->regs.param.system |= MEMSTICK_SYS_PAM; if (!msb_write_regs(msb, offsetof(struct ms_register, param), 1, (unsigned char *)&msb->regs.param)) return 0; msb->state = MSB_PS_SWICH_HOST; return 0; case MSB_PS_SWICH_HOST: /* Set the parallel interface on our side + send a dummy request * to see if the card responds */ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4); memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1); msb->state = MSB_PS_CONFIRM; return 0; case MSB_PS_CONFIRM: return msb_exit_state_machine(msb, 0); } BUG(); } static int msb_switch_to_parallel(struct msb_data *msb); /* Reset the card, to guard against hw errors being treated as bad blocks */ static int msb_reset(struct msb_data *msb, bool full) { bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM; struct memstick_dev *card = msb->card; struct memstick_host *host = card->host; int error; /* Reset the card */ msb->regs.param.system = MEMSTICK_SYS_BAMD; if (full) { error = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); if (error) goto out_error; msb_invalidate_reg_window(msb); error = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); if (error) goto out_error; error = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); if (error) { out_error: dbg("Failed to reset the host controller"); msb->read_only = true; return -EFAULT; } } error = msb_run_state_machine(msb, h_msb_reset); if (error) { dbg("Failed to reset the card"); msb->read_only = true; return -ENODEV; } /* Set parallel mode */ if (was_parallel) msb_switch_to_parallel(msb); return 0; } /* Attempts to switch the interface to parallel mode */ static int msb_switch_to_parallel(struct msb_data *msb) { int error; error = msb_run_state_machine(msb, h_msb_parallel_switch); if (error) { pr_err("Switch to parallel failed"); msb->regs.param.system &= ~MEMSTICK_SYS_PAM; msb_reset(msb, true); return -EFAULT; } msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; return 0; } /* Changes the overwrite flag on a page */ static int msb_set_overwrite_flag(struct msb_data *msb, u16 pba, u8 page, u8 flag) { if (msb->read_only) return -EROFS; msb->regs.param.block_address = cpu_to_be16(pba); msb->regs.param.page_address = page; msb->regs.param.cp = MEMSTICK_CP_OVERWRITE; msb->regs.extra_data.overwrite_flag = flag; msb->command_value = MS_CMD_BLOCK_WRITE; msb->command_need_oob = true; dbg_verbose("changing overwrite flag to %02x for sector %d, page %d", flag, pba, page); return msb_run_state_machine(msb, h_msb_send_command); } static int msb_mark_bad(struct msb_data *msb, int pba) { pr_notice("marking pba %d as bad", pba); msb_reset(msb, true); return msb_set_overwrite_flag( msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST); } static int msb_mark_page_bad(struct msb_data *msb, int pba, int page) { dbg("marking page %d of pba %d as bad", page, pba); msb_reset(msb, true); return msb_set_overwrite_flag(msb, pba, page, ~MEMSTICK_OVERWRITE_PGST0); } /* Erases one physical block */ static int msb_erase_block(struct msb_data *msb, u16 pba) { int error, try; if (msb->read_only) return -EROFS; dbg_verbose("erasing pba %d", pba); for (try = 1; try < 3; try++) { msb->regs.param.block_address = cpu_to_be16(pba); msb->regs.param.page_address = 0; msb->regs.param.cp = MEMSTICK_CP_BLOCK; msb->command_value = 
MS_CMD_BLOCK_ERASE; msb->command_need_oob = false; error = msb_run_state_machine(msb, h_msb_send_command); if (!error || msb_reset(msb, true)) break; } if (error) { pr_err("erase failed, marking pba %d as bad", pba); msb_mark_bad(msb, pba); return -EIO; } dbg_verbose("erase success, marking pba %d as unused", pba); msb_mark_block_unused(msb, pba); __set_bit(pba, msb->erased_blocks_bitmap); return 0; } /* Reads one page from the device */ static int msb_read_page(struct msb_data *msb, u16 pba, u8 page, struct ms_extra_data_register *extra, struct scatterlist *sg, int offset) { int try, error; if (pba == MS_BLOCK_INVALID) { unsigned long flags; struct sg_mapping_iter miter; size_t len = msb->page_size; dbg_verbose("read of an unmapped sector, returning 0xFF"); local_irq_save(flags); sg_miter_start(&miter, sg, sg_nents(sg), SG_MITER_ATOMIC | SG_MITER_TO_SG); while (sg_miter_next(&miter) && len > 0) { int chunklen; if (offset && offset >= miter.length) { offset -= miter.length; continue; } chunklen = min(miter.length - offset, len); memset(miter.addr + offset, 0xFF, chunklen); len -= chunklen; offset = 0; } sg_miter_stop(&miter); local_irq_restore(flags); if (offset) return -EFAULT; if (extra) memset(extra, 0xFF, sizeof(*extra)); return 0; } if (pba >= msb->block_count) { pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba); return -EINVAL; } for (try = 1; try < 3; try++) { msb->regs.param.block_address = cpu_to_be16(pba); msb->regs.param.page_address = page; msb->regs.param.cp = MEMSTICK_CP_PAGE; msb->current_sg = sg; msb->current_sg_offset = offset; error = msb_run_state_machine(msb, h_msb_read_page); if (error == -EUCLEAN) { pr_notice("correctable error on pba %d, page %d", pba, page); error = 0; } if (!error && extra) *extra = msb->regs.extra_data; if (!error || msb_reset(msb, true)) break; } /* Mark bad pages */ if (error == -EBADMSG) { pr_err("uncorrectable error on read of pba %d, page %d", pba, page); if (msb->regs.extra_data.overwrite_flag & MEMSTICK_OVERWRITE_PGST0) msb_mark_page_bad(msb, pba, page); return -EBADMSG; } if (error) pr_err("read of pba %d, page %d failed with error %d", pba, page, error); return error; } /* Reads only the oob of a page */ static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page, struct ms_extra_data_register *extra) { int error; BUG_ON(!extra); msb->regs.param.block_address = cpu_to_be16(pba); msb->regs.param.page_address = page; msb->regs.param.cp = MEMSTICK_CP_EXTRA; if (pba >= msb->block_count) { pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba); return -EINVAL; } error = msb_run_state_machine(msb, h_msb_read_page); *extra = msb->regs.extra_data; if (error == -EUCLEAN) { pr_notice("correctable error on pba %d, page %d", pba, page); return 0; } return error; } /* Reads a block and compares it with the data contained in scatterlist orig_sg */ static int msb_verify_block(struct msb_data *msb, u16 pba, struct scatterlist *orig_sg, int offset) { struct scatterlist sg; int page = 0, error; sg_init_one(&sg, msb->block_buffer, msb->block_size); while (page < msb->pages_in_block) { error = msb_read_page(msb, pba, page, NULL, &sg, page * msb->page_size); if (error) return error; page++; } if (msb_sg_compare_to_buffer(orig_sg, offset, msb->block_buffer, msb->block_size)) return -EIO; return 0; } /* Writes exactly one block + oob */ static int msb_write_block(struct msb_data *msb, u16 pba, u32 lba, struct scatterlist *sg, int offset) { int error, current_try = 1; BUG_ON(sg->length < msb->page_size); if (msb->read_only) return -EROFS; if (pba == 
MS_BLOCK_INVALID) { pr_err( "BUG: write: attempt to write MS_BLOCK_INVALID block"); return -EINVAL; } if (pba >= msb->block_count || lba >= msb->logical_block_count) { pr_err( "BUG: write: attempt to write beyond the end of device"); return -EINVAL; } if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) { pr_err("BUG: write: lba zone mismatch"); return -EINVAL; } if (pba == msb->boot_block_locations[0] || pba == msb->boot_block_locations[1]) { pr_err("BUG: write: attempt to write to boot blocks!"); return -EINVAL; } while (1) { if (msb->read_only) return -EROFS; msb->regs.param.cp = MEMSTICK_CP_BLOCK; msb->regs.param.page_address = 0; msb->regs.param.block_address = cpu_to_be16(pba); msb->regs.extra_data.management_flag = 0xFF; msb->regs.extra_data.overwrite_flag = 0xF8; msb->regs.extra_data.logical_address = cpu_to_be16(lba); msb->current_sg = sg; msb->current_sg_offset = offset; msb->current_page = 0; error = msb_run_state_machine(msb, h_msb_write_block); /* Sector we just wrote to is assumed erased since its pba * was erased. If it wasn't erased, write will succeed * and will just clear the bits that were set in the block * thus test that what we have written, * matches what we expect. * We do trust the blocks that we erased */ if (!error && (verify_writes || !test_bit(pba, msb->erased_blocks_bitmap))) error = msb_verify_block(msb, pba, sg, offset); if (!error) break; if (current_try > 1 || msb_reset(msb, true)) break; pr_err("write failed, trying to erase the pba %d", pba); error = msb_erase_block(msb, pba); if (error) break; current_try++; } return error; } /* Finds a free block for write replacement */ static u16 msb_get_free_block(struct msb_data *msb, int zone) { u16 pos; int pba = zone * MS_BLOCKS_IN_ZONE; int i; get_random_bytes(&pos, sizeof(pos)); if (!msb->free_block_count[zone]) { pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone); msb->read_only = true; return MS_BLOCK_INVALID; } pos %= msb->free_block_count[zone]; dbg_verbose("have %d choices for a free block, selected randomly: %d", msb->free_block_count[zone], pos); pba = find_next_zero_bit(msb->used_blocks_bitmap, msb->block_count, pba); for (i = 0; i < pos; ++i) pba = find_next_zero_bit(msb->used_blocks_bitmap, msb->block_count, pba + 1); dbg_verbose("result of the free blocks scan: pba %d", pba); if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) { pr_err("BUG: can't get a free block"); msb->read_only = true; return MS_BLOCK_INVALID; } msb_mark_block_used(msb, pba); return pba; } static int msb_update_block(struct msb_data *msb, u16 lba, struct scatterlist *sg, int offset) { u16 pba, new_pba; int error, try; pba = msb->lba_to_pba_table[lba]; dbg_verbose("start of a block update at lba %d, pba %d", lba, pba); if (pba != MS_BLOCK_INVALID) { dbg_verbose("setting the update flag on the block"); msb_set_overwrite_flag(msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_UDST); } for (try = 0; try < 3; try++) { new_pba = msb_get_free_block(msb, msb_get_zone_from_lba(lba)); if (new_pba == MS_BLOCK_INVALID) { error = -EIO; goto out; } dbg_verbose("block update: writing updated block to the pba %d", new_pba); error = msb_write_block(msb, new_pba, lba, sg, offset); if (error == -EBADMSG) { msb_mark_bad(msb, new_pba); continue; } if (error) goto out; dbg_verbose("block update: erasing the old block"); msb_erase_block(msb, pba); msb->lba_to_pba_table[lba] = new_pba; return 0; } out: if (error) { pr_err("block update error after %d tries, switching to r/o mode", 
try); msb->read_only = true; } return error; } /* Converts endianness in the boot block for easy use */ static void msb_fix_boot_page_endianness(struct ms_boot_page *p) { p->header.block_id = be16_to_cpu(p->header.block_id); p->header.format_reserved = be16_to_cpu(p->header.format_reserved); p->entry.disabled_block.start_addr = be32_to_cpu(p->entry.disabled_block.start_addr); p->entry.disabled_block.data_size = be32_to_cpu(p->entry.disabled_block.data_size); p->entry.cis_idi.start_addr = be32_to_cpu(p->entry.cis_idi.start_addr); p->entry.cis_idi.data_size = be32_to_cpu(p->entry.cis_idi.data_size); p->attr.block_size = be16_to_cpu(p->attr.block_size); p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks); p->attr.number_of_effective_blocks = be16_to_cpu(p->attr.number_of_effective_blocks); p->attr.page_size = be16_to_cpu(p->attr.page_size); p->attr.memory_manufacturer_code = be16_to_cpu(p->attr.memory_manufacturer_code); p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code); p->attr.implemented_capacity = be16_to_cpu(p->attr.implemented_capacity); p->attr.controller_number = be16_to_cpu(p->attr.controller_number); p->attr.controller_function = be16_to_cpu(p->attr.controller_function); } static int msb_read_boot_blocks(struct msb_data *msb) { int pba = 0; struct scatterlist sg; struct ms_extra_data_register extra; struct ms_boot_page *page; msb->boot_block_locations[0] = MS_BLOCK_INVALID; msb->boot_block_locations[1] = MS_BLOCK_INVALID; msb->boot_block_count = 0; dbg_verbose("Start of a scan for the boot blocks"); if (!msb->boot_page) { page = kmalloc_array(2, sizeof(struct ms_boot_page), GFP_KERNEL); if (!page) return -ENOMEM; msb->boot_page = page; } else page = msb->boot_page; msb->block_count = MS_BLOCK_MAX_BOOT_ADDR; for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) { sg_init_one(&sg, page, sizeof(*page)); if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) { dbg("boot scan: can't read pba %d", pba); continue; } if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) { dbg("management flag doesn't indicate boot block %d", pba); continue; } if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) { dbg("the pba at %d doesn't contain a boot block ID", pba); continue; } msb_fix_boot_page_endianness(page); msb->boot_block_locations[msb->boot_block_count] = pba; page++; msb->boot_block_count++; if (msb->boot_block_count == 2) break; } if (!msb->boot_block_count) { pr_err("media doesn't contain a master page, aborting"); return -EIO; } dbg_verbose("End of scan for boot blocks"); return 0; } static int msb_read_bad_block_table(struct msb_data *msb, int block_nr) { struct ms_boot_page *boot_block; struct scatterlist sg; u16 *buffer = NULL; int offset = 0; int i, error = 0; int data_size, data_offset, page, page_offset, size_to_read; u16 pba; BUG_ON(block_nr > 1); boot_block = &msb->boot_page[block_nr]; pba = msb->boot_block_locations[block_nr]; if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID) return -EINVAL; data_size = boot_block->entry.disabled_block.data_size; data_offset = sizeof(struct ms_boot_page) + boot_block->entry.disabled_block.start_addr; if (!data_size) return 0; page = data_offset / msb->page_size; page_offset = data_offset % msb->page_size; size_to_read = DIV_ROUND_UP(data_size + page_offset, msb->page_size) * msb->page_size; dbg("reading the bad block table of the boot block at pba %d, offset %d len %d", pba, data_offset, data_size); buffer = kzalloc(size_to_read, GFP_KERNEL); if (!buffer) return -ENOMEM; /* Read the buffer */ sg_init_one(&sg, buffer, 
size_to_read); while (offset < size_to_read) { error = msb_read_page(msb, pba, page, NULL, &sg, offset); if (error) goto out; page++; offset += msb->page_size; if (page == msb->pages_in_block) { pr_err( "bad block table extends beyond the boot block"); break; } } /* Process the bad block table */ for (i = page_offset; i < data_size / sizeof(u16); i++) { u16 bad_block = be16_to_cpu(buffer[i]); if (bad_block >= msb->block_count) { dbg("bad block table contains invalid block %d", bad_block); continue; } if (test_bit(bad_block, msb->used_blocks_bitmap)) { dbg("duplicate bad block %d in the table", bad_block); continue; } dbg("block %d is marked as factory bad", bad_block); msb_mark_block_used(msb, bad_block); } out: kfree(buffer); return error; } static int msb_ftl_initialize(struct msb_data *msb) { int i; if (msb->ftl_initialized) return 0; msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE; msb->logical_block_count = msb->zone_count * 496 - 2; msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); msb->lba_to_pba_table = kmalloc_array(msb->logical_block_count, sizeof(u16), GFP_KERNEL); if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table || !msb->erased_blocks_bitmap) { bitmap_free(msb->used_blocks_bitmap); bitmap_free(msb->erased_blocks_bitmap); kfree(msb->lba_to_pba_table); return -ENOMEM; } for (i = 0; i < msb->zone_count; i++) msb->free_block_count[i] = MS_BLOCKS_IN_ZONE; memset(msb->lba_to_pba_table, MS_BLOCK_INVALID, msb->logical_block_count * sizeof(u16)); dbg("initial FTL tables created. Zone count = %d, Logical block count = %d", msb->zone_count, msb->logical_block_count); msb->ftl_initialized = true; return 0; } static int msb_ftl_scan(struct msb_data *msb) { u16 pba, lba, other_block; u8 overwrite_flag, management_flag, other_overwrite_flag; int error; struct ms_extra_data_register extra; u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL); if (!overwrite_flags) return -ENOMEM; dbg("Start of media scanning"); for (pba = 0; pba < msb->block_count; pba++) { if (pba == msb->boot_block_locations[0] || pba == msb->boot_block_locations[1]) { dbg_verbose("pba %05d -> [boot block]", pba); msb_mark_block_used(msb, pba); continue; } if (test_bit(pba, msb->used_blocks_bitmap)) { dbg_verbose("pba %05d -> [factory bad]", pba); continue; } memset(&extra, 0, sizeof(extra)); error = msb_read_oob(msb, pba, 0, &extra); /* can't trust the page if we can't read the oob */ if (error == -EBADMSG) { pr_notice( "oob of pba %d damaged, will try to erase it", pba); msb_mark_block_used(msb, pba); msb_erase_block(msb, pba); continue; } else if (error) { pr_err("unknown error %d on read of oob of pba %d - aborting", error, pba); kfree(overwrite_flags); return error; } lba = be16_to_cpu(extra.logical_address); management_flag = extra.management_flag; overwrite_flag = extra.overwrite_flag; overwrite_flags[pba] = overwrite_flag; /* Skip bad blocks */ if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) { dbg("pba %05d -> [BAD]", pba); msb_mark_block_used(msb, pba); continue; } /* Skip system/drm blocks */ if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) != MEMSTICK_MANAGEMENT_FLAG_NORMAL) { dbg("pba %05d -> [reserved management flag %02x]", pba, management_flag); msb_mark_block_used(msb, pba); continue; } /* Erase temporary tables */ if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) { dbg("pba %05d -> [temp table] - will erase", pba); msb_mark_block_used(msb, pba); msb_erase_block(msb, pba); continue; } if 
(lba == MS_BLOCK_INVALID) { dbg_verbose("pba %05d -> [free]", pba); continue; } msb_mark_block_used(msb, pba); /* Block has LBA not according to zoning*/ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) { pr_notice("pba %05d -> [bad lba %05d] - will erase", pba, lba); msb_erase_block(msb, pba); continue; } /* No collisions - great */ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) { dbg_verbose("pba %05d -> [lba %05d]", pba, lba); msb->lba_to_pba_table[lba] = pba; continue; } other_block = msb->lba_to_pba_table[lba]; other_overwrite_flag = overwrite_flags[other_block]; pr_notice("Collision between pba %d and pba %d", pba, other_block); if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) { pr_notice("pba %d is marked as stable, use it", pba); msb_erase_block(msb, other_block); msb->lba_to_pba_table[lba] = pba; continue; } if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) { pr_notice("pba %d is marked as stable, use it", other_block); msb_erase_block(msb, pba); continue; } pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d", pba, other_block, other_block); msb_erase_block(msb, other_block); msb->lba_to_pba_table[lba] = pba; } dbg("End of media scanning"); kfree(overwrite_flags); return 0; } static void msb_cache_flush_timer(struct timer_list *t) { struct msb_data *msb = from_timer(msb, t, cache_flush_timer); msb->need_flush_cache = true; queue_work(msb->io_queue, &msb->io_work); } static void msb_cache_discard(struct msb_data *msb) { if (msb->cache_block_lba == MS_BLOCK_INVALID) return; del_timer_sync(&msb->cache_flush_timer); dbg_verbose("Discarding the write cache"); msb->cache_block_lba = MS_BLOCK_INVALID; bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block); } static int msb_cache_init(struct msb_data *msb) { timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0); if (!msb->cache) msb->cache = kzalloc(msb->block_size, GFP_KERNEL); if (!msb->cache) return -ENOMEM; msb_cache_discard(msb); return 0; } static int msb_cache_flush(struct msb_data *msb) { struct scatterlist sg; struct ms_extra_data_register extra; int page, offset, error; u16 pba, lba; if (msb->read_only) return -EROFS; if (msb->cache_block_lba == MS_BLOCK_INVALID) return 0; lba = msb->cache_block_lba; pba = msb->lba_to_pba_table[lba]; dbg_verbose("Flushing the write cache of pba %d (LBA %d)", pba, msb->cache_block_lba); sg_init_one(&sg, msb->cache , msb->block_size); /* Read all missing pages in cache */ for (page = 0; page < msb->pages_in_block; page++) { if (test_bit(page, &msb->valid_cache_bitmap)) continue; offset = page * msb->page_size; dbg_verbose("reading non-present sector %d of cache block %d", page, lba); error = msb_read_page(msb, pba, page, &extra, &sg, offset); /* Bad pages are copied with 00 page status */ if (error == -EBADMSG) { pr_err("read error on sector %d, contents probably damaged", page); continue; } if (error) return error; if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) != MEMSTICK_OV_PG_NORMAL) { dbg("page %d is marked as bad", page); continue; } set_bit(page, &msb->valid_cache_bitmap); } /* Write the cache now */ error = msb_update_block(msb, msb->cache_block_lba, &sg, 0); pba = msb->lba_to_pba_table[msb->cache_block_lba]; /* Mark invalid pages */ if (!error) { for (page = 0; page < msb->pages_in_block; page++) { if (test_bit(page, &msb->valid_cache_bitmap)) continue; dbg("marking page %d as containing damaged data", page); msb_set_overwrite_flag(msb, pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL); } } 
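/* The flush handed the cached block to msb_update_block() above; whether or not that succeeded, discard the cache so the next cached write starts from a clean state. */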
msb_cache_discard(msb); return error; } static int msb_cache_write(struct msb_data *msb, int lba, int page, bool add_to_cache_only, struct scatterlist *sg, int offset) { int error; struct scatterlist sg_tmp[10]; if (msb->read_only) return -EROFS; if (msb->cache_block_lba == MS_BLOCK_INVALID || lba != msb->cache_block_lba) if (add_to_cache_only) return 0; /* If we need to write a different block */ if (msb->cache_block_lba != MS_BLOCK_INVALID && lba != msb->cache_block_lba) { dbg_verbose("first flush the cache"); error = msb_cache_flush(msb); if (error) return error; } if (msb->cache_block_lba == MS_BLOCK_INVALID) { msb->cache_block_lba = lba; mod_timer(&msb->cache_flush_timer, jiffies + msecs_to_jiffies(cache_flush_timeout)); } dbg_verbose("Write of LBA %d page %d to cache", lba, page); sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp)); msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp), msb->cache + page * msb->page_size, msb->page_size); set_bit(page, &msb->valid_cache_bitmap); return 0; } static int msb_cache_read(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int offset) { int pba = msb->lba_to_pba_table[lba]; struct scatterlist sg_tmp[10]; int error = 0; if (lba == msb->cache_block_lba && test_bit(page, &msb->valid_cache_bitmap)) { dbg_verbose("Read of LBA %d (pba %d) sector %d from cache", lba, pba, page); sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp)); msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp), msb->cache + msb->page_size * page, msb->page_size); } else { dbg_verbose("Read of LBA %d (pba %d) sector %d from device", lba, pba, page); error = msb_read_page(msb, pba, page, NULL, sg, offset); if (error) return error; msb_cache_write(msb, lba, page, true, sg, offset); } return error; } /* Emulated geometry table * This table content isn't that important, * One could put different values here, providing that they still * cover the whole disk. 
* The 64 MB entry is what Windows reports for my 64M memstick */ static const struct chs_entry chs_table[] = { /* size sectors cylinders heads */ { 4, 16, 247, 2 }, { 8, 16, 495, 2 }, { 16, 16, 495, 4 }, { 32, 16, 991, 4 }, { 64, 16, 991, 8 }, {128, 16, 991, 16 }, { 0 } }; /* Load information about the card */ static int msb_init_card(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); struct memstick_host *host = card->host; struct ms_boot_page *boot_block; int error = 0, i, raw_size_in_megs; msb->caps = 0; if (card->id.class >= MEMSTICK_CLASS_ROM && card->id.class <= MEMSTICK_CLASS_ROM) msb->read_only = true; msb->state = -1; error = msb_reset(msb, false); if (error) return error; /* Due to a bug in the JMicron driver written by Alex Dubov, * its serial mode barely works, * so we switch to parallel mode right away */ if (host->caps & MEMSTICK_CAP_PAR4) msb_switch_to_parallel(msb); msb->page_size = sizeof(struct ms_boot_page); /* Read the boot page */ error = msb_read_boot_blocks(msb); if (error) return -EIO; boot_block = &msb->boot_page[0]; /* Save interesting attributes from the boot page */ msb->block_count = boot_block->attr.number_of_blocks; msb->page_size = boot_block->attr.page_size; msb->pages_in_block = boot_block->attr.block_size * 2; msb->block_size = msb->page_size * msb->pages_in_block; if ((size_t)msb->page_size > PAGE_SIZE) { /* this isn't supported by Linux at all, anyway */ dbg("device page size %d isn't supported", msb->page_size); return -EINVAL; } msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL); if (!msb->block_buffer) return -ENOMEM; raw_size_in_megs = (msb->block_size * msb->block_count) >> 20; for (i = 0; chs_table[i].size; i++) { if (chs_table[i].size != raw_size_in_megs) continue; msb->geometry.cylinders = chs_table[i].cyl; msb->geometry.heads = chs_table[i].head; msb->geometry.sectors = chs_table[i].sec; break; } if (boot_block->attr.transfer_supporting == 1) msb->caps |= MEMSTICK_CAP_PAR4; if (boot_block->attr.device_type & 0x03) msb->read_only = true; dbg("Total block count = %d", msb->block_count); dbg("Each block consists of %d pages", msb->pages_in_block); dbg("Page size = %d bytes", msb->page_size); dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4)); dbg("Read only: %d", msb->read_only); #if 0 /* Now we can switch the interface */ if (host->caps & msb->caps & MEMSTICK_CAP_PAR4) msb_switch_to_parallel(msb); #endif error = msb_cache_init(msb); if (error) return error; error = msb_ftl_initialize(msb); if (error) return error; /* Read the bad block table */ error = msb_read_bad_block_table(msb, 0); if (error && error != -ENOMEM) { dbg("failed to read the bad block table from the primary boot block, trying the backup"); error = msb_read_bad_block_table(msb, 1); } if (error) return error; /* *drum roll* Scan the media */ error = msb_ftl_scan(msb); if (error) { pr_err("Scan of media failed"); return error; } return 0; } static int msb_do_write_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, size_t len, int *successfully_written) { int error = 0; off_t offset = 0; *successfully_written = 0; while (offset < len) { if (page == 0 && len - offset >= msb->block_size) { if (msb->cache_block_lba == lba) msb_cache_discard(msb); dbg_verbose("Writing whole lba %d", lba); error = msb_update_block(msb, lba, sg, offset); if (error) return error; offset += msb->block_size; *successfully_written += msb->block_size; lba++; continue; } error = msb_cache_write(msb, lba, page, false, sg, offset); if (error) return error; offset += 
msb->page_size; *successfully_written += msb->page_size; page++; if (page == msb->pages_in_block) { page = 0; lba++; } } return 0; } static int msb_do_read_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int len, int *successfully_read) { int error = 0; int offset = 0; *successfully_read = 0; while (offset < len) { error = msb_cache_read(msb, lba, page, sg, offset); if (error) return error; offset += msb->page_size; *successfully_read += msb->page_size; page++; if (page == msb->pages_in_block) { page = 0; lba++; } } return 0; } static void msb_io_work(struct work_struct *work) { struct msb_data *msb = container_of(work, struct msb_data, io_work); int page, error, len; sector_t lba; struct scatterlist *sg = msb->prealloc_sg; struct request *req; dbg_verbose("IO: work started"); while (1) { spin_lock_irq(&msb->q_lock); if (msb->need_flush_cache) { msb->need_flush_cache = false; spin_unlock_irq(&msb->q_lock); msb_cache_flush(msb); continue; } req = msb->req; if (!req) { dbg_verbose("IO: no more requests, exiting"); spin_unlock_irq(&msb->q_lock); return; } spin_unlock_irq(&msb->q_lock); /* process the request */ dbg_verbose("IO: processing new request"); blk_rq_map_sg(msb->queue, req, sg); lba = blk_rq_pos(req); sector_div(lba, msb->page_size / 512); page = sector_div(lba, msb->pages_in_block); if (rq_data_dir(msb->req) == READ) error = msb_do_read_request(msb, lba, page, sg, blk_rq_bytes(req), &len); else error = msb_do_write_request(msb, lba, page, sg, blk_rq_bytes(req), &len); if (len && !blk_update_request(req, BLK_STS_OK, len)) { __blk_mq_end_request(req, BLK_STS_OK); spin_lock_irq(&msb->q_lock); msb->req = NULL; spin_unlock_irq(&msb->q_lock); } if (error && msb->req) { blk_status_t ret = errno_to_blk_status(error); dbg_verbose("IO: ending one sector of the request with error"); blk_mq_end_request(req, ret); spin_lock_irq(&msb->q_lock); msb->req = NULL; spin_unlock_irq(&msb->q_lock); } if (msb->req) dbg_verbose("IO: request still pending"); } } static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */ static void msb_data_clear(struct msb_data *msb) { kfree(msb->boot_page); bitmap_free(msb->used_blocks_bitmap); bitmap_free(msb->erased_blocks_bitmap); kfree(msb->lba_to_pba_table); kfree(msb->cache); msb->card = NULL; } static int msb_bd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct msb_data *msb = bdev->bd_disk->private_data; *geo = msb->geometry; return 0; } static void msb_bd_free_disk(struct gendisk *disk) { struct msb_data *msb = disk->private_data; mutex_lock(&msb_disk_lock); idr_remove(&msb_disk_idr, msb->disk_id); mutex_unlock(&msb_disk_lock); kfree(msb); } static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct memstick_dev *card = hctx->queue->queuedata; struct msb_data *msb = memstick_get_drvdata(card); struct request *req = bd->rq; dbg_verbose("Submit request"); spin_lock_irq(&msb->q_lock); if (msb->card_dead) { dbg("Refusing requests on removed card"); WARN_ON(!msb->io_queue_stopped); spin_unlock_irq(&msb->q_lock); blk_mq_start_request(req); return BLK_STS_IOERR; } if (msb->req) { spin_unlock_irq(&msb->q_lock); return BLK_STS_DEV_RESOURCE; } blk_mq_start_request(req); msb->req = req; if (!msb->io_queue_stopped) queue_work(msb->io_queue, &msb->io_work); spin_unlock_irq(&msb->q_lock); return BLK_STS_OK; } static int msb_check_card(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); 
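/* The card counts as alive until removal (or a failed resume) sets card_dead. */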
return (msb->card_dead == 0); } static void msb_stop(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); unsigned long flags; dbg("Stopping all msblock IO"); blk_mq_stop_hw_queues(msb->queue); spin_lock_irqsave(&msb->q_lock, flags); msb->io_queue_stopped = true; spin_unlock_irqrestore(&msb->q_lock, flags); del_timer_sync(&msb->cache_flush_timer); flush_workqueue(msb->io_queue); spin_lock_irqsave(&msb->q_lock, flags); if (msb->req) { blk_mq_requeue_request(msb->req, false); msb->req = NULL; } spin_unlock_irqrestore(&msb->q_lock, flags); } static void msb_start(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); unsigned long flags; dbg("Resuming IO from msblock"); msb_invalidate_reg_window(msb); spin_lock_irqsave(&msb->q_lock, flags); if (!msb->io_queue_stopped || msb->card_dead) { spin_unlock_irqrestore(&msb->q_lock, flags); return; } spin_unlock_irqrestore(&msb->q_lock, flags); /* Kick cache flush anyway, it's harmless */ msb->need_flush_cache = true; msb->io_queue_stopped = false; blk_mq_start_hw_queues(msb->queue); queue_work(msb->io_queue, &msb->io_work); } static const struct block_device_operations msb_bdops = { .owner = THIS_MODULE, .getgeo = msb_bd_getgeo, .free_disk = msb_bd_free_disk, }; static const struct blk_mq_ops msb_mq_ops = { .queue_rq = msb_queue_rq, }; /* Registers the block device */ static int msb_init_disk(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); int rc; unsigned long capacity; mutex_lock(&msb_disk_lock); msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL); mutex_unlock(&msb_disk_lock); if (msb->disk_id < 0) return msb->disk_id; rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE); if (rc) goto out_release_id; msb->disk = blk_mq_alloc_disk(&msb->tag_set, card); if (IS_ERR(msb->disk)) { rc = PTR_ERR(msb->disk); goto out_free_tag_set; } msb->queue = msb->disk->queue; blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS); blk_queue_max_segment_size(msb->queue, MS_BLOCK_MAX_PAGES * msb->page_size); blk_queue_logical_block_size(msb->queue, msb->page_size); sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id); msb->disk->fops = &msb_bdops; msb->disk->private_data = msb; capacity = msb->pages_in_block * msb->logical_block_count; capacity *= (msb->page_size / 512); set_capacity(msb->disk, capacity); dbg("Set total disk size to %lu sectors", capacity); msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); if (!msb->io_queue) { rc = -ENOMEM; goto out_cleanup_disk; } INIT_WORK(&msb->io_work, msb_io_work); sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); if (msb->read_only) set_disk_ro(msb->disk, 1); msb_start(card); rc = device_add_disk(&card->dev, msb->disk, NULL); if (rc) goto out_destroy_workqueue; dbg("Disk added"); return 0; out_destroy_workqueue: destroy_workqueue(msb->io_queue); out_cleanup_disk: put_disk(msb->disk); out_free_tag_set: blk_mq_free_tag_set(&msb->tag_set); out_release_id: mutex_lock(&msb_disk_lock); idr_remove(&msb_disk_idr, msb->disk_id); mutex_unlock(&msb_disk_lock); return rc; } static int msb_probe(struct memstick_dev *card) { struct msb_data *msb; int rc = 0; msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL); if (!msb) return -ENOMEM; memstick_set_drvdata(card, msb); msb->card = card; spin_lock_init(&msb->q_lock); rc = msb_init_card(card); if (rc) goto out_free; rc = msb_init_disk(card); if (!rc) { card->check = msb_check_card; card->stop 
= msb_stop; card->start = msb_start; return 0; } out_free: memstick_set_drvdata(card, NULL); msb_data_clear(msb); kfree(msb); return rc; } static void msb_remove(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); unsigned long flags; if (!msb->io_queue_stopped) msb_stop(card); dbg("Removing the disk device"); /* Take care of unhandled + new requests from now on */ spin_lock_irqsave(&msb->q_lock, flags); msb->card_dead = true; spin_unlock_irqrestore(&msb->q_lock, flags); blk_mq_start_hw_queues(msb->queue); /* Remove the disk */ del_gendisk(msb->disk); blk_mq_free_tag_set(&msb->tag_set); msb->queue = NULL; mutex_lock(&msb_disk_lock); msb_data_clear(msb); mutex_unlock(&msb_disk_lock); put_disk(msb->disk); memstick_set_drvdata(card, NULL); } #ifdef CONFIG_PM static int msb_suspend(struct memstick_dev *card, pm_message_t state) { msb_stop(card); return 0; } static int msb_resume(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); struct msb_data *new_msb = NULL; bool card_dead = true; #ifndef CONFIG_MEMSTICK_UNSAFE_RESUME msb->card_dead = true; return 0; #endif mutex_lock(&card->host->lock); new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL); if (!new_msb) goto out; new_msb->card = card; memstick_set_drvdata(card, new_msb); spin_lock_init(&new_msb->q_lock); sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); if (msb_init_card(card)) goto out; if (msb->block_size != new_msb->block_size) goto out; if (memcmp(msb->boot_page, new_msb->boot_page, sizeof(struct ms_boot_page))) goto out; if (msb->logical_block_count != new_msb->logical_block_count || memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table, msb->logical_block_count)) goto out; if (msb->block_count != new_msb->block_count || !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap, msb->block_count)) goto out; card_dead = false; out: if (card_dead) dbg("Card was removed/replaced during suspend"); msb->card_dead = card_dead; memstick_set_drvdata(card, msb); if (new_msb) { msb_data_clear(new_msb); kfree(new_msb); } msb_start(card); mutex_unlock(&card->host->lock); return 0; } #else #define msb_suspend NULL #define msb_resume NULL #endif /* CONFIG_PM */ static struct memstick_device_id msb_id_tbl[] = { {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE, MEMSTICK_CLASS_FLASH}, {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE, MEMSTICK_CLASS_ROM}, {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE, MEMSTICK_CLASS_RO}, {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE, MEMSTICK_CLASS_WP}, {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO, MEMSTICK_CLASS_DUO}, {} }; MODULE_DEVICE_TABLE(memstick, msb_id_tbl); static struct memstick_driver msb_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE }, .id_table = msb_id_tbl, .probe = msb_probe, .remove = msb_remove, .suspend = msb_suspend, .resume = msb_resume }; static int __init msb_init(void) { int rc = memstick_register_driver(&msb_driver); if (rc) pr_err("failed to register memstick driver (error %d)\n", rc); return rc; } static void __exit msb_exit(void) { memstick_unregister_driver(&msb_driver); idr_destroy(&msb_disk_idr); } module_init(msb_init); module_exit(msb_exit); module_param(cache_flush_timeout, int, S_IRUGO); MODULE_PARM_DESC(cache_flush_timeout, "Cache flush timeout in msec (1000 default)"); module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-2)"); 
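/* When verify_writes is set, every write is read back and compared with the source data in msb_write_block(); otherwise only writes to blocks not known to be freshly erased are verified. */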
module_param(verify_writes, bool, S_IRUGO); MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky"); MODULE_DESCRIPTION("Sony MemoryStick block device driver");
linux-master
drivers/memstick/core/ms_block.c
// SPDX-License-Identifier: GPL-2.0-only /* * Sony MemoryStick Pro storage support * * Copyright (C) 2007 Alex Dubov <[email protected]> * * Special thanks to Carlos Corbacho for providing various MemoryStick cards * that made this driver possible. */ #include <linux/blk-mq.h> #include <linux/idr.h> #include <linux/hdreg.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memstick.h> #include <linux/module.h> #define DRIVER_NAME "mspro_block" static int major; module_param(major, int, 0644); #define MSPRO_BLOCK_MAX_SEGS 32 #define MSPRO_BLOCK_MAX_PAGES ((2 << 16) - 1) #define MSPRO_BLOCK_SIGNATURE 0xa5c3 #define MSPRO_BLOCK_MAX_ATTRIBUTES 41 #define MSPRO_BLOCK_PART_SHIFT 3 enum { MSPRO_BLOCK_ID_SYSINFO = 0x10, MSPRO_BLOCK_ID_MODELNAME = 0x15, MSPRO_BLOCK_ID_MBR = 0x20, MSPRO_BLOCK_ID_PBR16 = 0x21, MSPRO_BLOCK_ID_PBR32 = 0x22, MSPRO_BLOCK_ID_SPECFILEVALUES1 = 0x25, MSPRO_BLOCK_ID_SPECFILEVALUES2 = 0x26, MSPRO_BLOCK_ID_DEVINFO = 0x30 }; struct mspro_sys_attr { size_t size; void *data; unsigned char id; char name[32]; struct device_attribute dev_attr; }; struct mspro_attr_entry { __be32 address; __be32 size; unsigned char id; unsigned char reserved[3]; } __attribute__((packed)); struct mspro_attribute { __be16 signature; unsigned short version; unsigned char count; unsigned char reserved[11]; struct mspro_attr_entry entries[]; } __attribute__((packed)); struct mspro_sys_info { unsigned char class; unsigned char reserved0; __be16 block_size; __be16 block_count; __be16 user_block_count; __be16 page_size; unsigned char reserved1[2]; unsigned char assembly_date[8]; __be32 serial_number; unsigned char assembly_maker_code; unsigned char assembly_model_code[3]; __be16 memory_maker_code; __be16 memory_model_code; unsigned char reserved2[4]; unsigned char vcc; unsigned char vpp; __be16 controller_number; __be16 controller_function; __be16 start_sector; __be16 unit_size; unsigned char ms_sub_class; unsigned char reserved3[4]; unsigned char interface_type; __be16 controller_code; unsigned char format_type; unsigned char reserved4; unsigned char device_type; unsigned char reserved5[7]; unsigned char mspro_id[16]; unsigned char reserved6[16]; } __attribute__((packed)); struct mspro_mbr { unsigned char boot_partition; unsigned char start_head; unsigned char start_sector; unsigned char start_cylinder; unsigned char partition_type; unsigned char end_head; unsigned char end_sector; unsigned char end_cylinder; unsigned int start_sectors; unsigned int sectors_per_partition; } __attribute__((packed)); struct mspro_specfile { char name[8]; char ext[3]; unsigned char attr; unsigned char reserved[10]; unsigned short time; unsigned short date; unsigned short cluster; unsigned int size; } __attribute__((packed)); struct mspro_devinfo { __be16 cylinders; __be16 heads; __be16 bytes_per_track; __be16 bytes_per_sector; __be16 sectors_per_track; unsigned char reserved[6]; } __attribute__((packed)); struct mspro_block_data { struct memstick_dev *card; unsigned int caps; struct gendisk *disk; struct request_queue *queue; struct request *block_req; struct blk_mq_tag_set tag_set; spinlock_t q_lock; unsigned short page_size; unsigned short cylinders; unsigned short heads; unsigned short sectors_per_track; unsigned char system; unsigned char read_only:1, eject:1, data_dir:1, active:1; unsigned char transfer_cmd; int (*mrq_handler)(struct memstick_dev *card, struct memstick_request **mrq); /* Default request setup function for data access method preferred by * 
this host instance. */ void (*setup_transfer)(struct memstick_dev *card, u64 offset, size_t length); struct attribute_group attr_group; struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; unsigned int seg_count; unsigned int current_seg; unsigned int current_page; }; static DEFINE_IDR(mspro_block_disk_idr); static DEFINE_MUTEX(mspro_block_disk_lock); static int mspro_block_complete_req(struct memstick_dev *card, int error); /*** Block device ***/ static void mspro_block_bd_free_disk(struct gendisk *disk) { struct mspro_block_data *msb = disk->private_data; int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT; mutex_lock(&mspro_block_disk_lock); idr_remove(&mspro_block_disk_idr, disk_id); mutex_unlock(&mspro_block_disk_lock); kfree(msb); } static int mspro_block_bd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mspro_block_data *msb = bdev->bd_disk->private_data; geo->heads = msb->heads; geo->sectors = msb->sectors_per_track; geo->cylinders = msb->cylinders; return 0; } static const struct block_device_operations ms_block_bdops = { .owner = THIS_MODULE, .getgeo = mspro_block_bd_getgeo, .free_disk = mspro_block_bd_free_disk, }; /*** Information ***/ static struct mspro_sys_attr *mspro_from_sysfs_attr(struct attribute *attr) { struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); return container_of(dev_attr, struct mspro_sys_attr, dev_attr); } static const char *mspro_block_attr_name(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return "attr_sysinfo"; case MSPRO_BLOCK_ID_MODELNAME: return "attr_modelname"; case MSPRO_BLOCK_ID_MBR: return "attr_mbr"; case MSPRO_BLOCK_ID_PBR16: return "attr_pbr16"; case MSPRO_BLOCK_ID_PBR32: return "attr_pbr32"; case MSPRO_BLOCK_ID_SPECFILEVALUES1: return "attr_specfilevalues1"; case MSPRO_BLOCK_ID_SPECFILEVALUES2: return "attr_specfilevalues2"; case MSPRO_BLOCK_ID_DEVINFO: return "attr_devinfo"; default: return NULL; } } typedef ssize_t (*sysfs_show_t)(struct device *dev, struct device_attribute *attr, char *buffer); static ssize_t mspro_block_attr_show_default(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); ssize_t cnt, rc = 0; for (cnt = 0; cnt < s_attr->size; cnt++) { if (cnt && !(cnt % 16)) { if (PAGE_SIZE - rc) buffer[rc++] = '\n'; } rc += sysfs_emit_at(buffer, rc, "%02x ", ((unsigned char *)s_attr->data)[cnt]); } return rc; } static ssize_t mspro_block_attr_show_sysinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_sys_info *x_sys = x_attr->data; ssize_t rc = 0; int date_tz = 0, date_tz_f = 0; if (x_sys->assembly_date[0] > 0x80U) { date_tz = (~x_sys->assembly_date[0]) + 1; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz = -date_tz; date_tz_f *= 15; } else if (x_sys->assembly_date[0] < 0x80U) { date_tz = x_sys->assembly_date[0]; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz_f *= 15; } rc += sysfs_emit_at(buffer, rc, "class: %x\n", x_sys->class); rc += sysfs_emit_at(buffer, rc, "block size: %x\n", be16_to_cpu(x_sys->block_size)); rc += sysfs_emit_at(buffer, rc, "block count: %x\n", be16_to_cpu(x_sys->block_count)); rc += sysfs_emit_at(buffer, rc, "user block count: %x\n", be16_to_cpu(x_sys->user_block_count)); rc += sysfs_emit_at(buffer, rc, "page size: %x\n", be16_to_cpu(x_sys->page_size)); rc += sysfs_emit_at(buffer, rc, "assembly date: GMT%+d:%d 
%04u-%02u-%02u %02u:%02u:%02u\n", date_tz, date_tz_f, be16_to_cpup((__be16 *)&x_sys->assembly_date[1]), x_sys->assembly_date[3], x_sys->assembly_date[4], x_sys->assembly_date[5], x_sys->assembly_date[6], x_sys->assembly_date[7]); rc += sysfs_emit_at(buffer, rc, "serial number: %x\n", be32_to_cpu(x_sys->serial_number)); rc += sysfs_emit_at(buffer, rc, "assembly maker code: %x\n", x_sys->assembly_maker_code); rc += sysfs_emit_at(buffer, rc, "assembly model code: %02x%02x%02x\n", x_sys->assembly_model_code[0], x_sys->assembly_model_code[1], x_sys->assembly_model_code[2]); rc += sysfs_emit_at(buffer, rc, "memory maker code: %x\n", be16_to_cpu(x_sys->memory_maker_code)); rc += sysfs_emit_at(buffer, rc, "memory model code: %x\n", be16_to_cpu(x_sys->memory_model_code)); rc += sysfs_emit_at(buffer, rc, "vcc: %x\n", x_sys->vcc); rc += sysfs_emit_at(buffer, rc, "vpp: %x\n", x_sys->vpp); rc += sysfs_emit_at(buffer, rc, "controller number: %x\n", be16_to_cpu(x_sys->controller_number)); rc += sysfs_emit_at(buffer, rc, "controller function: %x\n", be16_to_cpu(x_sys->controller_function)); rc += sysfs_emit_at(buffer, rc, "start sector: %x\n", be16_to_cpu(x_sys->start_sector)); rc += sysfs_emit_at(buffer, rc, "unit size: %x\n", be16_to_cpu(x_sys->unit_size)); rc += sysfs_emit_at(buffer, rc, "sub class: %x\n", x_sys->ms_sub_class); rc += sysfs_emit_at(buffer, rc, "interface type: %x\n", x_sys->interface_type); rc += sysfs_emit_at(buffer, rc, "controller code: %x\n", be16_to_cpu(x_sys->controller_code)); rc += sysfs_emit_at(buffer, rc, "format type: %x\n", x_sys->format_type); rc += sysfs_emit_at(buffer, rc, "device type: %x\n", x_sys->device_type); rc += sysfs_emit_at(buffer, rc, "mspro id: %s\n", x_sys->mspro_id); return rc; } static ssize_t mspro_block_attr_show_modelname(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); return sysfs_emit(buffer, "%s", (char *)s_attr->data); } static ssize_t mspro_block_attr_show_mbr(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_mbr *x_mbr = x_attr->data; ssize_t rc = 0; rc += sysfs_emit_at(buffer, rc, "boot partition: %x\n", x_mbr->boot_partition); rc += sysfs_emit_at(buffer, rc, "start head: %x\n", x_mbr->start_head); rc += sysfs_emit_at(buffer, rc, "start sector: %x\n", x_mbr->start_sector); rc += sysfs_emit_at(buffer, rc, "start cylinder: %x\n", x_mbr->start_cylinder); rc += sysfs_emit_at(buffer, rc, "partition type: %x\n", x_mbr->partition_type); rc += sysfs_emit_at(buffer, rc, "end head: %x\n", x_mbr->end_head); rc += sysfs_emit_at(buffer, rc, "end sector: %x\n", x_mbr->end_sector); rc += sysfs_emit_at(buffer, rc, "end cylinder: %x\n", x_mbr->end_cylinder); rc += sysfs_emit_at(buffer, rc, "start sectors: %x\n", x_mbr->start_sectors); rc += sysfs_emit_at(buffer, rc, "sectors per partition: %x\n", x_mbr->sectors_per_partition); return rc; } static ssize_t mspro_block_attr_show_specfile(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_specfile *x_spfile = x_attr->data; char name[9], ext[4]; ssize_t rc = 0; memcpy(name, x_spfile->name, 8); name[8] = 0; memcpy(ext, x_spfile->ext, 3); ext[3] = 0; rc += sysfs_emit_at(buffer, rc, "name: %s\n", name); rc += sysfs_emit_at(buffer, rc, "ext: %s\n", ext); rc += sysfs_emit_at(buffer, rc, 
"attribute: %x\n", x_spfile->attr); rc += sysfs_emit_at(buffer, rc, "time: %d:%d:%d\n", x_spfile->time >> 11, (x_spfile->time >> 5) & 0x3f, (x_spfile->time & 0x1f) * 2); rc += sysfs_emit_at(buffer, rc, "date: %d-%d-%d\n", (x_spfile->date >> 9) + 1980, (x_spfile->date >> 5) & 0xf, x_spfile->date & 0x1f); rc += sysfs_emit_at(buffer, rc, "start cluster: %x\n", x_spfile->cluster); rc += sysfs_emit_at(buffer, rc, "size: %x\n", x_spfile->size); return rc; } static ssize_t mspro_block_attr_show_devinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_devinfo *x_devinfo = x_attr->data; ssize_t rc = 0; rc += sysfs_emit_at(buffer, rc, "cylinders: %x\n", be16_to_cpu(x_devinfo->cylinders)); rc += sysfs_emit_at(buffer, rc, "heads: %x\n", be16_to_cpu(x_devinfo->heads)); rc += sysfs_emit_at(buffer, rc, "bytes per track: %x\n", be16_to_cpu(x_devinfo->bytes_per_track)); rc += sysfs_emit_at(buffer, rc, "bytes per sector: %x\n", be16_to_cpu(x_devinfo->bytes_per_sector)); rc += sysfs_emit_at(buffer, rc, "sectors per track: %x\n", be16_to_cpu(x_devinfo->sectors_per_track)); return rc; } static sysfs_show_t mspro_block_attr_show(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return mspro_block_attr_show_sysinfo; case MSPRO_BLOCK_ID_MODELNAME: return mspro_block_attr_show_modelname; case MSPRO_BLOCK_ID_MBR: return mspro_block_attr_show_mbr; case MSPRO_BLOCK_ID_SPECFILEVALUES1: case MSPRO_BLOCK_ID_SPECFILEVALUES2: return mspro_block_attr_show_specfile; case MSPRO_BLOCK_ID_DEVINFO: return mspro_block_attr_show_devinfo; default: return mspro_block_attr_show_default; } } /*** Protocol handlers ***/ /* * Functions prefixed with "h_" are protocol callbacks. They can be called from * interrupt context. Return value of 0 means that request processing is still * ongoing, while special error value of -EAGAIN means that current request is * finished (and request processor should come back some time later). 
*/ static int h_mspro_block_req_init(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); *mrq = &card->current_mrq; card->next_request = msb->mrq_handler; return 0; } static int h_mspro_block_default(struct memstick_dev *card, struct memstick_request **mrq) { return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_default_bad(struct memstick_dev *card, struct memstick_request **mrq) { return -ENXIO; } static int h_mspro_block_get_ro(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); if (!(*mrq)->error) { if ((*mrq)->data[offsetof(struct ms_status_register, status0)] & MEMSTICK_STATUS0_WP) msb->read_only = 1; else msb->read_only = 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_wait_for_ced(struct memstick_dev *card, struct memstick_request **mrq) { dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]); if (!(*mrq)->error) { if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) (*mrq)->error = -EFAULT; else if (!((*mrq)->data[0] & MEMSTICK_INT_CED)) return 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_transfer_data(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned char t_val = 0; struct scatterlist t_sg = { 0 }; size_t t_offset; if ((*mrq)->error) return mspro_block_complete_req(card, (*mrq)->error); switch ((*mrq)->tpc) { case MS_TPC_WRITE_REG: memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->transfer_cmd, 1); (*mrq)->need_card_int = 1; return 0; case MS_TPC_SET_CMD: t_val = (*mrq)->int_reg; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) goto has_int_reg; return 0; case MS_TPC_GET_INT: t_val = (*mrq)->data[0]; has_int_reg: if (t_val & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) { t_val = MSPRO_CMD_STOP; memstick_init_req(*mrq, MS_TPC_SET_CMD, &t_val, 1); card->next_request = h_mspro_block_default; return 0; } if (msb->current_page == (msb->req_sg[msb->current_seg].length / msb->page_size)) { msb->current_page = 0; msb->current_seg++; if (msb->current_seg == msb->seg_count) { if (t_val & MEMSTICK_INT_CED) { return mspro_block_complete_req(card, 0); } else { card->next_request = h_mspro_block_wait_for_ced; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } } } if (!(t_val & MEMSTICK_INT_BREQ)) { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } t_offset = msb->req_sg[msb->current_seg].offset; t_offset += msb->current_page * msb->page_size; sg_set_page(&t_sg, nth_page(sg_page(&(msb->req_sg[msb->current_seg])), t_offset >> PAGE_SHIFT), msb->page_size, offset_in_page(t_offset)); memstick_init_req_sg(*mrq, msb->data_dir == READ ? MS_TPC_READ_LONG_DATA : MS_TPC_WRITE_LONG_DATA, &t_sg); (*mrq)->need_card_int = 1; return 0; case MS_TPC_READ_LONG_DATA: case MS_TPC_WRITE_LONG_DATA: msb->current_page++; if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) { t_val = (*mrq)->int_reg; goto has_int_reg; } else { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } default: BUG(); } } /*** Transfer setup functions for different access methods. ***/ /** Setup data transfer request for SET_CMD TPC with arguments in card * registers. * * @card Current media instance * @offset Target data offset in bytes * @length Required transfer length in bytes. 
*/ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset, size_t length) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = msb->system, .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)), /* ISO C90 warning precludes direct initialization for now. */ .data_address = 0, .tpc_param = 0 }; do_div(offset, msb->page_size); param.data_address = cpu_to_be32((uint32_t)offset); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_transfer_data; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); } /*** Data transfer ***/ static int mspro_block_issue_req(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); u64 t_off; unsigned int count; while (true) { msb->current_page = 0; msb->current_seg = 0; msb->seg_count = blk_rq_map_sg(msb->block_req->q, msb->block_req, msb->req_sg); if (!msb->seg_count) { unsigned int bytes = blk_rq_cur_bytes(msb->block_req); bool chunk; chunk = blk_update_request(msb->block_req, BLK_STS_RESOURCE, bytes); if (chunk) continue; __blk_mq_end_request(msb->block_req, BLK_STS_RESOURCE); msb->block_req = NULL; return -EAGAIN; } t_off = blk_rq_pos(msb->block_req); t_off <<= 9; count = blk_rq_bytes(msb->block_req); msb->setup_transfer(card, t_off, count); msb->data_dir = rq_data_dir(msb->block_req); msb->transfer_cmd = msb->data_dir == READ ? MSPRO_CMD_READ_DATA : MSPRO_CMD_WRITE_DATA; memstick_new_req(card->host); return 0; } } static int mspro_block_complete_req(struct memstick_dev *card, int error) { struct mspro_block_data *msb = memstick_get_drvdata(card); int cnt; bool chunk; unsigned int t_len = 0; unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); dev_dbg(&card->dev, "complete %d, %d\n", msb->block_req ? 
1 : 0, error); if (msb->block_req) { /* Nothing to do - not really an error */ if (error == -EAGAIN) error = 0; if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { if (msb->data_dir == READ) { for (cnt = 0; cnt < msb->current_seg; cnt++) { t_len += msb->req_sg[cnt].length / msb->page_size; if (msb->current_page) t_len += msb->current_page - 1; t_len *= msb->page_size; } } } else t_len = blk_rq_bytes(msb->block_req); dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); if (error && !t_len) t_len = blk_rq_cur_bytes(msb->block_req); chunk = blk_update_request(msb->block_req, errno_to_blk_status(error), t_len); if (chunk) { error = mspro_block_issue_req(card); if (!error) goto out; } else { __blk_mq_end_request(msb->block_req, errno_to_blk_status(error)); msb->block_req = NULL; } } else { if (!error) error = -EAGAIN; } card->next_request = h_mspro_block_default_bad; complete_all(&card->mrq_complete); out: spin_unlock_irqrestore(&msb->q_lock, flags); return error; } static void mspro_block_stop(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; unsigned long flags; while (1) { spin_lock_irqsave(&msb->q_lock, flags); if (!msb->block_req) { blk_mq_stop_hw_queues(msb->queue); rc = 1; } spin_unlock_irqrestore(&msb->q_lock, flags); if (rc) break; wait_for_completion(&card->mrq_complete); } } static void mspro_block_start(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); blk_mq_start_hw_queues(msb->queue); } static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct memstick_dev *card = hctx->queue->queuedata; struct mspro_block_data *msb = memstick_get_drvdata(card); spin_lock_irq(&msb->q_lock); if (msb->block_req) { spin_unlock_irq(&msb->q_lock); return BLK_STS_DEV_RESOURCE; } if (msb->eject) { spin_unlock_irq(&msb->q_lock); blk_mq_start_request(bd->rq); return BLK_STS_IOERR; } msb->block_req = bd->rq; blk_mq_start_request(bd->rq); if (mspro_block_issue_req(card)) msb->block_req = NULL; spin_unlock_irq(&msb->q_lock); return BLK_STS_OK; } /*** Initialization ***/ static int mspro_block_wait_for_ced(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_wait_for_ced; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_set_interface(struct memstick_dev *card, unsigned char sys_reg) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = sys_reg, .data_count = 0, .data_address = 0, .tpc_param = 0 }; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); memstick_new_req(host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_switch_interface(struct memstick_dev *card) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; try_again: if (msb->caps & MEMSTICK_CAP_PAR4) rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR4); else return 0; if (rc) { printk(KERN_WARNING "%s: could not switch to 4-bit mode, error %d\n", dev_name(&card->dev), rc); return 0; } msb->system = MEMSTICK_SYS_PAR4; 
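/* The card side is now in 4-bit mode; switch the host controller to match before issuing further requests. */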
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4); printk(KERN_INFO "%s: switching to 4-bit parallel mode\n", dev_name(&card->dev)); if (msb->caps & MEMSTICK_CAP_PAR8) { rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8); if (!rc) { msb->system = MEMSTICK_SYS_PAR8; host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR8); printk(KERN_INFO "%s: switching to 8-bit parallel mode\n", dev_name(&card->dev)); } else printk(KERN_WARNING "%s: could not switch to 8-bit mode, error %d\n", dev_name(&card->dev), rc); } card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); rc = card->current_mrq.error; if (rc) { printk(KERN_WARNING "%s: interface error, trying to fall back to serial\n", dev_name(&card->dev)); msb->system = MEMSTICK_SYS_SERIAL; host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); msleep(10); host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); rc = memstick_set_rw_addr(card); if (!rc) rc = mspro_block_set_interface(card, msb->system); if (!rc) { msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; if (msb->caps & MEMSTICK_CAP_PAR8) { msb->caps &= ~MEMSTICK_CAP_PAR8; goto try_again; } } } return rc; } /* Memory allocated for attributes by this function should be freed by * mspro_block_data_clear, no matter if the initialization process succeeded * or failed. */ static int mspro_block_read_attributes(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_attribute *attr = NULL; struct mspro_sys_attr *s_attr = NULL; unsigned char *buffer = NULL; int cnt, rc, attr_count; /* While normally physical device offsets, represented here by * attr_offset and attr_len will be of large numeric types, we can be * sure, that attributes are close enough to the beginning of the * device, to save ourselves some trouble. 
*/ unsigned int addr, attr_offset = 0, attr_len = msb->page_size; attr = kmalloc(msb->page_size, GFP_KERNEL); if (!attr) return -ENOMEM; sg_init_one(&msb->req_sg[0], attr, msb->page_size); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_attr; } if (be16_to_cpu(attr->signature) != MSPRO_BLOCK_SIGNATURE) { printk(KERN_ERR "%s: unrecognized device signature %x\n", dev_name(&card->dev), be16_to_cpu(attr->signature)); rc = -ENODEV; goto out_free_attr; } if (attr->count > MSPRO_BLOCK_MAX_ATTRIBUTES) { printk(KERN_WARNING "%s: way too many attribute entries\n", dev_name(&card->dev)); attr_count = MSPRO_BLOCK_MAX_ATTRIBUTES; } else attr_count = attr->count; msb->attr_group.attrs = kcalloc(attr_count + 1, sizeof(*msb->attr_group.attrs), GFP_KERNEL); if (!msb->attr_group.attrs) { rc = -ENOMEM; goto out_free_attr; } msb->attr_group.name = "media_attributes"; buffer = kmemdup(attr, attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } for (cnt = 0; cnt < attr_count; ++cnt) { s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL); if (!s_attr) { rc = -ENOMEM; goto out_free_buffer; } msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr; addr = be32_to_cpu(attr->entries[cnt].address); s_attr->size = be32_to_cpu(attr->entries[cnt].size); dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, " "size %zx\n", cnt, attr->entries[cnt].id, addr, s_attr->size); s_attr->id = attr->entries[cnt].id; if (mspro_block_attr_name(s_attr->id)) snprintf(s_attr->name, sizeof(s_attr->name), "%s", mspro_block_attr_name(attr->entries[cnt].id)); else snprintf(s_attr->name, sizeof(s_attr->name), "attr_x%02x", attr->entries[cnt].id); sysfs_attr_init(&s_attr->dev_attr.attr); s_attr->dev_attr.attr.name = s_attr->name; s_attr->dev_attr.attr.mode = S_IRUGO; s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); if (!s_attr->size) continue; s_attr->data = kmalloc(s_attr->size, GFP_KERNEL); if (!s_attr->data) { rc = -ENOMEM; goto out_free_buffer; } if (((addr / msb->page_size) == (attr_offset / msb->page_size)) && (((addr + s_attr->size - 1) / msb->page_size) == (attr_offset / msb->page_size))) { memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); continue; } attr_offset = (addr / msb->page_size) * msb->page_size; if ((attr_offset + attr_len) < (addr + s_attr->size)) { kfree(buffer); attr_len = (((addr + s_attr->size) / msb->page_size) + 1 ) * msb->page_size - attr_offset; buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } } sg_init_one(&msb->req_sg[0], buffer, attr_len); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; dev_dbg(&card->dev, "reading attribute range %x, %x\n", attr_offset, attr_len); msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_buffer; } memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); } rc = 0; out_free_buffer: kfree(buffer); out_free_attr: kfree(attr); return rc; } static int mspro_block_init_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct memstick_host *host = 
card->host; int rc = 0; msb->system = MEMSTICK_SYS_SERIAL; msb->setup_transfer = h_mspro_block_setup_cmd; card->reg_addr.r_offset = offsetof(struct mspro_register, status); card->reg_addr.r_length = sizeof(struct ms_status_register); card->reg_addr.w_offset = offsetof(struct mspro_register, param); card->reg_addr.w_length = sizeof(struct mspro_param_register); if (memstick_set_rw_addr(card)) return -EIO; msb->caps = host->caps; msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; rc = mspro_block_switch_interface(card); if (rc) return rc; dev_dbg(&card->dev, "card activated\n"); if (msb->system != MEMSTICK_SYS_SERIAL) msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_get_ro; memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, sizeof(struct ms_status_register)); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) return card->current_mrq.error; dev_dbg(&card->dev, "card r/w status %d\n", msb->read_only ? 0 : 1); msb->page_size = 512; rc = mspro_block_read_attributes(card); if (rc) return rc; dev_dbg(&card->dev, "attributes loaded\n"); return 0; } static const struct blk_mq_ops mspro_mq_ops = { .queue_rq = mspro_queue_rq, }; static int mspro_block_init_disk(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_devinfo *dev_info = NULL; struct mspro_sys_info *sys_info = NULL; struct mspro_sys_attr *s_attr = NULL; int rc, disk_id; unsigned long capacity; for (rc = 0; msb->attr_group.attrs[rc]; ++rc) { s_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[rc]); if (s_attr->id == MSPRO_BLOCK_ID_DEVINFO) dev_info = s_attr->data; else if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO) sys_info = s_attr->data; } if (!dev_info || !sys_info) return -ENODEV; msb->cylinders = be16_to_cpu(dev_info->cylinders); msb->heads = be16_to_cpu(dev_info->heads); msb->sectors_per_track = be16_to_cpu(dev_info->sectors_per_track); msb->page_size = be16_to_cpu(sys_info->unit_size); mutex_lock(&mspro_block_disk_lock); disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL); mutex_unlock(&mspro_block_disk_lock); if (disk_id < 0) return disk_id; rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE); if (rc) goto out_release_id; msb->disk = blk_mq_alloc_disk(&msb->tag_set, card); if (IS_ERR(msb->disk)) { rc = PTR_ERR(msb->disk); goto out_free_tag_set; } msb->queue = msb->disk->queue; blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); blk_queue_max_segment_size(msb->queue, MSPRO_BLOCK_MAX_PAGES * msb->page_size); msb->disk->major = major; msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT; msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT; msb->disk->fops = &ms_block_bdops; msb->disk->private_data = msb; sprintf(msb->disk->disk_name, "mspblk%d", disk_id); blk_queue_logical_block_size(msb->queue, msb->page_size); capacity = be16_to_cpu(sys_info->user_block_count); capacity *= be16_to_cpu(sys_info->block_size); capacity *= msb->page_size >> 9; set_capacity(msb->disk, capacity); dev_dbg(&card->dev, "capacity set %ld\n", capacity); if (msb->read_only) set_disk_ro(msb->disk, true); rc = device_add_disk(&card->dev, msb->disk, NULL); if (rc) goto out_cleanup_disk; msb->active = 1; return 0; out_cleanup_disk: put_disk(msb->disk); out_free_tag_set: blk_mq_free_tag_set(&msb->tag_set); out_release_id: mutex_lock(&mspro_block_disk_lock); 
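	/*
	 * Error path: release the disk id taken by idr_alloc() earlier in
	 * this function; mspro_block_disk_lock serializes both operations.
	 */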
idr_remove(&mspro_block_disk_idr, disk_id); mutex_unlock(&mspro_block_disk_lock); return rc; } static void mspro_block_data_clear(struct mspro_block_data *msb) { int cnt; struct mspro_sys_attr *s_attr; if (msb->attr_group.attrs) { for (cnt = 0; msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(msb->attr_group .attrs[cnt]); kfree(s_attr->data); kfree(s_attr); } kfree(msb->attr_group.attrs); } msb->card = NULL; } static int mspro_block_check_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); return (msb->active == 1); } static int mspro_block_probe(struct memstick_dev *card) { struct mspro_block_data *msb; int rc = 0; msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!msb) return -ENOMEM; memstick_set_drvdata(card, msb); msb->card = card; spin_lock_init(&msb->q_lock); rc = mspro_block_init_card(card); if (rc) goto out_free; rc = sysfs_create_group(&card->dev.kobj, &msb->attr_group); if (rc) goto out_free; rc = mspro_block_init_disk(card); if (!rc) { card->check = mspro_block_check_card; card->stop = mspro_block_stop; card->start = mspro_block_start; return 0; } sysfs_remove_group(&card->dev.kobj, &msb->attr_group); out_free: memstick_set_drvdata(card, NULL); mspro_block_data_clear(msb); kfree(msb); return rc; } static void mspro_block_remove(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); msb->eject = 1; spin_unlock_irqrestore(&msb->q_lock, flags); blk_mq_start_hw_queues(msb->queue); del_gendisk(msb->disk); dev_dbg(&card->dev, "mspro block remove\n"); blk_mq_free_tag_set(&msb->tag_set); msb->queue = NULL; sysfs_remove_group(&card->dev.kobj, &msb->attr_group); mutex_lock(&mspro_block_disk_lock); mspro_block_data_clear(msb); mutex_unlock(&mspro_block_disk_lock); put_disk(msb->disk); memstick_set_drvdata(card, NULL); } #ifdef CONFIG_PM static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; blk_mq_stop_hw_queues(msb->queue); spin_lock_irqsave(&msb->q_lock, flags); msb->active = 0; spin_unlock_irqrestore(&msb->q_lock, flags); return 0; } static int mspro_block_resume(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; #ifdef CONFIG_MEMSTICK_UNSAFE_RESUME struct mspro_block_data *new_msb; struct memstick_host *host = card->host; struct mspro_sys_attr *s_attr, *r_attr; unsigned char cnt; mutex_lock(&host->lock); new_msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!new_msb) { rc = -ENOMEM; goto out_unlock; } new_msb->card = card; memstick_set_drvdata(card, new_msb); rc = mspro_block_init_card(card); if (rc) goto out_free; for (cnt = 0; new_msb->attr_group.attrs[cnt] && msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(new_msb->attr_group.attrs[cnt]); r_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[cnt]); if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO && r_attr->id == s_attr->id) { if (memcmp(s_attr->data, r_attr->data, s_attr->size)) break; msb->active = 1; break; } } out_free: memstick_set_drvdata(card, msb); mspro_block_data_clear(new_msb); kfree(new_msb); out_unlock: mutex_unlock(&host->lock); #endif /* CONFIG_MEMSTICK_UNSAFE_RESUME */ blk_mq_start_hw_queues(msb->queue); return rc; } #else #define mspro_block_suspend NULL #define mspro_block_resume NULL #endif /* CONFIG_PM */ static struct memstick_device_id mspro_block_id_tbl[] = { 
{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_PRO, MEMSTICK_CATEGORY_STORAGE_DUO, MEMSTICK_CLASS_DUO}, {} }; static struct memstick_driver mspro_block_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE }, .id_table = mspro_block_id_tbl, .probe = mspro_block_probe, .remove = mspro_block_remove, .suspend = mspro_block_suspend, .resume = mspro_block_resume }; static int __init mspro_block_init(void) { int rc = -ENOMEM; rc = register_blkdev(major, DRIVER_NAME); if (rc < 0) { printk(KERN_ERR DRIVER_NAME ": failed to register " "major %d, error %d\n", major, rc); return rc; } if (!major) major = rc; rc = memstick_register_driver(&mspro_block_driver); if (rc) unregister_blkdev(major, DRIVER_NAME); return rc; } static void __exit mspro_block_exit(void) { memstick_unregister_driver(&mspro_block_driver); unregister_blkdev(major, DRIVER_NAME); idr_destroy(&mspro_block_disk_idr); } module_init(mspro_block_init); module_exit(mspro_block_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Dubov"); MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl);
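/*
 * Worked example (illustrative only, not part of the driver):
 * mspro_block_issue_req() turns the request's start sector into a byte
 * offset with "t_off <<= 9" (512-byte sectors), and h_mspro_block_setup_cmd()
 * divides that by the media page size to fill the parameter register. For a
 * hypothetical request at sector 2048 moving 4096 bytes on a card with
 * 512-byte pages:
 *
 *	u64 t_off = 2048;	// blk_rq_pos()
 *	t_off <<= 9;		// 1048576 bytes
 *	do_div(t_off, 512);	// msb->page_size -> data_address = 2048
 *	// data_count = 4096 / 512 = 8 pages
 *
 * With a larger page size the page count and sector count diverge, which is
 * why data_count is length / page_size rather than length / 512.
 */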
linux-master
drivers/memstick/core/mspro_block.c
// SPDX-License-Identifier: GPL-2.0-only /* * Sony MemoryStick support * * Copyright (C) 2007 Alex Dubov <[email protected]> * * Special thanks to Carlos Corbacho for providing various MemoryStick cards * that made this driver possible. */ #include <linux/memstick.h> #include <linux/idr.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_runtime.h> #define DRIVER_NAME "memstick" static unsigned int cmd_retries = 3; module_param(cmd_retries, uint, 0644); static struct workqueue_struct *workqueue; static DEFINE_IDR(memstick_host_idr); static DEFINE_SPINLOCK(memstick_host_lock); static int memstick_dev_match(struct memstick_dev *card, struct memstick_device_id *id) { if (id->match_flags & MEMSTICK_MATCH_ALL) { if ((id->type == card->id.type) && (id->category == card->id.category) && (id->class == card->id.class)) return 1; } return 0; } static int memstick_bus_match(struct device *dev, struct device_driver *drv) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); struct memstick_driver *ms_drv = container_of(drv, struct memstick_driver, driver); struct memstick_device_id *ids = ms_drv->id_table; if (ids) { while (ids->match_flags) { if (memstick_dev_match(card, ids)) return 1; ++ids; } } return 0; } static int memstick_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct memstick_dev *card = container_of_const(dev, struct memstick_dev, dev); if (add_uevent_var(env, "MEMSTICK_TYPE=%02X", card->id.type)) return -ENOMEM; if (add_uevent_var(env, "MEMSTICK_CATEGORY=%02X", card->id.category)) return -ENOMEM; if (add_uevent_var(env, "MEMSTICK_CLASS=%02X", card->id.class)) return -ENOMEM; return 0; } static int memstick_device_probe(struct device *dev) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); struct memstick_driver *drv = container_of(dev->driver, struct memstick_driver, driver); int rc = -ENODEV; if (dev->driver && drv->probe) { rc = drv->probe(card); if (!rc) get_device(dev); } return rc; } static void memstick_device_remove(struct device *dev) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); struct memstick_driver *drv = container_of(dev->driver, struct memstick_driver, driver); if (dev->driver && drv->remove) { drv->remove(card); card->dev.driver = NULL; } put_device(dev); } #ifdef CONFIG_PM static int memstick_device_suspend(struct device *dev, pm_message_t state) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); struct memstick_driver *drv = container_of(dev->driver, struct memstick_driver, driver); if (dev->driver && drv->suspend) return drv->suspend(card, state); return 0; } static int memstick_device_resume(struct device *dev) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); struct memstick_driver *drv = container_of(dev->driver, struct memstick_driver, driver); if (dev->driver && drv->resume) return drv->resume(card); return 0; } #else #define memstick_device_suspend NULL #define memstick_device_resume NULL #endif /* CONFIG_PM */ #define MEMSTICK_ATTR(name, format) \ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct memstick_dev *card = container_of(dev, struct memstick_dev, \ dev); \ return sprintf(buf, format, card->id.name); \ } \ static DEVICE_ATTR_RO(name); MEMSTICK_ATTR(type, "%02X"); MEMSTICK_ATTR(category, "%02X"); MEMSTICK_ATTR(class, "%02X"); static struct attribute *memstick_dev_attrs[] = { 
&dev_attr_type.attr, &dev_attr_category.attr, &dev_attr_class.attr, NULL, }; ATTRIBUTE_GROUPS(memstick_dev); static struct bus_type memstick_bus_type = { .name = "memstick", .dev_groups = memstick_dev_groups, .match = memstick_bus_match, .uevent = memstick_uevent, .probe = memstick_device_probe, .remove = memstick_device_remove, .suspend = memstick_device_suspend, .resume = memstick_device_resume }; static void memstick_free(struct device *dev) { struct memstick_host *host = container_of(dev, struct memstick_host, dev); kfree(host); } static struct class memstick_host_class = { .name = "memstick_host", .dev_release = memstick_free }; static void memstick_free_card(struct device *dev) { struct memstick_dev *card = container_of(dev, struct memstick_dev, dev); kfree(card); } static int memstick_dummy_check(struct memstick_dev *card) { return 0; } /** * memstick_detect_change - schedule media detection on memstick host * @host - host to use */ void memstick_detect_change(struct memstick_host *host) { queue_work(workqueue, &host->media_checker); } EXPORT_SYMBOL(memstick_detect_change); /** * memstick_next_req - called by host driver to obtain next request to process * @host - host to use * @mrq - pointer to stick the request to * * Host calls this function from idle state (*mrq == NULL) or after finishing * previous request (*mrq should point to it). If previous request was * unsuccessful, it is retried for predetermined number of times. Return value * of 0 means that new request was assigned to the host. */ int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq) { int rc = -ENXIO; if ((*mrq) && (*mrq)->error && host->retries) { (*mrq)->error = rc; host->retries--; return 0; } if (host->card && host->card->next_request) rc = host->card->next_request(host->card, mrq); if (!rc) host->retries = cmd_retries > 1 ? cmd_retries - 1 : 1; else *mrq = NULL; return rc; } EXPORT_SYMBOL(memstick_next_req); /** * memstick_new_req - notify the host that some requests are pending * @host - host to use */ void memstick_new_req(struct memstick_host *host) { if (host->card) { host->retries = cmd_retries; reinit_completion(&host->card->mrq_complete); host->request(host); } } EXPORT_SYMBOL(memstick_new_req); /** * memstick_init_req_sg - set request fields needed for bulk data transfer * @mrq - request to use * @tpc - memstick Transport Protocol Command * @sg - TPC argument */ void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc, const struct scatterlist *sg) { mrq->tpc = tpc; if (tpc & 8) mrq->data_dir = WRITE; else mrq->data_dir = READ; mrq->sg = *sg; mrq->long_data = 1; if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) mrq->need_card_int = 1; else mrq->need_card_int = 0; } EXPORT_SYMBOL(memstick_init_req_sg); /** * memstick_init_req - set request fields needed for short data transfer * @mrq - request to use * @tpc - memstick Transport Protocol Command * @buf - TPC argument buffer * @length - TPC argument size * * The intended use of this function (transfer of data items several bytes * in size) allows us to just copy the value between request structure and * user supplied buffer. */ void memstick_init_req(struct memstick_request *mrq, unsigned char tpc, const void *buf, size_t length) { mrq->tpc = tpc; if (tpc & 8) mrq->data_dir = WRITE; else mrq->data_dir = READ; mrq->data_len = length > sizeof(mrq->data) ? 
sizeof(mrq->data) : length; if (mrq->data_dir == WRITE) memcpy(mrq->data, buf, mrq->data_len); mrq->long_data = 0; if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) mrq->need_card_int = 1; else mrq->need_card_int = 0; } EXPORT_SYMBOL(memstick_init_req); /* * Functions prefixed with "h_" are protocol callbacks. They can be called from * interrupt context. Return value of 0 means that request processing is still * ongoing, while special error value of -EAGAIN means that current request is * finished (and request processor should come back some time later). */ static int h_memstick_read_dev_id(struct memstick_dev *card, struct memstick_request **mrq) { struct ms_id_register id_reg; if (!(*mrq)) { memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, sizeof(struct ms_id_register)); *mrq = &card->current_mrq; return 0; } if (!(*mrq)->error) { memcpy(&id_reg, (*mrq)->data, sizeof(id_reg)); card->id.match_flags = MEMSTICK_MATCH_ALL; card->id.type = id_reg.type; card->id.category = id_reg.category; card->id.class = id_reg.class; dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode); } complete(&card->mrq_complete); return -EAGAIN; } static int h_memstick_set_rw_addr(struct memstick_dev *card, struct memstick_request **mrq) { if (!(*mrq)) { memstick_init_req(&card->current_mrq, MS_TPC_SET_RW_REG_ADRS, (char *)&card->reg_addr, sizeof(card->reg_addr)); *mrq = &card->current_mrq; return 0; } else { complete(&card->mrq_complete); return -EAGAIN; } } /** * memstick_set_rw_addr - issue SET_RW_REG_ADDR request and wait for it to * complete * @card - media device to use */ int memstick_set_rw_addr(struct memstick_dev *card) { card->next_request = h_memstick_set_rw_addr; memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } EXPORT_SYMBOL(memstick_set_rw_addr); static struct memstick_dev *memstick_alloc_card(struct memstick_host *host) { struct memstick_dev *card = kzalloc(sizeof(struct memstick_dev), GFP_KERNEL); struct memstick_dev *old_card = host->card; struct ms_id_register id_reg; if (card) { card->host = host; dev_set_name(&card->dev, "%s", dev_name(&host->dev)); card->dev.parent = &host->dev; card->dev.bus = &memstick_bus_type; card->dev.release = memstick_free_card; card->check = memstick_dummy_check; card->reg_addr.r_offset = offsetof(struct ms_register, id); card->reg_addr.r_length = sizeof(id_reg); card->reg_addr.w_offset = offsetof(struct ms_register, id); card->reg_addr.w_length = sizeof(id_reg); init_completion(&card->mrq_complete); host->card = card; if (memstick_set_rw_addr(card)) goto err_out; card->next_request = h_memstick_read_dev_id; memstick_new_req(host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) goto err_out; } host->card = old_card; return card; err_out: host->card = old_card; kfree_const(card->dev.kobj.name); kfree(card); return NULL; } static int memstick_power_on(struct memstick_host *host) { int rc = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); if (!rc) rc = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); return rc; } static void memstick_check(struct work_struct *work) { struct memstick_host *host = container_of(work, struct memstick_host, media_checker); struct memstick_dev *card; dev_dbg(&host->dev, "memstick_check started\n"); pm_runtime_get_noresume(host->dev.parent); mutex_lock(&host->lock); if (!host->card) { if (memstick_power_on(host)) goto out_power_off; } else if (host->card->stop) host->card->stop(host->card); if (host->removing) goto 
out_power_off; card = memstick_alloc_card(host); if (!card) { if (host->card) { device_unregister(&host->card->dev); host->card = NULL; } } else { dev_dbg(&host->dev, "new card %02x, %02x, %02x\n", card->id.type, card->id.category, card->id.class); if (host->card) { if (memstick_set_rw_addr(host->card) || !memstick_dev_match(host->card, &card->id) || !(host->card->check(host->card))) { device_unregister(&host->card->dev); host->card = NULL; } else if (host->card->start) host->card->start(host->card); } if (!host->card) { host->card = card; if (device_register(&card->dev)) { put_device(&card->dev); host->card = NULL; } } else { kfree_const(card->dev.kobj.name); kfree(card); } } out_power_off: if (!host->card) host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); mutex_unlock(&host->lock); pm_runtime_put(host->dev.parent); dev_dbg(&host->dev, "memstick_check finished\n"); } /** * memstick_alloc_host - allocate a memstick_host structure * @extra: size of the user private data to allocate * @dev: parent device of the host */ struct memstick_host *memstick_alloc_host(unsigned int extra, struct device *dev) { struct memstick_host *host; host = kzalloc(sizeof(struct memstick_host) + extra, GFP_KERNEL); if (host) { mutex_init(&host->lock); INIT_WORK(&host->media_checker, memstick_check); host->dev.class = &memstick_host_class; host->dev.parent = dev; device_initialize(&host->dev); } return host; } EXPORT_SYMBOL(memstick_alloc_host); /** * memstick_add_host - start request processing on memstick host * @host - host to use */ int memstick_add_host(struct memstick_host *host) { int rc; idr_preload(GFP_KERNEL); spin_lock(&memstick_host_lock); rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT); if (rc >= 0) host->id = rc; spin_unlock(&memstick_host_lock); idr_preload_end(); if (rc < 0) return rc; dev_set_name(&host->dev, "memstick%u", host->id); rc = device_add(&host->dev); if (rc) { spin_lock(&memstick_host_lock); idr_remove(&memstick_host_idr, host->id); spin_unlock(&memstick_host_lock); return rc; } host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); memstick_detect_change(host); return 0; } EXPORT_SYMBOL(memstick_add_host); /** * memstick_remove_host - stop request processing on memstick host * @host - host to use */ void memstick_remove_host(struct memstick_host *host) { host->removing = 1; flush_workqueue(workqueue); mutex_lock(&host->lock); if (host->card) device_unregister(&host->card->dev); host->card = NULL; host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); mutex_unlock(&host->lock); spin_lock(&memstick_host_lock); idr_remove(&memstick_host_idr, host->id); spin_unlock(&memstick_host_lock); device_del(&host->dev); } EXPORT_SYMBOL(memstick_remove_host); /** * memstick_free_host - free memstick host * @host - host to use */ void memstick_free_host(struct memstick_host *host) { mutex_destroy(&host->lock); put_device(&host->dev); } EXPORT_SYMBOL(memstick_free_host); /** * memstick_suspend_host - notify bus driver of host suspension * @host - host to use */ void memstick_suspend_host(struct memstick_host *host) { mutex_lock(&host->lock); host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); mutex_unlock(&host->lock); } EXPORT_SYMBOL(memstick_suspend_host); /** * memstick_resume_host - notify bus driver of host resumption * @host - host to use */ void memstick_resume_host(struct memstick_host *host) { int rc = 0; mutex_lock(&host->lock); if (host->card) rc = memstick_power_on(host); mutex_unlock(&host->lock); if (!rc) memstick_detect_change(host); } 
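/*
 * Note that resume only restores power and serial mode; actually
 * rediscovering the card (and unregistering it if the identity check fails)
 * is deferred to memstick_check() via the detection work queued just above.
 */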
EXPORT_SYMBOL(memstick_resume_host); int memstick_register_driver(struct memstick_driver *drv) { drv->driver.bus = &memstick_bus_type; return driver_register(&drv->driver); } EXPORT_SYMBOL(memstick_register_driver); void memstick_unregister_driver(struct memstick_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL(memstick_unregister_driver); static int __init memstick_init(void) { int rc; workqueue = create_freezable_workqueue("kmemstick"); if (!workqueue) return -ENOMEM; rc = bus_register(&memstick_bus_type); if (rc) goto error_destroy_workqueue; rc = class_register(&memstick_host_class); if (rc) goto error_bus_unregister; return 0; error_bus_unregister: bus_unregister(&memstick_bus_type); error_destroy_workqueue: destroy_workqueue(workqueue); return rc; } static void __exit memstick_exit(void) { class_unregister(&memstick_host_class); bus_unregister(&memstick_bus_type); destroy_workqueue(workqueue); idr_destroy(&memstick_host_idr); } module_init(memstick_init); module_exit(memstick_exit); MODULE_AUTHOR("Alex Dubov"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Sony MemoryStick core driver");
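/*
 * Minimal host-driver usage sketch (illustrative only; the "foo" names are
 * hypothetical). The Realtek drivers that follow use the same pattern:
 *
 *	msh = memstick_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *	if (!msh)
 *		return -ENOMEM;
 *	host = memstick_priv(msh);
 *	msh->request = foo_request;	// kicks off request processing
 *	msh->set_param = foo_set_param;	// MEMSTICK_POWER / MEMSTICK_INTERFACE
 *	msh->caps = MEMSTICK_CAP_PAR4;
 *	rc = memstick_add_host(msh);	// powers off, schedules detection
 *	if (rc)
 *		memstick_free_host(msh);
 *
 * The request callback then calls memstick_next_req() in a loop until it
 * returns non-zero, filling in each struct memstick_request's error field.
 */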
linux-master
drivers/memstick/core/memstick.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Realtek PCI-Express Memstick Card Interface driver * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. * * Author: * Wei WANG <[email protected]> */ #include <linux/module.h> #include <linux/highmem.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/memstick.h> #include <linux/rtsx_pci.h> #include <asm/unaligned.h> struct realtek_pci_ms { struct platform_device *pdev; struct rtsx_pcr *pcr; struct memstick_host *msh; struct memstick_request *req; struct mutex host_mutex; struct work_struct handle_req; u8 ssc_depth; unsigned int clock; unsigned char ifmode; bool eject; }; static inline struct device *ms_dev(struct realtek_pci_ms *host) { return &(host->pdev->dev); } static inline void ms_clear_error(struct realtek_pci_ms *host) { rtsx_pci_write_register(host->pcr, CARD_STOP, MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR); } #ifdef DEBUG static void ms_print_debug_regs(struct realtek_pci_ms *host) { struct rtsx_pcr *pcr = host->pcr; u16 i; u8 *ptr; /* Print MS host internal registers */ rtsx_pci_init_cmd(pcr); for (i = 0xFD40; i <= 0xFD44; i++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); for (i = 0xFD52; i <= 0xFD69; i++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); rtsx_pci_send_cmd(pcr, 100); ptr = rtsx_pci_get_cmd_data(pcr); for (i = 0xFD40; i <= 0xFD44; i++) dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); for (i = 0xFD52; i <= 0xFD69; i++) dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); } #else #define ms_print_debug_regs(host) #endif static int ms_power_on(struct realtek_pci_ms *host) { struct rtsx_pcr *pcr = host->pcr; int err; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, CARD_SHARE_MASK, CARD_SHARE_48_MS); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, MS_CLK_EN); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_MS_CARD); if (err < 0) return err; err = rtsx_pci_card_power_on(pcr, RTSX_MS_CARD); if (err < 0) return err; /* Wait ms power stable */ msleep(150); err = rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN); if (err < 0) return err; return 0; } static int ms_power_off(struct realtek_pci_ms *host) { struct rtsx_pcr *pcr = host->pcr; int err; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0); err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; err = rtsx_pci_card_power_off(pcr, RTSX_MS_CARD); if (err < 0) return err; return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD); } static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir, u8 tpc, u8 cfg, struct scatterlist *sg) { struct rtsx_pcr *pcr = host->pcr; int err; unsigned int length = sg->length; u16 sec_cnt = (u16)(length / 512); u8 val, trans_mode, dma_dir; struct memstick_dev *card = host->msh->card; bool pro_card = card->id.type == MEMSTICK_TYPE_PRO; dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", __func__, tpc, (data_dir == READ) ? "READ" : "WRITE", length); if (data_dir == READ) { dma_dir = DMA_DIR_FROM_CARD; trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ; } else { dma_dir = DMA_DIR_TO_CARD; trans_mode = pro_card ? 
MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE; } rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); if (pro_card) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, (u8)(sec_cnt >> 8)); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt); } rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, DMA_DONE_INT, DMA_DONE_INT); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(length >> 24)); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(length >> 16)); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(length >> 8)); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)length); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode); rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rtsx_pci_send_cmd_no_wait(pcr); err = rtsx_pci_transfer_data(pcr, sg, 1, data_dir == READ, 10000); if (err < 0) { ms_clear_error(host); return err; } rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); if (pro_card) { if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) return -EIO; } else { if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) return -EIO; } return 0; } static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc, u8 cfg, u8 cnt, u8 *data, u8 *int_reg) { struct rtsx_pcr *pcr = host->pcr; int err, i; dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); if (!data) return -EINVAL; rtsx_pci_init_cmd(pcr); for (i = 0; i < cnt; i++) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, data[i]); if (cnt % 2) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES); rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); if (int_reg) rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); err = rtsx_pci_send_cmd(pcr, 5000); if (err < 0) { u8 val; rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); if (int_reg) *int_reg = val & 0x0F; ms_print_debug_regs(host); ms_clear_error(host); if (!(tpc & 0x08)) { if (val & MS_CRC16_ERR) return -EIO; } else { if (!(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) return -EIO; } } return -ETIMEDOUT; } if (int_reg) { u8 *ptr = rtsx_pci_get_cmd_data(pcr) + 1; *int_reg = *ptr & 0x0F; } return 0; } static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc, u8 cfg, u8 cnt, u8 *data, u8 *int_reg) { struct rtsx_pcr *pcr = host->pcr; int err, i; u8 *ptr; dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); if (!data) return -EINVAL; rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES); rtsx_pci_add_cmd(pcr, 
CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); for (i = 0; i < cnt - 1; i++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0); if (cnt % 2) rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0); else rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt - 1, 0, 0); if (int_reg) rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); err = rtsx_pci_send_cmd(pcr, 5000); if (err < 0) { u8 val; rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val); dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); if (int_reg) *int_reg = val & 0x0F; ms_print_debug_regs(host); ms_clear_error(host); if (!(tpc & 0x08)) { if (val & MS_CRC16_ERR) return -EIO; } else { if (!(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) return -EIO; } } return -ETIMEDOUT; } ptr = rtsx_pci_get_cmd_data(pcr) + 1; for (i = 0; i < cnt; i++) data[i] = *ptr++; if (int_reg) *int_reg = *ptr & 0x0F; return 0; } static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host) { struct memstick_request *req = host->req; int err = 0; u8 cfg = 0, int_reg; dev_dbg(ms_dev(host), "%s\n", __func__); if (req->need_card_int) { if (host->ifmode != MEMSTICK_SERIAL) cfg = WAIT_INT; } if (req->long_data) { err = ms_transfer_data(host, req->data_dir, req->tpc, cfg, &(req->sg)); } else { if (req->data_dir == READ) { err = ms_read_bytes(host, req->tpc, cfg, req->data_len, req->data, &int_reg); } else { err = ms_write_bytes(host, req->tpc, cfg, req->data_len, req->data, &int_reg); } } if (err < 0) return err; if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) { err = ms_read_bytes(host, MS_TPC_GET_INT, NO_WAIT_INT, 1, &int_reg, NULL); if (err < 0) return err; } if (req->need_card_int) { dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg); if (int_reg & MS_INT_CMDNK) req->int_reg |= MEMSTICK_INT_CMDNAK; if (int_reg & MS_INT_BREQ) req->int_reg |= MEMSTICK_INT_BREQ; if (int_reg & MS_INT_ERR) req->int_reg |= MEMSTICK_INT_ERR; if (int_reg & MS_INT_CED) req->int_reg |= MEMSTICK_INT_CED; } return 0; } static void rtsx_pci_ms_handle_req(struct work_struct *work) { struct realtek_pci_ms *host = container_of(work, struct realtek_pci_ms, handle_req); struct rtsx_pcr *pcr = host->pcr; struct memstick_host *msh = host->msh; int rc; mutex_lock(&pcr->pcr_mutex); rtsx_pci_start_run(pcr); rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth, false, true, false); rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, MS_MOD_SEL); rtsx_pci_write_register(pcr, CARD_SHARE_MODE, CARD_SHARE_MASK, CARD_SHARE_48_MS); if (!host->req) { do { rc = memstick_next_req(msh, &host->req); dev_dbg(ms_dev(host), "next req %d\n", rc); if (!rc) host->req->error = rtsx_pci_ms_issue_cmd(host); } while (!rc); } mutex_unlock(&pcr->pcr_mutex); } static void rtsx_pci_ms_request(struct memstick_host *msh) { struct realtek_pci_ms *host = memstick_priv(msh); dev_dbg(ms_dev(host), "--> %s\n", __func__); if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD)) return; schedule_work(&host->handle_req); } static int rtsx_pci_ms_set_param(struct memstick_host *msh, enum memstick_param param, int value) { struct realtek_pci_ms *host = memstick_priv(msh); struct rtsx_pcr *pcr = host->pcr; unsigned int clock = 0; u8 ssc_depth = 0; int err; dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", __func__, param, value); err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD); if (err) return err; switch (param) { case MEMSTICK_POWER: if (value == MEMSTICK_POWER_ON) err = ms_power_on(host); else if (value == MEMSTICK_POWER_OFF) err = ms_power_off(host); else 
return -EINVAL; break; case MEMSTICK_INTERFACE: if (value == MEMSTICK_SERIAL) { clock = 19000000; ssc_depth = RTSX_SSC_DEPTH_500K; err = rtsx_pci_write_register(pcr, MS_CFG, 0x58, MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT); if (err < 0) return err; } else if (value == MEMSTICK_PAR4) { clock = 39000000; ssc_depth = RTSX_SSC_DEPTH_1M; err = rtsx_pci_write_register(pcr, MS_CFG, 0x58, MS_BUS_WIDTH_4 | PUSH_TIME_ODD); if (err < 0) return err; } else { return -EINVAL; } err = rtsx_pci_switch_clock(pcr, clock, ssc_depth, false, true, false); if (err < 0) return err; host->ssc_depth = ssc_depth; host->clock = clock; host->ifmode = value; break; } return 0; } #ifdef CONFIG_PM static int rtsx_pci_ms_suspend(struct platform_device *pdev, pm_message_t state) { struct realtek_pci_ms *host = platform_get_drvdata(pdev); struct memstick_host *msh = host->msh; dev_dbg(ms_dev(host), "--> %s\n", __func__); memstick_suspend_host(msh); return 0; } static int rtsx_pci_ms_resume(struct platform_device *pdev) { struct realtek_pci_ms *host = platform_get_drvdata(pdev); struct memstick_host *msh = host->msh; dev_dbg(ms_dev(host), "--> %s\n", __func__); memstick_resume_host(msh); return 0; } #else /* CONFIG_PM */ #define rtsx_pci_ms_suspend NULL #define rtsx_pci_ms_resume NULL #endif /* CONFIG_PM */ static void rtsx_pci_ms_card_event(struct platform_device *pdev) { struct realtek_pci_ms *host = platform_get_drvdata(pdev); memstick_detect_change(host->msh); } static int rtsx_pci_ms_drv_probe(struct platform_device *pdev) { struct memstick_host *msh; struct realtek_pci_ms *host; struct rtsx_pcr *pcr; struct pcr_handle *handle = pdev->dev.platform_data; int rc; if (!handle) return -ENXIO; pcr = handle->pcr; if (!pcr) return -ENXIO; dev_dbg(&(pdev->dev), ": Realtek PCI-E Memstick controller found\n"); msh = memstick_alloc_host(sizeof(*host), &pdev->dev); if (!msh) return -ENOMEM; host = memstick_priv(msh); host->pcr = pcr; host->msh = msh; host->pdev = pdev; platform_set_drvdata(pdev, host); pcr->slots[RTSX_MS_CARD].p_dev = pdev; pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event; mutex_init(&host->host_mutex); INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req); msh->request = rtsx_pci_ms_request; msh->set_param = rtsx_pci_ms_set_param; msh->caps = MEMSTICK_CAP_PAR4; rc = memstick_add_host(msh); if (rc) { memstick_free_host(msh); return rc; } return 0; } static int rtsx_pci_ms_drv_remove(struct platform_device *pdev) { struct realtek_pci_ms *host = platform_get_drvdata(pdev); struct rtsx_pcr *pcr; struct memstick_host *msh; int rc; if (!host) return 0; pcr = host->pcr; pcr->slots[RTSX_MS_CARD].p_dev = NULL; pcr->slots[RTSX_MS_CARD].card_event = NULL; msh = host->msh; host->eject = true; cancel_work_sync(&host->handle_req); mutex_lock(&host->host_mutex); if (host->req) { dev_dbg(&(pdev->dev), "%s: Controller removed during transfer\n", dev_name(&msh->dev)); rtsx_pci_complete_unfinished_transfer(pcr); host->req->error = -ENOMEDIUM; do { rc = memstick_next_req(msh, &host->req); if (!rc) host->req->error = -ENOMEDIUM; } while (!rc); } mutex_unlock(&host->host_mutex); memstick_remove_host(msh); memstick_free_host(msh); dev_dbg(&(pdev->dev), ": Realtek PCI-E Memstick controller has been removed\n"); return 0; } static struct platform_device_id rtsx_pci_ms_ids[] = { { .name = DRV_NAME_RTSX_PCI_MS, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids); static struct platform_driver rtsx_pci_ms_driver = { .probe = rtsx_pci_ms_drv_probe, .remove = rtsx_pci_ms_drv_remove, .id_table = rtsx_pci_ms_ids, 
.suspend = rtsx_pci_ms_suspend, .resume = rtsx_pci_ms_resume, .driver = { .name = DRV_NAME_RTSX_PCI_MS, }, }; module_platform_driver(rtsx_pci_ms_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Wei WANG <[email protected]>"); MODULE_DESCRIPTION("Realtek PCI-E Memstick Card Host Driver");
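/*
 * Illustrative note: ms_transfer_data() programs the 32-bit DMA transfer
 * count one byte at a time, most significant byte first. For a hypothetical
 * 0x12345678-byte transfer:
 *
 *	DMATC3 = 0x12;	// (u8)(length >> 24)
 *	DMATC2 = 0x34;	// (u8)(length >> 16)
 *	DMATC1 = 0x56;	// (u8)(length >> 8)
 *	DMATC0 = 0x78;	// (u8)length
 *
 * The sector count registers (MS_SECTOR_CNT_H/L) are split the same way from
 * length / 512, and are only written for MemoryStick Pro cards.
 */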
linux-master
drivers/memstick/host/rtsx_pci_ms.c
// SPDX-License-Identifier: GPL-2.0-only /* Realtek USB Memstick Card Interface driver * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. * * Author: * Roger Tseng <[email protected]> */ #include <linux/module.h> #include <linux/highmem.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/memstick.h> #include <linux/kthread.h> #include <linux/rtsx_usb.h> #include <linux/pm_runtime.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/completion.h> #include <asm/unaligned.h> struct rtsx_usb_ms { struct platform_device *pdev; struct rtsx_ucr *ucr; struct memstick_host *msh; struct memstick_request *req; struct mutex host_mutex; struct work_struct handle_req; struct delayed_work poll_card; u8 ssc_depth; unsigned int clock; int power_mode; unsigned char ifmode; bool eject; bool system_suspending; }; static inline struct device *ms_dev(struct rtsx_usb_ms *host) { return &(host->pdev->dev); } static inline void ms_clear_error(struct rtsx_usb_ms *host) { struct rtsx_ucr *ucr = host->ucr; rtsx_usb_ep0_write_register(ucr, CARD_STOP, MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR); rtsx_usb_clear_dma_err(ucr); rtsx_usb_clear_fsm_err(ucr); } #ifdef DEBUG static void ms_print_debug_regs(struct rtsx_usb_ms *host) { struct rtsx_ucr *ucr = host->ucr; u16 i; u8 *ptr; /* Print MS host internal registers */ rtsx_usb_init_cmd(ucr); /* MS_CFG to MS_INT_REG */ for (i = 0xFD40; i <= 0xFD44; i++) rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0); /* CARD_SHARE_MODE to CARD_GPIO */ for (i = 0xFD51; i <= 0xFD56; i++) rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0); /* CARD_PULL_CTLx */ for (i = 0xFD60; i <= 0xFD65; i++) rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0); /* CARD_DATA_SOURCE, CARD_SELECT, CARD_CLK_EN, CARD_PWR_CTL */ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_DATA_SOURCE, 0, 0); rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_SELECT, 0, 0); rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_CLK_EN, 0, 0); rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_PWR_CTL, 0, 0); rtsx_usb_send_cmd(ucr, MODE_CR, 100); rtsx_usb_get_rsp(ucr, 21, 100); ptr = ucr->rsp_buf; for (i = 0xFD40; i <= 0xFD44; i++) dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); for (i = 0xFD51; i <= 0xFD56; i++) dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); for (i = 0xFD60; i <= 0xFD65; i++) dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_DATA_SOURCE, *(ptr++)); dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_SELECT, *(ptr++)); dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_CLK_EN, *(ptr++)); dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_PWR_CTL, *(ptr++)); } #else static void ms_print_debug_regs(struct rtsx_usb_ms *host) { } #endif static int ms_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr) { rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); return rtsx_usb_send_cmd(ucr, MODE_C, 100); } static int ms_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr) { rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 
0xFF, 0x95); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); return rtsx_usb_send_cmd(ucr, MODE_C, 100); } static int ms_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr) { rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); return rtsx_usb_send_cmd(ucr, MODE_C, 100); } static int ms_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr) { rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); return rtsx_usb_send_cmd(ucr, MODE_C, 100); } static int ms_power_on(struct rtsx_usb_ms *host) { struct rtsx_ucr *ucr = host->ucr; int err; dev_dbg(ms_dev(host), "%s\n", __func__); rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE, CARD_SHARE_MASK, CARD_SHARE_MS); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, MS_CLK_EN); err = rtsx_usb_send_cmd(ucr, MODE_C, 100); if (err < 0) return err; if (CHECK_PKG(ucr, LQFP48)) err = ms_pull_ctl_enable_lqfp48(ucr); else err = ms_pull_ctl_enable_qfn24(ucr); if (err < 0) return err; err = rtsx_usb_write_register(ucr, CARD_PWR_CTL, POWER_MASK, PARTIAL_POWER_ON); if (err) return err; usleep_range(800, 1000); rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK, POWER_ON); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN); return rtsx_usb_send_cmd(ucr, MODE_C, 100); } static int ms_power_off(struct rtsx_usb_ms *host) { struct rtsx_ucr *ucr = host->ucr; int err; dev_dbg(ms_dev(host), "%s\n", __func__); rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0); err = rtsx_usb_send_cmd(ucr, MODE_C, 100); if (err < 0) return err; if (CHECK_PKG(ucr, LQFP48)) return ms_pull_ctl_disable_lqfp48(ucr); return ms_pull_ctl_disable_qfn24(ucr); } static int ms_transfer_data(struct rtsx_usb_ms *host, unsigned char data_dir, u8 tpc, u8 cfg, struct scatterlist *sg) { struct rtsx_ucr *ucr = host->ucr; int err; unsigned int length = sg->length; u16 sec_cnt = (u16)(length / 512); u8 trans_mode, dma_dir, flag; unsigned int pipe; struct memstick_dev *card = host->msh->card; dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n", __func__, tpc, (data_dir == READ) ? 
"READ" : "WRITE", length); if (data_dir == READ) { flag = MODE_CDIR; dma_dir = DMA_DIR_FROM_CARD; if (card->id.type != MEMSTICK_TYPE_PRO) trans_mode = MS_TM_NORMAL_READ; else trans_mode = MS_TM_AUTO_READ; pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN); } else { flag = MODE_CDOR; dma_dir = DMA_DIR_TO_CARD; if (card->id.type != MEMSTICK_TYPE_PRO) trans_mode = MS_TM_NORMAL_WRITE; else trans_mode = MS_TM_AUTO_WRITE; pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT); } rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); if (card->id.type == MEMSTICK_TYPE_PRO) { rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_SECTOR_CNT_H, 0xFF, (u8)(sec_cnt >> 8)); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt); } rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3, 0xFF, (u8)(length >> 24)); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2, 0xFF, (u8)(length >> 16)); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1, 0xFF, (u8)(length >> 8)); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0, 0xFF, (u8)length); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | trans_mode); rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); err = rtsx_usb_send_cmd(ucr, flag | STAGE_MS_STATUS, 100); if (err) return err; err = rtsx_usb_transfer_data(ucr, pipe, sg, length, 1, NULL, 10000); if (err) goto err_out; err = rtsx_usb_get_rsp(ucr, 3, 15000); if (err) goto err_out; if (ucr->rsp_buf[0] & MS_TRANSFER_ERR || ucr->rsp_buf[1] & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) { err = -EIO; goto err_out; } return 0; err_out: ms_clear_error(host); return err; } static int ms_write_bytes(struct rtsx_usb_ms *host, u8 tpc, u8 cfg, u8 cnt, u8 *data, u8 *int_reg) { struct rtsx_ucr *ucr = host->ucr; int err, i; dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc); rtsx_usb_init_cmd(ucr); for (i = 0; i < cnt; i++) rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, data[i]); if (cnt % 2) rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES); rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rtsx_usb_add_cmd(ucr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); err = rtsx_usb_send_cmd(ucr, MODE_CR, 100); if (err) return err; err = rtsx_usb_get_rsp(ucr, 2, 5000); if (err || (ucr->rsp_buf[0] & MS_TRANSFER_ERR)) { u8 val; rtsx_usb_ep0_read_register(ucr, MS_TRANS_CFG, &val); dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); if (int_reg) *int_reg = val & 0x0F; ms_print_debug_regs(host); ms_clear_error(host); if (!(tpc & 0x08)) { if (val & MS_CRC16_ERR) return -EIO; } else { if (!(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) return -EIO; } } return -ETIMEDOUT; } if (int_reg) *int_reg = ucr->rsp_buf[1] & 0x0F; return 0; } static int ms_read_bytes(struct rtsx_usb_ms *host, u8 tpc, u8 cfg, u8 cnt, u8 *data, u8 *int_reg) { struct rtsx_ucr *ucr = host->ucr; int err, i; u8 *ptr; dev_dbg(ms_dev(host), "%s: tpc = 
0x%02x\n", __func__, tpc); rtsx_usb_init_cmd(ucr); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES); rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); for (i = 0; i < cnt - 1; i++) rtsx_usb_add_cmd(ucr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0); if (cnt % 2) rtsx_usb_add_cmd(ucr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0); else rtsx_usb_add_cmd(ucr, READ_REG_CMD, PPBUF_BASE2 + cnt - 1, 0, 0); rtsx_usb_add_cmd(ucr, READ_REG_CMD, MS_TRANS_CFG, 0, 0); err = rtsx_usb_send_cmd(ucr, MODE_CR, 100); if (err) return err; err = rtsx_usb_get_rsp(ucr, cnt + 2, 5000); if (err || (ucr->rsp_buf[0] & MS_TRANSFER_ERR)) { u8 val; rtsx_usb_ep0_read_register(ucr, MS_TRANS_CFG, &val); dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val); if (int_reg && (host->ifmode != MEMSTICK_SERIAL)) *int_reg = val & 0x0F; ms_print_debug_regs(host); ms_clear_error(host); if (!(tpc & 0x08)) { if (val & MS_CRC16_ERR) return -EIO; } else { if (!(val & 0x80)) { if (val & (MS_INT_ERR | MS_INT_CMDNK)) return -EIO; } } return -ETIMEDOUT; } ptr = ucr->rsp_buf + 1; for (i = 0; i < cnt; i++) data[i] = *ptr++; if (int_reg && (host->ifmode != MEMSTICK_SERIAL)) *int_reg = *ptr & 0x0F; return 0; } static int rtsx_usb_ms_issue_cmd(struct rtsx_usb_ms *host) { struct memstick_request *req = host->req; int err = 0; u8 cfg = 0, int_reg; dev_dbg(ms_dev(host), "%s\n", __func__); if (req->need_card_int) { if (host->ifmode != MEMSTICK_SERIAL) cfg = WAIT_INT; } if (req->long_data) { err = ms_transfer_data(host, req->data_dir, req->tpc, cfg, &(req->sg)); } else { if (req->data_dir == READ) err = ms_read_bytes(host, req->tpc, cfg, req->data_len, req->data, &int_reg); else err = ms_write_bytes(host, req->tpc, cfg, req->data_len, req->data, &int_reg); } if (err < 0) return err; if (req->need_card_int) { if (host->ifmode == MEMSTICK_SERIAL) { err = ms_read_bytes(host, MS_TPC_GET_INT, NO_WAIT_INT, 1, &req->int_reg, NULL); if (err < 0) return err; } else { if (int_reg & MS_INT_CMDNK) req->int_reg |= MEMSTICK_INT_CMDNAK; if (int_reg & MS_INT_BREQ) req->int_reg |= MEMSTICK_INT_BREQ; if (int_reg & MS_INT_ERR) req->int_reg |= MEMSTICK_INT_ERR; if (int_reg & MS_INT_CED) req->int_reg |= MEMSTICK_INT_CED; } dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", req->int_reg); } return 0; } static void rtsx_usb_ms_handle_req(struct work_struct *work) { struct rtsx_usb_ms *host = container_of(work, struct rtsx_usb_ms, handle_req); struct rtsx_ucr *ucr = host->ucr; struct memstick_host *msh = host->msh; int rc; if (!host->req) { pm_runtime_get_sync(ms_dev(host)); do { rc = memstick_next_req(msh, &host->req); dev_dbg(ms_dev(host), "next req %d\n", rc); if (!rc) { mutex_lock(&ucr->dev_mutex); if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD)) host->req->error = -EIO; else host->req->error = rtsx_usb_ms_issue_cmd(host); mutex_unlock(&ucr->dev_mutex); dev_dbg(ms_dev(host), "req result %d\n", host->req->error); } } while (!rc); pm_runtime_put_sync(ms_dev(host)); } } static void rtsx_usb_ms_request(struct memstick_host *msh) { struct rtsx_usb_ms *host = memstick_priv(msh); dev_dbg(ms_dev(host), "--> %s\n", __func__); if (!host->eject) schedule_work(&host->handle_req); } static int rtsx_usb_ms_set_param(struct memstick_host *msh, enum memstick_param 
param, int value) { struct rtsx_usb_ms *host = memstick_priv(msh); struct rtsx_ucr *ucr = host->ucr; unsigned int clock = 0; u8 ssc_depth = 0; int err; dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", __func__, param, value); pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); if (err) goto out; switch (param) { case MEMSTICK_POWER: if (value == host->power_mode) break; if (value == MEMSTICK_POWER_ON) { pm_runtime_get_noresume(ms_dev(host)); err = ms_power_on(host); if (err) pm_runtime_put_noidle(ms_dev(host)); } else if (value == MEMSTICK_POWER_OFF) { err = ms_power_off(host); if (!err) pm_runtime_put_noidle(ms_dev(host)); } else err = -EINVAL; if (!err) host->power_mode = value; break; case MEMSTICK_INTERFACE: if (value == MEMSTICK_SERIAL) { clock = 19000000; ssc_depth = SSC_DEPTH_512K; err = rtsx_usb_write_register(ucr, MS_CFG, 0x5A, MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT); if (err < 0) break; } else if (value == MEMSTICK_PAR4) { clock = 39000000; ssc_depth = SSC_DEPTH_1M; err = rtsx_usb_write_register(ucr, MS_CFG, 0x5A, MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT); if (err < 0) break; } else { err = -EINVAL; break; } err = rtsx_usb_switch_clock(ucr, clock, ssc_depth, false, true, false); if (err < 0) { dev_dbg(ms_dev(host), "switch clock failed\n"); break; } host->ssc_depth = ssc_depth; host->clock = clock; host->ifmode = value; break; default: err = -EINVAL; break; } out: mutex_unlock(&ucr->dev_mutex); pm_runtime_put_sync(ms_dev(host)); /* power-on delay */ if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) { usleep_range(10000, 12000); if (!host->eject) schedule_delayed_work(&host->poll_card, 100); } dev_dbg(ms_dev(host), "%s: return = %d\n", __func__, err); return err; } #ifdef CONFIG_PM_SLEEP static int rtsx_usb_ms_suspend(struct device *dev) { struct rtsx_usb_ms *host = dev_get_drvdata(dev); struct memstick_host *msh = host->msh; /* Since we use rtsx_usb's resume callback to runtime resume its * children to implement remote wakeup signaling, this causes * rtsx_usb_ms' runtime resume callback runs after its suspend * callback: * rtsx_usb_ms_suspend() * rtsx_usb_resume() * -> rtsx_usb_ms_runtime_resume() * -> memstick_detect_change() * * rtsx_usb_suspend() * * To avoid this, skip runtime resume/suspend if system suspend is * underway. 
*/ host->system_suspending = true; memstick_suspend_host(msh); return 0; } static int rtsx_usb_ms_resume(struct device *dev) { struct rtsx_usb_ms *host = dev_get_drvdata(dev); struct memstick_host *msh = host->msh; memstick_resume_host(msh); host->system_suspending = false; return 0; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM static int rtsx_usb_ms_runtime_suspend(struct device *dev) { struct rtsx_usb_ms *host = dev_get_drvdata(dev); if (host->system_suspending) return 0; if (host->msh->card || host->power_mode != MEMSTICK_POWER_OFF) return -EAGAIN; return 0; } static int rtsx_usb_ms_runtime_resume(struct device *dev) { struct rtsx_usb_ms *host = dev_get_drvdata(dev); if (host->system_suspending) return 0; memstick_detect_change(host->msh); return 0; } #endif /* CONFIG_PM */ static const struct dev_pm_ops rtsx_usb_ms_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(rtsx_usb_ms_suspend, rtsx_usb_ms_resume) SET_RUNTIME_PM_OPS(rtsx_usb_ms_runtime_suspend, rtsx_usb_ms_runtime_resume, NULL) }; static void rtsx_usb_ms_poll_card(struct work_struct *work) { struct rtsx_usb_ms *host = container_of(work, struct rtsx_usb_ms, poll_card.work); struct rtsx_ucr *ucr = host->ucr; int err; u8 val; if (host->eject || host->power_mode != MEMSTICK_POWER_ON) return; pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); /* Check pending MS card changes */ err = rtsx_usb_read_register(ucr, CARD_INT_PEND, &val); if (err) { mutex_unlock(&ucr->dev_mutex); goto poll_again; } /* Clear the pending */ rtsx_usb_write_register(ucr, CARD_INT_PEND, XD_INT | MS_INT | SD_INT, XD_INT | MS_INT | SD_INT); mutex_unlock(&ucr->dev_mutex); if (val & MS_INT) { dev_dbg(ms_dev(host), "MS slot change detected\n"); memstick_detect_change(host->msh); } poll_again: pm_runtime_put_sync(ms_dev(host)); if (!host->eject && host->power_mode == MEMSTICK_POWER_ON) schedule_delayed_work(&host->poll_card, 100); } static int rtsx_usb_ms_drv_probe(struct platform_device *pdev) { struct memstick_host *msh; struct rtsx_usb_ms *host; struct rtsx_ucr *ucr; int err; ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); if (!ucr) return -ENXIO; dev_dbg(&(pdev->dev), "Realtek USB Memstick controller found\n"); msh = memstick_alloc_host(sizeof(*host), &pdev->dev); if (!msh) return -ENOMEM; host = memstick_priv(msh); host->ucr = ucr; host->msh = msh; host->pdev = pdev; host->power_mode = MEMSTICK_POWER_OFF; platform_set_drvdata(pdev, host); mutex_init(&host->host_mutex); INIT_WORK(&host->handle_req, rtsx_usb_ms_handle_req); INIT_DELAYED_WORK(&host->poll_card, rtsx_usb_ms_poll_card); msh->request = rtsx_usb_ms_request; msh->set_param = rtsx_usb_ms_set_param; msh->caps = MEMSTICK_CAP_PAR4; pm_runtime_get_noresume(ms_dev(host)); pm_runtime_set_active(ms_dev(host)); pm_runtime_enable(ms_dev(host)); err = memstick_add_host(msh); if (err) goto err_out; pm_runtime_put(ms_dev(host)); return 0; err_out: pm_runtime_disable(ms_dev(host)); pm_runtime_put_noidle(ms_dev(host)); memstick_free_host(msh); return err; } static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) { struct rtsx_usb_ms *host = platform_get_drvdata(pdev); struct memstick_host *msh = host->msh; int err; host->eject = true; cancel_work_sync(&host->handle_req); mutex_lock(&host->host_mutex); if (host->req) { dev_dbg(ms_dev(host), "%s: Controller removed during transfer\n", dev_name(&msh->dev)); host->req->error = -ENOMEDIUM; do { err = memstick_next_req(msh, &host->req); if (!err) host->req->error = -ENOMEDIUM; } while (!err); } mutex_unlock(&host->host_mutex); /* Balance possible 
	 * unbalanced usage count
	 * e.g. unconditional module removal
	 */
	if (pm_runtime_active(ms_dev(host)))
		pm_runtime_put(ms_dev(host));

	pm_runtime_disable(ms_dev(host));
	memstick_remove_host(msh);
	dev_dbg(ms_dev(host),
		": Realtek USB Memstick controller has been removed\n");
	memstick_free_host(msh);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_device_id rtsx_usb_ms_ids[] = {
	{
		.name = "rtsx_usb_ms",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, rtsx_usb_ms_ids);

static struct platform_driver rtsx_usb_ms_driver = {
	.probe		= rtsx_usb_ms_drv_probe,
	.remove		= rtsx_usb_ms_drv_remove,
	.id_table	= rtsx_usb_ms_ids,
	.driver		= {
		.name	= "rtsx_usb_ms",
		.pm	= &rtsx_usb_ms_pm_ops,
	},
};
module_platform_driver(rtsx_usb_ms_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Roger Tseng <[email protected]>");
MODULE_DESCRIPTION("Realtek USB Memstick Card Host Driver");
linux-master
drivers/memstick/host/rtsx_usb_ms.c
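/*
 * All of the host drivers in this dump pump requests through the same idiom
 * around memstick_next_req(): keep pulling requests until the core returns
 * non-zero, recording a per-request error each time. The standalone sketch
 * below models only the shape of that loop; demo_req, demo_next_req() and
 * demo_issue_cmd() are hypothetical stand-ins, not the kernel API used by
 * rtsx_usb_ms_handle_req() above.
 */
#include <stdio.h>

struct demo_req { int tpc; int error; };

static struct demo_req queue[3] = { { .tpc = 1 }, { .tpc = 2 }, { .tpc = 3 } };
static int queue_pos;

/* Models memstick_next_req(): returns 0 while a request is handed out and
 * non-zero once the queue is drained; the prior request is completed first. */
static int demo_next_req(struct demo_req **req)
{
	if (*req)
		printf("completed TPC %d, error %d\n", (*req)->tpc, (*req)->error);
	if (queue_pos >= 3) {
		*req = NULL;
		return 1;
	}
	*req = &queue[queue_pos++];
	return 0;
}

/* Models rtsx_usb_ms_issue_cmd(): pretend the second TPC fails (-EIO-style) */
static int demo_issue_cmd(struct demo_req *req)
{
	return req->tpc == 2 ? -5 : 0;
}

int main(void)
{
	struct demo_req *req = NULL;
	int rc;

	do {	/* same drain-loop shape as rtsx_usb_ms_handle_req() */
		rc = demo_next_req(&req);
		if (!rc)
			req->error = demo_issue_cmd(req);
	} while (!rc);

	return 0;
}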
// SPDX-License-Identifier: GPL-2.0-only /* * jmb38x_ms.c - JMicron jmb38x MemoryStick card reader * * Copyright (C) 2008 Alex Dubov <[email protected]> */ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/highmem.h> #include <linux/memstick.h> #include <linux/slab.h> #include <linux/module.h> #define DRIVER_NAME "jmb38x_ms" static bool no_dma; module_param(no_dma, bool, 0644); enum { DMA_ADDRESS = 0x00, BLOCK = 0x04, DMA_CONTROL = 0x08, TPC_P0 = 0x0c, TPC_P1 = 0x10, TPC = 0x14, HOST_CONTROL = 0x18, DATA = 0x1c, STATUS = 0x20, INT_STATUS = 0x24, INT_STATUS_ENABLE = 0x28, INT_SIGNAL_ENABLE = 0x2c, TIMER = 0x30, TIMER_CONTROL = 0x34, PAD_OUTPUT_ENABLE = 0x38, PAD_PU_PD = 0x3c, CLOCK_DELAY = 0x40, ADMA_ADDRESS = 0x44, CLOCK_CONTROL = 0x48, LED_CONTROL = 0x4c, VERSION = 0x50 }; struct jmb38x_ms_host { struct jmb38x_ms *chip; void __iomem *addr; spinlock_t lock; struct tasklet_struct notify; int id; char host_id[32]; int irq; unsigned int block_pos; unsigned long timeout_jiffies; struct timer_list timer; struct memstick_host *msh; struct memstick_request *req; unsigned char cmd_flags; unsigned char io_pos; unsigned char ifmode; unsigned int io_word[2]; }; struct jmb38x_ms { struct pci_dev *pdev; int host_cnt; struct memstick_host *hosts[]; }; #define BLOCK_COUNT_MASK 0xffff0000 #define BLOCK_SIZE_MASK 0x00000fff #define DMA_CONTROL_ENABLE 0x00000001 #define TPC_DATA_SEL 0x00008000 #define TPC_DIR 0x00004000 #define TPC_WAIT_INT 0x00002000 #define TPC_GET_INT 0x00000800 #define TPC_CODE_SZ_MASK 0x00000700 #define TPC_DATA_SZ_MASK 0x00000007 #define HOST_CONTROL_TDELAY_EN 0x00040000 #define HOST_CONTROL_HW_OC_P 0x00010000 #define HOST_CONTROL_RESET_REQ 0x00008000 #define HOST_CONTROL_REI 0x00004000 #define HOST_CONTROL_LED 0x00000400 #define HOST_CONTROL_FAST_CLK 0x00000200 #define HOST_CONTROL_RESET 0x00000100 #define HOST_CONTROL_POWER_EN 0x00000080 #define HOST_CONTROL_CLOCK_EN 0x00000040 #define HOST_CONTROL_REO 0x00000008 #define HOST_CONTROL_IF_SHIFT 4 #define HOST_CONTROL_IF_SERIAL 0x0 #define HOST_CONTROL_IF_PAR4 0x1 #define HOST_CONTROL_IF_PAR8 0x3 #define STATUS_BUSY 0x00080000 #define STATUS_MS_DAT7 0x00040000 #define STATUS_MS_DAT6 0x00020000 #define STATUS_MS_DAT5 0x00010000 #define STATUS_MS_DAT4 0x00008000 #define STATUS_MS_DAT3 0x00004000 #define STATUS_MS_DAT2 0x00002000 #define STATUS_MS_DAT1 0x00001000 #define STATUS_MS_DAT0 0x00000800 #define STATUS_HAS_MEDIA 0x00000400 #define STATUS_FIFO_EMPTY 0x00000200 #define STATUS_FIFO_FULL 0x00000100 #define STATUS_MS_CED 0x00000080 #define STATUS_MS_ERR 0x00000040 #define STATUS_MS_BRQ 0x00000020 #define STATUS_MS_CNK 0x00000001 #define INT_STATUS_TPC_ERR 0x00080000 #define INT_STATUS_CRC_ERR 0x00040000 #define INT_STATUS_TIMER_TO 0x00020000 #define INT_STATUS_HSK_TO 0x00010000 #define INT_STATUS_ANY_ERR 0x00008000 #define INT_STATUS_FIFO_WRDY 0x00000080 #define INT_STATUS_FIFO_RRDY 0x00000040 #define INT_STATUS_MEDIA_OUT 0x00000010 #define INT_STATUS_MEDIA_IN 0x00000008 #define INT_STATUS_DMA_BOUNDARY 0x00000004 #define INT_STATUS_EOTRAN 0x00000002 #define INT_STATUS_EOTPC 0x00000001 #define INT_STATUS_ALL 0x000f801f #define PAD_OUTPUT_ENABLE_MS 0x0F3F #define PAD_PU_PD_OFF 0x7FFF0000 #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000 #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000 #define CLOCK_CONTROL_BY_MMIO 0x00000008 #define CLOCK_CONTROL_40MHZ 0x00000001 #define CLOCK_CONTROL_50MHZ 0x00000002 #define CLOCK_CONTROL_60MHZ 0x00000010 #define 
CLOCK_CONTROL_62_5MHZ 0x00000004 #define CLOCK_CONTROL_OFF 0x00000000 #define PCI_CTL_CLOCK_DLY_ADDR 0x000000b0 enum { CMD_READY = 0x01, FIFO_READY = 0x02, REG_DATA = 0x04, DMA_DATA = 0x08 }; static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host, unsigned char *buf, unsigned int length) { unsigned int off = 0; while (host->io_pos && length) { buf[off++] = host->io_word[0] & 0xff; host->io_word[0] >>= 8; length--; host->io_pos--; } if (!length) return off; while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { if (length < 4) break; *(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA); length -= 4; off += 4; } if (length && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { host->io_word[0] = readl(host->addr + DATA); for (host->io_pos = 4; host->io_pos; --host->io_pos) { buf[off++] = host->io_word[0] & 0xff; host->io_word[0] >>= 8; length--; if (!length) break; } } return off; } static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host, unsigned char *buf, unsigned int length) { unsigned int off = 0; while (host->io_pos > 4 && length) { buf[off++] = host->io_word[0] & 0xff; host->io_word[0] >>= 8; length--; host->io_pos--; } if (!length) return off; while (host->io_pos && length) { buf[off++] = host->io_word[1] & 0xff; host->io_word[1] >>= 8; length--; host->io_pos--; } return off; } static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host, unsigned char *buf, unsigned int length) { unsigned int off = 0; if (host->io_pos) { while (host->io_pos < 4 && length) { host->io_word[0] |= buf[off++] << (host->io_pos * 8); host->io_pos++; length--; } } if (host->io_pos == 4 && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { writel(host->io_word[0], host->addr + DATA); host->io_pos = 0; host->io_word[0] = 0; } else if (host->io_pos) { return off; } if (!length) return off; while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { if (length < 4) break; __raw_writel(*(unsigned int *)(buf + off), host->addr + DATA); length -= 4; off += 4; } switch (length) { case 3: host->io_word[0] |= buf[off + 2] << 16; host->io_pos++; fallthrough; case 2: host->io_word[0] |= buf[off + 1] << 8; host->io_pos++; fallthrough; case 1: host->io_word[0] |= buf[off]; host->io_pos++; } off += host->io_pos; return off; } static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host, unsigned char *buf, unsigned int length) { unsigned int off = 0; while (host->io_pos < 4 && length) { host->io_word[0] &= ~(0xff << (host->io_pos * 8)); host->io_word[0] |= buf[off++] << (host->io_pos * 8); host->io_pos++; length--; } if (!length) return off; while (host->io_pos < 8 && length) { host->io_word[1] &= ~(0xff << (host->io_pos * 8)); host->io_word[1] |= buf[off++] << (host->io_pos * 8); host->io_pos++; length--; } return off; } static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host) { unsigned int length; unsigned int off; unsigned int t_size, p_cnt; unsigned char *buf; struct page *pg; unsigned long flags = 0; if (host->req->long_data) { length = host->req->sg.length - host->block_pos; off = host->req->sg.offset + host->block_pos; } else { length = host->req->data_len - host->block_pos; off = 0; } while (length) { unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), off >> PAGE_SHIFT); p_off = offset_in_page(off); p_cnt = PAGE_SIZE - p_off; p_cnt = min(p_cnt, length); local_irq_save(flags); buf = kmap_atomic(pg) + p_off; } else { buf = host->req->data + host->block_pos; p_cnt = host->req->data_len - host->block_pos; } if 
(host->req->data_dir == WRITE) t_size = !(host->cmd_flags & REG_DATA) ? jmb38x_ms_write_data(host, buf, p_cnt) : jmb38x_ms_write_reg_data(host, buf, p_cnt); else t_size = !(host->cmd_flags & REG_DATA) ? jmb38x_ms_read_data(host, buf, p_cnt) : jmb38x_ms_read_reg_data(host, buf, p_cnt); if (host->req->long_data) { kunmap_atomic(buf - p_off); local_irq_restore(flags); } if (!t_size) break; host->block_pos += t_size; length -= t_size; off += t_size; } if (!length && host->req->data_dir == WRITE) { if (host->cmd_flags & REG_DATA) { writel(host->io_word[0], host->addr + TPC_P0); writel(host->io_word[1], host->addr + TPC_P1); } else if (host->io_pos) { writel(host->io_word[0], host->addr + DATA); } } return length; } static int jmb38x_ms_issue_cmd(struct memstick_host *msh) { struct jmb38x_ms_host *host = memstick_priv(msh); unsigned int data_len, cmd, t_val; if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) { dev_dbg(&msh->dev, "no media status\n"); host->req->error = -ETIME; return host->req->error; } dev_dbg(&msh->dev, "control %08x\n", readl(host->addr + HOST_CONTROL)); dev_dbg(&msh->dev, "status %08x\n", readl(host->addr + INT_STATUS)); dev_dbg(&msh->dev, "hstatus %08x\n", readl(host->addr + STATUS)); host->cmd_flags = 0; host->block_pos = 0; host->io_pos = 0; host->io_word[0] = 0; host->io_word[1] = 0; cmd = host->req->tpc << 16; cmd |= TPC_DATA_SEL; if (host->req->data_dir == READ) cmd |= TPC_DIR; if (host->req->need_card_int) { if (host->ifmode == MEMSTICK_SERIAL) cmd |= TPC_GET_INT; else cmd |= TPC_WAIT_INT; } if (!no_dma) host->cmd_flags |= DMA_DATA; if (host->req->long_data) { data_len = host->req->sg.length; } else { data_len = host->req->data_len; host->cmd_flags &= ~DMA_DATA; } if (data_len <= 8) { cmd &= ~(TPC_DATA_SEL | 0xf); host->cmd_flags |= REG_DATA; cmd |= data_len & 0xf; host->cmd_flags &= ~DMA_DATA; } if (host->cmd_flags & DMA_DATA) { if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1, host->req->data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE)) { host->req->error = -ENOMEM; return host->req->error; } data_len = sg_dma_len(&host->req->sg); writel(sg_dma_address(&host->req->sg), host->addr + DMA_ADDRESS); writel(((1 << 16) & BLOCK_COUNT_MASK) | (data_len & BLOCK_SIZE_MASK), host->addr + BLOCK); writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL); } else if (!(host->cmd_flags & REG_DATA)) { writel(((1 << 16) & BLOCK_COUNT_MASK) | (data_len & BLOCK_SIZE_MASK), host->addr + BLOCK); t_val = readl(host->addr + INT_STATUS_ENABLE); t_val |= host->req->data_dir == READ ? 
INT_STATUS_FIFO_RRDY : INT_STATUS_FIFO_WRDY; writel(t_val, host->addr + INT_STATUS_ENABLE); writel(t_val, host->addr + INT_SIGNAL_ENABLE); } else { cmd &= ~(TPC_DATA_SEL | 0xf); host->cmd_flags |= REG_DATA; cmd |= data_len & 0xf; if (host->req->data_dir == WRITE) { jmb38x_ms_transfer_data(host); writel(host->io_word[0], host->addr + TPC_P0); writel(host->io_word[1], host->addr + TPC_P1); } } mod_timer(&host->timer, jiffies + host->timeout_jiffies); writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); host->req->error = 0; writel(cmd, host->addr + TPC); dev_dbg(&msh->dev, "executing TPC %08x, len %x\n", cmd, data_len); return 0; } static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last) { struct jmb38x_ms_host *host = memstick_priv(msh); unsigned int t_val = 0; int rc; del_timer(&host->timer); dev_dbg(&msh->dev, "c control %08x\n", readl(host->addr + HOST_CONTROL)); dev_dbg(&msh->dev, "c status %08x\n", readl(host->addr + INT_STATUS)); dev_dbg(&msh->dev, "c hstatus %08x\n", readl(host->addr + STATUS)); host->req->int_reg = readl(host->addr + STATUS) & 0xff; writel(0, host->addr + BLOCK); writel(0, host->addr + DMA_CONTROL); if (host->cmd_flags & DMA_DATA) { dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1, host->req->data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { t_val = readl(host->addr + INT_STATUS_ENABLE); if (host->req->data_dir == READ) t_val &= ~INT_STATUS_FIFO_RRDY; else t_val &= ~INT_STATUS_FIFO_WRDY; writel(t_val, host->addr + INT_STATUS_ENABLE); writel(t_val, host->addr + INT_SIGNAL_ENABLE); } writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); if (!last) { do { rc = memstick_next_req(msh, &host->req); } while (!rc && jmb38x_ms_issue_cmd(msh)); } else { do { rc = memstick_next_req(msh, &host->req); if (!rc) host->req->error = -ETIME; } while (!rc); } } static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id) { struct memstick_host *msh = dev_id; struct jmb38x_ms_host *host = memstick_priv(msh); unsigned int irq_status; spin_lock(&host->lock); irq_status = readl(host->addr + INT_STATUS); dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status); if (irq_status == 0 || irq_status == (~0)) { spin_unlock(&host->lock); return IRQ_NONE; } if (host->req) { if (irq_status & INT_STATUS_ANY_ERR) { if (irq_status & INT_STATUS_CRC_ERR) host->req->error = -EILSEQ; else if (irq_status & INT_STATUS_TPC_ERR) { dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n"); jmb38x_ms_complete_cmd(msh, 0); } else host->req->error = -ETIME; } else { if (host->cmd_flags & DMA_DATA) { if (irq_status & INT_STATUS_EOTRAN) host->cmd_flags |= FIFO_READY; } else { if (irq_status & (INT_STATUS_FIFO_RRDY | INT_STATUS_FIFO_WRDY)) jmb38x_ms_transfer_data(host); if (irq_status & INT_STATUS_EOTRAN) { jmb38x_ms_transfer_data(host); host->cmd_flags |= FIFO_READY; } } if (irq_status & INT_STATUS_EOTPC) { host->cmd_flags |= CMD_READY; if (host->cmd_flags & REG_DATA) { if (host->req->data_dir == READ) { host->io_word[0] = readl(host->addr + TPC_P0); host->io_word[1] = readl(host->addr + TPC_P1); host->io_pos = 8; jmb38x_ms_transfer_data(host); } host->cmd_flags |= FIFO_READY; } } } } if (irq_status & (INT_STATUS_MEDIA_IN | INT_STATUS_MEDIA_OUT)) { dev_dbg(&host->chip->pdev->dev, "media changed\n"); memstick_detect_change(msh); } writel(irq_status, host->addr + INT_STATUS); if (host->req && (((host->cmd_flags & CMD_READY) && (host->cmd_flags & FIFO_READY)) || host->req->error)) jmb38x_ms_complete_cmd(msh, 0); 
spin_unlock(&host->lock); return IRQ_HANDLED; } static void jmb38x_ms_abort(struct timer_list *t) { struct jmb38x_ms_host *host = from_timer(host, t, timer); struct memstick_host *msh = host->msh; unsigned long flags; dev_dbg(&host->chip->pdev->dev, "abort\n"); spin_lock_irqsave(&host->lock, flags); if (host->req) { host->req->error = -ETIME; jmb38x_ms_complete_cmd(msh, 0); } spin_unlock_irqrestore(&host->lock, flags); } static void jmb38x_ms_req_tasklet(unsigned long data) { struct memstick_host *msh = (struct memstick_host *)data; struct jmb38x_ms_host *host = memstick_priv(msh); unsigned long flags; int rc; spin_lock_irqsave(&host->lock, flags); if (!host->req) { do { rc = memstick_next_req(msh, &host->req); dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc); } while (!rc && jmb38x_ms_issue_cmd(msh)); } spin_unlock_irqrestore(&host->lock, flags); } static void jmb38x_ms_dummy_submit(struct memstick_host *msh) { return; } static void jmb38x_ms_submit_req(struct memstick_host *msh) { struct jmb38x_ms_host *host = memstick_priv(msh); tasklet_schedule(&host->notify); } static int jmb38x_ms_reset(struct jmb38x_ms_host *host) { int cnt; writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN | readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); for (cnt = 0; cnt < 20; ++cnt) { if (!(HOST_CONTROL_RESET_REQ & readl(host->addr + HOST_CONTROL))) goto reset_next; ndelay(20); } dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n"); reset_next: writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN | readl(host->addr + HOST_CONTROL), host->addr + HOST_CONTROL); for (cnt = 0; cnt < 20; ++cnt) { if (!(HOST_CONTROL_RESET & readl(host->addr + HOST_CONTROL))) goto reset_ok; ndelay(20); } dev_dbg(&host->chip->pdev->dev, "reset timeout\n"); return -EIO; reset_ok: writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); return 0; } static int jmb38x_ms_set_param(struct memstick_host *msh, enum memstick_param param, int value) { struct jmb38x_ms_host *host = memstick_priv(msh); unsigned int host_ctl = readl(host->addr + HOST_CONTROL); unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0; int rc = 0; switch (param) { case MEMSTICK_POWER: if (value == MEMSTICK_POWER_ON) { rc = jmb38x_ms_reset(host); if (rc) return rc; host_ctl = 7; host_ctl |= HOST_CONTROL_POWER_EN | HOST_CONTROL_CLOCK_EN; writel(host_ctl, host->addr + HOST_CONTROL); writel(host->id ? 
PAD_PU_PD_ON_MS_SOCK1 : PAD_PU_PD_ON_MS_SOCK0, host->addr + PAD_PU_PD); writel(PAD_OUTPUT_ENABLE_MS, host->addr + PAD_OUTPUT_ENABLE); msleep(10); dev_dbg(&host->chip->pdev->dev, "power on\n"); } else if (value == MEMSTICK_POWER_OFF) { host_ctl &= ~(HOST_CONTROL_POWER_EN | HOST_CONTROL_CLOCK_EN); writel(host_ctl, host->addr + HOST_CONTROL); writel(0, host->addr + PAD_OUTPUT_ENABLE); writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD); dev_dbg(&host->chip->pdev->dev, "power off\n"); } else return -EINVAL; break; case MEMSTICK_INTERFACE: dev_dbg(&host->chip->pdev->dev, "Set Host Interface Mode to %d\n", value); host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI | HOST_CONTROL_REO); host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P; host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT); if (value == MEMSTICK_SERIAL) { host_ctl |= HOST_CONTROL_IF_SERIAL << HOST_CONTROL_IF_SHIFT; host_ctl |= HOST_CONTROL_REI; clock_ctl |= CLOCK_CONTROL_40MHZ; clock_delay = 0; } else if (value == MEMSTICK_PAR4) { host_ctl |= HOST_CONTROL_FAST_CLK; host_ctl |= HOST_CONTROL_IF_PAR4 << HOST_CONTROL_IF_SHIFT; host_ctl |= HOST_CONTROL_REO; clock_ctl |= CLOCK_CONTROL_40MHZ; clock_delay = 4; } else if (value == MEMSTICK_PAR8) { host_ctl |= HOST_CONTROL_FAST_CLK; host_ctl |= HOST_CONTROL_IF_PAR8 << HOST_CONTROL_IF_SHIFT; clock_ctl |= CLOCK_CONTROL_50MHZ; clock_delay = 0; } else return -EINVAL; writel(host_ctl, host->addr + HOST_CONTROL); writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL); writel(clock_ctl, host->addr + CLOCK_CONTROL); pci_write_config_byte(host->chip->pdev, PCI_CTL_CLOCK_DLY_ADDR + 1, clock_delay); host->ifmode = value; break; } return 0; } #define PCI_PMOS0_CONTROL 0xae #define PMOS0_ENABLE 0x01 #define PMOS0_OVERCURRENT_LEVEL_2_4V 0x06 #define PMOS0_EN_OVERCURRENT_DEBOUNCE 0x40 #define PMOS0_SW_LED_POLARITY_ENABLE 0x80 #define PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \ PMOS0_OVERCURRENT_LEVEL_2_4V) #define PCI_PMOS1_CONTROL 0xbd #define PMOS1_ACTIVE_BITS 0x4a #define PCI_CLOCK_CTL 0xb9 static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag) { unsigned char val; pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val); if (flag) val |= PMOS0_ACTIVE_BITS; else val &= ~PMOS0_ACTIVE_BITS; pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val); dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val); if (pci_resource_flags(pdev, 1)) { pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val); if (flag) val |= PMOS1_ACTIVE_BITS; else val &= ~PMOS1_ACTIVE_BITS; pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val); dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val); } pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val); pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f); pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01); dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n"); return 0; } static int __maybe_unused jmb38x_ms_suspend(struct device *dev) { struct jmb38x_ms *jm = dev_get_drvdata(dev); int cnt; for (cnt = 0; cnt < jm->host_cnt; ++cnt) { if (!jm->hosts[cnt]) break; memstick_suspend_host(jm->hosts[cnt]); } device_wakeup_disable(dev); return 0; } static int __maybe_unused jmb38x_ms_resume(struct device *dev) { struct jmb38x_ms *jm = dev_get_drvdata(dev); int rc; jmb38x_ms_pmos(to_pci_dev(dev), 1); for (rc = 0; rc < jm->host_cnt; ++rc) { if (!jm->hosts[rc]) break; memstick_resume_host(jm->hosts[rc]); memstick_detect_change(jm->hosts[rc]); } return 0; } static int jmb38x_ms_count_slots(struct pci_dev *pdev) { int cnt, rc = 0; for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) { 
if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt))) break; if (256 != pci_resource_len(pdev, cnt)) break; ++rc; } return rc; } static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) { struct memstick_host *msh; struct jmb38x_ms_host *host; msh = memstick_alloc_host(sizeof(struct jmb38x_ms_host), &jm->pdev->dev); if (!msh) return NULL; host = memstick_priv(msh); host->msh = msh; host->chip = jm; host->addr = ioremap(pci_resource_start(jm->pdev, cnt), pci_resource_len(jm->pdev, cnt)); if (!host->addr) goto err_out_free; spin_lock_init(&host->lock); host->id = cnt; snprintf(host->host_id, sizeof(host->host_id), DRIVER_NAME ":slot%d", host->id); host->irq = jm->pdev->irq; host->timeout_jiffies = msecs_to_jiffies(1000); tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh); msh->request = jmb38x_ms_submit_req; msh->set_param = jmb38x_ms_set_param; msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; timer_setup(&host->timer, jmb38x_ms_abort, 0); if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id, msh)) return msh; iounmap(host->addr); err_out_free: memstick_free_host(msh); return NULL; } static void jmb38x_ms_free_host(struct memstick_host *msh) { struct jmb38x_ms_host *host = memstick_priv(msh); free_irq(host->irq, msh); iounmap(host->addr); memstick_free_host(msh); } static int jmb38x_ms_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct jmb38x_ms *jm; int pci_dev_busy = 0; int rc, cnt; rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc) return rc; rc = pci_enable_device(pdev); if (rc) return rc; pci_set_master(pdev); rc = pci_request_regions(pdev, DRIVER_NAME); if (rc) { pci_dev_busy = 1; goto err_out; } jmb38x_ms_pmos(pdev, 1); cnt = jmb38x_ms_count_slots(pdev); if (!cnt) { rc = -ENODEV; pci_dev_busy = 1; goto err_out_int; } jm = kzalloc(struct_size(jm, hosts, cnt), GFP_KERNEL); if (!jm) { rc = -ENOMEM; goto err_out_int; } jm->pdev = pdev; jm->host_cnt = cnt; pci_set_drvdata(pdev, jm); for (cnt = 0; cnt < jm->host_cnt; ++cnt) { jm->hosts[cnt] = jmb38x_ms_alloc_host(jm, cnt); if (!jm->hosts[cnt]) break; rc = memstick_add_host(jm->hosts[cnt]); if (rc) { jmb38x_ms_free_host(jm->hosts[cnt]); jm->hosts[cnt] = NULL; break; } } if (cnt) return 0; rc = -ENODEV; pci_set_drvdata(pdev, NULL); kfree(jm); err_out_int: pci_release_regions(pdev); err_out: if (!pci_dev_busy) pci_disable_device(pdev); return rc; } static void jmb38x_ms_remove(struct pci_dev *dev) { struct jmb38x_ms *jm = pci_get_drvdata(dev); struct jmb38x_ms_host *host; int cnt; unsigned long flags; for (cnt = 0; cnt < jm->host_cnt; ++cnt) { if (!jm->hosts[cnt]) break; host = memstick_priv(jm->hosts[cnt]); jm->hosts[cnt]->request = jmb38x_ms_dummy_submit; tasklet_kill(&host->notify); writel(0, host->addr + INT_SIGNAL_ENABLE); writel(0, host->addr + INT_STATUS_ENABLE); dev_dbg(&jm->pdev->dev, "interrupts off\n"); spin_lock_irqsave(&host->lock, flags); if (host->req) { host->req->error = -ETIME; jmb38x_ms_complete_cmd(jm->hosts[cnt], 1); } spin_unlock_irqrestore(&host->lock, flags); memstick_remove_host(jm->hosts[cnt]); dev_dbg(&jm->pdev->dev, "host removed\n"); jmb38x_ms_free_host(jm->hosts[cnt]); } jmb38x_ms_pmos(dev, 0); pci_set_drvdata(dev, NULL); pci_release_regions(dev); pci_disable_device(dev); kfree(jm); } static struct pci_device_id jmb38x_ms_id_tbl [] = { { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) }, { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) }, { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) }, { } }; static 
SIMPLE_DEV_PM_OPS(jmb38x_ms_pm_ops, jmb38x_ms_suspend, jmb38x_ms_resume);

static struct pci_driver jmb38x_ms_driver = {
	.name = DRIVER_NAME,
	.id_table = jmb38x_ms_id_tbl,
	.probe = jmb38x_ms_probe,
	.remove = jmb38x_ms_remove,
	.driver.pm = &jmb38x_ms_pm_ops,
};

module_pci_driver(jmb38x_ms_driver);

MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl);
linux-master
drivers/memstick/host/jmb38x_ms.c
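/*
 * jmb38x_ms_write_data() above (and tifm_ms_write_data() below) stage
 * unaligned trailing bytes in a 32-bit io_word so the data FIFO only ever
 * sees whole little-endian words. A minimal standalone sketch of that
 * packing; fifo_push() is a hypothetical stand-in for the driver's
 * writel() to the DATA register:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t io_word;	/* staging word, mirrors host->io_word[0] */
static unsigned io_pos;		/* bytes currently staged, mirrors host->io_pos */

static void fifo_push(uint32_t w)	/* stand-in for writel(w, DATA) */
{
	printf("FIFO <- %08x\n", w);
}

/* Pack an arbitrary-length buffer into whole 32-bit FIFO words, keeping
 * 1..3 leftover bytes staged exactly like the driver does. */
static void write_data(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		io_word |= (uint32_t)buf[i] << (io_pos * 8);
		if (++io_pos == 4) {
			fifo_push(io_word);
			io_word = 0;
			io_pos = 0;
		}
	}
}

static void flush(void)			/* final partial word, if any */
{
	if (io_pos) {
		fifo_push(io_word);
		io_word = 0;
		io_pos = 0;
	}
}

int main(void)
{
	const uint8_t msg[7] = { 1, 2, 3, 4, 5, 6, 7 };

	write_data(msg, sizeof(msg));	/* pushes 0x04030201, stages 3 bytes */
	flush();			/* pushes 0x00070605 */
	return 0;
}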
// SPDX-License-Identifier: GPL-2.0-only /* * TI FlashMedia driver * * Copyright (C) 2007 Alex Dubov <[email protected]> * * Special thanks to Carlos Corbacho for providing various MemoryStick cards * that made this driver possible. */ #include <linux/tifm.h> #include <linux/memstick.h> #include <linux/highmem.h> #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/module.h> #include <asm/io.h> #define DRIVER_NAME "tifm_ms" static bool no_dma; module_param(no_dma, bool, 0644); /* * Some control bits of TIFM appear to conform to Sony's reference design, * so I'm just assuming they all are. */ #define TIFM_MS_STAT_DRQ 0x04000 #define TIFM_MS_STAT_MSINT 0x02000 #define TIFM_MS_STAT_RDY 0x01000 #define TIFM_MS_STAT_CRC 0x00200 #define TIFM_MS_STAT_TOE 0x00100 #define TIFM_MS_STAT_EMP 0x00020 #define TIFM_MS_STAT_FUL 0x00010 #define TIFM_MS_STAT_CED 0x00008 #define TIFM_MS_STAT_ERR 0x00004 #define TIFM_MS_STAT_BRQ 0x00002 #define TIFM_MS_STAT_CNK 0x00001 #define TIFM_MS_SYS_DMA 0x10000 #define TIFM_MS_SYS_RESET 0x08000 #define TIFM_MS_SYS_SRAC 0x04000 #define TIFM_MS_SYS_INTEN 0x02000 #define TIFM_MS_SYS_NOCRC 0x01000 #define TIFM_MS_SYS_INTCLR 0x00800 #define TIFM_MS_SYS_MSIEN 0x00400 #define TIFM_MS_SYS_FCLR 0x00200 #define TIFM_MS_SYS_FDIR 0x00100 #define TIFM_MS_SYS_DAM 0x00080 #define TIFM_MS_SYS_DRM 0x00040 #define TIFM_MS_SYS_DRQSL 0x00020 #define TIFM_MS_SYS_REI 0x00010 #define TIFM_MS_SYS_REO 0x00008 #define TIFM_MS_SYS_BSY_MASK 0x00007 #define TIFM_MS_SYS_FIFO (TIFM_MS_SYS_INTEN | TIFM_MS_SYS_MSIEN \ | TIFM_MS_SYS_FCLR | TIFM_MS_SYS_BSY_MASK) /* Hardware flags */ enum { CMD_READY = 0x01, FIFO_READY = 0x02, CARD_INT = 0x04 }; struct tifm_ms { struct tifm_dev *dev; struct timer_list timer; struct memstick_request *req; struct tasklet_struct notify; unsigned int mode_mask; unsigned int block_pos; unsigned long timeout_jiffies; unsigned char eject:1, use_dma:1; unsigned char cmd_flags; unsigned char io_pos; unsigned int io_word; }; static unsigned int tifm_ms_read_data(struct tifm_ms *host, unsigned char *buf, unsigned int length) { struct tifm_dev *sock = host->dev; unsigned int off = 0; while (host->io_pos && length) { buf[off++] = host->io_word & 0xff; host->io_word >>= 8; length--; host->io_pos--; } if (!length) return off; while (!(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) { if (length < 4) break; *(unsigned int *)(buf + off) = __raw_readl(sock->addr + SOCK_MS_DATA); length -= 4; off += 4; } if (length && !(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) { host->io_word = readl(sock->addr + SOCK_MS_DATA); for (host->io_pos = 4; host->io_pos; --host->io_pos) { buf[off++] = host->io_word & 0xff; host->io_word >>= 8; length--; if (!length) break; } } return off; } static unsigned int tifm_ms_write_data(struct tifm_ms *host, unsigned char *buf, unsigned int length) { struct tifm_dev *sock = host->dev; unsigned int off = 0; if (host->io_pos) { while (host->io_pos < 4 && length) { host->io_word |= buf[off++] << (host->io_pos * 8); host->io_pos++; length--; } } if (host->io_pos == 4 && !(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) { writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), sock->addr + SOCK_MS_SYSTEM); writel(host->io_word, sock->addr + SOCK_MS_DATA); host->io_pos = 0; host->io_word = 0; } else if (host->io_pos) { return off; } if (!length) return off; while (!(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) { if (length < 4) break; writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), sock->addr + 
SOCK_MS_SYSTEM); __raw_writel(*(unsigned int *)(buf + off), sock->addr + SOCK_MS_DATA); length -= 4; off += 4; } switch (length) { case 3: host->io_word |= buf[off + 2] << 16; host->io_pos++; fallthrough; case 2: host->io_word |= buf[off + 1] << 8; host->io_pos++; fallthrough; case 1: host->io_word |= buf[off]; host->io_pos++; } off += host->io_pos; return off; } static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) { struct tifm_dev *sock = host->dev; unsigned int length; unsigned int off; unsigned int t_size, p_cnt; unsigned char *buf; struct page *pg; unsigned long flags = 0; if (host->req->long_data) { length = host->req->sg.length - host->block_pos; off = host->req->sg.offset + host->block_pos; } else { length = host->req->data_len - host->block_pos; off = 0; } dev_dbg(&sock->dev, "fifo data transfer, %d, %d\n", length, host->block_pos); while (length) { unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), off >> PAGE_SHIFT); p_off = offset_in_page(off); p_cnt = PAGE_SIZE - p_off; p_cnt = min(p_cnt, length); local_irq_save(flags); buf = kmap_atomic(pg) + p_off; } else { buf = host->req->data + host->block_pos; p_cnt = host->req->data_len - host->block_pos; } t_size = host->req->data_dir == WRITE ? tifm_ms_write_data(host, buf, p_cnt) : tifm_ms_read_data(host, buf, p_cnt); if (host->req->long_data) { kunmap_atomic(buf - p_off); local_irq_restore(flags); } if (!t_size) break; host->block_pos += t_size; length -= t_size; off += t_size; } dev_dbg(&sock->dev, "fifo data transfer, %d remaining\n", length); if (!length && (host->req->data_dir == WRITE)) { if (host->io_pos) { writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), sock->addr + SOCK_MS_SYSTEM); writel(host->io_word, sock->addr + SOCK_MS_DATA); } writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), sock->addr + SOCK_MS_SYSTEM); writel(0, sock->addr + SOCK_MS_DATA); } else { readl(sock->addr + SOCK_MS_DATA); } return length; } static int tifm_ms_issue_cmd(struct tifm_ms *host) { struct tifm_dev *sock = host->dev; unsigned int data_len, cmd, sys_param; host->cmd_flags = 0; host->block_pos = 0; host->io_pos = 0; host->io_word = 0; host->cmd_flags = 0; host->use_dma = !no_dma; if (host->req->long_data) { data_len = host->req->sg.length; if (!is_power_of_2(data_len)) host->use_dma = 0; } else { data_len = host->req->data_len; host->use_dma = 0; } writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); if (host->use_dma) { if (1 != tifm_map_sg(sock, &host->req->sg, 1, host->req->data_dir == READ ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE)) { host->req->error = -ENOMEM; return host->req->error; } data_len = sg_dma_len(&host->req->sg); writel(ilog2(data_len) - 2, sock->addr + SOCK_FIFO_PAGE_SIZE); writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); sys_param = TIFM_DMA_EN | (1 << 8); if (host->req->data_dir == WRITE) sys_param |= TIFM_DMA_TX; writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); writel(sg_dma_address(&host->req->sg), sock->addr + SOCK_DMA_ADDRESS); writel(sys_param, sock->addr + SOCK_DMA_CONTROL); } else { writel(host->mode_mask | TIFM_MS_SYS_FIFO, sock->addr + SOCK_MS_SYSTEM); writel(TIFM_FIFO_MORE, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); } mod_timer(&host->timer, jiffies + host->timeout_jiffies); writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); host->req->error = 0; sys_param = readl(sock->addr + SOCK_MS_SYSTEM); sys_param |= TIFM_MS_SYS_INTCLR; if (host->use_dma) sys_param |= TIFM_MS_SYS_DMA; else sys_param &= ~TIFM_MS_SYS_DMA; writel(sys_param, sock->addr + SOCK_MS_SYSTEM); cmd = (host->req->tpc & 0xf) << 12; cmd |= data_len; writel(cmd, sock->addr + SOCK_MS_COMMAND); dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, sys_param); return 0; } static void tifm_ms_complete_cmd(struct tifm_ms *host) { struct tifm_dev *sock = host->dev; struct memstick_host *msh = tifm_get_drvdata(sock); int rc; del_timer(&host->timer); host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff; host->req->int_reg = (host->req->int_reg & 1) | ((host->req->int_reg << 4) & 0xe0); writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); if (host->use_dma) { tifm_unmap_sg(sock, &host->req->sg, 1, host->req->data_dir == READ ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); } writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); dev_dbg(&sock->dev, "TPC complete\n"); do { rc = memstick_next_req(msh, &host->req); } while (!rc && tifm_ms_issue_cmd(host)); } static int tifm_ms_check_status(struct tifm_ms *host) { if (!host->req->error) { if (!(host->cmd_flags & CMD_READY)) return 1; if (!(host->cmd_flags & FIFO_READY)) return 1; if (host->req->need_card_int && !(host->cmd_flags & CARD_INT)) return 1; } return 0; } /* Called from interrupt handler */ static void tifm_ms_data_event(struct tifm_dev *sock) { struct tifm_ms *host; unsigned int fifo_status = 0, host_status = 0; int rc = 1; spin_lock(&sock->lock); host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); host_status = readl(sock->addr + SOCK_MS_STATUS); dev_dbg(&sock->dev, "data event: fifo_status %x, host_status %x, flags %x\n", fifo_status, host_status, host->cmd_flags); if (host->req) { if (host->use_dma && (fifo_status & 1)) { host->cmd_flags |= FIFO_READY; rc = tifm_ms_check_status(host); } if (!host->use_dma && (fifo_status & TIFM_FIFO_MORE)) { if (!tifm_ms_transfer_data(host)) { host->cmd_flags |= FIFO_READY; rc = tifm_ms_check_status(host); } } } writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); if (!rc) tifm_ms_complete_cmd(host); spin_unlock(&sock->lock); } /* Called from interrupt handler */ static void tifm_ms_card_event(struct tifm_dev *sock) { struct tifm_ms *host; unsigned int host_status = 0; int rc = 1; spin_lock(&sock->lock); host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); host_status = readl(sock->addr + SOCK_MS_STATUS); dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n", host_status, host->cmd_flags); if (host->req) { if (host_status & TIFM_MS_STAT_TOE) host->req->error = -ETIME; else if (host_status & TIFM_MS_STAT_CRC) host->req->error = -EILSEQ; if (host_status & TIFM_MS_STAT_RDY) host->cmd_flags |= CMD_READY; if (host_status & TIFM_MS_STAT_MSINT) host->cmd_flags |= CARD_INT; rc = tifm_ms_check_status(host); } writel(TIFM_MS_SYS_INTCLR | readl(sock->addr + SOCK_MS_SYSTEM), sock->addr + SOCK_MS_SYSTEM); if (!rc) tifm_ms_complete_cmd(host); spin_unlock(&sock->lock); return; } static void tifm_ms_req_tasklet(unsigned long data) { struct memstick_host *msh = (struct memstick_host *)data; struct tifm_ms *host = memstick_priv(msh); struct tifm_dev *sock = host->dev; unsigned long flags; int rc; spin_lock_irqsave(&sock->lock, flags); if (!host->req) { if (host->eject) { do { rc = memstick_next_req(msh, &host->req); if (!rc) host->req->error = -ETIME; } while (!rc); spin_unlock_irqrestore(&sock->lock, flags); return; } do { rc = memstick_next_req(msh, &host->req); } while (!rc && tifm_ms_issue_cmd(host)); } spin_unlock_irqrestore(&sock->lock, flags); } static void tifm_ms_dummy_submit(struct memstick_host *msh) { return; } static void tifm_ms_submit_req(struct memstick_host *msh) { struct tifm_ms *host = memstick_priv(msh); tasklet_schedule(&host->notify); } static int tifm_ms_set_param(struct memstick_host *msh, enum memstick_param param, int value) { struct tifm_ms *host = memstick_priv(msh); struct tifm_dev *sock = host->dev; switch (param) { case MEMSTICK_POWER: /* also affected by media detection mechanism */ if (value == MEMSTICK_POWER_ON) { host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; writel(TIFM_MS_SYS_RESET, sock->addr + SOCK_MS_SYSTEM); writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, sock->addr + 
SOCK_MS_SYSTEM); writel(0xffffffff, sock->addr + SOCK_MS_STATUS); } else if (value == MEMSTICK_POWER_OFF) { writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, sock->addr + SOCK_MS_SYSTEM); writel(0xffffffff, sock->addr + SOCK_MS_STATUS); } else return -EINVAL; break; case MEMSTICK_INTERFACE: if (value == MEMSTICK_SERIAL) { host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; writel((~TIFM_CTRL_FAST_CLK) & readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); } else if (value == MEMSTICK_PAR4) { host->mode_mask = 0; writel(TIFM_CTRL_FAST_CLK | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); } else return -EINVAL; break; } return 0; } static void tifm_ms_abort(struct timer_list *t) { struct tifm_ms *host = from_timer(host, t, timer); dev_dbg(&host->dev->dev, "status %x\n", readl(host->dev->addr + SOCK_MS_STATUS)); printk(KERN_ERR "%s : card failed to respond for a long period of time " "(%x, %x)\n", dev_name(&host->dev->dev), host->req ? host->req->tpc : 0, host->cmd_flags); tifm_eject(host->dev); } static int tifm_ms_probe(struct tifm_dev *sock) { struct memstick_host *msh; struct tifm_ms *host; int rc = -EIO; if (!(TIFM_SOCK_STATE_OCCUPIED & readl(sock->addr + SOCK_PRESENT_STATE))) { printk(KERN_WARNING "%s : card gone, unexpectedly\n", dev_name(&sock->dev)); return rc; } msh = memstick_alloc_host(sizeof(struct tifm_ms), &sock->dev); if (!msh) return -ENOMEM; host = memstick_priv(msh); tifm_set_drvdata(sock, msh); host->dev = sock; host->timeout_jiffies = msecs_to_jiffies(1000); timer_setup(&host->timer, tifm_ms_abort, 0); tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh); msh->request = tifm_ms_submit_req; msh->set_param = tifm_ms_set_param; sock->card_event = tifm_ms_card_event; sock->data_event = tifm_ms_data_event; if (tifm_has_ms_pif(sock)) msh->caps |= MEMSTICK_CAP_PAR4; rc = memstick_add_host(msh); if (!rc) return 0; memstick_free_host(msh); return rc; } static void tifm_ms_remove(struct tifm_dev *sock) { struct memstick_host *msh = tifm_get_drvdata(sock); struct tifm_ms *host = memstick_priv(msh); int rc = 0; unsigned long flags; msh->request = tifm_ms_dummy_submit; tasklet_kill(&host->notify); spin_lock_irqsave(&sock->lock, flags); host->eject = 1; if (host->req) { del_timer(&host->timer); writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); if (host->use_dma) tifm_unmap_sg(sock, &host->req->sg, 1, host->req->data_dir == READ ? 
				      DMA_TO_DEVICE : DMA_FROM_DEVICE);

		host->req->error = -ETIME;
		do {
			rc = memstick_next_req(msh, &host->req);
			if (!rc)
				host->req->error = -ETIME;
		} while (!rc);
	}
	spin_unlock_irqrestore(&sock->lock, flags);

	memstick_remove_host(msh);
	memstick_free_host(msh);
}

#ifdef CONFIG_PM

static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state)
{
	struct memstick_host *msh = tifm_get_drvdata(sock);

	memstick_suspend_host(msh);
	return 0;
}

static int tifm_ms_resume(struct tifm_dev *sock)
{
	struct memstick_host *msh = tifm_get_drvdata(sock);

	memstick_resume_host(msh);
	return 0;
}

#else

#define tifm_ms_suspend NULL
#define tifm_ms_resume NULL

#endif /* CONFIG_PM */

static struct tifm_device_id tifm_ms_id_tbl[] = {
	{ TIFM_TYPE_MS }, { 0 }
};

static struct tifm_driver tifm_ms_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_ms_id_tbl,
	.probe = tifm_ms_probe,
	.remove = tifm_ms_remove,
	.suspend = tifm_ms_suspend,
	.resume = tifm_ms_resume
};

static int __init tifm_ms_init(void)
{
	return tifm_register_driver(&tifm_ms_driver);
}

static void __exit tifm_ms_exit(void)
{
	tifm_unregister_driver(&tifm_ms_driver);
}

MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl);

module_init(tifm_ms_init);
module_exit(tifm_ms_exit);
linux-master
drivers/memstick/host/tifm_ms.c
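/*
 * tifm_ms_complete_cmd() above rebuilds the MemoryStick INT register from the
 * controller status byte: the CNK bit stays at bit 0 while BRQ/ERR/CED are
 * shifted up four bits into the 0x20/0x40/0x80 positions the memstick core
 * expects. A standalone check of that transform; the STAT_* and INT_* macros
 * are local stand-ins for the driver's TIFM_MS_STAT_* and the core's
 * MEMSTICK_INT_* values:
 */
#include <assert.h>
#include <stdint.h>

#define STAT_CNK 0x01	/* command nack */
#define STAT_BRQ 0x02	/* buffer request */
#define STAT_ERR 0x04	/* error */
#define STAT_CED 0x08	/* command end */

#define INT_CMDNAK 0x01
#define INT_BREQ   0x20
#define INT_ERR    0x40
#define INT_CED    0x80

/* The exact expression used in tifm_ms_complete_cmd() */
static uint8_t remap_int_reg(uint8_t status)
{
	return (status & 1) | ((status << 4) & 0xe0);
}

int main(void)
{
	assert(remap_int_reg(STAT_CNK) == INT_CMDNAK);
	assert(remap_int_reg(STAT_BRQ) == INT_BREQ);
	assert(remap_int_reg(STAT_ERR) == INT_ERR);
	assert(remap_int_reg(STAT_CED) == INT_CED);
	return 0;
}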
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 - Maxim Levitsky * driver for Ricoh memstick readers */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/freezer.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/highmem.h> #include <asm/byteorder.h> #include <linux/swab.h> #include "r592.h" static bool r592_enable_dma = 1; static int debug; static const char *tpc_names[] = { "MS_TPC_READ_MG_STATUS", "MS_TPC_READ_LONG_DATA", "MS_TPC_READ_SHORT_DATA", "MS_TPC_READ_REG", "MS_TPC_READ_QUAD_DATA", "INVALID", "MS_TPC_GET_INT", "MS_TPC_SET_RW_REG_ADRS", "MS_TPC_EX_SET_CMD", "MS_TPC_WRITE_QUAD_DATA", "MS_TPC_WRITE_REG", "MS_TPC_WRITE_SHORT_DATA", "MS_TPC_WRITE_LONG_DATA", "MS_TPC_SET_CMD", }; /** * memstick_debug_get_tpc_name - debug helper that returns string for * a TPC number */ static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc) { return tpc_names[tpc-1]; } /* Read a register*/ static inline u32 r592_read_reg(struct r592_device *dev, int address) { u32 value = readl(dev->mmio + address); dbg_reg("reg #%02d == 0x%08x", address, value); return value; } /* Write a register */ static inline void r592_write_reg(struct r592_device *dev, int address, u32 value) { dbg_reg("reg #%02d <- 0x%08x", address, value); writel(value, dev->mmio + address); } /* Reads a big endian DWORD register */ static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address) { u32 value = __raw_readl(dev->mmio + address); dbg_reg("reg #%02d == 0x%08x", address, value); return be32_to_cpu(value); } /* Writes a big endian DWORD register */ static inline void r592_write_reg_raw_be(struct r592_device *dev, int address, u32 value) { dbg_reg("reg #%02d <- 0x%08x", address, value); __raw_writel(cpu_to_be32(value), dev->mmio + address); } /* Set specific bits in a register (little endian) */ static inline void r592_set_reg_mask(struct r592_device *dev, int address, u32 mask) { u32 reg = readl(dev->mmio + address); dbg_reg("reg #%02d |= 0x%08x (old =0x%08x)", address, mask, reg); writel(reg | mask , dev->mmio + address); } /* Clear specific bits in a register (little endian) */ static inline void r592_clear_reg_mask(struct r592_device *dev, int address, u32 mask) { u32 reg = readl(dev->mmio + address); dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)", address, ~mask, reg, mask); writel(reg & ~mask, dev->mmio + address); } /* Wait for status bits while checking for errors */ static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); u32 reg = r592_read_reg(dev, R592_STATUS); if ((reg & mask) == wanted_mask) return 0; while (time_before(jiffies, timeout)) { reg = r592_read_reg(dev, R592_STATUS); if ((reg & mask) == wanted_mask) return 0; if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)) return -EIO; cpu_relax(); } return -ETIME; } /* Enable/disable device */ static int r592_enable_device(struct r592_device *dev, bool enable) { dbg("%sabling the device", enable ? 
"en" : "dis"); if (enable) { /* Power up the card */ r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1); /* Perform a reset */ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); msleep(100); } else /* Power down the card */ r592_write_reg(dev, R592_POWER, 0); return 0; } /* Set serial/parallel mode */ static int r592_set_mode(struct r592_device *dev, bool parallel_mode) { if (!parallel_mode) { dbg("switching to serial mode"); /* Set serial mode */ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL); r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20); } else { dbg("switching to parallel mode"); /* This setting should be set _before_ switch TPC */ r592_set_reg_mask(dev, R592_POWER, R592_POWER_20); r592_clear_reg_mask(dev, R592_IO, R592_IO_SERIAL1 | R592_IO_SERIAL2); /* Set the parallel mode now */ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL); } dev->parallel_mode = parallel_mode; return 0; } /* Perform a controller reset without powering down the card */ static void r592_host_reset(struct r592_device *dev) { r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); msleep(100); r592_set_mode(dev, dev->parallel_mode); } #ifdef CONFIG_PM_SLEEP /* Disable all hardware interrupts */ static void r592_clear_interrupts(struct r592_device *dev) { /* Disable & ACK all interrupts */ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK); r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK); } #endif /* Tests if there is an CRC error */ static int r592_test_io_error(struct r592_device *dev) { if (!(r592_read_reg(dev, R592_STATUS) & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))) return 0; return -EIO; } /* Ensure that FIFO is ready for use */ static int r592_test_fifo_empty(struct r592_device *dev) { if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) return 0; dbg("FIFO not ready, trying to reset the device"); r592_host_reset(dev); if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) return 0; message("FIFO still not ready, giving up"); return -EIO; } /* Activates the DMA transfer from to FIFO */ static void r592_start_dma(struct r592_device *dev, bool is_write) { unsigned long flags; u32 reg; spin_lock_irqsave(&dev->irq_lock, flags); /* Ack interrupts (just in case) + enable them */ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); /* Set DMA address */ r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg)); /* Enable the DMA */ reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS); reg |= R592_FIFO_DMA_SETTINGS_EN; if (!is_write) reg |= R592_FIFO_DMA_SETTINGS_DIR; else reg &= ~R592_FIFO_DMA_SETTINGS_DIR; r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg); spin_unlock_irqrestore(&dev->irq_lock, flags); } /* Cleanups DMA related settings */ static void r592_stop_dma(struct r592_device *dev, int error) { r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS, R592_FIFO_DMA_SETTINGS_EN); /* This is only a precation */ r592_write_reg(dev, R592_FIFO_DMA, dev->dummy_dma_page_physical_address); r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); dev->dma_error = error; } /* Test if hardware supports DMA */ static void r592_check_dma(struct r592_device *dev) { dev->dma_capable = r592_enable_dma && (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & R592_FIFO_DMA_SETTINGS_CAP); } /* Transfers fifo contents in/out using DMA */ static int r592_transfer_fifo_dma(struct r592_device *dev) { int len, sg_count; bool is_write; if (!dev->dma_capable || 
!dev->req->long_data) return -EINVAL; len = dev->req->sg.length; is_write = dev->req->data_dir == WRITE; if (len != R592_LFIFO_SIZE) return -EINVAL; dbg_verbose("doing dma transfer"); dev->dma_error = 0; reinit_completion(&dev->dma_done); /* TODO: hidden assumption about nenth beeing always 1 */ sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) { message("problem in dma_map_sg"); return -EIO; } r592_start_dma(dev, is_write); /* Wait for DMA completion */ if (!wait_for_completion_timeout( &dev->dma_done, msecs_to_jiffies(1000))) { message("DMA timeout"); r592_stop_dma(dev, -ETIMEDOUT); } dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); return dev->dma_error; } /* * Writes the FIFO in 4 byte chunks. * If length isn't 4 byte aligned, rest of the data if put to a fifo * to be written later * Use r592_flush_fifo_write to flush that fifo when writing for the * last time */ static void r592_write_fifo_pio(struct r592_device *dev, unsigned char *buffer, int len) { /* flush spill from former write */ if (!kfifo_is_empty(&dev->pio_fifo)) { u8 tmp[4] = {0}; int copy_len = kfifo_in(&dev->pio_fifo, buffer, len); if (!kfifo_is_full(&dev->pio_fifo)) return; len -= copy_len; buffer += copy_len; copy_len = kfifo_out(&dev->pio_fifo, tmp, 4); WARN_ON(copy_len != 4); r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp); } WARN_ON(!kfifo_is_empty(&dev->pio_fifo)); /* write full dwords */ while (len >= 4) { r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); buffer += 4; len -= 4; } /* put remaining bytes to the spill */ if (len) kfifo_in(&dev->pio_fifo, buffer, len); } /* Flushes the temporary FIFO used to make aligned DWORD writes */ static void r592_flush_fifo_write(struct r592_device *dev) { int ret; u8 buffer[4] = { 0 }; if (kfifo_is_empty(&dev->pio_fifo)) return; ret = kfifo_out(&dev->pio_fifo, buffer, 4); /* intentionally ignore __must_check return code */ (void)ret; r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); } /* * Read a fifo in 4 bytes chunks. * If input doesn't fit the buffer, it places bytes of last dword in spill * buffer, so that they don't get lost on last read, just throw these away. */ static void r592_read_fifo_pio(struct r592_device *dev, unsigned char *buffer, int len) { u8 tmp[4]; /* Read from last spill */ if (!kfifo_is_empty(&dev->pio_fifo)) { int bytes_copied = kfifo_out(&dev->pio_fifo, buffer, min(4, len)); buffer += bytes_copied; len -= bytes_copied; if (!kfifo_is_empty(&dev->pio_fifo)) return; } /* Reads dwords from FIFO */ while (len >= 4) { *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO); buffer += 4; len -= 4; } if (len) { *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO); kfifo_in(&dev->pio_fifo, tmp, 4); len -= kfifo_out(&dev->pio_fifo, buffer, len); } WARN_ON(len); return; } /* Transfers actual data using PIO. */ static int r592_transfer_fifo_pio(struct r592_device *dev) { unsigned long flags; bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; struct sg_mapping_iter miter; kfifo_reset(&dev->pio_fifo); if (!dev->req->long_data) { if (is_write) { r592_write_fifo_pio(dev, dev->req->data, dev->req->data_len); r592_flush_fifo_write(dev); } else r592_read_fifo_pio(dev, dev->req->data, dev->req->data_len); return 0; } local_irq_save(flags); sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC | (is_write ? 
SG_MITER_FROM_SG : SG_MITER_TO_SG)); /* Do the transfer fifo<->memory*/ while (sg_miter_next(&miter)) if (is_write) r592_write_fifo_pio(dev, miter.addr, miter.length); else r592_read_fifo_pio(dev, miter.addr, miter.length); /* Write last few non aligned bytes*/ if (is_write) r592_flush_fifo_write(dev); sg_miter_stop(&miter); local_irq_restore(flags); return 0; } /* Executes one TPC (data is read/written from small or large fifo) */ static void r592_execute_tpc(struct r592_device *dev) { bool is_write; int len, error; u32 status, reg; if (!dev->req) { message("BUG: tpc execution without request!"); return; } is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; len = dev->req->long_data ? dev->req->sg.length : dev->req->data_len; /* Ensure that FIFO can hold the input data */ if (len > R592_LFIFO_SIZE) { message("IO: hardware doesn't support TPCs longer that 512"); error = -ENOSYS; goto out; } if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) { dbg("IO: refusing to send TPC because card is absent"); error = -ENODEV; goto out; } dbg("IO: executing %s LEN=%d", memstick_debug_get_tpc_name(dev->req->tpc), len); /* Set IO direction */ if (is_write) r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION); else r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION); error = r592_test_fifo_empty(dev); if (error) goto out; /* Transfer write data */ if (is_write) { error = r592_transfer_fifo_dma(dev); if (error == -EINVAL) error = r592_transfer_fifo_pio(dev); } if (error) goto out; /* Trigger the TPC */ reg = (len << R592_TPC_EXEC_LEN_SHIFT) | (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) | R592_TPC_EXEC_BIG_FIFO; r592_write_reg(dev, R592_TPC_EXEC, reg); /* Wait for TPC completion */ status = R592_STATUS_RDY; if (dev->req->need_card_int) status |= R592_STATUS_CED; error = r592_wait_status(dev, status, status); if (error) { message("card didn't respond"); goto out; } /* Test IO errors */ error = r592_test_io_error(dev); if (error) { dbg("IO error"); goto out; } /* Read data from FIFO */ if (!is_write) { error = r592_transfer_fifo_dma(dev); if (error == -EINVAL) error = r592_transfer_fifo_pio(dev); } /* read INT reg. 
This can be shortened with shifts, but that way it's more readable */ if (dev->parallel_mode && dev->req->need_card_int) { dev->req->int_reg = 0; status = r592_read_reg(dev, R592_STATUS); if (status & R592_STATUS_P_CMDNACK) dev->req->int_reg |= MEMSTICK_INT_CMDNAK; if (status & R592_STATUS_P_BREQ) dev->req->int_reg |= MEMSTICK_INT_BREQ; if (status & R592_STATUS_P_INTERR) dev->req->int_reg |= MEMSTICK_INT_ERR; if (status & R592_STATUS_P_CED) dev->req->int_reg |= MEMSTICK_INT_CED; } if (error) dbg("FIFO read error"); out: dev->req->error = error; r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED); return; } /* Main request processing thread */ static int r592_process_thread(void *data) { int error; struct r592_device *dev = (struct r592_device *)data; unsigned long flags; while (!kthread_should_stop()) { spin_lock_irqsave(&dev->io_thread_lock, flags); set_current_state(TASK_INTERRUPTIBLE); error = memstick_next_req(dev->host, &dev->req); spin_unlock_irqrestore(&dev->io_thread_lock, flags); if (error) { if (error == -ENXIO || error == -EAGAIN) { dbg_verbose("IO: done IO, sleeping"); } else { dbg("IO: unknown error from " "memstick_next_req %d", error); } if (kthread_should_stop()) set_current_state(TASK_RUNNING); schedule(); } else { set_current_state(TASK_RUNNING); r592_execute_tpc(dev); } } return 0; } /* Reprogram the chip to detect a change in card state */ /* e.g. if a card is detected, arm it to detect removal, and vice versa */ static void r592_update_card_detect(struct r592_device *dev) { u32 reg = r592_read_reg(dev, R592_REG_MSC); bool card_detected = reg & R592_REG_MSC_PRSNT; dbg("update card detect. card state: %s", card_detected ? "present" : "absent"); reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16); if (card_detected) reg |= (R592_REG_MSC_IRQ_REMOVE << 16); else reg |= (R592_REG_MSC_IRQ_INSERT << 16); r592_write_reg(dev, R592_REG_MSC, reg); } /* Timer routine that fires shortly after the last card detection event */ static void r592_detect_timer(struct timer_list *t) { struct r592_device *dev = from_timer(dev, t, detect_timer); r592_update_card_detect(dev); memstick_detect_change(dev->host); } /* Interrupt handler */ static irqreturn_t r592_irq(int irq, void *data) { struct r592_device *dev = (struct r592_device *)data; irqreturn_t ret = IRQ_NONE; u32 reg; u16 irq_enable, irq_status; unsigned long flags; int error; spin_lock_irqsave(&dev->irq_lock, flags); reg = r592_read_reg(dev, R592_REG_MSC); irq_enable = reg >> 16; irq_status = reg & 0xFFFF; /* Ack the interrupts */ reg &= ~irq_status; r592_write_reg(dev, R592_REG_MSC, reg); /* Get the IRQ status minus bits that aren't enabled */ irq_status &= (irq_enable); /* Due to a limitation of the memstick core, we don't look at bits that indicate that the card was removed/inserted and/or present */ if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) { bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT; ret = IRQ_HANDLED; message("IRQ: card %s", card_was_added ? "added" : "removed"); mod_timer(&dev->detect_timer, jiffies + msecs_to_jiffies(card_was_added ? 
500 : 50)); } if (irq_status & (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) { ret = IRQ_HANDLED; if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) { message("IRQ: DMA error"); error = -EIO; } else { dbg_verbose("IRQ: dma done"); error = 0; } r592_stop_dma(dev, error); complete(&dev->dma_done); } spin_unlock_irqrestore(&dev->irq_lock, flags); return ret; } /* External interface: set settings */ static int r592_set_param(struct memstick_host *host, enum memstick_param param, int value) { struct r592_device *dev = memstick_priv(host); switch (param) { case MEMSTICK_POWER: switch (value) { case MEMSTICK_POWER_ON: return r592_enable_device(dev, true); case MEMSTICK_POWER_OFF: return r592_enable_device(dev, false); default: return -EINVAL; } case MEMSTICK_INTERFACE: switch (value) { case MEMSTICK_SERIAL: return r592_set_mode(dev, 0); case MEMSTICK_PAR4: return r592_set_mode(dev, 1); default: return -EINVAL; } default: return -EINVAL; } } /* External interface: submit requests */ static void r592_submit_req(struct memstick_host *host) { struct r592_device *dev = memstick_priv(host); unsigned long flags; if (dev->req) return; spin_lock_irqsave(&dev->io_thread_lock, flags); if (wake_up_process(dev->io_thread)) dbg_verbose("IO thread woken to process requests"); spin_unlock_irqrestore(&dev->io_thread_lock, flags); } static const struct pci_device_id r592_pci_id_tbl[] = { { PCI_VDEVICE(RICOH, 0x0592), }, { }, }; /* Main entry */ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int error = -ENOMEM; struct memstick_host *host; struct r592_device *dev; /* Allocate memory */ host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev); if (!host) goto error1; dev = memstick_priv(host); dev->host = host; dev->pci_dev = pdev; pci_set_drvdata(pdev, dev); /* pci initialization */ error = pci_enable_device(pdev); if (error) goto error2; pci_set_master(pdev); error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (error) goto error3; error = pci_request_regions(pdev, DRV_NAME); if (error) goto error3; dev->mmio = pci_ioremap_bar(pdev, 0); if (!dev->mmio) { error = -ENOMEM; goto error4; } dev->irq = pdev->irq; spin_lock_init(&dev->irq_lock); spin_lock_init(&dev->io_thread_lock); init_completion(&dev->dma_done); INIT_KFIFO(dev->pio_fifo); timer_setup(&dev->detect_timer, r592_detect_timer, 0); /* Host initialization */ host->caps = MEMSTICK_CAP_PAR4; host->request = r592_submit_req; host->set_param = r592_set_param; r592_check_dma(dev); dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io"); if (IS_ERR(dev->io_thread)) { error = PTR_ERR(dev->io_thread); goto error5; } /* This is just a precaution, so don't fail */ dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dev->dummy_dma_page_physical_address, GFP_KERNEL); r592_stop_dma(dev, 0); error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, DRV_NAME, dev); if (error) goto error6; r592_update_card_detect(dev); error = memstick_add_host(host); if (error) goto error7; message("driver successfully loaded"); return 0; error7: free_irq(dev->irq, dev); error6: if (dev->dummy_dma_page) dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); kthread_stop(dev->io_thread); error5: iounmap(dev->mmio); error4: pci_release_regions(pdev); error3: pci_disable_device(pdev); error2: memstick_free_host(host); error1: return error; } static void r592_remove(struct pci_dev *pdev) { int error = 0; struct r592_device *dev = pci_get_drvdata(pdev); /* Stop the processing thread. 
That ensures that we won't take any more requests */ kthread_stop(dev->io_thread); del_timer_sync(&dev->detect_timer); r592_enable_device(dev, false); while (!error && dev->req) { dev->req->error = -ETIME; error = memstick_next_req(dev->host, &dev->req); } memstick_remove_host(dev->host); if (dev->dummy_dma_page) dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); free_irq(dev->irq, dev); iounmap(dev->mmio); pci_release_regions(pdev); pci_disable_device(pdev); memstick_free_host(dev->host); } #ifdef CONFIG_PM_SLEEP static int r592_suspend(struct device *core_dev) { struct r592_device *dev = dev_get_drvdata(core_dev); r592_clear_interrupts(dev); memstick_suspend_host(dev->host); del_timer_sync(&dev->detect_timer); return 0; } static int r592_resume(struct device *core_dev) { struct r592_device *dev = dev_get_drvdata(core_dev); r592_clear_interrupts(dev); r592_enable_device(dev, false); memstick_resume_host(dev->host); r592_update_card_detect(dev); return 0; } #endif static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume); MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl); static struct pci_driver r592_pci_driver = { .name = DRV_NAME, .id_table = r592_pci_id_tbl, .probe = r592_probe, .remove = r592_remove, .driver.pm = &r592_pm_ops, }; module_pci_driver(r592_pci_driver); module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-3)"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <[email protected]>"); MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver");
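/*
 * Illustrative sketch, not part of the driver: the spill-buffer technique
 * used by r592_write_fifo_pio() and r592_flush_fifo_write() above, reduced
 * to plain portable C.  The FIFO register only accepts whole 32-bit words,
 * so an arbitrary-length byte stream is written dword by dword and the
 * 1..3 leftover bytes are parked until the next call or a final
 * zero-padded flush.  reg_write32() is a hypothetical stand-in for the
 * MMIO access; the real driver's big-endian register handling is ignored.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void reg_write32(uint32_t v)
{
	printf("FIFO <- 0x%08x\n", v);	/* stands in for the register write */
}

struct spill {
	uint8_t buf[4];
	unsigned int len;		/* 0..3 bytes currently parked */
};

static void fifo_write(struct spill *s, const uint8_t *p, size_t n)
{
	uint32_t v;

	/* top up the spill first; emit it once it holds a full dword */
	while (s->len && n) {
		s->buf[s->len++] = *p++;
		n--;
		if (s->len == 4) {
			memcpy(&v, s->buf, 4);
			reg_write32(v);
			s->len = 0;
		}
	}
	/* write full dwords straight through */
	for (; n >= 4; n -= 4, p += 4) {
		memcpy(&v, p, 4);
		reg_write32(v);
	}
	/* park the 1..3 trailing bytes for later */
	memcpy(s->buf + s->len, p, n);
	s->len += n;
}

static void fifo_flush(struct spill *s)
{
	uint32_t v;

	if (!s->len)
		return;
	memset(s->buf + s->len, 0, 4 - s->len);	/* zero-pad the tail */
	memcpy(&v, s->buf, 4);
	reg_write32(v);
	s->len = 0;
}

int main(void)
{
	struct spill s = { .len = 0 };
	uint8_t msg[7] = { 1, 2, 3, 4, 5, 6, 7 };

	fifo_write(&s, msg, sizeof(msg));	/* one dword out, 3 bytes parked */
	fifo_flush(&s);				/* padded tail goes out */
	return 0;
}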
linux-master
drivers/memstick/host/r592.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2003-2015 Broadcom Corporation * All Rights Reserved */ #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/acpi.h> /* * XLP GPIO has multiple 32 bit registers for each feature where each register * controls 32 pins. So, pins up to 64 require 2 32-bit registers and up to 96 * require 3 32-bit registers for each feature. * Here we only define the offset of the first register for each feature. The * offset of the registers for pins greater than 32 can be calculated as * follows (using GPIO_INT_STAT as an example): * * offset = (gpio / XLP_GPIO_REGSZ) * 4; * reg_addr = addr + offset; * * where addr is the base address of that feature's first register and gpio is * the pin. */ #define GPIO_9XX_BYTESWAP 0x00 #define GPIO_9XX_CTRL 0x04 #define GPIO_9XX_OUTPUT_EN 0x14 #define GPIO_9XX_PADDRV 0x24 /* * Only 4 interrupt enable registers are defined for now; * 12 registers are available in total. */ #define GPIO_9XX_INT_EN00 0x44 #define GPIO_9XX_INT_EN10 0x54 #define GPIO_9XX_INT_EN20 0x64 #define GPIO_9XX_INT_EN30 0x74 #define GPIO_9XX_INT_POL 0x104 #define GPIO_9XX_INT_TYPE 0x114 #define GPIO_9XX_INT_STAT 0x124 /* Interrupt type register mask */ #define XLP_GPIO_IRQ_TYPE_LVL 0x0 #define XLP_GPIO_IRQ_TYPE_EDGE 0x1 /* Interrupt polarity register mask */ #define XLP_GPIO_IRQ_POL_HIGH 0x0 #define XLP_GPIO_IRQ_POL_LOW 0x1 #define XLP_GPIO_REGSZ 32 #define XLP_GPIO_IRQ_BASE 768 #define XLP_MAX_NR_GPIO 96 struct xlp_gpio_priv { struct gpio_chip chip; DECLARE_BITMAP(gpio_enabled_mask, XLP_MAX_NR_GPIO); void __iomem *gpio_intr_en; /* pointer to first intr enable reg */ void __iomem *gpio_intr_stat; /* pointer to first intr status reg */ void __iomem *gpio_intr_type; /* pointer to first intr type reg */ void __iomem *gpio_intr_pol; /* pointer to first intr polarity reg */ void __iomem *gpio_out_en; /* pointer to first output enable reg */ void __iomem *gpio_paddrv; /* pointer to first pad drive reg */ spinlock_t lock; }; static int xlp_gpio_get_reg(void __iomem *addr, unsigned gpio) { u32 pos, regset; pos = gpio % XLP_GPIO_REGSZ; regset = (gpio / XLP_GPIO_REGSZ) * 4; return !!(readl(addr + regset) & BIT(pos)); } static void xlp_gpio_set_reg(void __iomem *addr, unsigned gpio, int state) { u32 value, pos, regset; pos = gpio % XLP_GPIO_REGSZ; regset = (gpio / XLP_GPIO_REGSZ) * 4; value = readl(addr + regset); if (state) value |= BIT(pos); else value &= ~BIT(pos); writel(value, addr + regset); } static void xlp_gpio_irq_enable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); gpiochip_enable_irq(gc, irqd_to_hwirq(d)); } static void xlp_gpio_irq_disable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct xlp_gpio_priv *priv = gpiochip_get_data(gc); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x0); __clear_bit(d->hwirq, priv->gpio_enabled_mask); spin_unlock_irqrestore(&priv->lock, flags); gpiochip_disable_irq(gc, irqd_to_hwirq(d)); } static void xlp_gpio_irq_mask_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct xlp_gpio_priv *priv = gpiochip_get_data(gc); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x0); xlp_gpio_set_reg(priv->gpio_intr_stat, d->hwirq, 0x1); __clear_bit(d->hwirq, priv->gpio_enabled_mask); 
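/* * The pin is now disabled, acked, and dropped from gpio_enabled_mask, so * xlp_gpio_generic_handler() below will skip it when scanning for pending * interrupts. */ 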
spin_unlock_irqrestore(&priv->lock, flags); } static void xlp_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct xlp_gpio_priv *priv = gpiochip_get_data(gc); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x1); __set_bit(d->hwirq, priv->gpio_enabled_mask); spin_unlock_irqrestore(&priv->lock, flags); } static int xlp_gpio_set_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct xlp_gpio_priv *priv = gpiochip_get_data(gc); int pol, irq_type; switch (type) { case IRQ_TYPE_EDGE_RISING: irq_type = XLP_GPIO_IRQ_TYPE_EDGE; pol = XLP_GPIO_IRQ_POL_HIGH; break; case IRQ_TYPE_EDGE_FALLING: irq_type = XLP_GPIO_IRQ_TYPE_EDGE; pol = XLP_GPIO_IRQ_POL_LOW; break; case IRQ_TYPE_LEVEL_HIGH: irq_type = XLP_GPIO_IRQ_TYPE_LVL; pol = XLP_GPIO_IRQ_POL_HIGH; break; case IRQ_TYPE_LEVEL_LOW: irq_type = XLP_GPIO_IRQ_TYPE_LVL; pol = XLP_GPIO_IRQ_POL_LOW; break; default: return -EINVAL; } xlp_gpio_set_reg(priv->gpio_intr_type, d->hwirq, irq_type); xlp_gpio_set_reg(priv->gpio_intr_pol, d->hwirq, pol); return 0; } static struct irq_chip xlp_gpio_irq_chip = { .name = "XLP-GPIO", .irq_mask_ack = xlp_gpio_irq_mask_ack, .irq_enable = xlp_gpio_irq_enable, .irq_disable = xlp_gpio_irq_disable, .irq_set_type = xlp_gpio_set_irq_type, .irq_unmask = xlp_gpio_irq_unmask, .flags = IRQCHIP_ONESHOT_SAFE | IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void xlp_gpio_generic_handler(struct irq_desc *desc) { struct xlp_gpio_priv *priv = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); int gpio, regoff; u32 gpio_stat; regoff = -1; gpio_stat = 0; chained_irq_enter(irqchip, desc); for_each_set_bit(gpio, priv->gpio_enabled_mask, XLP_MAX_NR_GPIO) { if (regoff != gpio / XLP_GPIO_REGSZ) { regoff = gpio / XLP_GPIO_REGSZ; gpio_stat = readl(priv->gpio_intr_stat + regoff * 4); } if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ)) generic_handle_domain_irq(priv->chip.irq.domain, gpio); } chained_irq_exit(irqchip, desc); } static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1); return 0; } static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0); return 0; } static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); BUG_ON(gpio >= gc->ngpio); return xlp_gpio_get_reg(priv->gpio_paddrv, gpio); } static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state) { struct xlp_gpio_priv *priv = gpiochip_get_data(gc); BUG_ON(gpio >= gc->ngpio); xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state); } static int xlp_gpio_probe(struct platform_device *pdev) { struct gpio_chip *gc; struct gpio_irq_chip *girq; struct xlp_gpio_priv *priv; void __iomem *gpio_base; int irq, err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; gpio_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio_base)) return PTR_ERR(gpio_base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN; priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV; priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT; priv->gpio_intr_type = gpio_base + 
GPIO_9XX_INT_TYPE; priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL; priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00; bitmap_zero(priv->gpio_enabled_mask, XLP_MAX_NR_GPIO); gc = &priv->chip; gc->owner = THIS_MODULE; gc->label = dev_name(&pdev->dev); gc->base = 0; gc->parent = &pdev->dev; gc->ngpio = 70; gc->direction_output = xlp_gpio_dir_output; gc->direction_input = xlp_gpio_dir_input; gc->set = xlp_gpio_set; gc->get = xlp_gpio_get; spin_lock_init(&priv->lock); girq = &gc->irq; gpio_irq_chip_set_chip(girq, &xlp_gpio_irq_chip); girq->parent_handler = xlp_gpio_generic_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; girq->parents[0] = irq; girq->first = 0; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; err = gpiochip_add_data(gc, priv); if (err < 0) return err; dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio); return 0; } #ifdef CONFIG_ACPI static const struct acpi_device_id xlp_gpio_acpi_match[] = { { "BRCM9006" }, { "CAV9006" }, {}, }; MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match); #endif static struct platform_driver xlp_gpio_driver = { .driver = { .name = "xlp-gpio", .acpi_match_table = ACPI_PTR(xlp_gpio_acpi_match), }, .probe = xlp_gpio_probe, }; module_platform_driver(xlp_gpio_driver); MODULE_AUTHOR("Kamlakant Patel <[email protected]>"); MODULE_AUTHOR("Ganesan Ramalingam <[email protected]>"); MODULE_DESCRIPTION("Netlogic XLP GPIO Driver"); MODULE_LICENSE("GPL v2");
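/*
 * Illustrative sketch, not part of the driver: the pin-to-register math
 * used by xlp_gpio_get_reg() and xlp_gpio_set_reg() above.  Each feature
 * is a row of 32-bit registers with 32 pins per register, so pin N lives
 * at byte offset (N / 32) * 4, bit (N % 32).  The pin numbers below are
 * examples only.
 */
#include <stdio.h>

#define XLP_GPIO_REGSZ 32

int main(void)
{
	unsigned int pins[] = { 0, 31, 32, 70, 95 };

	for (unsigned int i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
		unsigned int gpio = pins[i];
		unsigned int offset = (gpio / XLP_GPIO_REGSZ) * 4;
		unsigned int bit = gpio % XLP_GPIO_REGSZ;

		printf("gpio %2u -> reg offset 0x%02x, bit %2u\n",
		       gpio, offset, bit);
	}
	return 0;
}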
linux-master
drivers/gpio/gpio-xlp.c
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for Marvell SoCs * * Copyright (C) 2012 Marvell * * Thomas Petazzoni <[email protected]> * Andrew Lunn <[email protected]> * Sebastian Hesselbarth <[email protected]> * * This driver is a fairly straightforward GPIO driver for the * complete family of Marvell EBU SoC platforms (Orion, Dove, * Kirkwood, Discovery, Armada 370/XP). The only complexity of this * driver is the different register layout that exists between the * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP * platforms (MV78200 from the Discovery family and the Armada * XP). Therefore, this driver handles three variants of the GPIO * block: * - the basic variant, called "orion-gpio", with the simplest * register set. Used on Orion, Dove, Kirkwood, Armada 370 and * non-SMP Discovery systems * - the mv78200 variant for MV78200 Discovery systems. This variant * turns the edge mask and level mask registers into CPU0 edge * mask/level mask registers, and adds CPU1 edge mask/level mask * registers. * - the armadaxp variant for Armada XP systems. This variant keeps * the normal cause/edge mask/level mask registers when the global * interrupts are used, but adds per-CPU cause/edge mask/level mask * registers in a separate memory area for the per-CPU GPIO * interrupts. */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/gpio/consumer.h> #include <linux/gpio/machine.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/regmap.h> #include <linux/slab.h> /* * GPIO unit register offsets. */ #define GPIO_OUT_OFF 0x0000 #define GPIO_IO_CONF_OFF 0x0004 #define GPIO_BLINK_EN_OFF 0x0008 #define GPIO_IN_POL_OFF 0x000c #define GPIO_DATA_IN_OFF 0x0010 #define GPIO_EDGE_CAUSE_OFF 0x0014 #define GPIO_EDGE_MASK_OFF 0x0018 #define GPIO_LEVEL_MASK_OFF 0x001c #define GPIO_BLINK_CNT_SELECT_OFF 0x0020 /* * PWM register offsets. */ #define PWM_BLINK_ON_DURATION_OFF 0x0 #define PWM_BLINK_OFF_DURATION_OFF 0x4 #define PWM_BLINK_COUNTER_B_OFF 0x8 /* Armada 8k variant GPIO register offsets */ #define AP80X_GPIO0_OFF_A8K 0x1040 #define CP11X_GPIO0_OFF_A8K 0x100 #define CP11X_GPIO1_OFF_A8K 0x140 /* The MV78200 has per-CPU registers for edge mask and level mask */ #define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18) #define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C) /* * The Armada XP has per-CPU registers for interrupt cause, interrupt * mask and interrupt level mask. Those are in percpu_regs range. 
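* Each CPU sees its own window, which is why the accessor helpers below * select the register offset with smp_processor_id() for this variant. 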
*/ #define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4) #define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4) #define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4) #define MVEBU_GPIO_SOC_VARIANT_ORION 0x1 #define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2 #define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3 #define MVEBU_GPIO_SOC_VARIANT_A8K 0x4 #define MVEBU_MAX_GPIO_PER_BANK 32 struct mvebu_pwm { struct regmap *regs; u32 offset; unsigned long clk_rate; struct gpio_desc *gpiod; struct pwm_chip chip; spinlock_t lock; struct mvebu_gpio_chip *mvchip; /* Used to preserve GPIO/PWM registers across suspend/resume */ u32 blink_select; u32 blink_on_duration; u32 blink_off_duration; }; struct mvebu_gpio_chip { struct gpio_chip chip; struct regmap *regs; u32 offset; struct regmap *percpu_regs; int irqbase; struct irq_domain *domain; int soc_variant; /* Used for PWM support */ struct clk *clk; struct mvebu_pwm *mvpwm; /* Used to preserve GPIO registers across suspend/resume */ u32 out_reg; u32 io_conf_reg; u32 blink_en_reg; u32 in_pol_reg; u32 edge_mask_regs[4]; u32 level_mask_regs[4]; }; /* * Functions returning addresses of individual registers for a given * GPIO controller. */ static void mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip, struct regmap **map, unsigned int *offset) { int cpu; switch (mvchip->soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_MV78200: case MVEBU_GPIO_SOC_VARIANT_A8K: *map = mvchip->regs; *offset = GPIO_EDGE_CAUSE_OFF + mvchip->offset; break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: cpu = smp_processor_id(); *map = mvchip->percpu_regs; *offset = GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu); break; default: BUG(); } } static u32 mvebu_gpio_read_edge_cause(struct mvebu_gpio_chip *mvchip) { struct regmap *map; unsigned int offset; u32 val; mvebu_gpioreg_edge_cause(mvchip, &map, &offset); regmap_read(map, offset, &val); return val; } static void mvebu_gpio_write_edge_cause(struct mvebu_gpio_chip *mvchip, u32 val) { struct regmap *map; unsigned int offset; mvebu_gpioreg_edge_cause(mvchip, &map, &offset); regmap_write(map, offset, val); } static inline void mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip, struct regmap **map, unsigned int *offset) { int cpu; switch (mvchip->soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_A8K: *map = mvchip->regs; *offset = GPIO_EDGE_MASK_OFF + mvchip->offset; break; case MVEBU_GPIO_SOC_VARIANT_MV78200: cpu = smp_processor_id(); *map = mvchip->regs; *offset = GPIO_EDGE_MASK_MV78200_OFF(cpu); break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: cpu = smp_processor_id(); *map = mvchip->percpu_regs; *offset = GPIO_EDGE_MASK_ARMADAXP_OFF(cpu); break; default: BUG(); } } static u32 mvebu_gpio_read_edge_mask(struct mvebu_gpio_chip *mvchip) { struct regmap *map; unsigned int offset; u32 val; mvebu_gpioreg_edge_mask(mvchip, &map, &offset); regmap_read(map, offset, &val); return val; } static void mvebu_gpio_write_edge_mask(struct mvebu_gpio_chip *mvchip, u32 val) { struct regmap *map; unsigned int offset; mvebu_gpioreg_edge_mask(mvchip, &map, &offset); regmap_write(map, offset, val); } static void mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip, struct regmap **map, unsigned int *offset) { int cpu; switch (mvchip->soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_A8K: *map = mvchip->regs; *offset = GPIO_LEVEL_MASK_OFF + mvchip->offset; break; case MVEBU_GPIO_SOC_VARIANT_MV78200: cpu = smp_processor_id(); *map = mvchip->regs; *offset = 
GPIO_LEVEL_MASK_MV78200_OFF(cpu); break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: cpu = smp_processor_id(); *map = mvchip->percpu_regs; *offset = GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu); break; default: BUG(); } } static u32 mvebu_gpio_read_level_mask(struct mvebu_gpio_chip *mvchip) { struct regmap *map; unsigned int offset; u32 val; mvebu_gpioreg_level_mask(mvchip, &map, &offset); regmap_read(map, offset, &val); return val; } static void mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val) { struct regmap *map; unsigned int offset; mvebu_gpioreg_level_mask(mvchip, &map, &offset); regmap_write(map, offset, val); } /* * Functions returning offsets of individual registers for a given * PWM controller. */ static unsigned int mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm) { return mvpwm->offset + PWM_BLINK_ON_DURATION_OFF; } static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm) { return mvpwm->offset + PWM_BLINK_OFF_DURATION_OFF; } /* * Functions implementing the gpio_chip methods */ static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, BIT(pin), value ? BIT(pin) : 0); } static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); u32 u; regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u); if (u & BIT(pin)) { u32 data_in, in_pol; regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in); regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &in_pol); u = data_in ^ in_pol; } else { regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, &u); } return (u >> pin) & 1; } static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned int pin, int value) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); regmap_update_bits(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, BIT(pin), value ? 
BIT(pin) : 0); } static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); int ret; /* * Check with the pinctrl driver whether this pin is usable as * an input GPIO */ ret = pinctrl_gpio_direction_input(chip->base + pin); if (ret) return ret; regmap_update_bits(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, BIT(pin), BIT(pin)); return 0; } static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, int value) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); int ret; /* * Check with the pinctrl driver whether this pin is usable as * an output GPIO */ ret = pinctrl_gpio_direction_output(chip->base + pin); if (ret) return ret; mvebu_gpio_blink(chip, pin, 0); mvebu_gpio_set(chip, pin, value); regmap_update_bits(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, BIT(pin), 0); return 0; } static int mvebu_gpio_get_direction(struct gpio_chip *chip, unsigned int pin) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); u32 u; regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u); if (u & BIT(pin)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned int pin) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); return irq_create_mapping(mvchip->domain, pin); } /* * Functions implementing the irq_chip methods */ static void mvebu_gpio_irq_ack(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; u32 mask = d->mask; irq_gc_lock(gc); mvebu_gpio_write_edge_cause(mvchip, ~mask); irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_mask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; irq_gc_lock(gc); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; irq_gc_lock(gc); mvebu_gpio_write_edge_cause(mvchip, ~mask); ct->mask_cache_priv |= mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_mask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; irq_gc_lock(gc); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_unmask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; irq_gc_lock(gc); ct->mask_cache_priv |= mask; mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); irq_gc_unlock(gc); } /***************************************************************************** * MVEBU GPIO IRQ * * The GPIO_IN_POL register controls whether GPIO_DATA_IN holds the same * value as the line or the opposite value. * * Level IRQ handlers: DATA_IN is used directly as the cause register. * Interrupts are masked by the LEVEL_MASK registers. 
* Edge IRQ handlers: Changes in DATA_IN are latched in EDGE_CAUSE. * Interrupts are masked by the EDGE_MASK registers. * Both-edge handlers: Similar to regular Edge handlers, but also swap * the polarity to catch the next line transition. * This is inherently racy and might not work perfectly * in some use cases. * * Every eight GPIO lines are grouped (OR'ed) before going up to the main * cause register. * * EDGE cause mask * data-in /--------| |-----| |----\ * -----| |----- ---- to main cause reg * X \----------------| |----/ * polarity LEVEL mask * ****************************************************************************/ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct mvebu_gpio_chip *mvchip = gc->private; int pin; u32 u; pin = d->hwirq; regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u); if ((u & BIT(pin)) == 0) return -EINVAL; type &= IRQ_TYPE_SENSE_MASK; if (type == IRQ_TYPE_NONE) return -EINVAL; /* Check if we need to change chip and handler */ if (!(ct->type & type)) if (irq_setup_alt_chip(d, type)) return -EINVAL; /* * Configure interrupt polarity. */ switch (type) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: regmap_update_bits(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, BIT(pin), 0); break; case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_LEVEL_LOW: regmap_update_bits(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, BIT(pin), BIT(pin)); break; case IRQ_TYPE_EDGE_BOTH: { u32 data_in, in_pol, val; regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &in_pol); regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in); /* * set initial polarity based on current input level */ if ((data_in ^ in_pol) & BIT(pin)) val = BIT(pin); /* falling */ else val = 0; /* rising */ regmap_update_bits(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, BIT(pin), val); break; } } return 0; } static void mvebu_gpio_irq_handler(struct irq_desc *desc) { struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 cause, type, data_in, level_mask, edge_cause, edge_mask; int i; if (mvchip == NULL) return; chained_irq_enter(chip, desc); regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in); level_mask = mvebu_gpio_read_level_mask(mvchip); edge_cause = mvebu_gpio_read_edge_cause(mvchip); edge_mask = mvebu_gpio_read_edge_mask(mvchip); cause = (data_in & level_mask) | (edge_cause & edge_mask); for (i = 0; i < mvchip->chip.ngpio; i++) { int irq; irq = irq_find_mapping(mvchip->domain, i); if (!(cause & BIT(i))) continue; type = irq_get_trigger_type(irq); if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { /* Swap polarity (race with GPIO line) */ u32 polarity; regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &polarity); polarity ^= BIT(i); regmap_write(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, polarity); } generic_handle_irq(irq); } chained_irq_exit(chip, desc); } static const struct regmap_config mvebu_gpio_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .fast_io = true, }; /* * Functions implementing the pwm_chip methods */ static struct mvebu_pwm *to_mvebu_pwm(struct pwm_chip *chip) { return container_of(chip, struct mvebu_pwm, chip); } static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip); struct mvebu_gpio_chip *mvchip = mvpwm->mvchip; struct 
gpio_desc *desc; unsigned long flags; int ret = 0; spin_lock_irqsave(&mvpwm->lock, flags); if (mvpwm->gpiod) { ret = -EBUSY; } else { desc = gpiochip_request_own_desc(&mvchip->chip, pwm->hwpwm, "mvebu-pwm", GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW); if (IS_ERR(desc)) { ret = PTR_ERR(desc); goto out; } mvpwm->gpiod = desc; } out: spin_unlock_irqrestore(&mvpwm->lock, flags); return ret; } static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip); unsigned long flags; spin_lock_irqsave(&mvpwm->lock, flags); gpiochip_free_own_desc(mvpwm->gpiod); mvpwm->gpiod = NULL; spin_unlock_irqrestore(&mvpwm->lock, flags); } static int mvebu_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip); struct mvebu_gpio_chip *mvchip = mvpwm->mvchip; unsigned long long val; unsigned long flags; u32 u; spin_lock_irqsave(&mvpwm->lock, flags); regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &u); /* Hardware treats zero as 2^32. See mvebu_pwm_apply(). */ if (u > 0) val = u; else val = UINT_MAX + 1ULL; state->duty_cycle = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC, mvpwm->clk_rate); regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u); /* period = on + off duration */ if (u > 0) val += u; else val += UINT_MAX + 1ULL; state->period = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC, mvpwm->clk_rate); regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u); if (u) state->enabled = true; else state->enabled = false; spin_unlock_irqrestore(&mvpwm->lock, flags); return 0; } static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip); struct mvebu_gpio_chip *mvchip = mvpwm->mvchip; unsigned long long val; unsigned long flags; unsigned int on, off; if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle; do_div(val, NSEC_PER_SEC); if (val > UINT_MAX + 1ULL) return -EINVAL; /* * Zero on/off values don't work as expected. Experimentation shows * that zero value is treated as 2^32. This behavior is not documented. 
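* In other words: a computed count of exactly 2^32 must be written as 0, * any other non-zero count is written as-is, and a count of 0 is bumped * to 1 so the line still toggles. 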
*/ if (val == UINT_MAX + 1ULL) on = 0; else if (val) on = val; else on = 1; val = (unsigned long long) mvpwm->clk_rate * state->period; do_div(val, NSEC_PER_SEC); val -= on; if (val > UINT_MAX + 1ULL) return -EINVAL; if (val == UINT_MAX + 1ULL) off = 0; else if (val) off = val; else off = 1; spin_lock_irqsave(&mvpwm->lock, flags); regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), on); regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), off); if (state->enabled) mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 1); else mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 0); spin_unlock_irqrestore(&mvpwm->lock, flags); return 0; } static const struct pwm_ops mvebu_pwm_ops = { .request = mvebu_pwm_request, .free = mvebu_pwm_free, .get_state = mvebu_pwm_get_state, .apply = mvebu_pwm_apply, .owner = THIS_MODULE, }; static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip) { struct mvebu_pwm *mvpwm = mvchip->mvpwm; regmap_read(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, &mvpwm->blink_select); regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &mvpwm->blink_on_duration); regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &mvpwm->blink_off_duration); } static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip) { struct mvebu_pwm *mvpwm = mvchip->mvpwm; regmap_write(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, mvpwm->blink_select); regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), mvpwm->blink_on_duration); regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), mvpwm->blink_off_duration); } static int mvebu_pwm_probe(struct platform_device *pdev, struct mvebu_gpio_chip *mvchip, int id) { struct device *dev = &pdev->dev; struct mvebu_pwm *mvpwm; void __iomem *base; u32 offset; u32 set; if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { int ret = of_property_read_u32(dev->of_node, "marvell,pwm-offset", &offset); if (ret < 0) return 0; } else { /* * There are only two sets of PWM configuration registers for * all the GPIO lines on those SoCs which this driver reserves * for the first two GPIO chips. So if the resource is missing * we can't treat it as an error. */ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) return 0; offset = 0; } if (IS_ERR(mvchip->clk)) return PTR_ERR(mvchip->clk); mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); if (!mvpwm) return -ENOMEM; mvchip->mvpwm = mvpwm; mvpwm->mvchip = mvchip; mvpwm->offset = offset; if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { mvpwm->regs = mvchip->regs; switch (mvchip->offset) { case AP80X_GPIO0_OFF_A8K: case CP11X_GPIO0_OFF_A8K: /* Blink counter A */ set = 0; break; case CP11X_GPIO1_OFF_A8K: /* Blink counter B */ set = U32_MAX; mvpwm->offset += PWM_BLINK_COUNTER_B_OFF; break; default: return -EINVAL; } } else { base = devm_platform_ioremap_resource_byname(pdev, "pwm"); if (IS_ERR(base)) return PTR_ERR(base); mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base, &mvebu_gpio_regmap_config); if (IS_ERR(mvpwm->regs)) return PTR_ERR(mvpwm->regs); /* * Use set A for lines of GPIO chip with id 0, B for GPIO chip * with id 1. Don't allow further GPIO chips to be used for PWM. 
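* Writing 0 to the blink counter select register picks counter A for * every line of the chip; writing all-ones picks counter B. 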
*/ if (id == 0) set = 0; else if (id == 1) set = U32_MAX; else return -EINVAL; } regmap_write(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set); mvpwm->clk_rate = clk_get_rate(mvchip->clk); if (!mvpwm->clk_rate) { dev_err(dev, "failed to get clock rate\n"); return -EINVAL; } mvpwm->chip.dev = dev; mvpwm->chip.ops = &mvebu_pwm_ops; mvpwm->chip.npwm = mvchip->chip.ngpio; spin_lock_init(&mvpwm->lock); return devm_pwmchip_add(dev, &mvpwm->chip); } #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); u32 out, io_conf, blink, in_pol, data_in, cause, edg_msk, lvl_msk; const char *label; int i; regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, &out); regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &io_conf); regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &blink); regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &in_pol); regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in); cause = mvebu_gpio_read_edge_cause(mvchip); edg_msk = mvebu_gpio_read_edge_mask(mvchip); lvl_msk = mvebu_gpio_read_level_mask(mvchip); for_each_requested_gpio(chip, i, label) { u32 msk; bool is_out; msk = BIT(i); is_out = !(io_conf & msk); seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label); if (is_out) { seq_printf(s, " out %s %s\n", out & msk ? "hi" : "lo", blink & msk ? "(blink )" : ""); continue; } seq_printf(s, " in %s (act %s) - IRQ", (data_in ^ in_pol) & msk ? "hi" : "lo", in_pol & msk ? "lo" : "hi"); if (!((edg_msk | lvl_msk) & msk)) { seq_puts(s, " disabled\n"); continue; } if (edg_msk & msk) seq_puts(s, " edge "); if (lvl_msk & msk) seq_puts(s, " level"); seq_printf(s, " (%s)\n", cause & msk ? 
"pending" : "clear "); } } #else #define mvebu_gpio_dbg_show NULL #endif static const struct of_device_id mvebu_gpio_of_match[] = { { .compatible = "marvell,orion-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, }, { .compatible = "marvell,mv78200-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200, }, { .compatible = "marvell,armadaxp-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, }, { .compatible = "marvell,armada-370-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, }, { .compatible = "marvell,armada-8k-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_A8K, }, { /* sentinel */ }, }; static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state) { struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev); int i; regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, &mvchip->out_reg); regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &mvchip->io_conf_reg); regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &mvchip->blink_en_reg); regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &mvchip->in_pol_reg); switch (mvchip->soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_A8K: regmap_read(mvchip->regs, GPIO_EDGE_MASK_OFF + mvchip->offset, &mvchip->edge_mask_regs[0]); regmap_read(mvchip->regs, GPIO_LEVEL_MASK_OFF + mvchip->offset, &mvchip->level_mask_regs[0]); break; case MVEBU_GPIO_SOC_VARIANT_MV78200: for (i = 0; i < 2; i++) { regmap_read(mvchip->regs, GPIO_EDGE_MASK_MV78200_OFF(i), &mvchip->edge_mask_regs[i]); regmap_read(mvchip->regs, GPIO_LEVEL_MASK_MV78200_OFF(i), &mvchip->level_mask_regs[i]); } break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: for (i = 0; i < 4; i++) { regmap_read(mvchip->regs, GPIO_EDGE_MASK_ARMADAXP_OFF(i), &mvchip->edge_mask_regs[i]); regmap_read(mvchip->regs, GPIO_LEVEL_MASK_ARMADAXP_OFF(i), &mvchip->level_mask_regs[i]); } break; default: BUG(); } if (IS_REACHABLE(CONFIG_PWM)) mvebu_pwm_suspend(mvchip); return 0; } static int mvebu_gpio_resume(struct platform_device *pdev) { struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev); int i; regmap_write(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, mvchip->out_reg); regmap_write(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, mvchip->io_conf_reg); regmap_write(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, mvchip->blink_en_reg); regmap_write(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, mvchip->in_pol_reg); switch (mvchip->soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_A8K: regmap_write(mvchip->regs, GPIO_EDGE_MASK_OFF + mvchip->offset, mvchip->edge_mask_regs[0]); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_OFF + mvchip->offset, mvchip->level_mask_regs[0]); break; case MVEBU_GPIO_SOC_VARIANT_MV78200: for (i = 0; i < 2; i++) { regmap_write(mvchip->regs, GPIO_EDGE_MASK_MV78200_OFF(i), mvchip->edge_mask_regs[i]); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_MV78200_OFF(i), mvchip->level_mask_regs[i]); } break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: for (i = 0; i < 4; i++) { regmap_write(mvchip->regs, GPIO_EDGE_MASK_ARMADAXP_OFF(i), mvchip->edge_mask_regs[i]); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_ARMADAXP_OFF(i), mvchip->level_mask_regs[i]); } break; default: BUG(); } if (IS_REACHABLE(CONFIG_PWM)) mvebu_pwm_resume(mvchip); return 0; } static int mvebu_gpio_probe_raw(struct platform_device *pdev, struct mvebu_gpio_chip *mvchip) { void __iomem *base; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); mvchip->regs = devm_regmap_init_mmio(&pdev->dev, base, 
&mvebu_gpio_regmap_config); if (IS_ERR(mvchip->regs)) return PTR_ERR(mvchip->regs); /* * For the legacy SoCs, the regmap directly maps to the GPIO * registers, so no offset is needed. */ mvchip->offset = 0; /* * The Armada XP has a second range of registers for the * per-CPU registers */ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(base)) return PTR_ERR(base); mvchip->percpu_regs = devm_regmap_init_mmio(&pdev->dev, base, &mvebu_gpio_regmap_config); if (IS_ERR(mvchip->percpu_regs)) return PTR_ERR(mvchip->percpu_regs); } return 0; } static int mvebu_gpio_probe_syscon(struct platform_device *pdev, struct mvebu_gpio_chip *mvchip) { mvchip->regs = syscon_node_to_regmap(pdev->dev.parent->of_node); if (IS_ERR(mvchip->regs)) return PTR_ERR(mvchip->regs); if (of_property_read_u32(pdev->dev.of_node, "offset", &mvchip->offset)) return -EINVAL; return 0; } static void mvebu_gpio_remove_irq_domain(void *data) { struct irq_domain *domain = data; irq_domain_remove(domain); } static int mvebu_gpio_probe(struct platform_device *pdev) { struct mvebu_gpio_chip *mvchip; const struct of_device_id *match; struct device_node *np = pdev->dev.of_node; struct irq_chip_generic *gc; struct irq_chip_type *ct; unsigned int ngpios; bool have_irqs; int soc_variant; int i, cpu, id; int err; match = of_match_device(mvebu_gpio_of_match, &pdev->dev); if (match) soc_variant = (unsigned long) match->data; else soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; /* Some gpio controllers do not provide irq support */ err = platform_irq_count(pdev); if (err < 0) return err; have_irqs = err != 0; mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL); if (!mvchip) return -ENOMEM; platform_set_drvdata(pdev, mvchip); if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) { dev_err(&pdev->dev, "Missing ngpios OF property\n"); return -ENODEV; } id = of_alias_get_id(pdev->dev.of_node, "gpio"); if (id < 0) { dev_err(&pdev->dev, "Couldn't get OF id\n"); return id; } mvchip->clk = devm_clk_get(&pdev->dev, NULL); /* Not all SoCs require a clock.*/ if (!IS_ERR(mvchip->clk)) clk_prepare_enable(mvchip->clk); mvchip->soc_variant = soc_variant; mvchip->chip.label = dev_name(&pdev->dev); mvchip->chip.parent = &pdev->dev; mvchip->chip.request = gpiochip_generic_request; mvchip->chip.free = gpiochip_generic_free; mvchip->chip.get_direction = mvebu_gpio_get_direction; mvchip->chip.direction_input = mvebu_gpio_direction_input; mvchip->chip.get = mvebu_gpio_get; mvchip->chip.direction_output = mvebu_gpio_direction_output; mvchip->chip.set = mvebu_gpio_set; if (have_irqs) mvchip->chip.to_irq = mvebu_gpio_to_irq; mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; mvchip->chip.ngpio = ngpios; mvchip->chip.can_sleep = false; mvchip->chip.dbg_show = mvebu_gpio_dbg_show; if (soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) err = mvebu_gpio_probe_syscon(pdev, mvchip); else err = mvebu_gpio_probe_raw(pdev, mvchip); if (err) return err; /* * Mask and clear GPIO interrupts. 
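* Each variant clears its own cause/mask registers here so that no stale * interrupt can fire before the irqchip is registered. 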
*/ switch (soc_variant) { case MVEBU_GPIO_SOC_VARIANT_ORION: case MVEBU_GPIO_SOC_VARIANT_A8K: regmap_write(mvchip->regs, GPIO_EDGE_CAUSE_OFF + mvchip->offset, 0); regmap_write(mvchip->regs, GPIO_EDGE_MASK_OFF + mvchip->offset, 0); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_OFF + mvchip->offset, 0); break; case MVEBU_GPIO_SOC_VARIANT_MV78200: regmap_write(mvchip->regs, GPIO_EDGE_CAUSE_OFF, 0); for (cpu = 0; cpu < 2; cpu++) { regmap_write(mvchip->regs, GPIO_EDGE_MASK_MV78200_OFF(cpu), 0); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_MV78200_OFF(cpu), 0); } break; case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: regmap_write(mvchip->regs, GPIO_EDGE_CAUSE_OFF, 0); regmap_write(mvchip->regs, GPIO_EDGE_MASK_OFF, 0); regmap_write(mvchip->regs, GPIO_LEVEL_MASK_OFF, 0); for (cpu = 0; cpu < 4; cpu++) { regmap_write(mvchip->percpu_regs, GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu), 0); regmap_write(mvchip->percpu_regs, GPIO_EDGE_MASK_ARMADAXP_OFF(cpu), 0); regmap_write(mvchip->percpu_regs, GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu), 0); } break; default: BUG(); } devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); /* Some MVEBU SoCs have simple PWM support for GPIO lines */ if (IS_REACHABLE(CONFIG_PWM)) { err = mvebu_pwm_probe(pdev, mvchip, id); if (err) return err; } /* Some gpio controllers do not provide irq support */ if (!have_irqs) return 0; mvchip->domain = irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL); if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); return -ENODEV; } err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain, mvchip->domain); if (err) return err; err = irq_alloc_domain_generic_chips( mvchip->domain, ngpios, 2, np->name, handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0); if (err) { dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n", mvchip->chip.label); return err; } /* * NOTE: The common accessors cannot be used because of the percpu * access to the mask registers */ gc = irq_get_domain_generic_chip(mvchip->domain, 0); gc->private = mvchip; ct = &gc->chip_types[0]; ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct->chip.irq_mask = mvebu_gpio_level_irq_mask; ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask; ct->chip.irq_set_type = mvebu_gpio_irq_set_type; ct->chip.name = mvchip->chip.label; ct = &gc->chip_types[1]; ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct->chip.irq_ack = mvebu_gpio_irq_ack; ct->chip.irq_mask = mvebu_gpio_edge_irq_mask; ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask; ct->chip.irq_set_type = mvebu_gpio_irq_set_type; ct->handler = handle_edge_irq; ct->chip.name = mvchip->chip.label; /* * Setup the interrupt handlers. Each chip can have up to 4 * interrupt handlers, with each handler dealing with 8 GPIO * pins. */ for (i = 0; i < 4; i++) { int irq = platform_get_irq_optional(pdev, i); if (irq < 0) continue; irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, mvchip); } return 0; } static struct platform_driver mvebu_gpio_driver = { .driver = { .name = "mvebu-gpio", .of_match_table = mvebu_gpio_of_match, }, .probe = mvebu_gpio_probe, .suspend = mvebu_gpio_suspend, .resume = mvebu_gpio_resume, }; builtin_platform_driver(mvebu_gpio_driver);
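/*
 * Illustrative sketch, not part of the driver: the nanoseconds-to-counter
 * conversion performed by mvebu_pwm_apply() above, including the quirk
 * where the hardware treats a counter register value of 0 as 2^32 clock
 * ticks.  The clock rate and the requested duty cycle/period are made-up
 * example values.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Convert nanoseconds to clock ticks, encoding 2^32 as 0 and avoiding 0. */
static int ns_to_counter(uint64_t clk_rate, uint64_t ns, uint32_t *out)
{
	uint64_t ticks = clk_rate * ns / NSEC_PER_SEC;

	if (ticks > UINT32_MAX + 1ULL)
		return -1;			/* too long for the counter */
	if (ticks == UINT32_MAX + 1ULL)
		*out = 0;			/* hardware reads 0 as 2^32 */
	else if (ticks)
		*out = (uint32_t)ticks;
	else
		*out = 1;			/* 0 would mean 2^32, round up */
	return 0;
}

int main(void)
{
	uint64_t clk_rate = 25000000;		/* 25 MHz, example only */
	uint64_t duty_ns = 250000;		/* 25% duty cycle ... */
	uint64_t period_ns = 1000000;		/* ... of a 1 kHz period */
	uint32_t on, off;

	if (ns_to_counter(clk_rate, duty_ns, &on))
		return 1;
	/* the off duration covers the remainder of the period */
	if (ns_to_counter(clk_rate, period_ns - duty_ns, &off))
		return 1;
	printf("on=%u off=%u ticks\n", on, off);	/* on=6250 off=18750 */
	return 0;
}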
linux-master
drivers/gpio/gpio-mvebu.c
// SPDX-License-Identifier: GPL-2.0-only /* * Xilinx gpio driver for xps/axi_gpio IP. * * Copyright 2008 - 2013 Xilinx, Inc. */ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> /* Register Offset Definitions */ #define XGPIO_DATA_OFFSET (0x0) /* Data register */ #define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */ #define XGPIO_CHANNEL0_OFFSET 0x0 #define XGPIO_CHANNEL1_OFFSET 0x8 #define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */ #define XGPIO_GIER_IE BIT(31) #define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */ #define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */ /* Read/Write access to the GPIO registers */ #if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86) # define xgpio_readreg(offset) readl(offset) # define xgpio_writereg(offset, val) writel(val, offset) #else # define xgpio_readreg(offset) __raw_readl(offset) # define xgpio_writereg(offset, val) __raw_writel(val, offset) #endif /** * struct xgpio_instance - Stores information about GPIO device * @gc: GPIO chip * @regs: register block * @hw_map: GPIO pin mapping on hardware side * @sw_map: GPIO pin mapping on software side * @state: GPIO write state shadow register * @last_irq_read: GPIO read state register from last interrupt * @dir: GPIO direction shadow register * @gpio_lock: Lock used for synchronization * @irq: IRQ used by GPIO device * @irqchip: IRQ chip * @enable: GPIO IRQ enable/disable bitfield * @rising_edge: GPIO IRQ rising edge enable/disable bitfield * @falling_edge: GPIO IRQ falling edge enable/disable bitfield * @clk: clock resource for this driver */ struct xgpio_instance { struct gpio_chip gc; void __iomem *regs; DECLARE_BITMAP(hw_map, 64); DECLARE_BITMAP(sw_map, 64); DECLARE_BITMAP(state, 64); DECLARE_BITMAP(last_irq_read, 64); DECLARE_BITMAP(dir, 64); spinlock_t gpio_lock; /* For serializing operations */ int irq; DECLARE_BITMAP(enable, 64); DECLARE_BITMAP(rising_edge, 64); DECLARE_BITMAP(falling_edge, 64); struct clk *clk; }; static inline int xgpio_from_bit(struct xgpio_instance *chip, int bit) { return bitmap_bitremap(bit, chip->hw_map, chip->sw_map, 64); } static inline int xgpio_to_bit(struct xgpio_instance *chip, int gpio) { return bitmap_bitremap(gpio, chip->sw_map, chip->hw_map, 64); } static inline u32 xgpio_get_value32(const unsigned long *map, int bit) { const size_t index = BIT_WORD(bit); const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5); return (map[index] >> offset) & 0xFFFFFFFFul; } static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v) { const size_t index = BIT_WORD(bit); const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5); map[index] &= ~(0xFFFFFFFFul << offset); map[index] |= (unsigned long)v << offset; } static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch) { switch (ch) { case 0: return XGPIO_CHANNEL0_OFFSET; case 1: return XGPIO_CHANNEL1_OFFSET; default: return -EINVAL; } } static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a) { void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32); xgpio_set_value32(a, bit, xgpio_readreg(addr)); } static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a) { void __iomem *addr = 
chip->regs + reg + xgpio_regoffset(chip, bit / 32); xgpio_writereg(addr, xgpio_get_value32(a, bit)); } static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a) { int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1); for (bit = 0; bit <= lastbit; bit += 32) xgpio_read_ch(chip, reg, bit, a); } static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a) { int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1); for (bit = 0; bit <= lastbit; bit += 32) xgpio_write_ch(chip, reg, bit, a); } /** * xgpio_get - Read the specified signal of the GPIO device. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * This function reads the specified signal of the GPIO device. * * Return: * The current state of the specified signal (0 or 1). */ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio) { struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); DECLARE_BITMAP(state, 64); xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, state); return test_bit(bit, state); } /** * xgpio_set - Write the specified signal of the GPIO device. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * * This function writes the specified value into the specified signal of the * GPIO device. */ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); spin_lock_irqsave(&chip->gpio_lock, flags); /* Write to GPIO signal and set its direction to output */ __assign_bit(bit, chip->state, val); xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state); spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** * xgpio_set_multiple - Write the specified signals of the GPIO device. * @gc: Pointer to gpio_chip device structure. * @mask: Mask of the GPIOs to modify. * @bits: Value to be written to each GPIO * * This function writes the specified values into the specified signals of the * GPIO devices. */ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { DECLARE_BITMAP(hw_mask, 64); DECLARE_BITMAP(hw_bits, 64); DECLARE_BITMAP(state, 64); unsigned long flags; struct xgpio_instance *chip = gpiochip_get_data(gc); bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64); bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64); spin_lock_irqsave(&chip->gpio_lock, flags); bitmap_replace(state, chip->state, hw_bits, hw_mask, 64); xgpio_write_ch_all(chip, XGPIO_DATA_OFFSET, state); bitmap_copy(chip->state, state, 64); spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** * xgpio_dir_in - Set the direction of the specified GPIO signal as input. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * Return: * 0 always. */ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { unsigned long flags; struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); spin_lock_irqsave(&chip->gpio_lock, flags); /* Set the GPIO bit in shadow register and set direction as input */ __set_bit(bit, chip->dir); xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } /** * xgpio_dir_out - Set the direction of the specified GPIO signal as output. 
* @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to the specified signal. * * This function sets the direction of the specified GPIO signal as output. * * Return: * 0 always. */ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); spin_lock_irqsave(&chip->gpio_lock, flags); /* Write state of GPIO signal */ __assign_bit(bit, chip->state, val); xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state); /* Clear the GPIO bit in shadow register and set direction as output */ __clear_bit(bit, chip->dir); xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } /** * xgpio_save_regs - Set initial values of GPIO pins * @chip: Pointer to GPIO instance */ static void xgpio_save_regs(struct xgpio_instance *chip) { xgpio_write_ch_all(chip, XGPIO_DATA_OFFSET, chip->state); xgpio_write_ch_all(chip, XGPIO_TRI_OFFSET, chip->dir); } static int xgpio_request(struct gpio_chip *chip, unsigned int offset) { int ret; ret = pm_runtime_get_sync(chip->parent); /* * If the device is already active pm_runtime_get() will return 1 on * success, but gpio_request still needs to return 0. */ return ret < 0 ? ret : 0; } static void xgpio_free(struct gpio_chip *chip, unsigned int offset) { pm_runtime_put(chip->parent); } static int __maybe_unused xgpio_suspend(struct device *dev) { struct xgpio_instance *gpio = dev_get_drvdata(dev); struct irq_data *data = irq_get_irq_data(gpio->irq); if (!data) { dev_dbg(dev, "IRQ not connected\n"); return pm_runtime_force_suspend(dev); } if (!irqd_is_wakeup_set(data)) return pm_runtime_force_suspend(dev); return 0; } /** * xgpio_remove - Remove method for the GPIO device. * @pdev: pointer to the platform device * * This function removes the gpiochip and frees all allocated resources. * * Return: 0 always */ static int xgpio_remove(struct platform_device *pdev) { struct xgpio_instance *gpio = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(gpio->clk); return 0; } /** * xgpio_irq_ack - Acknowledge a child GPIO interrupt. * @irq_data: per IRQ and chip data passed down to chip functions * This currently does nothing, but irq_ack is unconditionally called by * handle_edge_irq and therefore must be defined. */ static void xgpio_irq_ack(struct irq_data *irq_data) { } static int __maybe_unused xgpio_resume(struct device *dev) { struct xgpio_instance *gpio = dev_get_drvdata(dev); struct irq_data *data = irq_get_irq_data(gpio->irq); if (!data) { dev_dbg(dev, "IRQ not connected\n"); return pm_runtime_force_resume(dev); } if (!irqd_is_wakeup_set(data)) return pm_runtime_force_resume(dev); return 0; } static int __maybe_unused xgpio_runtime_suspend(struct device *dev) { struct xgpio_instance *gpio = dev_get_drvdata(dev); clk_disable(gpio->clk); return 0; } static int __maybe_unused xgpio_runtime_resume(struct device *dev) { struct xgpio_instance *gpio = dev_get_drvdata(dev); return clk_enable(gpio->clk); } static const struct dev_pm_ops xgpio_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume) SET_RUNTIME_PM_OPS(xgpio_runtime_suspend, xgpio_runtime_resume, NULL) }; /** * xgpio_irq_mask - Mask the specified GPIO interrupt. 
* @irq_data: per IRQ and chip data passed down to chip functions */ static void xgpio_irq_mask(struct irq_data *irq_data) { unsigned long flags; struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data); int irq_offset = irqd_to_hwirq(irq_data); int bit = xgpio_to_bit(chip, irq_offset); u32 mask = BIT(bit / 32), temp; spin_lock_irqsave(&chip->gpio_lock, flags); __clear_bit(bit, chip->enable); if (xgpio_get_value32(chip->enable, bit) == 0) { /* Disable per channel interrupt */ temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET); temp &= ~mask; xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp); } spin_unlock_irqrestore(&chip->gpio_lock, flags); gpiochip_disable_irq(&chip->gc, irq_offset); } /** * xgpio_irq_unmask - Write the specified signal of the GPIO device. * @irq_data: per IRQ and chip data passed down to chip functions */ static void xgpio_irq_unmask(struct irq_data *irq_data) { unsigned long flags; struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data); int irq_offset = irqd_to_hwirq(irq_data); int bit = xgpio_to_bit(chip, irq_offset); u32 old_enable = xgpio_get_value32(chip->enable, bit); u32 mask = BIT(bit / 32), val; gpiochip_enable_irq(&chip->gc, irq_offset); spin_lock_irqsave(&chip->gpio_lock, flags); __set_bit(bit, chip->enable); if (old_enable == 0) { /* Clear any existing per-channel interrupts */ val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET); val &= mask; xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, val); /* Update GPIO IRQ read data before enabling interrupt*/ xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, chip->last_irq_read); /* Enable per channel interrupt */ val = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET); val |= mask; xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val); } spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** * xgpio_set_irq_type - Write the specified signal of the GPIO device. * @irq_data: Per IRQ and chip data passed down to chip functions * @type: Interrupt type that is to be set for the gpio pin * * Return: * 0 if interrupt type is supported otherwise -EINVAL */ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type) { struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data); int irq_offset = irqd_to_hwirq(irq_data); int bit = xgpio_to_bit(chip, irq_offset); /* * The Xilinx GPIO hardware provides a single interrupt status * indication for any state change in a given GPIO channel (bank). * Therefore, only rising edge or falling edge triggers are * supported. 
*/ switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: __set_bit(bit, chip->rising_edge); __set_bit(bit, chip->falling_edge); break; case IRQ_TYPE_EDGE_RISING: __set_bit(bit, chip->rising_edge); __clear_bit(bit, chip->falling_edge); break; case IRQ_TYPE_EDGE_FALLING: __clear_bit(bit, chip->rising_edge); __set_bit(bit, chip->falling_edge); break; default: return -EINVAL; } irq_set_handler_locked(irq_data, handle_edge_irq); return 0; } /** * xgpio_irqhandler - Gpio interrupt service routine * @desc: Pointer to interrupt description */ static void xgpio_irqhandler(struct irq_desc *desc) { struct xgpio_instance *chip = irq_desc_get_handler_data(desc); struct gpio_chip *gc = &chip->gc; struct irq_chip *irqchip = irq_desc_get_chip(desc); DECLARE_BITMAP(rising, 64); DECLARE_BITMAP(falling, 64); DECLARE_BITMAP(all, 64); int irq_offset; u32 status; u32 bit; status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET); xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status); chained_irq_enter(irqchip, desc); spin_lock(&chip->gpio_lock); xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all); bitmap_complement(rising, chip->last_irq_read, 64); bitmap_and(rising, rising, all, 64); bitmap_and(rising, rising, chip->enable, 64); bitmap_and(rising, rising, chip->rising_edge, 64); bitmap_complement(falling, all, 64); bitmap_and(falling, falling, chip->last_irq_read, 64); bitmap_and(falling, falling, chip->enable, 64); bitmap_and(falling, falling, chip->falling_edge, 64); bitmap_copy(chip->last_irq_read, all, 64); bitmap_or(all, rising, falling, 64); spin_unlock(&chip->gpio_lock); dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling); for_each_set_bit(bit, all, 64) { irq_offset = xgpio_from_bit(chip, bit); generic_handle_domain_irq(gc->irq.domain, irq_offset); } chained_irq_exit(irqchip, desc); } static const struct irq_chip xgpio_irq_chip = { .name = "gpio-xilinx", .irq_ack = xgpio_irq_ack, .irq_mask = xgpio_irq_mask, .irq_unmask = xgpio_irq_unmask, .irq_set_type = xgpio_set_irq_type, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; /** * xgpio_probe - Probe method for the GPIO device. * @pdev: pointer to the platform device * * Return: * It returns 0, if the driver is bound to the GPIO device, or * a negative value if there is an error. 
*/ static int xgpio_probe(struct platform_device *pdev) { struct xgpio_instance *chip; int status = 0; struct device_node *np = pdev->dev.of_node; u32 is_dual = 0; u32 width[2]; u32 state[2]; u32 dir[2]; struct gpio_irq_chip *girq; u32 temp; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; platform_set_drvdata(pdev, chip); /* First, check if the device is dual-channel */ of_property_read_u32(np, "xlnx,is-dual", &is_dual); /* Setup defaults */ memset32(width, 0, ARRAY_SIZE(width)); memset32(state, 0, ARRAY_SIZE(state)); memset32(dir, 0xFFFFFFFF, ARRAY_SIZE(dir)); /* Update GPIO state shadow register with default value */ of_property_read_u32(np, "xlnx,dout-default", &state[0]); of_property_read_u32(np, "xlnx,dout-default-2", &state[1]); bitmap_from_arr32(chip->state, state, 64); /* Update GPIO direction shadow register with default value */ of_property_read_u32(np, "xlnx,tri-default", &dir[0]); of_property_read_u32(np, "xlnx,tri-default-2", &dir[1]); bitmap_from_arr32(chip->dir, dir, 64); /* * Check device node and parent device node for device width * and assume default width of 32 */ if (of_property_read_u32(np, "xlnx,gpio-width", &width[0])) width[0] = 32; if (width[0] > 32) return -EINVAL; if (is_dual && of_property_read_u32(np, "xlnx,gpio2-width", &width[1])) width[1] = 32; if (width[1] > 32) return -EINVAL; /* Setup software pin mapping */ bitmap_set(chip->sw_map, 0, width[0] + width[1]); /* Setup hardware pin mapping */ bitmap_set(chip->hw_map, 0, width[0]); bitmap_set(chip->hw_map, 32, width[1]); spin_lock_init(&chip->gpio_lock); chip->gc.base = -1; chip->gc.ngpio = bitmap_weight(chip->hw_map, 64); chip->gc.parent = &pdev->dev; chip->gc.direction_input = xgpio_dir_in; chip->gc.direction_output = xgpio_dir_out; chip->gc.get = xgpio_get; chip->gc.set = xgpio_set; chip->gc.request = xgpio_request; chip->gc.free = xgpio_free; chip->gc.set_multiple = xgpio_set_multiple; chip->gc.label = dev_name(&pdev->dev); chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) { dev_err(&pdev->dev, "failed to ioremap memory resource\n"); return PTR_ERR(chip->regs); } chip->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(chip->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n"); status = clk_prepare_enable(chip->clk); if (status < 0) { dev_err(&pdev->dev, "Failed to prepare clk\n"); return status; } pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); xgpio_save_regs(chip); chip->irq = platform_get_irq_optional(pdev, 0); if (chip->irq <= 0) goto skip_irq; /* Disable per-channel interrupts */ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, 0); /* Clear any existing per-channel interrupts */ temp = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET); xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, temp); /* Enable global interrupts */ xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE); girq = &chip->gc.irq; gpio_irq_chip_set_chip(girq, &xgpio_irq_chip); girq->parent_handler = xgpio_irqhandler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { status = -ENOMEM; goto err_pm_put; } girq->parents[0] = chip->irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; skip_irq: status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip); if (status) { dev_err(&pdev->dev, "failed to add GPIO chip\n"); goto err_pm_put; } pm_runtime_put(&pdev->dev); return 0; 
err_pm_put:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return status;
}

static const struct of_device_id xgpio_of_match[] = {
	{ .compatible = "xlnx,xps-gpio-1.00.a", },
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(of, xgpio_of_match);

static struct platform_driver xgpio_plat_driver = {
	.probe = xgpio_probe,
	.remove = xgpio_remove,
	.driver = {
		.name = "gpio-xilinx",
		.of_match_table = xgpio_of_match,
		.pm = &xgpio_dev_pm_ops,
	},
};

static int __init xgpio_init(void)
{
	return platform_driver_register(&xgpio_plat_driver);
}

subsys_initcall(xgpio_init);

static void __exit xgpio_exit(void)
{
	platform_driver_unregister(&xgpio_plat_driver);
}

module_exit(xgpio_exit);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx GPIO driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-xilinx.c
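The Xilinx driver above funnels every access through a software-to-hardware bit mapping: both channels are presented as one contiguous GPIO range, while the hardware numbers channel 2 starting at bit 32. Below is a minimal standalone sketch of that mapping, using plain arithmetic in place of the kernel's bitmap_remap(); the helper name sw_to_hw_bit and the example width are illustrative, not taken from the driver.

/*
 * Standalone sketch (not kernel code): two channels of up to 32 pins
 * each are exposed as one contiguous GPIO range, while the hardware
 * numbers channel 2 pins from bit 32 upward.
 */
#include <stdio.h>

#define CH2_HW_BASE 32

/* Map a contiguous software offset to a hardware bit number. */
static int sw_to_hw_bit(unsigned int offset, unsigned int width0)
{
	if (offset < width0)
		return offset;				/* channel 1: identity */
	return CH2_HW_BASE + (offset - width0);		/* channel 2: shifted */
}

int main(void)
{
	unsigned int width0 = 12;	/* example xlnx,gpio-width */

	/* Software offsets 0..11 hit channel 1; 12.. hit channel 2. */
	for (unsigned int off = 10; off < 15; off++)
		printf("sw offset %u -> hw bit %d (channel %d)\n",
		       off, sw_to_hw_bit(off, width0),
		       sw_to_hw_bit(off, width0) / 32 + 1);
	return 0;
}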
// SPDX-License-Identifier: GPL-2.0-only /* * Intel La Jolla Cove Adapter USB-GPIO driver * * Copyright (c) 2023, Intel Corporation. */ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/dev_printk.h> #include <linux/gpio/driver.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/mfd/ljca.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> /* GPIO commands */ #define LJCA_GPIO_CONFIG 1 #define LJCA_GPIO_READ 2 #define LJCA_GPIO_WRITE 3 #define LJCA_GPIO_INT_EVENT 4 #define LJCA_GPIO_INT_MASK 5 #define LJCA_GPIO_INT_UNMASK 6 #define LJCA_GPIO_CONF_DISABLE BIT(0) #define LJCA_GPIO_CONF_INPUT BIT(1) #define LJCA_GPIO_CONF_OUTPUT BIT(2) #define LJCA_GPIO_CONF_PULLUP BIT(3) #define LJCA_GPIO_CONF_PULLDOWN BIT(4) #define LJCA_GPIO_CONF_DEFAULT BIT(5) #define LJCA_GPIO_CONF_INTERRUPT BIT(6) #define LJCA_GPIO_INT_TYPE BIT(7) #define LJCA_GPIO_CONF_EDGE FIELD_PREP(LJCA_GPIO_INT_TYPE, 1) #define LJCA_GPIO_CONF_LEVEL FIELD_PREP(LJCA_GPIO_INT_TYPE, 0) /* Intentional overlap with PULLUP / PULLDOWN */ #define LJCA_GPIO_CONF_SET BIT(3) #define LJCA_GPIO_CONF_CLR BIT(4) struct gpio_op { u8 index; u8 value; } __packed; struct gpio_packet { u8 num; struct gpio_op item[]; } __packed; #define LJCA_GPIO_BUF_SIZE 60 struct ljca_gpio_dev { struct platform_device *pdev; struct gpio_chip gc; struct ljca_gpio_info *gpio_info; DECLARE_BITMAP(unmasked_irqs, LJCA_MAX_GPIO_NUM); DECLARE_BITMAP(enabled_irqs, LJCA_MAX_GPIO_NUM); DECLARE_BITMAP(reenable_irqs, LJCA_MAX_GPIO_NUM); u8 *connect_mode; /* mutex to protect irq bus */ struct mutex irq_lock; struct work_struct work; /* lock to protect package transfer to Hardware */ struct mutex trans_lock; u8 obuf[LJCA_GPIO_BUF_SIZE]; u8 ibuf[LJCA_GPIO_BUF_SIZE]; }; static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config) { struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; int ret; mutex_lock(&ljca_gpio->trans_lock); packet->item[0].index = gpio_id; packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id]; packet->num = 1; ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_CONFIG, packet, struct_size(packet, item, packet->num), NULL, NULL); mutex_unlock(&ljca_gpio->trans_lock); return ret; } static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id) { struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; struct gpio_packet *ack_packet = (struct gpio_packet *)ljca_gpio->ibuf; unsigned int ibuf_len = LJCA_GPIO_BUF_SIZE; int ret; mutex_lock(&ljca_gpio->trans_lock); packet->num = 1; packet->item[0].index = gpio_id; ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_READ, packet, struct_size(packet, item, packet->num), ljca_gpio->ibuf, &ibuf_len); if (ret) goto out_unlock; if (!ibuf_len || ack_packet->num != packet->num) { dev_err(&ljca_gpio->pdev->dev, "failed gpio_id:%u %u", gpio_id, ack_packet->num); ret = -EIO; } out_unlock: mutex_unlock(&ljca_gpio->trans_lock); if (ret) return ret; return ack_packet->item[0].value > 0; } static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, int value) { struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; int ret; mutex_lock(&ljca_gpio->trans_lock); packet->num = 1; packet->item[0].index = gpio_id; packet->item[0].value = value & 1; ret = ljca_transfer(ljca_gpio->gpio_info->ljca, LJCA_GPIO_WRITE, packet, struct_size(packet, item, packet->num), NULL, NULL); 
mutex_unlock(&ljca_gpio->trans_lock); return ret; } static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); return ljca_gpio_read(ljca_gpio, offset); } static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset, int val) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); int ret; ret = ljca_gpio_write(ljca_gpio, offset, val); if (ret) dev_err(chip->parent, "offset:%u val:%d set value failed %d\n", offset, val, ret); } static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); u8 config = LJCA_GPIO_CONF_INPUT | LJCA_GPIO_CONF_CLR; return gpio_config(ljca_gpio, offset, config); } static int ljca_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int val) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); u8 config = LJCA_GPIO_CONF_OUTPUT | LJCA_GPIO_CONF_CLR; int ret; ret = gpio_config(ljca_gpio, offset, config); if (ret) return ret; ljca_gpio_set_value(chip, offset, val); return 0; } static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); ljca_gpio->connect_mode[offset] = 0; switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_PULL_UP: ljca_gpio->connect_mode[offset] |= LJCA_GPIO_CONF_PULLUP; break; case PIN_CONFIG_BIAS_PULL_DOWN: ljca_gpio->connect_mode[offset] |= LJCA_GPIO_CONF_PULLDOWN; break; case PIN_CONFIG_DRIVE_PUSH_PULL: case PIN_CONFIG_PERSIST_STATE: break; default: return -ENOTSUPP; } return 0; } static int ljca_gpio_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask, unsigned int ngpios) { struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip); WARN_ON_ONCE(ngpios != ljca_gpio->gpio_info->num); bitmap_copy(valid_mask, ljca_gpio->gpio_info->valid_pin_map, ngpios); return 0; } static void ljca_gpio_irq_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask, unsigned int ngpios) { ljca_gpio_init_valid_mask(chip, valid_mask, ngpios); } static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id, bool enable) { struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf; int ret; mutex_lock(&ljca_gpio->trans_lock); packet->num = 1; packet->item[0].index = gpio_id; packet->item[0].value = 0; ret = ljca_transfer(ljca_gpio->gpio_info->ljca, enable ? 
LJCA_GPIO_INT_UNMASK : LJCA_GPIO_INT_MASK, packet, struct_size(packet, item, packet->num), NULL, NULL); mutex_unlock(&ljca_gpio->trans_lock); return ret; } static void ljca_gpio_async(struct work_struct *work) { struct ljca_gpio_dev *ljca_gpio = container_of(work, struct ljca_gpio_dev, work); int gpio_id; int unmasked; for_each_set_bit(gpio_id, ljca_gpio->reenable_irqs, ljca_gpio->gc.ngpio) { clear_bit(gpio_id, ljca_gpio->reenable_irqs); unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs); if (unmasked) ljca_enable_irq(ljca_gpio, gpio_id, true); } } static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, int len) { const struct gpio_packet *packet = evt_data; struct ljca_gpio_dev *ljca_gpio = context; int i; int irq; if (cmd != LJCA_GPIO_INT_EVENT) return; for (i = 0; i < packet->num; i++) { irq = irq_find_mapping(ljca_gpio->gc.irq.domain, packet->item[i].index); if (!irq) { dev_err(ljca_gpio->gc.parent, "gpio_id %u does not mapped to IRQ yet\n", packet->item[i].index); return; } generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq); set_bit(packet->item[i].index, ljca_gpio->reenable_irqs); } schedule_work(&ljca_gpio->work); } static void ljca_irq_unmask(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); int gpio_id = irqd_to_hwirq(irqd); gpiochip_enable_irq(gc, gpio_id); set_bit(gpio_id, ljca_gpio->unmasked_irqs); } static void ljca_irq_mask(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); int gpio_id = irqd_to_hwirq(irqd); clear_bit(gpio_id, ljca_gpio->unmasked_irqs); gpiochip_disable_irq(gc, gpio_id); } static int ljca_irq_set_type(struct irq_data *irqd, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); int gpio_id = irqd_to_hwirq(irqd); ljca_gpio->connect_mode[gpio_id] = LJCA_GPIO_CONF_INTERRUPT; switch (type) { case IRQ_TYPE_LEVEL_HIGH: ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLUP); break; case IRQ_TYPE_LEVEL_LOW: ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_LEVEL | LJCA_GPIO_CONF_PULLDOWN); break; case IRQ_TYPE_EDGE_BOTH: break; case IRQ_TYPE_EDGE_RISING: ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLUP); break; case IRQ_TYPE_EDGE_FALLING: ljca_gpio->connect_mode[gpio_id] |= (LJCA_GPIO_CONF_EDGE | LJCA_GPIO_CONF_PULLDOWN); break; default: return -EINVAL; } return 0; } static void ljca_irq_bus_lock(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); mutex_lock(&ljca_gpio->irq_lock); } static void ljca_irq_bus_unlock(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc); int gpio_id = irqd_to_hwirq(irqd); int enabled; int unmasked; enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs); unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs); if (enabled != unmasked) { if (unmasked) { gpio_config(ljca_gpio, gpio_id, 0); ljca_enable_irq(ljca_gpio, gpio_id, true); set_bit(gpio_id, ljca_gpio->enabled_irqs); } else { ljca_enable_irq(ljca_gpio, gpio_id, false); clear_bit(gpio_id, ljca_gpio->enabled_irqs); } } mutex_unlock(&ljca_gpio->irq_lock); } static const struct irq_chip ljca_gpio_irqchip = { .name = "ljca-irq", .irq_mask = ljca_irq_mask, .irq_unmask = 
ljca_irq_unmask, .irq_set_type = ljca_irq_set_type, .irq_bus_lock = ljca_irq_bus_lock, .irq_bus_sync_unlock = ljca_irq_bus_unlock, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int ljca_gpio_probe(struct platform_device *pdev) { struct ljca_gpio_dev *ljca_gpio; struct gpio_irq_chip *girq; int ret; ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL); if (!ljca_gpio) return -ENOMEM; ljca_gpio->gpio_info = dev_get_platdata(&pdev->dev); ljca_gpio->connect_mode = devm_kcalloc(&pdev->dev, ljca_gpio->gpio_info->num, sizeof(*ljca_gpio->connect_mode), GFP_KERNEL); if (!ljca_gpio->connect_mode) return -ENOMEM; mutex_init(&ljca_gpio->irq_lock); mutex_init(&ljca_gpio->trans_lock); ljca_gpio->pdev = pdev; ljca_gpio->gc.direction_input = ljca_gpio_direction_input; ljca_gpio->gc.direction_output = ljca_gpio_direction_output; ljca_gpio->gc.get = ljca_gpio_get_value; ljca_gpio->gc.set = ljca_gpio_set_value; ljca_gpio->gc.set_config = ljca_gpio_set_config; ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask; ljca_gpio->gc.can_sleep = true; ljca_gpio->gc.parent = &pdev->dev; ljca_gpio->gc.base = -1; ljca_gpio->gc.ngpio = ljca_gpio->gpio_info->num; ljca_gpio->gc.label = ACPI_COMPANION(&pdev->dev) ? acpi_dev_name(ACPI_COMPANION(&pdev->dev)) : dev_name(&pdev->dev); ljca_gpio->gc.owner = THIS_MODULE; platform_set_drvdata(pdev, ljca_gpio); ljca_register_event_cb(ljca_gpio->gpio_info->ljca, ljca_gpio_event_cb, ljca_gpio); girq = &ljca_gpio->gc.irq; gpio_irq_chip_set_chip(girq, &ljca_gpio_irqchip); girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; girq->init_valid_mask = ljca_gpio_irq_init_valid_mask; INIT_WORK(&ljca_gpio->work, ljca_gpio_async); ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio); if (ret) { ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca); mutex_destroy(&ljca_gpio->irq_lock); mutex_destroy(&ljca_gpio->trans_lock); } return ret; } static int ljca_gpio_remove(struct platform_device *pdev) { struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev); gpiochip_remove(&ljca_gpio->gc); ljca_unregister_event_cb(ljca_gpio->gpio_info->ljca); mutex_destroy(&ljca_gpio->irq_lock); mutex_destroy(&ljca_gpio->trans_lock); return 0; } #define LJCA_GPIO_DRV_NAME "ljca-gpio" static const struct platform_device_id ljca_gpio_id[] = { { LJCA_GPIO_DRV_NAME, 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, ljca_gpio_id); static struct platform_driver ljca_gpio_driver = { .driver.name = LJCA_GPIO_DRV_NAME, .probe = ljca_gpio_probe, .remove = ljca_gpio_remove, }; module_platform_driver(ljca_gpio_driver); MODULE_AUTHOR("Ye Xiang <[email protected]>"); MODULE_AUTHOR("Wang Zhifeng <[email protected]>"); MODULE_AUTHOR("Zhang Lixu <[email protected]>"); MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(LJCA);
linux-master
drivers/gpio/gpio-ljca.c
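The LJCA transfer path sizes every packet with struct_size() over a flexible array member: a one-byte count followed by index/value pairs. The following user-space sketch reproduces the layout and the length arithmetic by hand; the buffer size and field values are illustrative only.

/*
 * Minimal user-space sketch of the LJCA packet layout. The
 * struct_size() computation is written out with offsetof().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gpio_op {
	uint8_t index;
	uint8_t value;
} __attribute__((packed));

struct gpio_packet {
	uint8_t num;
	struct gpio_op item[];
} __attribute__((packed));

int main(void)
{
	uint8_t buf[60];	/* cf. LJCA_GPIO_BUF_SIZE */
	struct gpio_packet *pkt = (struct gpio_packet *)buf;

	pkt->num = 2;
	pkt->item[0] = (struct gpio_op){ .index = 3, .value = 1 };
	pkt->item[1] = (struct gpio_op){ .index = 7, .value = 0 };

	/* Equivalent of struct_size(pkt, item, pkt->num). */
	size_t len = offsetof(struct gpio_packet, item) +
		     pkt->num * sizeof(struct gpio_op);

	printf("transfer length: %zu bytes\n", len);	/* prints 5 */
	return 0;
}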
// SPDX-License-Identifier: GPL-2.0-only /* * Kontron PLD GPIO driver * * Copyright (c) 2010-2013 Kontron Europe GmbH * Author: Michael Brunner <[email protected]> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/gpio/driver.h> #include <linux/mfd/kempld.h> #define KEMPLD_GPIO_MAX_NUM 16 #define KEMPLD_GPIO_MASK(x) (BIT((x) % 8)) #define KEMPLD_GPIO_DIR_NUM(x) (0x40 + (x) / 8) #define KEMPLD_GPIO_LVL_NUM(x) (0x42 + (x) / 8) #define KEMPLD_GPIO_EVT_LVL_EDGE 0x46 #define KEMPLD_GPIO_IEN 0x4A struct kempld_gpio_data { struct gpio_chip chip; struct kempld_device_data *pld; }; /* * Set or clear GPIO bit * kempld_get_mutex must be called prior to calling this function. */ static void kempld_gpio_bitop(struct kempld_device_data *pld, u8 reg, u8 bit, u8 val) { u8 status; status = kempld_read8(pld, reg); if (val) status |= KEMPLD_GPIO_MASK(bit); else status &= ~KEMPLD_GPIO_MASK(bit); kempld_write8(pld, reg, status); } static int kempld_gpio_get_bit(struct kempld_device_data *pld, u8 reg, u8 bit) { u8 status; kempld_get_mutex(pld); status = kempld_read8(pld, reg); kempld_release_mutex(pld); return !!(status & KEMPLD_GPIO_MASK(bit)); } static int kempld_gpio_get(struct gpio_chip *chip, unsigned offset) { struct kempld_gpio_data *gpio = gpiochip_get_data(chip); struct kempld_device_data *pld = gpio->pld; return !!kempld_gpio_get_bit(pld, KEMPLD_GPIO_LVL_NUM(offset), offset); } static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct kempld_gpio_data *gpio = gpiochip_get_data(chip); struct kempld_device_data *pld = gpio->pld; kempld_get_mutex(pld); kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value); kempld_release_mutex(pld); } static int kempld_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct kempld_gpio_data *gpio = gpiochip_get_data(chip); struct kempld_device_data *pld = gpio->pld; kempld_get_mutex(pld); kempld_gpio_bitop(pld, KEMPLD_GPIO_DIR_NUM(offset), offset, 0); kempld_release_mutex(pld); return 0; } static int kempld_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct kempld_gpio_data *gpio = gpiochip_get_data(chip); struct kempld_device_data *pld = gpio->pld; kempld_get_mutex(pld); kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value); kempld_gpio_bitop(pld, KEMPLD_GPIO_DIR_NUM(offset), offset, 1); kempld_release_mutex(pld); return 0; } static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { struct kempld_gpio_data *gpio = gpiochip_get_data(chip); struct kempld_device_data *pld = gpio->pld; if (kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int kempld_gpio_pincount(struct kempld_device_data *pld) { u16 evt, evt_back; kempld_get_mutex(pld); /* Backup event register as it might be already initialized */ evt_back = kempld_read16(pld, KEMPLD_GPIO_EVT_LVL_EDGE); /* Clear event register */ kempld_write16(pld, KEMPLD_GPIO_EVT_LVL_EDGE, 0x0000); /* Read back event register */ evt = kempld_read16(pld, KEMPLD_GPIO_EVT_LVL_EDGE); /* Restore event register */ kempld_write16(pld, KEMPLD_GPIO_EVT_LVL_EDGE, evt_back); kempld_release_mutex(pld); return evt ? 
__ffs(evt) : 16; } static int kempld_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct kempld_device_data *pld = dev_get_drvdata(dev->parent); struct kempld_platform_data *pdata = dev_get_platdata(pld->dev); struct kempld_gpio_data *gpio; struct gpio_chip *chip; int ret; if (pld->info.spec_major < 2) { dev_err(dev, "Driver only supports GPIO devices compatible to PLD spec. rev. 2.0 or higher\n"); return -ENODEV; } gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; gpio->pld = pld; platform_set_drvdata(pdev, gpio); chip = &gpio->chip; chip->label = "gpio-kempld"; chip->owner = THIS_MODULE; chip->parent = dev; chip->can_sleep = true; if (pdata && pdata->gpio_base) chip->base = pdata->gpio_base; else chip->base = -1; chip->direction_input = kempld_gpio_direction_input; chip->direction_output = kempld_gpio_direction_output; chip->get_direction = kempld_gpio_get_direction; chip->get = kempld_gpio_get; chip->set = kempld_gpio_set; chip->ngpio = kempld_gpio_pincount(pld); if (chip->ngpio == 0) { dev_err(dev, "No GPIO pins detected\n"); return -ENODEV; } ret = devm_gpiochip_add_data(dev, chip, gpio); if (ret) { dev_err(dev, "Could not register GPIO chip\n"); return ret; } dev_info(dev, "GPIO functionality initialized with %d pins\n", chip->ngpio); return 0; } static struct platform_driver kempld_gpio_driver = { .driver = { .name = "kempld-gpio", }, .probe = kempld_gpio_probe, }; module_platform_driver(kempld_gpio_driver); MODULE_DESCRIPTION("KEM PLD GPIO Driver"); MODULE_AUTHOR("Michael Brunner <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:kempld-gpio");
linux-master
drivers/gpio/gpio-kempld.c
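kempld_gpio_pincount() infers the pin count by clearing the 16-bit event register and reading it back: bits that read back set after the clear back nonexistent pins, so the lowest set bit gives the count (16 if everything cleared). A hedged standalone sketch of that decode, using __builtin_ctz() as a user-space stand-in for the kernel's __ffs(); the sample register values are made up.

/*
 * Sketch of the pin-count probe used by gpio-kempld. Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static int pincount_from_evt(uint16_t evt)
{
	if (!evt)
		return 16;		/* all 16 pins implemented */
	return __builtin_ctz(evt);	/* index of lowest stuck bit */
}

int main(void)
{
	/* e.g. bits 8..15 read back set -> 8 implemented pins */
	printf("pins: %d\n", pincount_from_evt(0xFF00));
	printf("pins: %d\n", pincount_from_evt(0x0000));
	return 0;
}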
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for the ACCES 104-IDIO-16 family * Copyright (C) 2015 William Breathitt Gray * * This driver supports the following ACCES devices: 104-IDIO-16, * 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8. */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/isa.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/regmap.h> #include <linux/types.h> #include "gpio-idio-16.h" #define IDIO_16_EXTENT 8 #define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT) static unsigned int base[MAX_NUM_IDIO_16]; static unsigned int num_idio_16; module_param_hw_array(base, uint, ioport, &num_idio_16, 0); MODULE_PARM_DESC(base, "ACCES 104-IDIO-16 base addresses"); static unsigned int irq[MAX_NUM_IDIO_16]; static unsigned int num_irq; module_param_hw_array(irq, uint, irq, &num_irq, 0); MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers"); static const struct regmap_range idio_16_wr_ranges[] = { regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x4), }; static const struct regmap_range idio_16_rd_ranges[] = { regmap_reg_range(0x1, 0x2), regmap_reg_range(0x5, 0x5), }; static const struct regmap_range idio_16_precious_ranges[] = { regmap_reg_range(0x2, 0x2), }; static const struct regmap_access_table idio_16_wr_table = { .yes_ranges = idio_16_wr_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_wr_ranges), }; static const struct regmap_access_table idio_16_rd_table = { .yes_ranges = idio_16_rd_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_rd_ranges), }; static const struct regmap_access_table idio_16_precious_table = { .yes_ranges = idio_16_precious_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_precious_ranges), }; static const struct regmap_config idio_16_regmap_config = { .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .io_port = true, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, .precious_table = &idio_16_precious_table, .cache_type = REGCACHE_FLAT, .use_raw_spinlock = true, }; /* Only input lines (GPIO 16-31) support interrupts */ #define IDIO_16_REGMAP_IRQ(_id) \ [16 + _id] = { \ .mask = BIT(_id), \ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \ } static const struct regmap_irq idio_16_regmap_irqs[] = { IDIO_16_REGMAP_IRQ(0), IDIO_16_REGMAP_IRQ(1), IDIO_16_REGMAP_IRQ(2), /* 0-2 */ IDIO_16_REGMAP_IRQ(3), IDIO_16_REGMAP_IRQ(4), IDIO_16_REGMAP_IRQ(5), /* 3-5 */ IDIO_16_REGMAP_IRQ(6), IDIO_16_REGMAP_IRQ(7), IDIO_16_REGMAP_IRQ(8), /* 6-8 */ IDIO_16_REGMAP_IRQ(9), IDIO_16_REGMAP_IRQ(10), IDIO_16_REGMAP_IRQ(11), /* 9-11 */ IDIO_16_REGMAP_IRQ(12), IDIO_16_REGMAP_IRQ(13), IDIO_16_REGMAP_IRQ(14), /* 12-14 */ IDIO_16_REGMAP_IRQ(15), /* 15 */ }; static int idio_16_probe(struct device *dev, unsigned int id) { const char *const name = dev_name(dev); struct idio_16_regmap_config config = {}; void __iomem *regs; struct regmap *map; if (!devm_request_region(dev, base[id], IDIO_16_EXTENT, name)) { dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", base[id], base[id] + IDIO_16_EXTENT); return -EBUSY; } regs = devm_ioport_map(dev, base[id], IDIO_16_EXTENT); if (!regs) return -ENOMEM; map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config); if (IS_ERR(map)) return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n"); config.parent = dev; config.map = map; config.regmap_irqs = idio_16_regmap_irqs; config.num_regmap_irqs = 
		ARRAY_SIZE(idio_16_regmap_irqs);
	config.irq = irq[id];
	config.no_status = true;

	return devm_idio_16_regmap_register(dev, &config);
}

static struct isa_driver idio_16_driver = {
	.probe = idio_16_probe,
	.driver = {
		.name = "104-idio-16"
	},
};

module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq);

MODULE_AUTHOR("William Breathitt Gray <[email protected]>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(GPIO_IDIO_16);
linux-master
drivers/gpio/gpio-104-idio-16.c
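The IDIO_16_REGMAP_IRQ() macro encodes the constraint stated in the driver's comment: only the input lines (GPIO 16-31) can interrupt, and input line 16 + n owns bit n of the interrupt mask register. A small sketch of that mapping; the helper name is illustrative.

/*
 * Sketch of the GPIO-offset-to-IRQ-mask mapping in gpio-104-idio-16.
 * Returns 0 for the output lines, which have no IRQ.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t idio16_irq_mask(unsigned int offset)
{
	if (offset < 16 || offset > 31)
		return 0;		/* outputs 0-15: no interrupt */
	return 1u << (offset - 16);	/* cf. IDIO_16_REGMAP_IRQ(_id) */
}

int main(void)
{
	printf("gpio 16 -> mask 0x%04x\n", idio16_irq_mask(16)); /* 0x0001 */
	printf("gpio 31 -> mask 0x%04x\n", idio16_irq_mask(31)); /* 0x8000 */
	printf("gpio  5 -> mask 0x%04x\n", idio16_irq_mask(5));  /* 0x0000 */
	return 0;
}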
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-sa1100/gpio.c * * Generic SA-1100 GPIO handling */ #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <soc/sa1100/pwer.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/generic.h> struct sa1100_gpio_chip { struct gpio_chip chip; void __iomem *membase; int irqbase; u32 irqmask; u32 irqrising; u32 irqfalling; u32 irqwake; }; #define sa1100_gpio_chip(x) container_of(x, struct sa1100_gpio_chip, chip) enum { R_GPLR = 0x00, R_GPDR = 0x04, R_GPSR = 0x08, R_GPCR = 0x0c, R_GRER = 0x10, R_GFER = 0x14, R_GEDR = 0x18, R_GAFR = 0x1c, }; static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset) { return readl_relaxed(sa1100_gpio_chip(chip)->membase + R_GPLR) & BIT(offset); } static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { int reg = value ? R_GPSR : R_GPCR; writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg); } static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset) { void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR; if (readl_relaxed(gpdr) & BIT(offset)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset) { void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR; unsigned long flags; local_irq_save(flags); writel_relaxed(readl_relaxed(gpdr) & ~BIT(offset), gpdr); local_irq_restore(flags); return 0; } static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int value) { void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR; unsigned long flags; local_irq_save(flags); sa1100_gpio_set(chip, offset, value); writel_relaxed(readl_relaxed(gpdr) | BIT(offset), gpdr); local_irq_restore(flags); return 0; } static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset) { return sa1100_gpio_chip(chip)->irqbase + offset; } static struct sa1100_gpio_chip sa1100_gpio_chip = { .chip = { .label = "gpio", .get_direction = sa1100_get_direction, .direction_input = sa1100_direction_input, .direction_output = sa1100_direction_output, .set = sa1100_gpio_set, .get = sa1100_gpio_get, .to_irq = sa1100_to_irq, .base = 0, .ngpio = GPIO_MAX + 1, }, .membase = (void *)&GPLR, .irqbase = IRQ_GPIO0, }; /* * SA1100 GPIO edge detection for IRQs: * IRQs are generated on Falling-Edge, Rising-Edge, or both. * Use this instead of directly setting GRER/GFER. */ static void sa1100_update_edge_regs(struct sa1100_gpio_chip *sgc) { void *base = sgc->membase; u32 grer, gfer; grer = sgc->irqrising & sgc->irqmask; gfer = sgc->irqfalling & sgc->irqmask; writel_relaxed(grer, base + R_GRER); writel_relaxed(gfer, base + R_GFER); } static int sa1100_gpio_type(struct irq_data *d, unsigned int type) { struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d); unsigned int mask = BIT(d->hwirq); if (type == IRQ_TYPE_PROBE) { if ((sgc->irqrising | sgc->irqfalling) & mask) return 0; type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; } if (type & IRQ_TYPE_EDGE_RISING) sgc->irqrising |= mask; else sgc->irqrising &= ~mask; if (type & IRQ_TYPE_EDGE_FALLING) sgc->irqfalling |= mask; else sgc->irqfalling &= ~mask; sa1100_update_edge_regs(sgc); return 0; } /* * GPIO IRQs must be acknowledged. 
*/ static void sa1100_gpio_ack(struct irq_data *d) { struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d); writel_relaxed(BIT(d->hwirq), sgc->membase + R_GEDR); } static void sa1100_gpio_mask(struct irq_data *d) { struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d); unsigned int mask = BIT(d->hwirq); sgc->irqmask &= ~mask; sa1100_update_edge_regs(sgc); } static void sa1100_gpio_unmask(struct irq_data *d) { struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d); unsigned int mask = BIT(d->hwirq); sgc->irqmask |= mask; sa1100_update_edge_regs(sgc); } static int sa1100_gpio_wake(struct irq_data *d, unsigned int on) { struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d); int ret = sa11x0_gpio_set_wake(d->hwirq, on); if (!ret) { if (on) sgc->irqwake |= BIT(d->hwirq); else sgc->irqwake &= ~BIT(d->hwirq); } return ret; } /* * This is for GPIO IRQs */ static struct irq_chip sa1100_gpio_irq_chip = { .name = "GPIO", .irq_ack = sa1100_gpio_ack, .irq_mask = sa1100_gpio_mask, .irq_unmask = sa1100_gpio_unmask, .irq_set_type = sa1100_gpio_type, .irq_set_wake = sa1100_gpio_wake, }; static int sa1100_gpio_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct sa1100_gpio_chip *sgc = d->host_data; irq_set_chip_data(irq, sgc); irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, handle_edge_irq); irq_set_probe(irq); return 0; } static const struct irq_domain_ops sa1100_gpio_irqdomain_ops = { .map = sa1100_gpio_irqdomain_map, .xlate = irq_domain_xlate_onetwocell, }; static struct irq_domain *sa1100_gpio_irqdomain; /* * IRQ 0-11 (GPIO) handler. We enter here with the * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. */ static void sa1100_gpio_handler(struct irq_desc *desc) { struct sa1100_gpio_chip *sgc = irq_desc_get_handler_data(desc); unsigned int irq, mask; void __iomem *gedr = sgc->membase + R_GEDR; mask = readl_relaxed(gedr); do { /* * clear down all currently active IRQ sources. * We will be processing them all. */ writel_relaxed(mask, gedr); irq = sgc->irqbase; do { if (mask & 1) generic_handle_irq(irq); mask >>= 1; irq++; } while (mask); mask = readl_relaxed(gedr); } while (mask); } static int sa1100_gpio_suspend(void) { struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip; /* * Set the appropriate edges for wakeup. */ writel_relaxed(sgc->irqwake & sgc->irqrising, sgc->membase + R_GRER); writel_relaxed(sgc->irqwake & sgc->irqfalling, sgc->membase + R_GFER); /* * Clear any pending GPIO interrupts. 
*/ writel_relaxed(readl_relaxed(sgc->membase + R_GEDR), sgc->membase + R_GEDR); return 0; } static void sa1100_gpio_resume(void) { sa1100_update_edge_regs(&sa1100_gpio_chip); } static struct syscore_ops sa1100_gpio_syscore_ops = { .suspend = sa1100_gpio_suspend, .resume = sa1100_gpio_resume, }; static int __init sa1100_gpio_init_devicefs(void) { register_syscore_ops(&sa1100_gpio_syscore_ops); return 0; } device_initcall(sa1100_gpio_init_devicefs); static const int sa1100_gpio_irqs[] __initconst = { /* Install handlers for GPIO 0-10 edge detect interrupts */ IRQ_GPIO0_SC, IRQ_GPIO1_SC, IRQ_GPIO2_SC, IRQ_GPIO3_SC, IRQ_GPIO4_SC, IRQ_GPIO5_SC, IRQ_GPIO6_SC, IRQ_GPIO7_SC, IRQ_GPIO8_SC, IRQ_GPIO9_SC, IRQ_GPIO10_SC, /* Install handler for GPIO 11-27 edge detect interrupts */ IRQ_GPIO11_27, }; void __init sa1100_init_gpio(void) { struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip; int i; /* clear all GPIO edge detects */ writel_relaxed(0, sgc->membase + R_GFER); writel_relaxed(0, sgc->membase + R_GRER); writel_relaxed(-1, sgc->membase + R_GEDR); gpiochip_add_data(&sa1100_gpio_chip.chip, NULL); sa1100_gpio_irqdomain = irq_domain_add_simple(NULL, 28, IRQ_GPIO0, &sa1100_gpio_irqdomain_ops, sgc); for (i = 0; i < ARRAY_SIZE(sa1100_gpio_irqs); i++) irq_set_chained_handler_and_data(sa1100_gpio_irqs[i], sa1100_gpio_handler, sgc); }
linux-master
drivers/gpio/gpio-sa1100.c
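sa1100_gpio_handler() acknowledges the whole pending GEDR mask with a single write, then dispatches each set bit in turn, re-reading the register until nothing is pending. The user-space sketch below isolates just the decode loop; dispatch() is a stub standing in for generic_handle_irq().

/*
 * Sketch of the GEDR decode loop from sa1100_gpio_handler().
 */
#include <stdint.h>
#include <stdio.h>

static void dispatch(unsigned int irq)
{
	printf("handling irq %u\n", irq);
}

static void decode_gedr(uint32_t mask, unsigned int irqbase)
{
	unsigned int irq = irqbase;

	/* Walk the mask from LSB up, one IRQ number per bit. */
	while (mask) {
		if (mask & 1)
			dispatch(irq);
		mask >>= 1;
		irq++;
	}
}

int main(void)
{
	/* Pending edges on GPIO 0, 3 and 11 with an example IRQ base of 32. */
	decode_gedr((1u << 0) | (1u << 3) | (1u << 11), 32);
	return 0;
}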
// SPDX-License-Identifier: GPL-2.0-only /* * SPEAr platform SPI chipselect abstraction over gpiolib * * Copyright (C) 2012 ST Microelectronics * Shiraz Hashim <[email protected]> */ #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/types.h> /* maximum chipselects */ #define NUM_OF_GPIO 4 /* * Provision is available on some SPEAr SoCs to control ARM PL022 spi cs * through system registers. This register lies outside spi (pl022) * address space into system registers. * * It provides control for spi chip select lines so that any chipselect * (out of 4 possible chipselects in pl022) can be made low to select * the particular slave. */ /** * struct spear_spics - represents spi chip select control * @base: base address * @perip_cfg: configuration register * @sw_enable_bit: bit to enable s/w control over chipselects * @cs_value_bit: bit to program high or low chipselect * @cs_enable_mask: mask to select bits required to select chipselect * @cs_enable_shift: bit pos of cs_enable_mask * @use_count: use count of a spi controller cs lines * @last_off: stores last offset caller of set_value() * @chip: gpio_chip abstraction */ struct spear_spics { void __iomem *base; u32 perip_cfg; u32 sw_enable_bit; u32 cs_value_bit; u32 cs_enable_mask; u32 cs_enable_shift; unsigned long use_count; int last_off; struct gpio_chip chip; }; /* gpio framework specific routines */ static int spics_get_value(struct gpio_chip *chip, unsigned offset) { return -ENXIO; } static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value) { struct spear_spics *spics = gpiochip_get_data(chip); u32 tmp; /* select chip select from register */ tmp = readl_relaxed(spics->base + spics->perip_cfg); if (spics->last_off != offset) { spics->last_off = offset; tmp &= ~(spics->cs_enable_mask << spics->cs_enable_shift); tmp |= offset << spics->cs_enable_shift; } /* toggle chip select line */ tmp &= ~(0x1 << spics->cs_value_bit); tmp |= value << spics->cs_value_bit; writel_relaxed(tmp, spics->base + spics->perip_cfg); } static int spics_direction_input(struct gpio_chip *chip, unsigned offset) { return -ENXIO; } static int spics_direction_output(struct gpio_chip *chip, unsigned offset, int value) { spics_set_value(chip, offset, value); return 0; } static int spics_request(struct gpio_chip *chip, unsigned offset) { struct spear_spics *spics = gpiochip_get_data(chip); u32 tmp; if (!spics->use_count++) { tmp = readl_relaxed(spics->base + spics->perip_cfg); tmp |= 0x1 << spics->sw_enable_bit; tmp |= 0x1 << spics->cs_value_bit; writel_relaxed(tmp, spics->base + spics->perip_cfg); } return 0; } static void spics_free(struct gpio_chip *chip, unsigned offset) { struct spear_spics *spics = gpiochip_get_data(chip); u32 tmp; if (!--spics->use_count) { tmp = readl_relaxed(spics->base + spics->perip_cfg); tmp &= ~(0x1 << spics->sw_enable_bit); writel_relaxed(tmp, spics->base + spics->perip_cfg); } } static int spics_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct spear_spics *spics; spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL); if (!spics) return -ENOMEM; spics->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(spics->base)) return PTR_ERR(spics->base); if (of_property_read_u32(np, "st-spics,peripcfg-reg", &spics->perip_cfg)) goto err_dt_data; if (of_property_read_u32(np, "st-spics,sw-enable-bit", &spics->sw_enable_bit)) goto err_dt_data; if 
(of_property_read_u32(np, "st-spics,cs-value-bit", &spics->cs_value_bit)) goto err_dt_data; if (of_property_read_u32(np, "st-spics,cs-enable-mask", &spics->cs_enable_mask)) goto err_dt_data; if (of_property_read_u32(np, "st-spics,cs-enable-shift", &spics->cs_enable_shift)) goto err_dt_data; spics->chip.ngpio = NUM_OF_GPIO; spics->chip.base = -1; spics->chip.request = spics_request; spics->chip.free = spics_free; spics->chip.direction_input = spics_direction_input; spics->chip.direction_output = spics_direction_output; spics->chip.get = spics_get_value; spics->chip.set = spics_set_value; spics->chip.label = dev_name(&pdev->dev); spics->chip.parent = &pdev->dev; spics->chip.owner = THIS_MODULE; spics->last_off = -1; return devm_gpiochip_add_data(&pdev->dev, &spics->chip, spics); err_dt_data: dev_err(&pdev->dev, "DT probe failed\n"); return -EINVAL; } static const struct of_device_id spics_gpio_of_match[] = { { .compatible = "st,spear-spics-gpio" }, {} }; static struct platform_driver spics_gpio_driver = { .probe = spics_gpio_probe, .driver = { .name = "spear-spics-gpio", .of_match_table = spics_gpio_of_match, }, }; static int __init spics_gpio_init(void) { return platform_driver_register(&spics_gpio_driver); } subsys_initcall(spics_gpio_init);
linux-master
drivers/gpio/gpio-spear-spics.c
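spics_set_value() boils down to one read-modify-write on a single system register that carries both the chipselect-number field and the chipselect-value bit. The sketch below shows that update in isolation; the field positions are example values, since on real hardware they come from the device tree properties parsed in probe.

/*
 * Sketch of the register update done by spics_set_value(). The
 * masks and shifts here are illustrative, not the SPEAr layout.
 */
#include <stdint.h>
#include <stdio.h>

#define CS_ENABLE_MASK  0x3	/* example: 2-bit chipselect field */
#define CS_ENABLE_SHIFT 4	/* example bit position */
#define CS_VALUE_BIT    6	/* example bit position */

static uint32_t spics_update(uint32_t reg, unsigned int cs, int value)
{
	/* select which chipselect the value bit drives */
	reg &= ~(CS_ENABLE_MASK << CS_ENABLE_SHIFT);
	reg |= cs << CS_ENABLE_SHIFT;

	/* drive the selected chipselect line high or low */
	reg &= ~(1u << CS_VALUE_BIT);
	reg |= (uint32_t)(value & 1) << CS_VALUE_BIT;
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = spics_update(reg, 2, 0);	/* assert CS2 (active low) */
	printf("reg = 0x%08x\n", reg);
	reg = spics_update(reg, 2, 1);	/* deassert CS2 */
	printf("reg = 0x%08x\n", reg);
	return 0;
}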
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/gpio/gpio-mb86s7x.c * * Copyright (C) 2015 Fujitsu Semiconductor Limited * Copyright (C) 2015 Linaro Ltd. */ #include <linux/acpi.h> #include <linux/io.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/slab.h> #include "gpiolib.h" #include "gpiolib-acpi.h" /* * Only first 8bits of a register correspond to each pin, * so there are 4 registers for 32 pins. */ #define PDR(x) (0x0 + x / 8 * 4) #define DDR(x) (0x10 + x / 8 * 4) #define PFR(x) (0x20 + x / 8 * 4) #define OFFSET(x) BIT((x) % 8) struct mb86s70_gpio_chip { struct gpio_chip gc; void __iomem *base; struct clk *clk; spinlock_t lock; }; static int mb86s70_gpio_request(struct gpio_chip *gc, unsigned gpio) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); unsigned long flags; u32 val; spin_lock_irqsave(&gchip->lock, flags); val = readl(gchip->base + PFR(gpio)); val &= ~OFFSET(gpio); writel(val, gchip->base + PFR(gpio)); spin_unlock_irqrestore(&gchip->lock, flags); return 0; } static void mb86s70_gpio_free(struct gpio_chip *gc, unsigned gpio) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); unsigned long flags; u32 val; spin_lock_irqsave(&gchip->lock, flags); val = readl(gchip->base + PFR(gpio)); val |= OFFSET(gpio); writel(val, gchip->base + PFR(gpio)); spin_unlock_irqrestore(&gchip->lock, flags); } static int mb86s70_gpio_direction_input(struct gpio_chip *gc, unsigned gpio) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); unsigned long flags; unsigned char val; spin_lock_irqsave(&gchip->lock, flags); val = readl(gchip->base + DDR(gpio)); val &= ~OFFSET(gpio); writel(val, gchip->base + DDR(gpio)); spin_unlock_irqrestore(&gchip->lock, flags); return 0; } static int mb86s70_gpio_direction_output(struct gpio_chip *gc, unsigned gpio, int value) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); unsigned long flags; unsigned char val; spin_lock_irqsave(&gchip->lock, flags); val = readl(gchip->base + PDR(gpio)); if (value) val |= OFFSET(gpio); else val &= ~OFFSET(gpio); writel(val, gchip->base + PDR(gpio)); val = readl(gchip->base + DDR(gpio)); val |= OFFSET(gpio); writel(val, gchip->base + DDR(gpio)); spin_unlock_irqrestore(&gchip->lock, flags); return 0; } static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio)); } static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value) { struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc); unsigned long flags; unsigned char val; spin_lock_irqsave(&gchip->lock, flags); val = readl(gchip->base + PDR(gpio)); if (value) val |= OFFSET(gpio); else val &= ~OFFSET(gpio); writel(val, gchip->base + PDR(gpio)); spin_unlock_irqrestore(&gchip->lock, flags); } static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) { int irq, index; for (index = 0;; index++) { irq = platform_get_irq(to_platform_device(gc->parent), index); if (irq < 0) return irq; if (irq == 0) break; if (irq_get_irq_data(irq)->hwirq == offset) return irq; } return -EINVAL; } static int mb86s70_gpio_probe(struct platform_device *pdev) { struct mb86s70_gpio_chip *gchip; int ret; gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL); if (gchip == NULL) 
return -ENOMEM; platform_set_drvdata(pdev, gchip); gchip->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gchip->base)) return PTR_ERR(gchip->base); gchip->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(gchip->clk)) return PTR_ERR(gchip->clk); ret = clk_prepare_enable(gchip->clk); if (ret) return ret; spin_lock_init(&gchip->lock); gchip->gc.direction_output = mb86s70_gpio_direction_output; gchip->gc.direction_input = mb86s70_gpio_direction_input; gchip->gc.request = mb86s70_gpio_request; gchip->gc.free = mb86s70_gpio_free; gchip->gc.get = mb86s70_gpio_get; gchip->gc.set = mb86s70_gpio_set; gchip->gc.to_irq = mb86s70_gpio_to_irq; gchip->gc.label = dev_name(&pdev->dev); gchip->gc.ngpio = 32; gchip->gc.owner = THIS_MODULE; gchip->gc.parent = &pdev->dev; gchip->gc.base = -1; ret = gpiochip_add_data(&gchip->gc, gchip); if (ret) { dev_err(&pdev->dev, "couldn't register gpio driver\n"); clk_disable_unprepare(gchip->clk); return ret; } acpi_gpiochip_request_interrupts(&gchip->gc); return 0; } static int mb86s70_gpio_remove(struct platform_device *pdev) { struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev); acpi_gpiochip_free_interrupts(&gchip->gc); gpiochip_remove(&gchip->gc); clk_disable_unprepare(gchip->clk); return 0; } static const struct of_device_id mb86s70_gpio_dt_ids[] = { { .compatible = "fujitsu,mb86s70-gpio" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids); #ifdef CONFIG_ACPI static const struct acpi_device_id mb86s70_gpio_acpi_ids[] = { { "SCX0007" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(acpi, mb86s70_gpio_acpi_ids); #endif static struct platform_driver mb86s70_gpio_driver = { .driver = { .name = "mb86s70-gpio", .of_match_table = mb86s70_gpio_dt_ids, .acpi_match_table = ACPI_PTR(mb86s70_gpio_acpi_ids), }, .probe = mb86s70_gpio_probe, .remove = mb86s70_gpio_remove, }; module_platform_driver(mb86s70_gpio_driver); MODULE_DESCRIPTION("MB86S7x GPIO Driver"); MODULE_ALIAS("platform:mb86s70-gpio"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-mb86s7x.c
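The PDR()/DDR()/PFR() macros reflect the comment at the top of the file: each 32-bit register carries only 8 pins, so pin x lives at offset base + (x / 8) * 4, bit (x % 8). A short standalone sketch reproducing that addressing follows; the helper names are illustrative.

/*
 * Sketch of the mb86s7x register addressing, mirroring the PDR()/
 * DDR()/PFR() and OFFSET() macros from the driver.
 */
#include <stdio.h>

#define PDR_BASE 0x00
#define DDR_BASE 0x10
#define PFR_BASE 0x20

static unsigned int reg_off(unsigned int base, unsigned int pin)
{
	return base + (pin / 8) * 4;	/* one register per 8 pins */
}

static unsigned int bit_in_reg(unsigned int pin)
{
	return 1u << (pin % 8);		/* cf. OFFSET(x) */
}

int main(void)
{
	/* Pins 6..9 straddle the boundary between the first two registers. */
	for (unsigned int pin = 6; pin < 10; pin++)
		printf("pin %2u: PDR @ 0x%02x, DDR @ 0x%02x, bit 0x%02x\n",
		       pin, reg_off(PDR_BASE, pin), reg_off(DDR_BASE, pin),
		       bit_in_reg(pin));
	return 0;
}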
// SPDX-License-Identifier: GPL-2.0-or-later /* * GPIO testing driver based on configfs. * * Copyright (C) 2021 Bartosz Golaszewski <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitmap.h> #include <linux/cleanup.h> #include <linux/completion.h> #include <linux/configfs.h> #include <linux/device.h> #include <linux/gpio/driver.h> #include <linux/gpio/machine.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irq_sim.h> #include <linux/list.h> #include <linux/minmax.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/string_helpers.h> #include <linux/sysfs.h> #include "gpiolib.h" #define GPIO_SIM_NGPIO_MAX 1024 #define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */ #define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */ static DEFINE_IDA(gpio_sim_ida); struct gpio_sim_chip { struct gpio_chip gc; unsigned long *direction_map; unsigned long *value_map; unsigned long *pull_map; struct irq_domain *irq_sim; struct mutex lock; const struct attribute_group **attr_groups; }; struct gpio_sim_attribute { struct device_attribute dev_attr; unsigned int offset; }; static struct gpio_sim_attribute * to_gpio_sim_attr(struct device_attribute *dev_attr) { return container_of(dev_attr, struct gpio_sim_attribute, dev_attr); } static int gpio_sim_apply_pull(struct gpio_sim_chip *chip, unsigned int offset, int value) { int irq, irq_type, ret; struct gpio_desc *desc; struct gpio_chip *gc; gc = &chip->gc; desc = &gc->gpiodev->descs[offset]; guard(mutex)(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && !test_bit(FLAG_IS_OUT, &desc->flags)) { if (value == !!test_bit(offset, chip->value_map)) goto set_pull; /* * This is fine - it just means, nobody is listening * for interrupts on this line, otherwise * irq_create_mapping() would have been called from * the to_irq() callback. */ irq = irq_find_mapping(chip->irq_sim, offset); if (!irq) goto set_value; irq_type = irq_get_trigger_type(irq); if ((value && (irq_type & IRQ_TYPE_EDGE_RISING)) || (!value && (irq_type & IRQ_TYPE_EDGE_FALLING))) { ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true); if (ret) goto set_pull; } } set_value: /* Change the value unless we're actively driving the line. 
*/ if (!test_bit(FLAG_REQUESTED, &desc->flags) || !test_bit(FLAG_IS_OUT, &desc->flags)) __assign_bit(offset, chip->value_map, value); set_pull: __assign_bit(offset, chip->pull_map, value); return 0; } static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); guard(mutex)(&chip->lock); return !!test_bit(offset, chip->value_map); } static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) __assign_bit(offset, chip->value_map, value); } static int gpio_sim_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio); return 0; } static void gpio_sim_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio); } static int gpio_sim_direction_output(struct gpio_chip *gc, unsigned int offset, int value) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) { __clear_bit(offset, chip->direction_map); __assign_bit(offset, chip->value_map, value); } return 0; } static int gpio_sim_direction_input(struct gpio_chip *gc, unsigned int offset) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) __set_bit(offset, chip->direction_map); return 0; } static int gpio_sim_get_direction(struct gpio_chip *gc, unsigned int offset) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); int direction; scoped_guard(mutex, &chip->lock) direction = !!test_bit(offset, chip->direction_map); return direction ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; } static int gpio_sim_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_PULL_UP: return gpio_sim_apply_pull(chip, offset, 1); case PIN_CONFIG_BIAS_PULL_DOWN: return gpio_sim_apply_pull(chip, offset, 0); default: break; } return -ENOTSUPP; } static int gpio_sim_to_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); return irq_create_mapping(chip->irq_sim, offset); } static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset) { struct gpio_sim_chip *chip = gpiochip_get_data(gc); scoped_guard(mutex, &chip->lock) __assign_bit(offset, chip->value_map, !!test_bit(offset, chip->pull_map)); } static ssize_t gpio_sim_sysfs_val_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr); struct gpio_sim_chip *chip = dev_get_drvdata(dev); int val; scoped_guard(mutex, &chip->lock) val = !!test_bit(line_attr->offset, chip->value_map); return sysfs_emit(buf, "%d\n", val); } static ssize_t gpio_sim_sysfs_val_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { /* * Not assigning this function will result in write() returning -EIO * which is confusing. Return -EPERM explicitly. 
*/ return -EPERM; } static const char *const gpio_sim_sysfs_pull_strings[] = { [0] = "pull-down", [1] = "pull-up", }; static ssize_t gpio_sim_sysfs_pull_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr); struct gpio_sim_chip *chip = dev_get_drvdata(dev); int pull; scoped_guard(mutex, &chip->lock) pull = !!test_bit(line_attr->offset, chip->pull_map); return sysfs_emit(buf, "%s\n", gpio_sim_sysfs_pull_strings[pull]); } static ssize_t gpio_sim_sysfs_pull_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr); struct gpio_sim_chip *chip = dev_get_drvdata(dev); int ret, pull; pull = sysfs_match_string(gpio_sim_sysfs_pull_strings, buf); if (pull < 0) return pull; ret = gpio_sim_apply_pull(chip, line_attr->offset, pull); if (ret) return ret; return len; } static void gpio_sim_mutex_destroy(void *data) { struct mutex *lock = data; mutex_destroy(lock); } static void gpio_sim_dispose_mappings(void *data) { struct gpio_sim_chip *chip = data; unsigned int i; for (i = 0; i < chip->gc.ngpio; i++) irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i)); } static void gpio_sim_sysfs_remove(void *data) { struct gpio_sim_chip *chip = data; sysfs_remove_groups(&chip->gc.gpiodev->dev.kobj, chip->attr_groups); } static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip) { struct device_attribute *val_dev_attr, *pull_dev_attr; struct gpio_sim_attribute *val_attr, *pull_attr; unsigned int num_lines = chip->gc.ngpio; struct device *dev = chip->gc.parent; struct attribute_group *attr_group; struct attribute **attrs; int i, ret; chip->attr_groups = devm_kcalloc(dev, sizeof(*chip->attr_groups), num_lines + 1, GFP_KERNEL); if (!chip->attr_groups) return -ENOMEM; for (i = 0; i < num_lines; i++) { attr_group = devm_kzalloc(dev, sizeof(*attr_group), GFP_KERNEL); attrs = devm_kcalloc(dev, GPIO_SIM_NUM_ATTRS, sizeof(*attrs), GFP_KERNEL); val_attr = devm_kzalloc(dev, sizeof(*val_attr), GFP_KERNEL); pull_attr = devm_kzalloc(dev, sizeof(*pull_attr), GFP_KERNEL); if (!attr_group || !attrs || !val_attr || !pull_attr) return -ENOMEM; attr_group->name = devm_kasprintf(dev, GFP_KERNEL, "sim_gpio%u", i); if (!attr_group->name) return -ENOMEM; val_attr->offset = pull_attr->offset = i; val_dev_attr = &val_attr->dev_attr; pull_dev_attr = &pull_attr->dev_attr; sysfs_attr_init(&val_dev_attr->attr); sysfs_attr_init(&pull_dev_attr->attr); val_dev_attr->attr.name = "value"; pull_dev_attr->attr.name = "pull"; val_dev_attr->attr.mode = pull_dev_attr->attr.mode = 0644; val_dev_attr->show = gpio_sim_sysfs_val_show; val_dev_attr->store = gpio_sim_sysfs_val_store; pull_dev_attr->show = gpio_sim_sysfs_pull_show; pull_dev_attr->store = gpio_sim_sysfs_pull_store; attrs[0] = &val_dev_attr->attr; attrs[1] = &pull_dev_attr->attr; attr_group->attrs = attrs; chip->attr_groups[i] = attr_group; } ret = sysfs_create_groups(&chip->gc.gpiodev->dev.kobj, chip->attr_groups); if (ret) return ret; return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip); } static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) { struct gpio_sim_chip *chip; struct gpio_chip *gc; const char *label; u32 num_lines; int ret; ret = fwnode_property_read_u32(swnode, "ngpios", &num_lines); if (ret) return ret; if (num_lines > GPIO_SIM_NGPIO_MAX) return -ERANGE; ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label); if (ret) { label = devm_kasprintf(dev, GFP_KERNEL, 
"%s-%pfwP", dev_name(dev), swnode); if (!label) return -ENOMEM; } chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->direction_map = devm_bitmap_alloc(dev, num_lines, GFP_KERNEL); if (!chip->direction_map) return -ENOMEM; /* Default to input mode. */ bitmap_fill(chip->direction_map, num_lines); chip->value_map = devm_bitmap_zalloc(dev, num_lines, GFP_KERNEL); if (!chip->value_map) return -ENOMEM; chip->pull_map = devm_bitmap_zalloc(dev, num_lines, GFP_KERNEL); if (!chip->pull_map) return -ENOMEM; chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines); if (IS_ERR(chip->irq_sim)) return PTR_ERR(chip->irq_sim); ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip); if (ret) return ret; mutex_init(&chip->lock); ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy, &chip->lock); if (ret) return ret; gc = &chip->gc; gc->base = -1; gc->ngpio = num_lines; gc->label = label; gc->owner = THIS_MODULE; gc->parent = dev; gc->fwnode = swnode; gc->get = gpio_sim_get; gc->set = gpio_sim_set; gc->get_multiple = gpio_sim_get_multiple; gc->set_multiple = gpio_sim_set_multiple; gc->direction_output = gpio_sim_direction_output; gc->direction_input = gpio_sim_direction_input; gc->get_direction = gpio_sim_get_direction; gc->set_config = gpio_sim_set_config; gc->to_irq = gpio_sim_to_irq; gc->free = gpio_sim_free; gc->can_sleep = true; ret = devm_gpiochip_add_data(dev, gc, chip); if (ret) return ret; /* Used by sysfs and configfs callbacks. */ dev_set_drvdata(&gc->gpiodev->dev, chip); return gpio_sim_setup_sysfs(chip); } static int gpio_sim_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fwnode_handle *swnode; int ret; device_for_each_child_node(dev, swnode) { ret = gpio_sim_add_bank(swnode, dev); if (ret) { fwnode_handle_put(swnode); return ret; } } return 0; } static const struct of_device_id gpio_sim_of_match[] = { { .compatible = "gpio-simulator" }, { } }; MODULE_DEVICE_TABLE(of, gpio_sim_of_match); static struct platform_driver gpio_sim_driver = { .driver = { .name = "gpio-sim", .of_match_table = gpio_sim_of_match, }, .probe = gpio_sim_probe, }; struct gpio_sim_device { struct config_group group; /* * If pdev is NULL, the device is 'pending' (waiting for configuration). * Once the pointer is assigned, the device has been created and the * item is 'live'. */ struct platform_device *pdev; int id; /* * Each configfs filesystem operation is protected with the subsystem * mutex. Each separate attribute is protected with the buffer mutex. * This structure however can be modified by callbacks of different * attributes so we need another lock. * * We use this lock for protecting all data structures owned by this * object too. */ struct mutex lock; /* * This is used to synchronously wait for the driver's probe to complete * and notify the user-space about any errors. */ struct notifier_block bus_notifier; struct completion probe_completion; bool driver_bound; struct gpiod_hog *hogs; struct list_head bank_list; }; /* This is called with dev->lock already taken. 
*/ static int gpio_sim_bus_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct gpio_sim_device *simdev = container_of(nb, struct gpio_sim_device, bus_notifier); struct device *dev = data; char devname[32]; snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id); if (strcmp(dev_name(dev), devname) == 0) { if (action == BUS_NOTIFY_BOUND_DRIVER) simdev->driver_bound = true; else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND) simdev->driver_bound = false; else return NOTIFY_DONE; complete(&simdev->probe_completion); return NOTIFY_OK; } return NOTIFY_DONE; } static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item) { struct config_group *group = to_config_group(item); return container_of(group, struct gpio_sim_device, group); } struct gpio_sim_bank { struct config_group group; /* * We could have used the ci_parent field of the config_item but * configfs is stupid and calls the item's release callback after * already having cleared the parent pointer even though the parent * is guaranteed to survive the child... * * So we need to store the pointer to the parent struct here. We can * dereference it anywhere we need with no checks and no locking as * it's guaranteed to survive the children and protected by configfs * locks. * * Same for other structures. */ struct gpio_sim_device *parent; struct list_head siblings; char *label; unsigned int num_lines; struct list_head line_list; struct fwnode_handle *swnode; }; static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item) { struct config_group *group = to_config_group(item); return container_of(group, struct gpio_sim_bank, group); } static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank) { return bank->label && *bank->label; } static struct gpio_sim_device * gpio_sim_bank_get_device(struct gpio_sim_bank *bank) { return bank->parent; } struct gpio_sim_hog; struct gpio_sim_line { struct config_group group; struct gpio_sim_bank *parent; struct list_head siblings; unsigned int offset; char *name; /* There can only be one hog per line. 
*/ struct gpio_sim_hog *hog; }; static struct gpio_sim_line *to_gpio_sim_line(struct config_item *item) { struct config_group *group = to_config_group(item); return container_of(group, struct gpio_sim_line, group); } static struct gpio_sim_device * gpio_sim_line_get_device(struct gpio_sim_line *line) { struct gpio_sim_bank *bank = line->parent; return gpio_sim_bank_get_device(bank); } struct gpio_sim_hog { struct config_item item; struct gpio_sim_line *parent; char *name; int dir; }; static struct gpio_sim_hog *to_gpio_sim_hog(struct config_item *item) { return container_of(item, struct gpio_sim_hog, item); } static struct gpio_sim_device *gpio_sim_hog_get_device(struct gpio_sim_hog *hog) { struct gpio_sim_line *line = hog->parent; return gpio_sim_line_get_device(line); } static bool gpio_sim_device_is_live_unlocked(struct gpio_sim_device *dev) { return !!dev->pdev; } static char *gpio_sim_strdup_trimmed(const char *str, size_t count) { char *trimmed; trimmed = kstrndup(skip_spaces(str), count, GFP_KERNEL); if (!trimmed) return NULL; return strim(trimmed); } static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item, char *page) { struct gpio_sim_device *dev = to_gpio_sim_device(item); struct platform_device *pdev; guard(mutex)(&dev->lock); pdev = dev->pdev; if (pdev) return sprintf(page, "%s\n", dev_name(&pdev->dev)); return sprintf(page, "gpio-sim.%d\n", dev->id); } CONFIGFS_ATTR_RO(gpio_sim_device_config_, dev_name); static ssize_t gpio_sim_device_config_live_show(struct config_item *item, char *page) { struct gpio_sim_device *dev = to_gpio_sim_device(item); bool live; scoped_guard(mutex, &dev->lock) live = gpio_sim_device_is_live_unlocked(dev); return sprintf(page, "%c\n", live ? '1' : '0'); } static unsigned int gpio_sim_get_line_names_size(struct gpio_sim_bank *bank) { struct gpio_sim_line *line; unsigned int size = 0; list_for_each_entry(line, &bank->line_list, siblings) { if (!line->name || (line->offset >= bank->num_lines)) continue; size = max(size, line->offset + 1); } return size; } static void gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names) { struct gpio_sim_line *line; list_for_each_entry(line, &bank->line_list, siblings) { if (!line->name || (line->offset >= bank->num_lines)) continue; line_names[line->offset] = line->name; } } static void gpio_sim_remove_hogs(struct gpio_sim_device *dev) { struct gpiod_hog *hog; if (!dev->hogs) return; gpiod_remove_hogs(dev->hogs); for (hog = dev->hogs; hog->chip_label; hog++) { kfree(hog->chip_label); kfree(hog->line_name); } kfree(dev->hogs); dev->hogs = NULL; } static int gpio_sim_add_hogs(struct gpio_sim_device *dev) { unsigned int num_hogs = 0, idx = 0; struct gpio_sim_bank *bank; struct gpio_sim_line *line; struct gpiod_hog *hog; list_for_each_entry(bank, &dev->bank_list, siblings) { list_for_each_entry(line, &bank->line_list, siblings) { if (line->offset >= bank->num_lines) continue; if (line->hog) num_hogs++; } } if (!num_hogs) return 0; /* Allocate one more for the sentinel. */ dev->hogs = kcalloc(num_hogs + 1, sizeof(*dev->hogs), GFP_KERNEL); if (!dev->hogs) return -ENOMEM; list_for_each_entry(bank, &dev->bank_list, siblings) { list_for_each_entry(line, &bank->line_list, siblings) { if (line->offset >= bank->num_lines) continue; if (!line->hog) continue; hog = &dev->hogs[idx++]; /* * We need to make this string manually because at this * point the device doesn't exist yet and so dev_name() * is not available. 
*/ if (gpio_sim_bank_has_label(bank)) hog->chip_label = kstrdup(bank->label, GFP_KERNEL); else hog->chip_label = kasprintf(GFP_KERNEL, "gpio-sim.%u-%pfwP", dev->id, bank->swnode); if (!hog->chip_label) { gpio_sim_remove_hogs(dev); return -ENOMEM; } /* * We need to duplicate this because the hog config * item can be removed at any time (and we can't block * it) and gpiolib doesn't make a deep copy of the hog * data. */ if (line->hog->name) { hog->line_name = kstrdup(line->hog->name, GFP_KERNEL); if (!hog->line_name) { gpio_sim_remove_hogs(dev); return -ENOMEM; } } hog->chip_hwnum = line->offset; hog->dflags = line->hog->dir; } } gpiod_add_hogs(dev->hogs); return 0; } static struct fwnode_handle * gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank, struct fwnode_handle *parent) { struct property_entry properties[GPIO_SIM_PROP_MAX]; unsigned int prop_idx = 0, line_names_size; char **line_names __free(kfree) = NULL; memset(properties, 0, sizeof(properties)); properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines); if (gpio_sim_bank_has_label(bank)) properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label", bank->label); line_names_size = gpio_sim_get_line_names_size(bank); if (line_names_size) { line_names = kcalloc(line_names_size, sizeof(*line_names), GFP_KERNEL); if (!line_names) return ERR_PTR(-ENOMEM); gpio_sim_set_line_names(bank, line_names); properties[prop_idx++] = PROPERTY_ENTRY_STRING_ARRAY_LEN( "gpio-line-names", line_names, line_names_size); } return fwnode_create_software_node(properties, parent); } static void gpio_sim_remove_swnode_recursive(struct fwnode_handle *swnode) { struct fwnode_handle *child; fwnode_for_each_child_node(swnode, child) fwnode_remove_software_node(child); fwnode_remove_software_node(swnode); } static bool gpio_sim_bank_labels_non_unique(struct gpio_sim_device *dev) { struct gpio_sim_bank *this, *pos; list_for_each_entry(this, &dev->bank_list, siblings) { list_for_each_entry(pos, &dev->bank_list, siblings) { if (this == pos || (!this->label || !pos->label)) continue; if (strcmp(this->label, pos->label) == 0) return true; } } return false; } static int gpio_sim_device_activate_unlocked(struct gpio_sim_device *dev) { struct platform_device_info pdevinfo; struct fwnode_handle *swnode; struct platform_device *pdev; struct gpio_sim_bank *bank; int ret; if (list_empty(&dev->bank_list)) return -ENODATA; /* * Non-unique GPIO device labels are a corner-case we don't support * as it would interfere with machine hogging mechanism and has little * use in real life. 
*/ if (gpio_sim_bank_labels_non_unique(dev)) return -EINVAL; memset(&pdevinfo, 0, sizeof(pdevinfo)); swnode = fwnode_create_software_node(NULL, NULL); if (IS_ERR(swnode)) return PTR_ERR(swnode); list_for_each_entry(bank, &dev->bank_list, siblings) { bank->swnode = gpio_sim_make_bank_swnode(bank, swnode); if (IS_ERR(bank->swnode)) { ret = PTR_ERR(bank->swnode); gpio_sim_remove_swnode_recursive(swnode); return ret; } } ret = gpio_sim_add_hogs(dev); if (ret) { gpio_sim_remove_swnode_recursive(swnode); return ret; } pdevinfo.name = "gpio-sim"; pdevinfo.fwnode = swnode; pdevinfo.id = dev->id; reinit_completion(&dev->probe_completion); dev->driver_bound = false; bus_register_notifier(&platform_bus_type, &dev->bus_notifier); pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) { bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier); gpio_sim_remove_hogs(dev); gpio_sim_remove_swnode_recursive(swnode); return PTR_ERR(pdev); } wait_for_completion(&dev->probe_completion); bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier); if (!dev->driver_bound) { /* Probe failed, check kernel log. */ platform_device_unregister(pdev); gpio_sim_remove_hogs(dev); gpio_sim_remove_swnode_recursive(swnode); return -ENXIO; } dev->pdev = pdev; return 0; } static void gpio_sim_device_deactivate_unlocked(struct gpio_sim_device *dev) { struct fwnode_handle *swnode; swnode = dev_fwnode(&dev->pdev->dev); platform_device_unregister(dev->pdev); gpio_sim_remove_hogs(dev); gpio_sim_remove_swnode_recursive(swnode); dev->pdev = NULL; } static ssize_t gpio_sim_device_config_live_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_device *dev = to_gpio_sim_device(item); bool live; int ret; ret = kstrtobool(page, &live); if (ret) return ret; guard(mutex)(&dev->lock); if (live == gpio_sim_device_is_live_unlocked(dev)) ret = -EPERM; else if (live) ret = gpio_sim_device_activate_unlocked(dev); else gpio_sim_device_deactivate_unlocked(dev); return ret ?: count; } CONFIGFS_ATTR(gpio_sim_device_config_, live); static struct configfs_attribute *gpio_sim_device_config_attrs[] = { &gpio_sim_device_config_attr_dev_name, &gpio_sim_device_config_attr_live, NULL }; struct gpio_sim_chip_name_ctx { struct fwnode_handle *swnode; char *page; }; static int gpio_sim_emit_chip_name(struct device *dev, void *data) { struct gpio_sim_chip_name_ctx *ctx = data; /* This would be the sysfs device exported in /sys/class/gpio. 
*/ if (dev->class) return 0; if (device_match_fwnode(dev, ctx->swnode)) return sprintf(ctx->page, "%s\n", dev_name(dev)); return 0; } static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item, char *page) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page }; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return device_for_each_child(&dev->pdev->dev, &ctx, gpio_sim_emit_chip_name); return sprintf(page, "none\n"); } CONFIGFS_ATTR_RO(gpio_sim_bank_config_, chip_name); static ssize_t gpio_sim_bank_config_label_show(struct config_item *item, char *page) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); guard(mutex)(&dev->lock); return sprintf(page, "%s\n", bank->label ?: ""); } static ssize_t gpio_sim_bank_config_label_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); char *trimmed; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return -EBUSY; trimmed = gpio_sim_strdup_trimmed(page, count); if (!trimmed) return -ENOMEM; kfree(bank->label); bank->label = trimmed; return count; } CONFIGFS_ATTR(gpio_sim_bank_config_, label); static ssize_t gpio_sim_bank_config_num_lines_show(struct config_item *item, char *page) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); guard(mutex)(&dev->lock); return sprintf(page, "%u\n", bank->num_lines); } static ssize_t gpio_sim_bank_config_num_lines_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); unsigned int num_lines; int ret; ret = kstrtouint(page, 0, &num_lines); if (ret) return ret; if (num_lines == 0) return -EINVAL; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return -EBUSY; bank->num_lines = num_lines; return count; } CONFIGFS_ATTR(gpio_sim_bank_config_, num_lines); static struct configfs_attribute *gpio_sim_bank_config_attrs[] = { &gpio_sim_bank_config_attr_chip_name, &gpio_sim_bank_config_attr_label, &gpio_sim_bank_config_attr_num_lines, NULL }; static ssize_t gpio_sim_line_config_name_show(struct config_item *item, char *page) { struct gpio_sim_line *line = to_gpio_sim_line(item); struct gpio_sim_device *dev = gpio_sim_line_get_device(line); guard(mutex)(&dev->lock); return sprintf(page, "%s\n", line->name ?: ""); } static ssize_t gpio_sim_line_config_name_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_line *line = to_gpio_sim_line(item); struct gpio_sim_device *dev = gpio_sim_line_get_device(line); char *trimmed; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return -EBUSY; trimmed = gpio_sim_strdup_trimmed(page, count); if (!trimmed) return -ENOMEM; kfree(line->name); line->name = trimmed; return count; } CONFIGFS_ATTR(gpio_sim_line_config_, name); static struct configfs_attribute *gpio_sim_line_config_attrs[] = { &gpio_sim_line_config_attr_name, NULL }; static ssize_t gpio_sim_hog_config_name_show(struct config_item *item, char *page) { struct gpio_sim_hog *hog = to_gpio_sim_hog(item); struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog); guard(mutex)(&dev->lock); return sprintf(page, "%s\n", 
hog->name ?: ""); } static ssize_t gpio_sim_hog_config_name_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_hog *hog = to_gpio_sim_hog(item); struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog); char *trimmed; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return -EBUSY; trimmed = gpio_sim_strdup_trimmed(page, count); if (!trimmed) return -ENOMEM; kfree(hog->name); hog->name = trimmed; return count; } CONFIGFS_ATTR(gpio_sim_hog_config_, name); static ssize_t gpio_sim_hog_config_direction_show(struct config_item *item, char *page) { struct gpio_sim_hog *hog = to_gpio_sim_hog(item); struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog); char *repr; int dir; scoped_guard(mutex, &dev->lock) dir = hog->dir; switch (dir) { case GPIOD_IN: repr = "input"; break; case GPIOD_OUT_HIGH: repr = "output-high"; break; case GPIOD_OUT_LOW: repr = "output-low"; break; default: /* This would be a programmer bug. */ WARN(1, "Unexpected hog direction value: %d", dir); return -EINVAL; } return sprintf(page, "%s\n", repr); } static ssize_t gpio_sim_hog_config_direction_store(struct config_item *item, const char *page, size_t count) { struct gpio_sim_hog *hog = to_gpio_sim_hog(item); struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog); int dir; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return -EBUSY; if (sysfs_streq(page, "input")) dir = GPIOD_IN; else if (sysfs_streq(page, "output-high")) dir = GPIOD_OUT_HIGH; else if (sysfs_streq(page, "output-low")) dir = GPIOD_OUT_LOW; else return -EINVAL; hog->dir = dir; return count; } CONFIGFS_ATTR(gpio_sim_hog_config_, direction); static struct configfs_attribute *gpio_sim_hog_config_attrs[] = { &gpio_sim_hog_config_attr_name, &gpio_sim_hog_config_attr_direction, NULL }; static void gpio_sim_hog_config_item_release(struct config_item *item) { struct gpio_sim_hog *hog = to_gpio_sim_hog(item); struct gpio_sim_line *line = hog->parent; struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog); scoped_guard(mutex, &dev->lock) line->hog = NULL; kfree(hog->name); kfree(hog); } static struct configfs_item_operations gpio_sim_hog_config_item_ops = { .release = gpio_sim_hog_config_item_release, }; static const struct config_item_type gpio_sim_hog_config_type = { .ct_item_ops = &gpio_sim_hog_config_item_ops, .ct_attrs = gpio_sim_hog_config_attrs, .ct_owner = THIS_MODULE, }; static struct config_item * gpio_sim_line_config_make_hog_item(struct config_group *group, const char *name) { struct gpio_sim_line *line = to_gpio_sim_line(&group->cg_item); struct gpio_sim_device *dev = gpio_sim_line_get_device(line); struct gpio_sim_hog *hog; if (strcmp(name, "hog") != 0) return ERR_PTR(-EINVAL); guard(mutex)(&dev->lock); hog = kzalloc(sizeof(*hog), GFP_KERNEL); if (!hog) return ERR_PTR(-ENOMEM); config_item_init_type_name(&hog->item, name, &gpio_sim_hog_config_type); hog->dir = GPIOD_IN; hog->name = NULL; hog->parent = line; line->hog = hog; return &hog->item; } static void gpio_sim_line_config_group_release(struct config_item *item) { struct gpio_sim_line *line = to_gpio_sim_line(item); struct gpio_sim_device *dev = gpio_sim_line_get_device(line); scoped_guard(mutex, &dev->lock) list_del(&line->siblings); kfree(line->name); kfree(line); } static struct configfs_item_operations gpio_sim_line_config_item_ops = { .release = gpio_sim_line_config_group_release, }; static struct configfs_group_operations gpio_sim_line_config_group_ops = { .make_item = gpio_sim_line_config_make_hog_item, }; 
static const struct config_item_type gpio_sim_line_config_type = { .ct_item_ops = &gpio_sim_line_config_item_ops, .ct_group_ops = &gpio_sim_line_config_group_ops, .ct_attrs = gpio_sim_line_config_attrs, .ct_owner = THIS_MODULE, }; static struct config_group * gpio_sim_bank_config_make_line_group(struct config_group *group, const char *name) { struct gpio_sim_bank *bank = to_gpio_sim_bank(&group->cg_item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); struct gpio_sim_line *line; unsigned int offset; int ret, nchar; ret = sscanf(name, "line%u%n", &offset, &nchar); if (ret != 1 || nchar != strlen(name)) return ERR_PTR(-EINVAL); guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return ERR_PTR(-EBUSY); line = kzalloc(sizeof(*line), GFP_KERNEL); if (!line) return ERR_PTR(-ENOMEM); config_group_init_type_name(&line->group, name, &gpio_sim_line_config_type); line->parent = bank; line->offset = offset; list_add_tail(&line->siblings, &bank->line_list); return &line->group; } static void gpio_sim_bank_config_group_release(struct config_item *item) { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); scoped_guard(mutex, &dev->lock) list_del(&bank->siblings); kfree(bank->label); kfree(bank); } static struct configfs_item_operations gpio_sim_bank_config_item_ops = { .release = gpio_sim_bank_config_group_release, }; static struct configfs_group_operations gpio_sim_bank_config_group_ops = { .make_group = gpio_sim_bank_config_make_line_group, }; static const struct config_item_type gpio_sim_bank_config_group_type = { .ct_item_ops = &gpio_sim_bank_config_item_ops, .ct_group_ops = &gpio_sim_bank_config_group_ops, .ct_attrs = gpio_sim_bank_config_attrs, .ct_owner = THIS_MODULE, }; static struct config_group * gpio_sim_device_config_make_bank_group(struct config_group *group, const char *name) { struct gpio_sim_device *dev = to_gpio_sim_device(&group->cg_item); struct gpio_sim_bank *bank; guard(mutex)(&dev->lock); if (gpio_sim_device_is_live_unlocked(dev)) return ERR_PTR(-EBUSY); bank = kzalloc(sizeof(*bank), GFP_KERNEL); if (!bank) return ERR_PTR(-ENOMEM); config_group_init_type_name(&bank->group, name, &gpio_sim_bank_config_group_type); bank->num_lines = 1; bank->parent = dev; INIT_LIST_HEAD(&bank->line_list); list_add_tail(&bank->siblings, &dev->bank_list); return &bank->group; } static void gpio_sim_device_config_group_release(struct config_item *item) { struct gpio_sim_device *dev = to_gpio_sim_device(item); scoped_guard(mutex, &dev->lock) { if (gpio_sim_device_is_live_unlocked(dev)) gpio_sim_device_deactivate_unlocked(dev); } mutex_destroy(&dev->lock); ida_free(&gpio_sim_ida, dev->id); kfree(dev); } static struct configfs_item_operations gpio_sim_device_config_item_ops = { .release = gpio_sim_device_config_group_release, }; static struct configfs_group_operations gpio_sim_device_config_group_ops = { .make_group = gpio_sim_device_config_make_bank_group, }; static const struct config_item_type gpio_sim_device_config_group_type = { .ct_item_ops = &gpio_sim_device_config_item_ops, .ct_group_ops = &gpio_sim_device_config_group_ops, .ct_attrs = gpio_sim_device_config_attrs, .ct_owner = THIS_MODULE, }; static struct config_group * gpio_sim_config_make_device_group(struct config_group *group, const char *name) { struct gpio_sim_device *dev __free(kfree) = NULL; int id; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); id = ida_alloc(&gpio_sim_ida, GFP_KERNEL); if (id < 0) return 
ERR_PTR(id); config_group_init_type_name(&dev->group, name, &gpio_sim_device_config_group_type); dev->id = id; mutex_init(&dev->lock); INIT_LIST_HEAD(&dev->bank_list); dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call; init_completion(&dev->probe_completion); return &no_free_ptr(dev)->group; } static struct configfs_group_operations gpio_sim_config_group_ops = { .make_group = gpio_sim_config_make_device_group, }; static const struct config_item_type gpio_sim_config_type = { .ct_group_ops = &gpio_sim_config_group_ops, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem gpio_sim_config_subsys = { .su_group = { .cg_item = { .ci_namebuf = "gpio-sim", .ci_type = &gpio_sim_config_type, }, }, }; static int __init gpio_sim_init(void) { int ret; ret = platform_driver_register(&gpio_sim_driver); if (ret) { pr_err("Error %d while registering the platform driver\n", ret); return ret; } config_group_init(&gpio_sim_config_subsys.su_group); mutex_init(&gpio_sim_config_subsys.su_mutex); ret = configfs_register_subsystem(&gpio_sim_config_subsys); if (ret) { pr_err("Error %d while registering the configfs subsystem %s\n", ret, gpio_sim_config_subsys.su_group.cg_item.ci_namebuf); mutex_destroy(&gpio_sim_config_subsys.su_mutex); platform_driver_unregister(&gpio_sim_driver); return ret; } return 0; } module_init(gpio_sim_init); static void __exit gpio_sim_exit(void) { configfs_unregister_subsystem(&gpio_sim_config_subsys); mutex_destroy(&gpio_sim_config_subsys.su_mutex); platform_driver_unregister(&gpio_sim_driver); } module_exit(gpio_sim_exit); MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>"); MODULE_DESCRIPTION("GPIO Simulator Module"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-sim.c
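gpio-sim exists to be poked from user-space, and the per-line "value" and "pull" sysfs attributes created in gpio_sim_setup_sysfs() are the interface for that. Below is a minimal plain-C sketch of using them, assuming a live gpio-sim instance; the SIM_DIR path is an assumption and depends on the instance id and on which gpiochip number the kernel assigned. Only "pull" is written, since the driver deliberately returns -EPERM for writes to "value".

/* Minimal sketch: flip the simulated pull of line 0 and read the value back.
 * SIM_DIR is illustrative -- the real path depends on the gpio-sim instance
 * id and the assigned gpiochip number.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SIM_DIR "/sys/devices/platform/gpio-sim.0/gpiochip0/sim_gpio0/"

static int write_attr(const char *attr, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), SIM_DIR "%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd;

	/* "pull-up" mirrors PIN_CONFIG_BIAS_PULL_UP in gpio_sim_set_config() */
	if (write_attr("pull", "pull-up"))
		return 1;

	fd = open(SIM_DIR "value", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';

	/* expect "1" unless a consumer has requested the line as output */
	printf("value: %s", buf);
	return 0;
}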
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO Driver for Loongson 1 SoC * * Copyright (C) 2015-2023 Keguang Zhang <[email protected]> */ #include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/bitops.h> /* Loongson 1 GPIO Register Definitions */ #define GPIO_CFG 0x0 #define GPIO_DIR 0x10 #define GPIO_DATA 0x20 #define GPIO_OUTPUT 0x30 struct ls1x_gpio_chip { struct gpio_chip gc; void __iomem *reg_base; }; static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset) { struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc); unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); __raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset), ls1x_gc->reg_base + GPIO_CFG); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); return 0; } static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset) { struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc); unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); __raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset), ls1x_gc->reg_base + GPIO_CFG); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static int ls1x_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ls1x_gpio_chip *ls1x_gc; int ret; ls1x_gc = devm_kzalloc(dev, sizeof(*ls1x_gc), GFP_KERNEL); if (!ls1x_gc) return -ENOMEM; ls1x_gc->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ls1x_gc->reg_base)) return PTR_ERR(ls1x_gc->reg_base); ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA, ls1x_gc->reg_base + GPIO_OUTPUT, NULL, NULL, ls1x_gc->reg_base + GPIO_DIR, 0); if (ret) goto err; ls1x_gc->gc.owner = THIS_MODULE; ls1x_gc->gc.request = ls1x_gpio_request; ls1x_gc->gc.free = ls1x_gpio_free; /* * Clear ngpio to let gpiolib get the correct number * by reading ngpios property */ ls1x_gc->gc.ngpio = 0; ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc); if (ret) goto err; platform_set_drvdata(pdev, ls1x_gc); dev_info(dev, "GPIO controller registered with %d pins\n", ls1x_gc->gc.ngpio); return 0; err: dev_err(dev, "failed to register GPIO controller\n"); return ret; } static const struct of_device_id ls1x_gpio_dt_ids[] = { { .compatible = "loongson,ls1x-gpio" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ls1x_gpio_dt_ids); static struct platform_driver ls1x_gpio_driver = { .probe = ls1x_gpio_probe, .driver = { .name = "ls1x-gpio", .of_match_table = ls1x_gpio_dt_ids, }, }; module_platform_driver(ls1x_gpio_driver); MODULE_AUTHOR("Keguang Zhang <[email protected]>"); MODULE_DESCRIPTION("Loongson1 GPIO driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-loongson1.c
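The one piece of logic this driver layers on top of the generic-MMIO helper (bgpio_init) is the locked read-modify-write of GPIO_CFG in the request()/free() callbacks, which muxes a pin to or away from the GPIO function. Here is a plain user-space model of that pattern, with stated stand-ins: a pthread mutex in place of the chip's raw spinlock and an ordinary variable in place of the memory-mapped register.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static uint32_t gpio_cfg;	/* stand-in for the GPIO_CFG register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for bgpio_lock */

/* mirrors ls1x_gpio_request(): route the pin to the GPIO function */
static void ls1x_request_model(unsigned int offset)
{
	pthread_mutex_lock(&lock);
	gpio_cfg |= BIT(offset);
	pthread_mutex_unlock(&lock);
}

/* mirrors ls1x_gpio_free(): hand the pin back to its primary function */
static void ls1x_free_model(unsigned int offset)
{
	pthread_mutex_lock(&lock);
	gpio_cfg &= ~BIT(offset);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	ls1x_request_model(3);
	assert(gpio_cfg == BIT(3));
	ls1x_free_model(3);
	assert(gpio_cfg == 0);
	return 0;
}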
// SPDX-License-Identifier: GPL-2.0-only /* * regmap based generic GPIO driver * * Copyright 2020 Michael Walle <[email protected]> */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/gpio/driver.h> #include <linux/gpio/regmap.h> struct gpio_regmap { struct device *parent; struct regmap *regmap; struct gpio_chip gpio_chip; int reg_stride; int ngpio_per_reg; unsigned int reg_dat_base; unsigned int reg_set_base; unsigned int reg_clr_base; unsigned int reg_dir_in_base; unsigned int reg_dir_out_base; int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, unsigned int *mask); void *driver_data; }; static unsigned int gpio_regmap_addr(unsigned int addr) { if (addr == GPIO_REGMAP_ADDR_ZERO) return 0; return addr; } static int gpio_regmap_simple_xlate(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, unsigned int *mask) { unsigned int line = offset % gpio->ngpio_per_reg; unsigned int stride = offset / gpio->ngpio_per_reg; *reg = base + stride * gpio->reg_stride; *mask = BIT(line); return 0; } static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset) { struct gpio_regmap *gpio = gpiochip_get_data(chip); unsigned int base, val, reg, mask; int ret; /* we might not have an output register if we are input only */ if (gpio->reg_dat_base) base = gpio_regmap_addr(gpio->reg_dat_base); else base = gpio_regmap_addr(gpio->reg_set_base); ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask); if (ret) return ret; ret = regmap_read(gpio->regmap, reg, &val); if (ret) return ret; return !!(val & mask); } static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset, int val) { struct gpio_regmap *gpio = gpiochip_get_data(chip); unsigned int base = gpio_regmap_addr(gpio->reg_set_base); unsigned int reg, mask; gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask); if (val) regmap_update_bits(gpio->regmap, reg, mask, mask); else regmap_update_bits(gpio->regmap, reg, mask, 0); } static void gpio_regmap_set_with_clear(struct gpio_chip *chip, unsigned int offset, int val) { struct gpio_regmap *gpio = gpiochip_get_data(chip); unsigned int base, reg, mask; if (val) base = gpio_regmap_addr(gpio->reg_set_base); else base = gpio_regmap_addr(gpio->reg_clr_base); gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask); regmap_write(gpio->regmap, reg, mask); } static int gpio_regmap_get_direction(struct gpio_chip *chip, unsigned int offset) { struct gpio_regmap *gpio = gpiochip_get_data(chip); unsigned int base, val, reg, mask; int invert, ret; if (gpio->reg_dat_base && !gpio->reg_set_base) return GPIO_LINE_DIRECTION_IN; if (gpio->reg_set_base && !gpio->reg_dat_base) return GPIO_LINE_DIRECTION_OUT; if (gpio->reg_dir_out_base) { base = gpio_regmap_addr(gpio->reg_dir_out_base); invert = 0; } else if (gpio->reg_dir_in_base) { base = gpio_regmap_addr(gpio->reg_dir_in_base); invert = 1; } else { return -EOPNOTSUPP; } ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask); if (ret) return ret; ret = regmap_read(gpio->regmap, reg, &val); if (ret) return ret; if (!!(val & mask) ^ invert) return GPIO_LINE_DIRECTION_OUT; else return GPIO_LINE_DIRECTION_IN; } static int gpio_regmap_set_direction(struct gpio_chip *chip, unsigned int offset, bool output) { struct gpio_regmap *gpio = gpiochip_get_data(chip); unsigned int base, val, reg, mask; int invert, ret; if 
(gpio->reg_dir_out_base) { base = gpio_regmap_addr(gpio->reg_dir_out_base); invert = 0; } else if (gpio->reg_dir_in_base) { base = gpio_regmap_addr(gpio->reg_dir_in_base); invert = 1; } else { return -EOPNOTSUPP; } ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask); if (ret) return ret; if (invert) val = output ? 0 : mask; else val = output ? mask : 0; return regmap_update_bits(gpio->regmap, reg, mask, val); } static int gpio_regmap_direction_input(struct gpio_chip *chip, unsigned int offset) { return gpio_regmap_set_direction(chip, offset, false); } static int gpio_regmap_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { gpio_regmap_set(chip, offset, value); return gpio_regmap_set_direction(chip, offset, true); } void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio) { return gpio->driver_data; } EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata); /** * gpio_regmap_register() - Register a generic regmap GPIO controller * @config: configuration for gpio_regmap * * Return: A pointer to the registered gpio_regmap or ERR_PTR error value. */ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config) { struct gpio_regmap *gpio; struct gpio_chip *chip; int ret; if (!config->parent) return ERR_PTR(-EINVAL); if (!config->ngpio) return ERR_PTR(-EINVAL); /* we need at least one */ if (!config->reg_dat_base && !config->reg_set_base) return ERR_PTR(-EINVAL); /* if we have a direction register we need both input and output */ if ((config->reg_dir_out_base || config->reg_dir_in_base) && (!config->reg_dat_base || !config->reg_set_base)) return ERR_PTR(-EINVAL); /* we don't support having both registers simultaneously for now */ if (config->reg_dir_out_base && config->reg_dir_in_base) return ERR_PTR(-EINVAL); gpio = kzalloc(sizeof(*gpio), GFP_KERNEL); if (!gpio) return ERR_PTR(-ENOMEM); gpio->parent = config->parent; gpio->driver_data = config->drvdata; gpio->regmap = config->regmap; gpio->ngpio_per_reg = config->ngpio_per_reg; gpio->reg_stride = config->reg_stride; gpio->reg_mask_xlate = config->reg_mask_xlate; gpio->reg_dat_base = config->reg_dat_base; gpio->reg_set_base = config->reg_set_base; gpio->reg_clr_base = config->reg_clr_base; gpio->reg_dir_in_base = config->reg_dir_in_base; gpio->reg_dir_out_base = config->reg_dir_out_base; /* if not set, assume there is only one register */ if (!gpio->ngpio_per_reg) gpio->ngpio_per_reg = config->ngpio; /* if not set, assume they are consecutive */ if (!gpio->reg_stride) gpio->reg_stride = 1; if (!gpio->reg_mask_xlate) gpio->reg_mask_xlate = gpio_regmap_simple_xlate; chip = &gpio->gpio_chip; chip->parent = config->parent; chip->fwnode = config->fwnode; chip->base = -1; chip->ngpio = config->ngpio; chip->names = config->names; chip->label = config->label ?: dev_name(config->parent); chip->can_sleep = regmap_might_sleep(config->regmap); chip->get = gpio_regmap_get; if (gpio->reg_set_base && gpio->reg_clr_base) chip->set = gpio_regmap_set_with_clear; else if (gpio->reg_set_base) chip->set = gpio_regmap_set; chip->get_direction = gpio_regmap_get_direction; if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) { chip->direction_input = gpio_regmap_direction_input; chip->direction_output = gpio_regmap_direction_output; } ret = gpiochip_add_data(chip, gpio); if (ret < 0) goto err_free_gpio; if (config->irq_domain) { ret = gpiochip_irqchip_add_domain(chip, config->irq_domain); if (ret) goto err_remove_gpiochip; } return gpio; err_remove_gpiochip: gpiochip_remove(chip); err_free_gpio: kfree(gpio); return ERR_PTR(ret); } 
EXPORT_SYMBOL_GPL(gpio_regmap_register); /** * gpio_regmap_unregister() - Unregister a generic regmap GPIO controller * @gpio: gpio_regmap device to unregister */ void gpio_regmap_unregister(struct gpio_regmap *gpio) { gpiochip_remove(&gpio->gpio_chip); kfree(gpio); } EXPORT_SYMBOL_GPL(gpio_regmap_unregister); static void devm_gpio_regmap_unregister(void *res) { gpio_regmap_unregister(res); } /** * devm_gpio_regmap_register() - resource managed gpio_regmap_register() * @dev: device that is registering this GPIO device * @config: configuration for gpio_regmap * * Managed gpio_regmap_register(). For generic regmap GPIO device registered by * this function, gpio_regmap_unregister() is automatically called on driver * detach. See gpio_regmap_register() for more information. * * Return: A pointer to the registered gpio_regmap or ERR_PTR error value. */ struct gpio_regmap *devm_gpio_regmap_register(struct device *dev, const struct gpio_regmap_config *config) { struct gpio_regmap *gpio; int ret; gpio = gpio_regmap_register(config); if (IS_ERR(gpio)) return gpio; ret = devm_add_action_or_reset(dev, devm_gpio_regmap_unregister, gpio); if (ret) return ERR_PTR(ret); return gpio; } EXPORT_SYMBOL_GPL(devm_gpio_regmap_register); MODULE_AUTHOR("Michael Walle <[email protected]>"); MODULE_DESCRIPTION("GPIO generic regmap driver core"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-regmap.c
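Most of gpio-regmap's flexibility comes down to the default offset-to-register translation: the line offset is split into a bit position within a register and a register index scaled by reg_stride. The self-contained check below mirrors the arithmetic of gpio_regmap_simple_xlate(); the base address, stride and lines-per-register values are made up for illustration.

#include <assert.h>
#include <stdio.h>

/* Mirrors the default offset -> (reg, mask) translation. */
static void simple_xlate(unsigned int base, unsigned int offset,
			 unsigned int ngpio_per_reg, unsigned int reg_stride,
			 unsigned int *reg, unsigned int *mask)
{
	unsigned int line = offset % ngpio_per_reg;
	unsigned int stride = offset / ngpio_per_reg;

	*reg = base + stride * reg_stride;
	*mask = 1u << line;
}

int main(void)
{
	unsigned int reg, mask;

	/* line 10 of a bank with 8 lines per register and a stride of 4 */
	simple_xlate(0x20, 10, 8, 4, &reg, &mask);
	assert(reg == 0x24 && mask == (1u << 2));
	printf("offset 10 -> reg 0x%x, mask 0x%x\n", reg, mask);
	return 0;
}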
// SPDX-License-Identifier: GPL-2.0+ /* * Access to GPOs on TWL6040 chip * * Copyright (C) 2012 Texas Instruments, Inc. * * Authors: * Sergio Aguirre <[email protected]> * Peter Ujfalusi <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/irq.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <linux/of.h> #include <linux/mfd/twl6040.h> static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset) { struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent); int ret = 0; ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL); if (ret < 0) return ret; return !!(ret & BIT(offset)); } static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset) { return GPIO_LINE_DIRECTION_OUT; } static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset, int value) { /* This only drives GPOs, and can't change direction */ return 0; } static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value) { struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent); int ret; u8 gpoctl; ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL); if (ret < 0) return; if (value) gpoctl = ret | BIT(offset); else gpoctl = ret & ~BIT(offset); twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl); } static struct gpio_chip twl6040gpo_chip = { .label = "twl6040", .owner = THIS_MODULE, .get = twl6040gpo_get, .direction_output = twl6040gpo_direction_out, .get_direction = twl6040gpo_get_direction, .set = twl6040gpo_set, .can_sleep = true, }; /*----------------------------------------------------------------------*/ static int gpo_twl6040_probe(struct platform_device *pdev) { struct device *twl6040_core_dev = pdev->dev.parent; struct twl6040 *twl6040 = dev_get_drvdata(twl6040_core_dev); int ret; device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent)); twl6040gpo_chip.base = -1; if (twl6040_get_revid(twl6040) < TWL6041_REV_ES2_0) twl6040gpo_chip.ngpio = 3; /* twl6040 has 3 GPOs */ else twl6040gpo_chip.ngpio = 1; /* twl6041 has 1 GPO */ twl6040gpo_chip.parent = &pdev->dev; ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL); if (ret < 0) { dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret); twl6040gpo_chip.ngpio = 0; } return ret; } /* Note: this hardware lives inside an I2C-based multi-function device. */ MODULE_ALIAS("platform:twl6040-gpo"); static struct platform_driver gpo_twl6040_driver = { .driver = { .name = "twl6040-gpo", }, .probe = gpo_twl6040_probe, }; module_platform_driver(gpo_twl6040_driver); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_DESCRIPTION("GPO interface for TWL6040"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-twl6040.c
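twl6040_reg_read() follows the common kernel convention of packing either the register contents or a negative errno into the same int return value, which is why both twl6040gpo_get() and twl6040gpo_set() test ret < 0 before treating ret as register bits. A small stand-alone model of the set() path built on that convention; the fake register and failure flag are stand-ins for the real I2C accessors.

#include <assert.h>
#include <errno.h>

#define BIT(n) (1 << (n))

static int fake_gpoctl = 0x05;	/* stand-in for TWL6040_REG_GPOCTL */

/* stand-in for twl6040_reg_read(): value on success, -errno on failure */
static int reg_read_model(int fail)
{
	return fail ? -EIO : fake_gpoctl;
}

/* mirrors twl6040gpo_set(): read, check for error, then update one bit */
static int gpo_set_model(unsigned int offset, int value, int fail)
{
	int ret = reg_read_model(fail);

	if (ret < 0)
		return ret;	/* the driver simply bails out here */

	fake_gpoctl = value ? ret | BIT(offset) : ret & ~BIT(offset);
	return 0;
}

int main(void)
{
	assert(gpo_set_model(1, 1, 0) == 0 && fake_gpoctl == 0x07);
	assert(gpo_set_model(0, 0, 0) == 0 && fake_gpoctl == 0x06);
	/* a failed read leaves the register model untouched */
	assert(gpo_set_model(2, 1, 1) == -EIO && fake_gpoctl == 0x06);
	return 0;
}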
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders * * Copyright (C) 2007 David Brownell */ #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/spinlock.h> static const struct i2c_device_id pcf857x_id[] = { { "pcf8574", 8 }, { "pcf8574a", 8 }, { "pca8574", 8 }, { "pca9670", 8 }, { "pca9672", 8 }, { "pca9674", 8 }, { "pcf8575", 16 }, { "pca8575", 16 }, { "pca9671", 16 }, { "pca9673", 16 }, { "pca9675", 16 }, { "max7328", 8 }, { "max7329", 8 }, { } }; MODULE_DEVICE_TABLE(i2c, pcf857x_id); static const struct of_device_id pcf857x_of_table[] = { { .compatible = "nxp,pcf8574", (void *)8 }, { .compatible = "nxp,pcf8574a", (void *)8 }, { .compatible = "nxp,pca8574", (void *)8 }, { .compatible = "nxp,pca9670", (void *)8 }, { .compatible = "nxp,pca9672", (void *)8 }, { .compatible = "nxp,pca9674", (void *)8 }, { .compatible = "nxp,pcf8575", (void *)16 }, { .compatible = "nxp,pca8575", (void *)16 }, { .compatible = "nxp,pca9671", (void *)16 }, { .compatible = "nxp,pca9673", (void *)16 }, { .compatible = "nxp,pca9675", (void *)16 }, { .compatible = "maxim,max7328", (void *)8 }, { .compatible = "maxim,max7329", (void *)8 }, { } }; MODULE_DEVICE_TABLE(of, pcf857x_of_table); /* * The pcf857x, pca857x, and pca967x chips only expose one read and one * write register. Writing a "one" bit (to match the reset state) lets * that pin be used as an input; it's not an open-drain model, but acts * a bit like one. This is described as "quasi-bidirectional"; read the * chip documentation for details. * * Many other I2C GPIO expander chips (like the pca953x models) have * more complex register models and more conventional circuitry using * push/pull drivers. They often use the same 0x20..0x27 addresses as * pcf857x parts, making the "legacy" I2C driver model problematic. */ struct pcf857x { struct gpio_chip chip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ unsigned int out; /* software latch */ unsigned int status; /* current status */ unsigned int irq_enabled; /* enabled irqs */ int (*write)(struct i2c_client *client, unsigned int data); int (*read)(struct i2c_client *client); }; /*-------------------------------------------------------------------------*/ /* Talk to 8-bit I/O expander */ static int i2c_write_le8(struct i2c_client *client, unsigned int data) { return i2c_smbus_write_byte(client, data); } static int i2c_read_le8(struct i2c_client *client) { return i2c_smbus_read_byte(client); } /* Talk to 16-bit I/O expander */ static int i2c_write_le16(struct i2c_client *client, unsigned int word) { u8 buf[2] = { word & 0xff, word >> 8, }; int status; status = i2c_master_send(client, buf, 2); return (status < 0) ? 
status : 0; } static int i2c_read_le16(struct i2c_client *client) { u8 buf[2]; int status; status = i2c_master_recv(client, buf, 2); if (status < 0) return status; return (buf[1] << 8) | buf[0]; } /*-------------------------------------------------------------------------*/ static int pcf857x_input(struct gpio_chip *chip, unsigned int offset) { struct pcf857x *gpio = gpiochip_get_data(chip); int status; mutex_lock(&gpio->lock); gpio->out |= (1 << offset); status = gpio->write(gpio->client, gpio->out); mutex_unlock(&gpio->lock); return status; } static int pcf857x_get(struct gpio_chip *chip, unsigned int offset) { struct pcf857x *gpio = gpiochip_get_data(chip); int value; value = gpio->read(gpio->client); return (value < 0) ? value : !!(value & (1 << offset)); } static int pcf857x_get_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { struct pcf857x *gpio = gpiochip_get_data(chip); int value = gpio->read(gpio->client); if (value < 0) return value; *bits &= ~*mask; *bits |= value & *mask; return 0; } static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value) { struct pcf857x *gpio = gpiochip_get_data(chip); unsigned int bit = 1 << offset; int status; mutex_lock(&gpio->lock); if (value) gpio->out |= bit; else gpio->out &= ~bit; status = gpio->write(gpio->client, gpio->out); mutex_unlock(&gpio->lock); return status; } static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value) { pcf857x_output(chip, offset, value); } static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits) { struct pcf857x *gpio = gpiochip_get_data(chip); mutex_lock(&gpio->lock); gpio->out &= ~*mask; gpio->out |= *bits & *mask; gpio->write(gpio->client, gpio->out); mutex_unlock(&gpio->lock); } /*-------------------------------------------------------------------------*/ static irqreturn_t pcf857x_irq(int irq, void *data) { struct pcf857x *gpio = data; unsigned long change, i, status; status = gpio->read(gpio->client); /* * call the interrupt handler iff gpio is used as * interrupt source, just to avoid bad irqs */ mutex_lock(&gpio->lock); change = (gpio->status ^ status) & gpio->irq_enabled; gpio->status = status; mutex_unlock(&gpio->lock); for_each_set_bit(i, &change, gpio->chip.ngpio) handle_nested_irq(irq_find_mapping(gpio->chip.irq.domain, i)); return IRQ_HANDLED; } /* * NOP functions */ static void noop(struct irq_data *data) { } static int pcf857x_irq_set_wake(struct irq_data *data, unsigned int on) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); return irq_set_irq_wake(gpio->client->irq, on); } static void pcf857x_irq_enable(struct irq_data *data) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); irq_hw_number_t hwirq = irqd_to_hwirq(data); gpiochip_enable_irq(&gpio->chip, hwirq); gpio->irq_enabled |= (1 << hwirq); } static void pcf857x_irq_disable(struct irq_data *data) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); irq_hw_number_t hwirq = irqd_to_hwirq(data); gpio->irq_enabled &= ~(1 << hwirq); gpiochip_disable_irq(&gpio->chip, hwirq); } static void pcf857x_irq_bus_lock(struct irq_data *data) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); mutex_lock(&gpio->lock); } static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); mutex_unlock(&gpio->lock); } static const struct irq_chip pcf857x_irq_chip = { .name = "pcf857x", .irq_enable = pcf857x_irq_enable, .irq_disable = pcf857x_irq_disable, .irq_ack = noop, 
.irq_mask = noop, .irq_unmask = noop, .irq_set_wake = pcf857x_irq_set_wake, .irq_bus_lock = pcf857x_irq_bus_lock, .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; /*-------------------------------------------------------------------------*/ static int pcf857x_probe(struct i2c_client *client) { struct pcf857x *gpio; unsigned int n_latch = 0; int status; device_property_read_u32(&client->dev, "lines-initial-states", &n_latch); /* Allocate, initialize, and register this gpio_chip. */ gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; mutex_init(&gpio->lock); gpio->chip.base = -1; gpio->chip.can_sleep = true; gpio->chip.parent = &client->dev; gpio->chip.owner = THIS_MODULE; gpio->chip.get = pcf857x_get; gpio->chip.get_multiple = pcf857x_get_multiple; gpio->chip.set = pcf857x_set; gpio->chip.set_multiple = pcf857x_set_multiple; gpio->chip.direction_input = pcf857x_input; gpio->chip.direction_output = pcf857x_output; gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client); /* NOTE: the OnSemi jlc1562b is also largely compatible with * these parts, notably for output. It has a low-resolution * DAC instead of pin change IRQs; and its inputs can be the * result of comparators. */ /* 8574 addresses are 0x20..0x27; 8574a uses 0x38..0x3f; * 9670, 9672, 9764, and 9764a use quite a variety. * * NOTE: we don't distinguish here between *4 and *4a parts. */ if (gpio->chip.ngpio == 8) { gpio->write = i2c_write_le8; gpio->read = i2c_read_le8; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) status = -EIO; /* fail if there's no chip present */ else status = i2c_smbus_read_byte(client); /* '75/'75c addresses are 0x20..0x27, just like the '74; * the '75c doesn't have a current source pulling high. * 9671, 9673, and 9765 use quite a variety of addresses. * * NOTE: we don't distinguish here between '75 and '75c parts. */ } else if (gpio->chip.ngpio == 16) { gpio->write = i2c_write_le16; gpio->read = i2c_read_le16; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) status = -EIO; /* fail if there's no chip present */ else status = i2c_read_le16(client); } else { dev_dbg(&client->dev, "unsupported number of gpios\n"); status = -EINVAL; } if (status < 0) goto fail; gpio->chip.label = client->name; gpio->client = client; i2c_set_clientdata(client, gpio); /* NOTE: these chips have strange "quasi-bidirectional" I/O pins. * We can't actually know whether a pin is configured (a) as output * and driving the signal low, or (b) as input and reporting a low * value ... without knowing the last value written since the chip * came out of reset (if any). We can't read the latched output. * * In short, the only reliable solution for setting up pin direction * is to do it explicitly. The setup() method can do that, but it * may cause transient glitching since it can't know the last value * written (some pins may need to be driven low). * * Using n_latch avoids that trouble. When left initialized to zero, * our software copy of the "latch" then matches the chip's all-ones * reset state. Otherwise it flags pins to be driven low. 
*/ gpio->out = ~n_latch; gpio->status = gpio->read(gpio->client); /* Enable irqchip if we have an interrupt */ if (client->irq) { struct gpio_irq_chip *girq; status = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf857x_irq, IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_SHARED, dev_name(&client->dev), gpio); if (status) goto fail; girq = &gpio->chip.irq; gpio_irq_chip_set_chip(girq, &pcf857x_irq_chip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; girq->threaded = true; } status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio); if (status < 0) goto fail; dev_info(&client->dev, "probed\n"); return 0; fail: dev_dbg(&client->dev, "probe error %d for '%s'\n", status, client->name); return status; } static void pcf857x_shutdown(struct i2c_client *client) { struct pcf857x *gpio = i2c_get_clientdata(client); /* Drive all the I/O lines high */ gpio->write(gpio->client, BIT(gpio->chip.ngpio) - 1); } static struct i2c_driver pcf857x_driver = { .driver = { .name = "pcf857x", .of_match_table = pcf857x_of_table, }, .probe = pcf857x_probe, .shutdown = pcf857x_shutdown, .id_table = pcf857x_id, }; static int __init pcf857x_init(void) { return i2c_add_driver(&pcf857x_driver); } /* register after i2c postcore initcall and before * subsys initcalls that may rely on these GPIOs */ subsys_initcall(pcf857x_init); static void __exit pcf857x_exit(void) { i2c_del_driver(&pcf857x_driver); } module_exit(pcf857x_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell");
linux-master
drivers/gpio/gpio-pcf857x.c
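The "quasi-bidirectional" scheme described in the comments above boils down to one software latch: making a pin an input means writing a 1 so the chip only sources a weak current (letting an external driver win), and the lines-initial-states property seeds the latch as ~n_latch so flagged pins come out of probe driven low. A stand-alone model of that latch logic, assuming an 8-bit part:

#include <assert.h>
#include <stdint.h>

struct pcf857x_model {
	uint8_t out;	/* software latch, mirrors gpio->out */
};

/* probe: pins flagged in n_latch start driven low, the rest quasi-input */
static void model_init(struct pcf857x_model *m, uint8_t n_latch)
{
	m->out = (uint8_t)~n_latch;
}

/* direction_input: write a 1 so the pin only sources a weak high */
static uint8_t model_dir_input(struct pcf857x_model *m, unsigned int off)
{
	m->out |= 1u << off;
	return m->out;	/* byte that would go out on the I2C bus */
}

/* output/set: update the latch bit and rewrite the whole byte */
static uint8_t model_set(struct pcf857x_model *m, unsigned int off, int val)
{
	if (val)
		m->out |= 1u << off;
	else
		m->out &= ~(1u << off);
	return m->out;
}

int main(void)
{
	struct pcf857x_model m;

	model_init(&m, 0x01);	/* drive pin 0 low from reset */
	assert(m.out == 0xfe);
	model_dir_input(&m, 0);	/* release pin 0 for use as an input */
	assert(m.out == 0xff);
	model_set(&m, 3, 0);	/* actively drive pin 3 low */
	assert(m.out == 0xf7);
	return 0;
}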
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011-2012 Avionic Design GmbH */ #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/property.h> #include <linux/seq_file.h> #include <linux/slab.h> #define GPIO_DDR(gpio) (0x00 << (gpio)->reg_shift) #define GPIO_PLR(gpio) (0x01 << (gpio)->reg_shift) #define GPIO_IER(gpio) (0x02 << (gpio)->reg_shift) #define GPIO_ISR(gpio) (0x03 << (gpio)->reg_shift) #define GPIO_PTR(gpio) (0x04 << (gpio)->reg_shift) struct adnp { struct i2c_client *client; struct gpio_chip gpio; unsigned int reg_shift; struct mutex i2c_lock; struct mutex irq_lock; u8 *irq_enable; u8 *irq_level; u8 *irq_rise; u8 *irq_fall; u8 *irq_high; u8 *irq_low; }; static int adnp_read(struct adnp *adnp, unsigned offset, uint8_t *value) { int err; err = i2c_smbus_read_byte_data(adnp->client, offset); if (err < 0) { dev_err(adnp->gpio.parent, "%s failed: %d\n", "i2c_smbus_read_byte_data()", err); return err; } *value = err; return 0; } static int adnp_write(struct adnp *adnp, unsigned offset, uint8_t value) { int err; err = i2c_smbus_write_byte_data(adnp->client, offset, value); if (err < 0) { dev_err(adnp->gpio.parent, "%s failed: %d\n", "i2c_smbus_write_byte_data()", err); return err; } return 0; } static int adnp_gpio_get(struct gpio_chip *chip, unsigned offset) { struct adnp *adnp = gpiochip_get_data(chip); unsigned int reg = offset >> adnp->reg_shift; unsigned int pos = offset & 7; u8 value; int err; err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &value); if (err < 0) return err; return (value & BIT(pos)) ? 1 : 0; } static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value) { unsigned int reg = offset >> adnp->reg_shift; unsigned int pos = offset & 7; int err; u8 val; err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &val); if (err < 0) return; if (value) val |= BIT(pos); else val &= ~BIT(pos); adnp_write(adnp, GPIO_PLR(adnp) + reg, val); } static void adnp_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct adnp *adnp = gpiochip_get_data(chip); mutex_lock(&adnp->i2c_lock); __adnp_gpio_set(adnp, offset, value); mutex_unlock(&adnp->i2c_lock); } static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct adnp *adnp = gpiochip_get_data(chip); unsigned int reg = offset >> adnp->reg_shift; unsigned int pos = offset & 7; u8 value; int err; mutex_lock(&adnp->i2c_lock); err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value); if (err < 0) goto out; value &= ~BIT(pos); err = adnp_write(adnp, GPIO_DDR(adnp) + reg, value); if (err < 0) goto out; err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value); if (err < 0) goto out; if (value & BIT(pos)) { err = -EPERM; goto out; } err = 0; out: mutex_unlock(&adnp->i2c_lock); return err; } static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct adnp *adnp = gpiochip_get_data(chip); unsigned int reg = offset >> adnp->reg_shift; unsigned int pos = offset & 7; int err; u8 val; mutex_lock(&adnp->i2c_lock); err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val); if (err < 0) goto out; val |= BIT(pos); err = adnp_write(adnp, GPIO_DDR(adnp) + reg, val); if (err < 0) goto out; err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val); if (err < 0) goto out; if (!(val & BIT(pos))) { err = -EPERM; goto out; } __adnp_gpio_set(adnp, offset, value); err = 0; out: mutex_unlock(&adnp->i2c_lock); return err; } static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip 
*chip) { struct adnp *adnp = gpiochip_get_data(chip); unsigned int num_regs = 1 << adnp->reg_shift, i, j; int err; for (i = 0; i < num_regs; i++) { u8 ddr, plr, ier, isr; mutex_lock(&adnp->i2c_lock); err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr); if (err < 0) goto unlock; err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr); if (err < 0) goto unlock; err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier); if (err < 0) goto unlock; err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr); if (err < 0) goto unlock; mutex_unlock(&adnp->i2c_lock); for (j = 0; j < 8; j++) { unsigned int bit = (i << adnp->reg_shift) + j; const char *direction = "input "; const char *level = "low "; const char *interrupt = "disabled"; const char *pending = ""; if (ddr & BIT(j)) direction = "output"; if (plr & BIT(j)) level = "high"; if (ier & BIT(j)) interrupt = "enabled "; if (isr & BIT(j)) pending = "pending"; seq_printf(s, "%2u: %s %s IRQ %s %s\n", bit, direction, level, interrupt, pending); } } return; unlock: mutex_unlock(&adnp->i2c_lock); } static irqreturn_t adnp_irq(int irq, void *data) { struct adnp *adnp = data; unsigned int num_regs, i; num_regs = 1 << adnp->reg_shift; for (i = 0; i < num_regs; i++) { unsigned int base = i << adnp->reg_shift, bit; u8 changed, level, isr, ier; unsigned long pending; int err; mutex_lock(&adnp->i2c_lock); err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level); if (err < 0) { mutex_unlock(&adnp->i2c_lock); continue; } err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr); if (err < 0) { mutex_unlock(&adnp->i2c_lock); continue; } err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier); if (err < 0) { mutex_unlock(&adnp->i2c_lock); continue; } mutex_unlock(&adnp->i2c_lock); /* determine pins that changed levels */ changed = level ^ adnp->irq_level[i]; /* compute edge-triggered interrupts */ pending = changed & ((adnp->irq_fall[i] & ~level) | (adnp->irq_rise[i] & level)); /* add in level-triggered interrupts */ pending |= (adnp->irq_high[i] & level) | (adnp->irq_low[i] & ~level); /* mask out non-pending and disabled interrupts */ pending &= isr & ier; for_each_set_bit(bit, &pending, 8) { unsigned int child_irq; child_irq = irq_find_mapping(adnp->gpio.irq.domain, base + bit); handle_nested_irq(child_irq); } } return IRQ_HANDLED; } static void adnp_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct adnp *adnp = gpiochip_get_data(gc); unsigned int reg = d->hwirq >> adnp->reg_shift; unsigned int pos = d->hwirq & 7; adnp->irq_enable[reg] &= ~BIT(pos); gpiochip_disable_irq(gc, irqd_to_hwirq(d)); } static void adnp_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct adnp *adnp = gpiochip_get_data(gc); unsigned int reg = d->hwirq >> adnp->reg_shift; unsigned int pos = d->hwirq & 7; gpiochip_enable_irq(gc, irqd_to_hwirq(d)); adnp->irq_enable[reg] |= BIT(pos); } static int adnp_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct adnp *adnp = gpiochip_get_data(gc); unsigned int reg = d->hwirq >> adnp->reg_shift; unsigned int pos = d->hwirq & 7; if (type & IRQ_TYPE_EDGE_RISING) adnp->irq_rise[reg] |= BIT(pos); else adnp->irq_rise[reg] &= ~BIT(pos); if (type & IRQ_TYPE_EDGE_FALLING) adnp->irq_fall[reg] |= BIT(pos); else adnp->irq_fall[reg] &= ~BIT(pos); if (type & IRQ_TYPE_LEVEL_HIGH) adnp->irq_high[reg] |= BIT(pos); else adnp->irq_high[reg] &= ~BIT(pos); if (type & IRQ_TYPE_LEVEL_LOW) adnp->irq_low[reg] |= BIT(pos); else adnp->irq_low[reg] &= ~BIT(pos); return 0; } static void 
adnp_irq_bus_lock(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct adnp *adnp = gpiochip_get_data(gc); mutex_lock(&adnp->irq_lock); } static void adnp_irq_bus_unlock(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct adnp *adnp = gpiochip_get_data(gc); unsigned int num_regs = 1 << adnp->reg_shift, i; mutex_lock(&adnp->i2c_lock); for (i = 0; i < num_regs; i++) adnp_write(adnp, GPIO_IER(adnp) + i, adnp->irq_enable[i]); mutex_unlock(&adnp->i2c_lock); mutex_unlock(&adnp->irq_lock); } static const struct irq_chip adnp_irq_chip = { .name = "gpio-adnp", .irq_mask = adnp_irq_mask, .irq_unmask = adnp_irq_unmask, .irq_set_type = adnp_irq_set_type, .irq_bus_lock = adnp_irq_bus_lock, .irq_bus_sync_unlock = adnp_irq_bus_unlock, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int adnp_irq_setup(struct adnp *adnp) { unsigned int num_regs = 1 << adnp->reg_shift, i; struct gpio_chip *chip = &adnp->gpio; int err; mutex_init(&adnp->irq_lock); /* * Allocate memory to keep track of the current level and trigger * modes of the interrupts. To avoid multiple allocations, a single * large buffer is allocated and pointers are setup to point at the * corresponding offsets. For consistency, the layout of the buffer * is chosen to match the register layout of the hardware in that * each segment contains the corresponding bits for all interrupts. */ adnp->irq_enable = devm_kcalloc(chip->parent, num_regs, 6, GFP_KERNEL); if (!adnp->irq_enable) return -ENOMEM; adnp->irq_level = adnp->irq_enable + (num_regs * 1); adnp->irq_rise = adnp->irq_enable + (num_regs * 2); adnp->irq_fall = adnp->irq_enable + (num_regs * 3); adnp->irq_high = adnp->irq_enable + (num_regs * 4); adnp->irq_low = adnp->irq_enable + (num_regs * 5); for (i = 0; i < num_regs; i++) { /* * Read the initial level of all pins to allow the emulation * of edge triggered interrupts. 
*/ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &adnp->irq_level[i]); if (err < 0) return err; /* disable all interrupts */ err = adnp_write(adnp, GPIO_IER(adnp) + i, 0); if (err < 0) return err; adnp->irq_enable[i] = 0x00; } err = devm_request_threaded_irq(chip->parent, adnp->client->irq, NULL, adnp_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, dev_name(chip->parent), adnp); if (err != 0) { dev_err(chip->parent, "can't request IRQ#%d: %d\n", adnp->client->irq, err); return err; } return 0; } static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios, bool is_irq_controller) { struct gpio_chip *chip = &adnp->gpio; int err; adnp->reg_shift = get_count_order(num_gpios) - 3; chip->direction_input = adnp_gpio_direction_input; chip->direction_output = adnp_gpio_direction_output; chip->get = adnp_gpio_get; chip->set = adnp_gpio_set; chip->can_sleep = true; if (IS_ENABLED(CONFIG_DEBUG_FS)) chip->dbg_show = adnp_gpio_dbg_show; chip->base = -1; chip->ngpio = num_gpios; chip->label = adnp->client->name; chip->parent = &adnp->client->dev; chip->owner = THIS_MODULE; if (is_irq_controller) { struct gpio_irq_chip *girq; err = adnp_irq_setup(adnp); if (err) return err; girq = &chip->irq; gpio_irq_chip_set_chip(girq, &adnp_irq_chip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; girq->threaded = true; } err = devm_gpiochip_add_data(&adnp->client->dev, chip, adnp); if (err) return err; return 0; } static int adnp_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct adnp *adnp; u32 num_gpios; int err; err = device_property_read_u32(dev, "nr-gpios", &num_gpios); if (err < 0) return err; adnp = devm_kzalloc(&client->dev, sizeof(*adnp), GFP_KERNEL); if (!adnp) return -ENOMEM; mutex_init(&adnp->i2c_lock); adnp->client = client; err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller")); if (err) return err; i2c_set_clientdata(client, adnp); return 0; } static const struct i2c_device_id adnp_i2c_id[] = { { "gpio-adnp" }, { }, }; MODULE_DEVICE_TABLE(i2c, adnp_i2c_id); static const struct of_device_id adnp_of_match[] = { { .compatible = "ad,gpio-adnp", }, { }, }; MODULE_DEVICE_TABLE(of, adnp_of_match); static struct i2c_driver adnp_i2c_driver = { .driver = { .name = "gpio-adnp", .of_match_table = adnp_of_match, }, .probe = adnp_i2c_probe, .id_table = adnp_i2c_id, }; module_i2c_driver(adnp_i2c_driver); MODULE_DESCRIPTION("Avionic Design N-bit GPIO expander"); MODULE_AUTHOR("Thierry Reding <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-adnp.c
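The adnp interrupt thread above reconstructs edge events in software by XOR-ing the current level register against the snapshot taken at the previous interrupt. The following is a standalone userspace sketch of that bit logic only (not kernel code; all sample values are invented for illustration):

/* Build standalone, e.g.: gcc -o pending pending.c && ./pending */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t last  = 0x0f; /* level snapshot from the previous interrupt */
	uint8_t level = 0x33; /* level register read in this interrupt */
	uint8_t rise  = 0x30; /* lines armed for rising edges */
	uint8_t fall  = 0x0c; /* lines armed for falling edges */
	uint8_t high  = 0x00; /* level-high lines */
	uint8_t low   = 0x00; /* level-low lines */

	/* pins that changed level since the last interrupt */
	uint8_t changed = level ^ last;

	/* rising edge: changed and now high; falling edge: changed and now low */
	uint8_t pending = changed & ((fall & ~level) | (rise & level));

	/* add level-triggered lines currently sitting at their active level */
	pending |= (high & level) | (low & ~level);

	printf("changed=0x%02x pending=0x%02x\n", changed, pending);
	return 0;
}

With the values above this prints changed=0x3c pending=0x3c: lines 2 and 3 fell while armed for falling edges, and lines 4 and 5 rose while armed for rising edges.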
// SPDX-License-Identifier: GPL-2.0 /* * SAMA5D2 PIOBU GPIO controller * * Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries * * Author: Andrei Stefanescu <[email protected]> * */ #include <linux/bits.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #define PIOBU_NUM 8 #define PIOBU_REG_SIZE 4 /* * backup mode protection register for tamper detection * normal mode protection register for tamper detection * wakeup signal generation */ #define PIOBU_BMPR 0x7C #define PIOBU_NMPR 0x80 #define PIOBU_WKPR 0x90 #define PIOBU_BASE 0x18 /* PIOBU offset from SECUMOD base register address. */ #define PIOBU_DET_OFFSET 16 /* In the datasheet this bit is called OUTPUT */ #define PIOBU_DIRECTION BIT(8) #define PIOBU_OUT BIT(8) #define PIOBU_IN 0 #define PIOBU_SOD BIT(9) #define PIOBU_PDS BIT(10) #define PIOBU_HIGH BIT(9) #define PIOBU_LOW 0 struct sama5d2_piobu { struct gpio_chip chip; struct regmap *regmap; }; /* * sama5d2_piobu_setup_pin() - prepares a pin for set_direction call * * Do not consider pin for tamper detection (normal and backup modes) * Do not consider pin as tamper wakeup interrupt source */ static int sama5d2_piobu_setup_pin(struct gpio_chip *chip, unsigned int pin) { int ret; struct sama5d2_piobu *piobu = container_of(chip, struct sama5d2_piobu, chip); unsigned int mask = BIT(PIOBU_DET_OFFSET + pin); ret = regmap_update_bits(piobu->regmap, PIOBU_BMPR, mask, 0); if (ret) return ret; ret = regmap_update_bits(piobu->regmap, PIOBU_NMPR, mask, 0); if (ret) return ret; return regmap_update_bits(piobu->regmap, PIOBU_WKPR, mask, 0); } /* * sama5d2_piobu_write_value() - writes value & mask at the pin's PIOBU register */ static int sama5d2_piobu_write_value(struct gpio_chip *chip, unsigned int pin, unsigned int mask, unsigned int value) { int reg; struct sama5d2_piobu *piobu = container_of(chip, struct sama5d2_piobu, chip); reg = PIOBU_BASE + pin * PIOBU_REG_SIZE; return regmap_update_bits(piobu->regmap, reg, mask, value); } /* * sama5d2_piobu_read_value() - read the value with masking from the pin's PIOBU * register */ static int sama5d2_piobu_read_value(struct gpio_chip *chip, unsigned int pin, unsigned int mask) { struct sama5d2_piobu *piobu = container_of(chip, struct sama5d2_piobu, chip); unsigned int val, reg; int ret; reg = PIOBU_BASE + pin * PIOBU_REG_SIZE; ret = regmap_read(piobu->regmap, reg, &val); if (ret < 0) return ret; return val & mask; } /* * sama5d2_piobu_get_direction() - gpiochip get_direction */ static int sama5d2_piobu_get_direction(struct gpio_chip *chip, unsigned int pin) { int ret = sama5d2_piobu_read_value(chip, pin, PIOBU_DIRECTION); if (ret < 0) return ret; return (ret == PIOBU_IN) ? 
GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; } /* * sama5d2_piobu_direction_input() - gpiochip direction_input */ static int sama5d2_piobu_direction_input(struct gpio_chip *chip, unsigned int pin) { return sama5d2_piobu_write_value(chip, pin, PIOBU_DIRECTION, PIOBU_IN); } /* * sama5d2_piobu_direction_output() - gpiochip direction_output */ static int sama5d2_piobu_direction_output(struct gpio_chip *chip, unsigned int pin, int value) { unsigned int val = PIOBU_OUT; if (value) val |= PIOBU_HIGH; return sama5d2_piobu_write_value(chip, pin, PIOBU_DIRECTION | PIOBU_SOD, val); } /* * sama5d2_piobu_get() - gpiochip get */ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin) { /* if pin is input, read value from PDS else read from SOD */ int ret = sama5d2_piobu_get_direction(chip, pin); if (ret == GPIO_LINE_DIRECTION_IN) ret = sama5d2_piobu_read_value(chip, pin, PIOBU_PDS); else if (ret == GPIO_LINE_DIRECTION_OUT) ret = sama5d2_piobu_read_value(chip, pin, PIOBU_SOD); if (ret < 0) return ret; return !!ret; } /* * sama5d2_piobu_set() - gpiochip set */ static void sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin, int value) { if (!value) value = PIOBU_LOW; else value = PIOBU_HIGH; sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value); } static int sama5d2_piobu_probe(struct platform_device *pdev) { struct sama5d2_piobu *piobu; int ret, i; piobu = devm_kzalloc(&pdev->dev, sizeof(*piobu), GFP_KERNEL); if (!piobu) return -ENOMEM; piobu->chip.label = pdev->name; piobu->chip.parent = &pdev->dev; piobu->chip.owner = THIS_MODULE; piobu->chip.get_direction = sama5d2_piobu_get_direction; piobu->chip.direction_input = sama5d2_piobu_direction_input; piobu->chip.direction_output = sama5d2_piobu_direction_output; piobu->chip.get = sama5d2_piobu_get; piobu->chip.set = sama5d2_piobu_set; piobu->chip.base = -1; piobu->chip.ngpio = PIOBU_NUM; piobu->chip.can_sleep = false; piobu->regmap = syscon_node_to_regmap(pdev->dev.of_node); if (IS_ERR(piobu->regmap)) { dev_err(&pdev->dev, "Failed to get syscon regmap %ld\n", PTR_ERR(piobu->regmap)); return PTR_ERR(piobu->regmap); } ret = devm_gpiochip_add_data(&pdev->dev, &piobu->chip, piobu); if (ret) { dev_err(&pdev->dev, "Failed to add gpiochip %d\n", ret); return ret; } for (i = 0; i < PIOBU_NUM; ++i) { ret = sama5d2_piobu_setup_pin(&piobu->chip, i); if (ret) { dev_err(&pdev->dev, "Failed to setup pin: %d %d\n", i, ret); return ret; } } return 0; } static const struct of_device_id sama5d2_piobu_ids[] = { { .compatible = "atmel,sama5d2-secumod" }, {}, }; MODULE_DEVICE_TABLE(of, sama5d2_piobu_ids); static struct platform_driver sama5d2_piobu_driver = { .driver = { .name = "sama5d2-piobu", .of_match_table = sama5d2_piobu_ids, }, .probe = sama5d2_piobu_probe, }; module_platform_driver(sama5d2_piobu_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("SAMA5D2 PIOBU controller driver"); MODULE_AUTHOR("Andrei Stefanescu <[email protected]>");
linux-master
drivers/gpio/gpio-sama5d2-piobu.c
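Every register access in the PIOBU driver goes through regmap_update_bits(), which is a masked read-modify-write. A minimal standalone sketch of that pattern, with a plain variable standing in for the memory-mapped register:

/* Illustrative only, not driver code; the "register" is a local variable. */
#include <stdio.h>

static unsigned int fake_reg = 0x00000200; /* e.g. PIOBU_SOD (bit 9) set */

static void update_bits(unsigned int *reg, unsigned int mask, unsigned int val)
{
	/* replace only the masked bits, leave everything else untouched */
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	/* drive the line low: clear bit 9 without disturbing other fields */
	update_bits(&fake_reg, 1u << 9, 0);
	printf("reg=0x%08x\n", fake_reg);
	return 0;
}

This is why the driver never needs a shadow copy of the register: the current value is re-read on every update.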
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for the ACCES PCI-IDIO-16 * Copyright (C) 2017 William Breathitt Gray */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/regmap.h> #include <linux/types.h> #include "gpio-idio-16.h" static const struct regmap_range idio_16_wr_ranges[] = { regmap_reg_range(0x0, 0x2), regmap_reg_range(0x3, 0x4), }; static const struct regmap_range idio_16_rd_ranges[] = { regmap_reg_range(0x1, 0x2), regmap_reg_range(0x5, 0x6), }; static const struct regmap_range idio_16_precious_ranges[] = { regmap_reg_range(0x2, 0x2), }; static const struct regmap_access_table idio_16_wr_table = { .yes_ranges = idio_16_wr_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_wr_ranges), }; static const struct regmap_access_table idio_16_rd_table = { .yes_ranges = idio_16_rd_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_rd_ranges), }; static const struct regmap_access_table idio_16_precious_table = { .yes_ranges = idio_16_precious_ranges, .n_yes_ranges = ARRAY_SIZE(idio_16_precious_ranges), }; static const struct regmap_config idio_16_regmap_config = { .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .io_port = true, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, .precious_table = &idio_16_precious_table, .cache_type = REGCACHE_FLAT, .use_raw_spinlock = true, }; /* Only input lines (GPIO 16-31) support interrupts */ #define IDIO_16_REGMAP_IRQ(_id) \ [16 + _id] = { \ .mask = BIT(2), \ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \ } static const struct regmap_irq idio_16_regmap_irqs[] = { IDIO_16_REGMAP_IRQ(0), IDIO_16_REGMAP_IRQ(1), IDIO_16_REGMAP_IRQ(2), /* 0-2 */ IDIO_16_REGMAP_IRQ(3), IDIO_16_REGMAP_IRQ(4), IDIO_16_REGMAP_IRQ(5), /* 3-5 */ IDIO_16_REGMAP_IRQ(6), IDIO_16_REGMAP_IRQ(7), IDIO_16_REGMAP_IRQ(8), /* 6-8 */ IDIO_16_REGMAP_IRQ(9), IDIO_16_REGMAP_IRQ(10), IDIO_16_REGMAP_IRQ(11), /* 9-11 */ IDIO_16_REGMAP_IRQ(12), IDIO_16_REGMAP_IRQ(13), IDIO_16_REGMAP_IRQ(14), /* 12-14 */ IDIO_16_REGMAP_IRQ(15), /* 15 */ }; static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *const dev = &pdev->dev; int err; const size_t pci_bar_index = 2; const char *const name = pci_name(pdev); struct idio_16_regmap_config config = {}; void __iomem *regs; struct regmap *map; err = pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device (%d)\n", err); return err; } err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name); if (err) { dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err); return err; } regs = pcim_iomap_table(pdev)[pci_bar_index]; map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config); if (IS_ERR(map)) return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n"); config.parent = dev; config.map = map; config.regmap_irqs = idio_16_regmap_irqs; config.num_regmap_irqs = ARRAY_SIZE(idio_16_regmap_irqs); config.irq = pdev->irq; config.filters = true; return devm_idio_16_regmap_register(dev, &config); } static const struct pci_device_id idio_16_pci_dev_id[] = { { PCI_DEVICE(0x494F, 0x0DC8) }, { 0 } }; MODULE_DEVICE_TABLE(pci, idio_16_pci_dev_id); static struct pci_driver idio_16_driver = { .name = "pci-idio-16", .id_table = idio_16_pci_dev_id, .probe = idio_16_probe }; module_pci_driver(idio_16_driver); MODULE_AUTHOR("William Breathitt Gray <[email protected]>"); MODULE_DESCRIPTION("ACCES PCI-IDIO-16 GPIO 
driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(GPIO_IDIO_16);
linux-master
drivers/gpio/gpio-pci-idio-16.c
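The IDIO_16_REGMAP_IRQ() macro above relies on C designated array initializers so that only the input lines (GPIO 16-31) receive regmap_irq entries, while slots 0-15 stay zeroed. A standalone sketch of the same trick with a made-up struct:

/* Illustrative only; struct fake_irq stands in for struct regmap_irq. */
#include <stdio.h>

struct fake_irq { unsigned int mask; };

#define FAKE_IRQ(_id) [16 + (_id)] = { .mask = 1u << 2 }

static const struct fake_irq irqs[32] = {
	FAKE_IRQ(0), FAKE_IRQ(1), FAKE_IRQ(15),
};

int main(void)
{
	/* only the designated slots (16, 17, 31 here) carry an entry */
	for (unsigned int i = 0; i < 32; i++)
		if (irqs[i].mask)
			printf("line %u has an irq entry\n", i);
	return 0;
}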
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2020 Daniel Palmer<[email protected]> */ #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/gpio/driver.h> #include <linux/module.h> #include <linux/platform_device.h> #include <dt-bindings/gpio/msc313-gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #define DRIVER_NAME "gpio-msc313" #define MSC313_GPIO_IN BIT(0) #define MSC313_GPIO_OUT BIT(4) #define MSC313_GPIO_OEN BIT(5) /* * These bits need to be saved to correctly restore the * gpio state when resuming from suspend to memory. */ #define MSC313_GPIO_BITSTOSAVE (MSC313_GPIO_OUT | MSC313_GPIO_OEN) /* pad names for fuart, same for all SoCs so far */ #define MSC313_PINNAME_FUART_RX "fuart_rx" #define MSC313_PINNAME_FUART_TX "fuart_tx" #define MSC313_PINNAME_FUART_CTS "fuart_cts" #define MSC313_PINNAME_FUART_RTS "fuart_rts" /* pad names for sr, mercury5 is different */ #define MSC313_PINNAME_SR_IO2 "sr_io2" #define MSC313_PINNAME_SR_IO3 "sr_io3" #define MSC313_PINNAME_SR_IO4 "sr_io4" #define MSC313_PINNAME_SR_IO5 "sr_io5" #define MSC313_PINNAME_SR_IO6 "sr_io6" #define MSC313_PINNAME_SR_IO7 "sr_io7" #define MSC313_PINNAME_SR_IO8 "sr_io8" #define MSC313_PINNAME_SR_IO9 "sr_io9" #define MSC313_PINNAME_SR_IO10 "sr_io10" #define MSC313_PINNAME_SR_IO11 "sr_io11" #define MSC313_PINNAME_SR_IO12 "sr_io12" #define MSC313_PINNAME_SR_IO13 "sr_io13" #define MSC313_PINNAME_SR_IO14 "sr_io14" #define MSC313_PINNAME_SR_IO15 "sr_io15" #define MSC313_PINNAME_SR_IO16 "sr_io16" #define MSC313_PINNAME_SR_IO17 "sr_io17" /* pad names for sd, same for all SoCs so far */ #define MSC313_PINNAME_SD_CLK "sd_clk" #define MSC313_PINNAME_SD_CMD "sd_cmd" #define MSC313_PINNAME_SD_D0 "sd_d0" #define MSC313_PINNAME_SD_D1 "sd_d1" #define MSC313_PINNAME_SD_D2 "sd_d2" #define MSC313_PINNAME_SD_D3 "sd_d3" /* pad names for i2c1, same for all SoCs so for */ #define MSC313_PINNAME_I2C1_SCL "i2c1_scl" #define MSC313_PINNAME_I2C1_SCA "i2c1_sda" /* pad names for spi0, same for all SoCs so far */ #define MSC313_PINNAME_SPI0_CZ "spi0_cz" #define MSC313_PINNAME_SPI0_CK "spi0_ck" #define MSC313_PINNAME_SPI0_DI "spi0_di" #define MSC313_PINNAME_SPI0_DO "spi0_do" #define FUART_NAMES \ MSC313_PINNAME_FUART_RX, \ MSC313_PINNAME_FUART_TX, \ MSC313_PINNAME_FUART_CTS, \ MSC313_PINNAME_FUART_RTS #define OFF_FUART_RX 0x50 #define OFF_FUART_TX 0x54 #define OFF_FUART_CTS 0x58 #define OFF_FUART_RTS 0x5c #define FUART_OFFSETS \ OFF_FUART_RX, \ OFF_FUART_TX, \ OFF_FUART_CTS, \ OFF_FUART_RTS #define SR_NAMES \ MSC313_PINNAME_SR_IO2, \ MSC313_PINNAME_SR_IO3, \ MSC313_PINNAME_SR_IO4, \ MSC313_PINNAME_SR_IO5, \ MSC313_PINNAME_SR_IO6, \ MSC313_PINNAME_SR_IO7, \ MSC313_PINNAME_SR_IO8, \ MSC313_PINNAME_SR_IO9, \ MSC313_PINNAME_SR_IO10, \ MSC313_PINNAME_SR_IO11, \ MSC313_PINNAME_SR_IO12, \ MSC313_PINNAME_SR_IO13, \ MSC313_PINNAME_SR_IO14, \ MSC313_PINNAME_SR_IO15, \ MSC313_PINNAME_SR_IO16, \ MSC313_PINNAME_SR_IO17 #define OFF_SR_IO2 0x88 #define OFF_SR_IO3 0x8c #define OFF_SR_IO4 0x90 #define OFF_SR_IO5 0x94 #define OFF_SR_IO6 0x98 #define OFF_SR_IO7 0x9c #define OFF_SR_IO8 0xa0 #define OFF_SR_IO9 0xa4 #define OFF_SR_IO10 0xa8 #define OFF_SR_IO11 0xac #define OFF_SR_IO12 0xb0 #define OFF_SR_IO13 0xb4 #define OFF_SR_IO14 0xb8 #define OFF_SR_IO15 0xbc #define OFF_SR_IO16 0xc0 #define OFF_SR_IO17 0xc4 #define SR_OFFSETS \ OFF_SR_IO2, \ OFF_SR_IO3, \ OFF_SR_IO4, \ OFF_SR_IO5, \ OFF_SR_IO6, \ OFF_SR_IO7, \ OFF_SR_IO8, \ OFF_SR_IO9, \ 
OFF_SR_IO10, \ OFF_SR_IO11, \ OFF_SR_IO12, \ OFF_SR_IO13, \ OFF_SR_IO14, \ OFF_SR_IO15, \ OFF_SR_IO16, \ OFF_SR_IO17 #define SD_NAMES \ MSC313_PINNAME_SD_CLK, \ MSC313_PINNAME_SD_CMD, \ MSC313_PINNAME_SD_D0, \ MSC313_PINNAME_SD_D1, \ MSC313_PINNAME_SD_D2, \ MSC313_PINNAME_SD_D3 #define OFF_SD_CLK 0x140 #define OFF_SD_CMD 0x144 #define OFF_SD_D0 0x148 #define OFF_SD_D1 0x14c #define OFF_SD_D2 0x150 #define OFF_SD_D3 0x154 #define SD_OFFSETS \ OFF_SD_CLK, \ OFF_SD_CMD, \ OFF_SD_D0, \ OFF_SD_D1, \ OFF_SD_D2, \ OFF_SD_D3 #define I2C1_NAMES \ MSC313_PINNAME_I2C1_SCL, \ MSC313_PINNAME_I2C1_SCA #define OFF_I2C1_SCL 0x188 #define OFF_I2C1_SCA 0x18c #define I2C1_OFFSETS \ OFF_I2C1_SCL, \ OFF_I2C1_SCA #define SPI0_NAMES \ MSC313_PINNAME_SPI0_CZ, \ MSC313_PINNAME_SPI0_CK, \ MSC313_PINNAME_SPI0_DI, \ MSC313_PINNAME_SPI0_DO #define OFF_SPI0_CZ 0x1c0 #define OFF_SPI0_CK 0x1c4 #define OFF_SPI0_DI 0x1c8 #define OFF_SPI0_DO 0x1cc #define SPI0_OFFSETS \ OFF_SPI0_CZ, \ OFF_SPI0_CK, \ OFF_SPI0_DI, \ OFF_SPI0_DO struct msc313_gpio_data { const char * const *names; const unsigned int *offsets; const unsigned int num; }; #define MSC313_GPIO_CHIPDATA(_chip) \ static const struct msc313_gpio_data _chip##_data = { \ .names = _chip##_names, \ .offsets = _chip##_offsets, \ .num = ARRAY_SIZE(_chip##_offsets), \ } #ifdef CONFIG_MACH_INFINITY static const char * const msc313_names[] = { FUART_NAMES, SR_NAMES, SD_NAMES, I2C1_NAMES, SPI0_NAMES, }; static const unsigned int msc313_offsets[] = { FUART_OFFSETS, SR_OFFSETS, SD_OFFSETS, I2C1_OFFSETS, SPI0_OFFSETS, }; MSC313_GPIO_CHIPDATA(msc313); /* * Unlike the msc313(e) the ssd20xd have a bunch of pins * that are actually called gpio probably because they * have no dedicated function. */ #define SSD20XD_PINNAME_GPIO0 "gpio0" #define SSD20XD_PINNAME_GPIO1 "gpio1" #define SSD20XD_PINNAME_GPIO2 "gpio2" #define SSD20XD_PINNAME_GPIO3 "gpio3" #define SSD20XD_PINNAME_GPIO4 "gpio4" #define SSD20XD_PINNAME_GPIO5 "gpio5" #define SSD20XD_PINNAME_GPIO6 "gpio6" #define SSD20XD_PINNAME_GPIO7 "gpio7" #define SSD20XD_PINNAME_GPIO10 "gpio10" #define SSD20XD_PINNAME_GPIO11 "gpio11" #define SSD20XD_PINNAME_GPIO12 "gpio12" #define SSD20XD_PINNAME_GPIO13 "gpio13" #define SSD20XD_PINNAME_GPIO14 "gpio14" #define SSD20XD_PINNAME_GPIO85 "gpio85" #define SSD20XD_PINNAME_GPIO86 "gpio86" #define SSD20XD_PINNAME_GPIO90 "gpio90" #define SSD20XD_GPIO_NAMES SSD20XD_PINNAME_GPIO0, \ SSD20XD_PINNAME_GPIO1, \ SSD20XD_PINNAME_GPIO2, \ SSD20XD_PINNAME_GPIO3, \ SSD20XD_PINNAME_GPIO4, \ SSD20XD_PINNAME_GPIO5, \ SSD20XD_PINNAME_GPIO6, \ SSD20XD_PINNAME_GPIO7, \ SSD20XD_PINNAME_GPIO10, \ SSD20XD_PINNAME_GPIO11, \ SSD20XD_PINNAME_GPIO12, \ SSD20XD_PINNAME_GPIO13, \ SSD20XD_PINNAME_GPIO14, \ SSD20XD_PINNAME_GPIO85, \ SSD20XD_PINNAME_GPIO86, \ SSD20XD_PINNAME_GPIO90 #define SSD20XD_GPIO_OFF_GPIO0 0x0 #define SSD20XD_GPIO_OFF_GPIO1 0x4 #define SSD20XD_GPIO_OFF_GPIO2 0x8 #define SSD20XD_GPIO_OFF_GPIO3 0xc #define SSD20XD_GPIO_OFF_GPIO4 0x10 #define SSD20XD_GPIO_OFF_GPIO5 0x14 #define SSD20XD_GPIO_OFF_GPIO6 0x18 #define SSD20XD_GPIO_OFF_GPIO7 0x1c #define SSD20XD_GPIO_OFF_GPIO10 0x28 #define SSD20XD_GPIO_OFF_GPIO11 0x2c #define SSD20XD_GPIO_OFF_GPIO12 0x30 #define SSD20XD_GPIO_OFF_GPIO13 0x34 #define SSD20XD_GPIO_OFF_GPIO14 0x38 #define SSD20XD_GPIO_OFF_GPIO85 0x100 #define SSD20XD_GPIO_OFF_GPIO86 0x104 #define SSD20XD_GPIO_OFF_GPIO90 0x114 #define SSD20XD_GPIO_OFFSETS SSD20XD_GPIO_OFF_GPIO0, \ SSD20XD_GPIO_OFF_GPIO1, \ SSD20XD_GPIO_OFF_GPIO2, \ SSD20XD_GPIO_OFF_GPIO3, \ SSD20XD_GPIO_OFF_GPIO4, \ SSD20XD_GPIO_OFF_GPIO5, 
\ SSD20XD_GPIO_OFF_GPIO6, \ SSD20XD_GPIO_OFF_GPIO7, \ SSD20XD_GPIO_OFF_GPIO10, \ SSD20XD_GPIO_OFF_GPIO11, \ SSD20XD_GPIO_OFF_GPIO12, \ SSD20XD_GPIO_OFF_GPIO13, \ SSD20XD_GPIO_OFF_GPIO14, \ SSD20XD_GPIO_OFF_GPIO85, \ SSD20XD_GPIO_OFF_GPIO86, \ SSD20XD_GPIO_OFF_GPIO90 /* "ttl" pins lcd interface pins */ #define SSD20XD_PINNAME_TTL0 "ttl0" #define SSD20XD_PINNAME_TTL1 "ttl1" #define SSD20XD_PINNAME_TTL2 "ttl2" #define SSD20XD_PINNAME_TTL3 "ttl3" #define SSD20XD_PINNAME_TTL4 "ttl4" #define SSD20XD_PINNAME_TTL5 "ttl5" #define SSD20XD_PINNAME_TTL6 "ttl6" #define SSD20XD_PINNAME_TTL7 "ttl7" #define SSD20XD_PINNAME_TTL8 "ttl8" #define SSD20XD_PINNAME_TTL9 "ttl9" #define SSD20XD_PINNAME_TTL10 "ttl10" #define SSD20XD_PINNAME_TTL11 "ttl11" #define SSD20XD_PINNAME_TTL12 "ttl12" #define SSD20XD_PINNAME_TTL13 "ttl13" #define SSD20XD_PINNAME_TTL14 "ttl14" #define SSD20XD_PINNAME_TTL15 "ttl15" #define SSD20XD_PINNAME_TTL16 "ttl16" #define SSD20XD_PINNAME_TTL17 "ttl17" #define SSD20XD_PINNAME_TTL18 "ttl18" #define SSD20XD_PINNAME_TTL19 "ttl19" #define SSD20XD_PINNAME_TTL20 "ttl20" #define SSD20XD_PINNAME_TTL21 "ttl21" #define SSD20XD_PINNAME_TTL22 "ttl22" #define SSD20XD_PINNAME_TTL23 "ttl23" #define SSD20XD_PINNAME_TTL24 "ttl24" #define SSD20XD_PINNAME_TTL25 "ttl25" #define SSD20XD_PINNAME_TTL26 "ttl26" #define SSD20XD_PINNAME_TTL27 "ttl27" #define SSD20XD_TTL_PINNAMES SSD20XD_PINNAME_TTL0, \ SSD20XD_PINNAME_TTL1, \ SSD20XD_PINNAME_TTL2, \ SSD20XD_PINNAME_TTL3, \ SSD20XD_PINNAME_TTL4, \ SSD20XD_PINNAME_TTL5, \ SSD20XD_PINNAME_TTL6, \ SSD20XD_PINNAME_TTL7, \ SSD20XD_PINNAME_TTL8, \ SSD20XD_PINNAME_TTL9, \ SSD20XD_PINNAME_TTL10, \ SSD20XD_PINNAME_TTL11, \ SSD20XD_PINNAME_TTL12, \ SSD20XD_PINNAME_TTL13, \ SSD20XD_PINNAME_TTL14, \ SSD20XD_PINNAME_TTL15, \ SSD20XD_PINNAME_TTL16, \ SSD20XD_PINNAME_TTL17, \ SSD20XD_PINNAME_TTL18, \ SSD20XD_PINNAME_TTL19, \ SSD20XD_PINNAME_TTL20, \ SSD20XD_PINNAME_TTL21, \ SSD20XD_PINNAME_TTL22, \ SSD20XD_PINNAME_TTL23, \ SSD20XD_PINNAME_TTL24, \ SSD20XD_PINNAME_TTL25, \ SSD20XD_PINNAME_TTL26, \ SSD20XD_PINNAME_TTL27 #define SSD20XD_TTL_OFFSET_TTL0 0x80 #define SSD20XD_TTL_OFFSET_TTL1 0x84 #define SSD20XD_TTL_OFFSET_TTL2 0x88 #define SSD20XD_TTL_OFFSET_TTL3 0x8c #define SSD20XD_TTL_OFFSET_TTL4 0x90 #define SSD20XD_TTL_OFFSET_TTL5 0x94 #define SSD20XD_TTL_OFFSET_TTL6 0x98 #define SSD20XD_TTL_OFFSET_TTL7 0x9c #define SSD20XD_TTL_OFFSET_TTL8 0xa0 #define SSD20XD_TTL_OFFSET_TTL9 0xa4 #define SSD20XD_TTL_OFFSET_TTL10 0xa8 #define SSD20XD_TTL_OFFSET_TTL11 0xac #define SSD20XD_TTL_OFFSET_TTL12 0xb0 #define SSD20XD_TTL_OFFSET_TTL13 0xb4 #define SSD20XD_TTL_OFFSET_TTL14 0xb8 #define SSD20XD_TTL_OFFSET_TTL15 0xbc #define SSD20XD_TTL_OFFSET_TTL16 0xc0 #define SSD20XD_TTL_OFFSET_TTL17 0xc4 #define SSD20XD_TTL_OFFSET_TTL18 0xc8 #define SSD20XD_TTL_OFFSET_TTL19 0xcc #define SSD20XD_TTL_OFFSET_TTL20 0xd0 #define SSD20XD_TTL_OFFSET_TTL21 0xd4 #define SSD20XD_TTL_OFFSET_TTL22 0xd8 #define SSD20XD_TTL_OFFSET_TTL23 0xdc #define SSD20XD_TTL_OFFSET_TTL24 0xe0 #define SSD20XD_TTL_OFFSET_TTL25 0xe4 #define SSD20XD_TTL_OFFSET_TTL26 0xe8 #define SSD20XD_TTL_OFFSET_TTL27 0xec #define SSD20XD_TTL_OFFSETS SSD20XD_TTL_OFFSET_TTL0, \ SSD20XD_TTL_OFFSET_TTL1, \ SSD20XD_TTL_OFFSET_TTL2, \ SSD20XD_TTL_OFFSET_TTL3, \ SSD20XD_TTL_OFFSET_TTL4, \ SSD20XD_TTL_OFFSET_TTL5, \ SSD20XD_TTL_OFFSET_TTL6, \ SSD20XD_TTL_OFFSET_TTL7, \ SSD20XD_TTL_OFFSET_TTL8, \ SSD20XD_TTL_OFFSET_TTL9, \ SSD20XD_TTL_OFFSET_TTL10, \ SSD20XD_TTL_OFFSET_TTL11, \ SSD20XD_TTL_OFFSET_TTL12, \ SSD20XD_TTL_OFFSET_TTL13, \ SSD20XD_TTL_OFFSET_TTL14, \ 
SSD20XD_TTL_OFFSET_TTL15, \ SSD20XD_TTL_OFFSET_TTL16, \ SSD20XD_TTL_OFFSET_TTL17, \ SSD20XD_TTL_OFFSET_TTL18, \ SSD20XD_TTL_OFFSET_TTL19, \ SSD20XD_TTL_OFFSET_TTL20, \ SSD20XD_TTL_OFFSET_TTL21, \ SSD20XD_TTL_OFFSET_TTL22, \ SSD20XD_TTL_OFFSET_TTL23, \ SSD20XD_TTL_OFFSET_TTL24, \ SSD20XD_TTL_OFFSET_TTL25, \ SSD20XD_TTL_OFFSET_TTL26, \ SSD20XD_TTL_OFFSET_TTL27 /* On the ssd20xd the two normal uarts have dedicated pins */ #define SSD20XD_PINNAME_UART0_RX "uart0_rx" #define SSD20XD_PINNAME_UART0_TX "uart0_tx" #define SSD20XD_UART0_NAMES \ SSD20XD_PINNAME_UART0_RX, \ SSD20XD_PINNAME_UART0_TX #define SSD20XD_PINNAME_UART1_RX "uart1_rx" #define SSD20XD_PINNAME_UART1_TX "uart1_tx" #define SSD20XD_UART1_NAMES \ SSD20XD_PINNAME_UART1_RX, \ SSD20XD_PINNAME_UART1_TX #define SSD20XD_OFF_UART0_RX 0x60 #define SSD20XD_OFF_UART0_TX 0x64 #define SSD20XD_UART0_OFFSETS \ SSD20XD_OFF_UART0_RX, \ SSD20XD_OFF_UART0_TX #define SSD20XD_OFF_UART1_RX 0x68 #define SSD20XD_OFF_UART1_TX 0x6c #define SSD20XD_UART1_OFFSETS \ SSD20XD_OFF_UART1_RX, \ SSD20XD_OFF_UART1_TX /* * ssd20x has the same pin names but different ordering * of the registers that control the gpio. */ #define SSD20XD_OFF_SD_D0 0x140 #define SSD20XD_OFF_SD_D1 0x144 #define SSD20XD_OFF_SD_D2 0x148 #define SSD20XD_OFF_SD_D3 0x14c #define SSD20XD_OFF_SD_CMD 0x150 #define SSD20XD_OFF_SD_CLK 0x154 #define SSD20XD_SD_OFFSETS SSD20XD_OFF_SD_CLK, \ SSD20XD_OFF_SD_CMD, \ SSD20XD_OFF_SD_D0, \ SSD20XD_OFF_SD_D1, \ SSD20XD_OFF_SD_D2, \ SSD20XD_OFF_SD_D3 static const char * const ssd20xd_names[] = { FUART_NAMES, SD_NAMES, SSD20XD_UART0_NAMES, SSD20XD_UART1_NAMES, SSD20XD_TTL_PINNAMES, SSD20XD_GPIO_NAMES, }; static const unsigned int ssd20xd_offsets[] = { FUART_OFFSETS, SSD20XD_SD_OFFSETS, SSD20XD_UART0_OFFSETS, SSD20XD_UART1_OFFSETS, SSD20XD_TTL_OFFSETS, SSD20XD_GPIO_OFFSETS, }; MSC313_GPIO_CHIPDATA(ssd20xd); #endif struct msc313_gpio { void __iomem *base; const struct msc313_gpio_data *gpio_data; u8 *saved; }; static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct msc313_gpio *gpio = gpiochip_get_data(chip); u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]); if (value) gpioreg |= MSC313_GPIO_OUT; else gpioreg &= ~MSC313_GPIO_OUT; writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]); } static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct msc313_gpio *gpio = gpiochip_get_data(chip); return readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]) & MSC313_GPIO_IN; } static int msc313_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct msc313_gpio *gpio = gpiochip_get_data(chip); u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]); gpioreg |= MSC313_GPIO_OEN; writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]); return 0; } static int msc313_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct msc313_gpio *gpio = gpiochip_get_data(chip); u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]); gpioreg &= ~MSC313_GPIO_OEN; if (value) gpioreg |= MSC313_GPIO_OUT; else gpioreg &= ~MSC313_GPIO_OUT; writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]); return 0; } static void msc313_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); irq_chip_mask_parent(d); gpiochip_disable_irq(gc, d->hwirq); } static void msc313_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = 
irq_data_get_irq_chip_data(d); gpiochip_enable_irq(gc, d->hwirq); irq_chip_unmask_parent(d); } /* * The interrupt handling happens in the parent interrupt controller, * we don't do anything here. */ static const struct irq_chip msc313_gpio_irqchip = { .name = "GPIO", .irq_eoi = irq_chip_eoi_parent, .irq_mask = msc313_gpio_irq_mask, .irq_unmask = msc313_gpio_irq_unmask, .irq_set_type = irq_chip_set_type_parent, .irq_set_affinity = irq_chip_set_affinity_parent, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; /* * The parent interrupt controller needs the GIC interrupt type set to GIC_SPI * so we need to provide the fwspec. Essentially gpiochip_populate_parent_fwspec_twocell * that puts GIC_SPI into the first cell. */ static int msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, unsigned int parent_type) { struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 3; fwspec->param[0] = GIC_SPI; fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; return 0; } static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip, unsigned int child, unsigned int child_type, unsigned int *parent, unsigned int *parent_type) { struct msc313_gpio *priv = gpiochip_get_data(chip); unsigned int offset = priv->gpio_data->offsets[child]; /* * only the spi0 pins have interrupts on the parent * on all of the known chips and so far they are all * mapped to the same place */ if (offset >= OFF_SPI0_CZ && offset <= OFF_SPI0_DO) { *parent_type = child_type; *parent = ((offset - OFF_SPI0_CZ) >> 2) + 28; return 0; } return -EINVAL; } static int msc313_gpio_probe(struct platform_device *pdev) { const struct msc313_gpio_data *match_data; struct msc313_gpio *gpio; struct gpio_chip *gpiochip; struct gpio_irq_chip *gpioirqchip; struct irq_domain *parent_domain; struct device_node *parent_node; struct device *dev = &pdev->dev; match_data = of_device_get_match_data(dev); if (!match_data) return -EINVAL; parent_node = of_irq_find_parent(dev->of_node); if (!parent_node) return -ENODEV; parent_domain = irq_find_host(parent_node); if (!parent_domain) return -ENODEV; gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; gpio->gpio_data = match_data; gpio->saved = devm_kcalloc(dev, gpio->gpio_data->num, sizeof(*gpio->saved), GFP_KERNEL); if (!gpio->saved) return -ENOMEM; gpio->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); platform_set_drvdata(pdev, gpio); gpiochip = devm_kzalloc(dev, sizeof(*gpiochip), GFP_KERNEL); if (!gpiochip) return -ENOMEM; gpiochip->label = DRIVER_NAME; gpiochip->parent = dev; gpiochip->request = gpiochip_generic_request; gpiochip->free = gpiochip_generic_free; gpiochip->direction_input = msc313_gpio_direction_input; gpiochip->direction_output = msc313_gpio_direction_output; gpiochip->get = msc313_gpio_get; gpiochip->set = msc313_gpio_set; gpiochip->base = -1; gpiochip->ngpio = gpio->gpio_data->num; gpiochip->names = gpio->gpio_data->names; gpioirqchip = &gpiochip->irq; gpio_irq_chip_set_chip(gpioirqchip, &msc313_gpio_irqchip); gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node); gpioirqchip->parent_domain = parent_domain; gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq; gpioirqchip->populate_parent_alloc_arg = msc313_gpio_populate_parent_fwspec; gpioirqchip->handler = handle_bad_irq; gpioirqchip->default_type = IRQ_TYPE_NONE; return 
devm_gpiochip_add_data(dev, gpiochip, gpio); } static const struct of_device_id msc313_gpio_of_match[] = { #ifdef CONFIG_MACH_INFINITY { .compatible = "mstar,msc313-gpio", .data = &msc313_data, }, { .compatible = "sstar,ssd20xd-gpio", .data = &ssd20xd_data, }, #endif { } }; /* * The GPIO controller loses the state of the registers when the * SoC goes into suspend to memory mode so we need to save some * of the register bits before suspending and put it back when resuming */ static int __maybe_unused msc313_gpio_suspend(struct device *dev) { struct msc313_gpio *gpio = dev_get_drvdata(dev); int i; for (i = 0; i < gpio->gpio_data->num; i++) gpio->saved[i] = readb_relaxed(gpio->base + gpio->gpio_data->offsets[i]) & MSC313_GPIO_BITSTOSAVE; return 0; } static int __maybe_unused msc313_gpio_resume(struct device *dev) { struct msc313_gpio *gpio = dev_get_drvdata(dev); int i; for (i = 0; i < gpio->gpio_data->num; i++) writeb_relaxed(gpio->saved[i], gpio->base + gpio->gpio_data->offsets[i]); return 0; } static SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume); static struct platform_driver msc313_gpio_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = msc313_gpio_of_match, .pm = &msc313_gpio_ops, }, .probe = msc313_gpio_probe, }; builtin_platform_driver(msc313_gpio_driver);
linux-master
drivers/gpio/gpio-msc313.c
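In msc313e_gpio_child_to_parent_hwirq() above, only the spi0 pads have parent interrupts; their registers sit 4 bytes apart, so the byte-offset delta shifted right by 2 becomes an index, and 28 is the first parent hwirq. A standalone check of that arithmetic (offsets copied from the driver):

/* Illustrative only; prints the child offset -> parent hwirq mapping. */
#include <stdio.h>

#define OFF_SPI0_CZ 0x1c0
#define OFF_SPI0_CK 0x1c4
#define OFF_SPI0_DI 0x1c8
#define OFF_SPI0_DO 0x1cc

int main(void)
{
	unsigned int offs[] = { OFF_SPI0_CZ, OFF_SPI0_CK, OFF_SPI0_DI, OFF_SPI0_DO };

	for (int i = 0; i < 4; i++)
		printf("offset 0x%x -> parent hwirq %u\n", offs[i],
		       ((offs[i] - OFF_SPI0_CZ) >> 2) + 28);
	return 0;
}

Running this prints parent hwirqs 28 through 31, one per spi0 pad.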
// SPDX-License-Identifier: GPL-2.0 // // Copyright (C) 2018 BayLibre SAS // Author: Bartosz Golaszewski <[email protected]> // // GPIO driver for MAXIM 77650/77651 charger/power-supply. #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/mfd/max77650.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #define MAX77650_GPIO_DIR_MASK BIT(0) #define MAX77650_GPIO_INVAL_MASK BIT(1) #define MAX77650_GPIO_DRV_MASK BIT(2) #define MAX77650_GPIO_OUTVAL_MASK BIT(3) #define MAX77650_GPIO_DEBOUNCE_MASK BIT(4) #define MAX77650_GPIO_DIR_OUT 0x00 #define MAX77650_GPIO_DIR_IN BIT(0) #define MAX77650_GPIO_OUT_LOW 0x00 #define MAX77650_GPIO_OUT_HIGH BIT(3) #define MAX77650_GPIO_DRV_OPEN_DRAIN 0x00 #define MAX77650_GPIO_DRV_PUSH_PULL BIT(2) #define MAX77650_GPIO_DEBOUNCE BIT(4) #define MAX77650_GPIO_DIR_BITS(_reg) \ ((_reg) & MAX77650_GPIO_DIR_MASK) #define MAX77650_GPIO_INVAL_BITS(_reg) \ (((_reg) & MAX77650_GPIO_INVAL_MASK) >> 1) struct max77650_gpio_chip { struct regmap *map; struct gpio_chip gc; int irq; }; static int max77650_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, MAX77650_GPIO_DIR_MASK, MAX77650_GPIO_DIR_IN); } static int max77650_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); int mask, regval; mask = MAX77650_GPIO_DIR_MASK | MAX77650_GPIO_OUTVAL_MASK; regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW; regval |= MAX77650_GPIO_DIR_OUT; return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, mask, regval); } static void max77650_gpio_set_value(struct gpio_chip *gc, unsigned int offset, int value) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); int rv, regval; regval = value ? 
MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW; rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, MAX77650_GPIO_OUTVAL_MASK, regval); if (rv) dev_err(gc->parent, "cannot set GPIO value: %d\n", rv); } static int max77650_gpio_get_value(struct gpio_chip *gc, unsigned int offset) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); unsigned int val; int rv; rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val); if (rv) return rv; return MAX77650_GPIO_INVAL_BITS(val); } static int max77650_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); unsigned int val; int rv; rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val); if (rv) return rv; return MAX77650_GPIO_DIR_BITS(val); } static int max77650_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long cfg) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); switch (pinconf_to_config_param(cfg)) { case PIN_CONFIG_DRIVE_OPEN_DRAIN: return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, MAX77650_GPIO_DRV_MASK, MAX77650_GPIO_DRV_OPEN_DRAIN); case PIN_CONFIG_DRIVE_PUSH_PULL: return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, MAX77650_GPIO_DRV_MASK, MAX77650_GPIO_DRV_PUSH_PULL); case PIN_CONFIG_INPUT_DEBOUNCE: return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, MAX77650_GPIO_DEBOUNCE_MASK, MAX77650_GPIO_DEBOUNCE); default: return -ENOTSUPP; } } static int max77650_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) { struct max77650_gpio_chip *chip = gpiochip_get_data(gc); return chip->irq; } static int max77650_gpio_probe(struct platform_device *pdev) { struct max77650_gpio_chip *chip; struct device *dev, *parent; struct i2c_client *i2c; dev = &pdev->dev; parent = dev->parent; i2c = to_i2c_client(parent); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->map = dev_get_regmap(parent, NULL); if (!chip->map) return -ENODEV; chip->irq = platform_get_irq_byname(pdev, "GPI"); if (chip->irq < 0) return chip->irq; chip->gc.base = -1; chip->gc.ngpio = 1; chip->gc.label = i2c->name; chip->gc.parent = dev; chip->gc.owner = THIS_MODULE; chip->gc.can_sleep = true; chip->gc.direction_input = max77650_gpio_direction_input; chip->gc.direction_output = max77650_gpio_direction_output; chip->gc.set = max77650_gpio_set_value; chip->gc.get = max77650_gpio_get_value; chip->gc.get_direction = max77650_gpio_get_direction; chip->gc.set_config = max77650_gpio_set_config; chip->gc.to_irq = max77650_gpio_to_irq; return devm_gpiochip_add_data(dev, &chip->gc, chip); } static struct platform_driver max77650_gpio_driver = { .driver = { .name = "max77650-gpio", }, .probe = max77650_gpio_probe, }; module_platform_driver(max77650_gpio_driver); MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver"); MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:max77650-gpio");
linux-master
drivers/gpio/gpio-max77650.c
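The MAX77650 exposes its single GPIO entirely through one configuration byte, with the bit layout given by the driver's masks: bit 0 direction, bit 1 input level, bit 2 drive mode, bit 3 output level, bit 4 debounce. A standalone decoder for such a byte (the register snapshot is invented):

/* Illustrative only; decodes a CNFG_GPIO-style byte per the masks above. */
#include <stdint.h>
#include <stdio.h>

#define DIR_MASK    (1u << 0) /* 1 = input */
#define INVAL_MASK  (1u << 1) /* sampled input level */
#define DRV_MASK    (1u << 2) /* 1 = push-pull */
#define OUTVAL_MASK (1u << 3) /* programmed output level */

int main(void)
{
	uint8_t reg = 0x0b; /* made-up register snapshot */

	printf("direction: %s\n", (reg & DIR_MASK) ? "input" : "output");
	printf("input level: %u\n", (reg & INVAL_MASK) >> 1);
	printf("drive: %s\n", (reg & DRV_MASK) ? "push-pull" : "open-drain");
	printf("output level: %u\n", (reg & OUTVAL_MASK) >> 3);
	return 0;
}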
// SPDX-License-Identifier: GPL-2.0-or-later /* * AppliedMicro X-Gene SoC GPIO-Standby Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Author: Tin Huynh <[email protected]>. * Y Vo <[email protected]>. * Quan Nguyen <[email protected]>. */ #include <linux/module.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gpio/driver.h> #include <linux/acpi.h> #include "gpiolib.h" #include "gpiolib-acpi.h" /* Common property names */ #define XGENE_NIRQ_PROPERTY "apm,nr-irqs" #define XGENE_NGPIO_PROPERTY "apm,nr-gpios" #define XGENE_IRQ_START_PROPERTY "apm,irq-start" #define XGENE_DFLT_MAX_NGPIO 22 #define XGENE_DFLT_MAX_NIRQ 6 #define XGENE_DFLT_IRQ_START_PIN 8 #define GPIO_MASK(x) (1U << ((x) % 32)) #define MPA_GPIO_INT_LVL 0x0290 #define MPA_GPIO_OE_ADDR 0x029c #define MPA_GPIO_OUT_ADDR 0x02a0 #define MPA_GPIO_IN_ADDR 0x02a4 #define MPA_GPIO_SEL_LO 0x0294 #define GPIO_INT_LEVEL_H 0x000001 #define GPIO_INT_LEVEL_L 0x000000 /** * struct xgene_gpio_sb - GPIO-Standby private data structure. * @gc: memory-mapped GPIO controllers. * @regs: GPIO register base offset * @irq_domain: GPIO interrupt domain * @irq_start: GPIO pin that start support interrupt * @nirq: Number of GPIO pins that supports interrupt * @parent_irq_base: Start parent HWIRQ */ struct xgene_gpio_sb { struct gpio_chip gc; void __iomem *regs; struct irq_domain *irq_domain; u16 irq_start; u16 nirq; u16 parent_irq_base; }; #define HWIRQ_TO_GPIO(priv, hwirq) ((hwirq) + (priv)->irq_start) #define GPIO_TO_HWIRQ(priv, gpio) ((gpio) - (priv)->irq_start) static void xgene_gpio_set_bit(struct gpio_chip *gc, void __iomem *reg, u32 gpio, int val) { u32 data; data = gc->read_reg(reg); if (val) data |= GPIO_MASK(gpio); else data &= ~GPIO_MASK(gpio); gc->write_reg(reg, data); } static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type) { struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d); int gpio = HWIRQ_TO_GPIO(priv, d->hwirq); int lvl_type = GPIO_INT_LEVEL_H; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: lvl_type = GPIO_INT_LEVEL_H; break; case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_LEVEL_LOW: lvl_type = GPIO_INT_LEVEL_L; break; default: break; } xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO, gpio * 2, 1); xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL, d->hwirq, lvl_type); /* Propagate IRQ type setting to parent */ if (type & IRQ_TYPE_EDGE_BOTH) return irq_chip_set_type_parent(d, IRQ_TYPE_EDGE_RISING); else return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); } static struct irq_chip xgene_gpio_sb_irq_chip = { .name = "sbgpio", .irq_eoi = irq_chip_eoi_parent, .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_set_type = xgene_gpio_sb_irq_set_type, }; static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio) { struct xgene_gpio_sb *priv = gpiochip_get_data(gc); struct irq_fwspec fwspec; if ((gpio < priv->irq_start) || (gpio > HWIRQ_TO_GPIO(priv, priv->nirq))) return -ENXIO; fwspec.fwnode = gc->parent->fwnode; fwspec.param_count = 2; fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio); fwspec.param[1] = IRQ_TYPE_EDGE_RISING; return irq_create_fwspec_mapping(&fwspec); } static int xgene_gpio_sb_domain_activate(struct irq_domain *d, struct irq_data *irq_data, bool reserve) { struct xgene_gpio_sb *priv = d->host_data; u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); int ret; ret = gpiochip_lock_as_irq(&priv->gc, gpio); if (ret) { dev_err(priv->gc.parent, "Unable to 
configure XGene GPIO standby pin %d as IRQ\n", gpio); return ret; } xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO, gpio * 2, 1); return 0; } static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d, struct irq_data *irq_data) { struct xgene_gpio_sb *priv = d->host_data; u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); gpiochip_unlock_as_irq(&priv->gc, gpio); xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO, gpio * 2, 0); } static int xgene_gpio_sb_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { struct xgene_gpio_sb *priv = d->host_data; if ((fwspec->param_count != 2) || (fwspec->param[0] >= priv->nirq)) return -EINVAL; *hwirq = fwspec->param[0]; *type = fwspec->param[1]; return 0; } static int xgene_gpio_sb_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *data) { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; struct xgene_gpio_sb *priv = domain->host_data; irq_hw_number_t hwirq; unsigned int i; hwirq = fwspec->param[0]; for (i = 0; i < nr_irqs; i++) irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &xgene_gpio_sb_irq_chip, priv); parent_fwspec.fwnode = domain->parent->fwnode; if (is_of_node(parent_fwspec.fwnode)) { parent_fwspec.param_count = 3; parent_fwspec.param[0] = 0;/* SPI */ /* Skip SGIs and PPIs*/ parent_fwspec.param[1] = hwirq + priv->parent_irq_base - 32; parent_fwspec.param[2] = fwspec->param[1]; } else if (is_fwnode_irqchip(parent_fwspec.fwnode)) { parent_fwspec.param_count = 2; parent_fwspec.param[0] = hwirq + priv->parent_irq_base; parent_fwspec.param[1] = fwspec->param[1]; } else return -EINVAL; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); } static const struct irq_domain_ops xgene_gpio_sb_domain_ops = { .translate = xgene_gpio_sb_domain_translate, .alloc = xgene_gpio_sb_domain_alloc, .free = irq_domain_free_irqs_common, .activate = xgene_gpio_sb_domain_activate, .deactivate = xgene_gpio_sb_domain_deactivate, }; static int xgene_gpio_sb_probe(struct platform_device *pdev) { struct xgene_gpio_sb *priv; int ret; void __iomem *regs; struct irq_domain *parent_domain = NULL; u32 val32; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); priv->regs = regs; ret = platform_get_irq(pdev, 0); if (ret > 0) { priv->parent_irq_base = irq_get_irq_data(ret)->hwirq; parent_domain = irq_get_irq_data(ret)->domain; } if (!parent_domain) { dev_err(&pdev->dev, "unable to obtain parent domain\n"); return -ENODEV; } ret = bgpio_init(&priv->gc, &pdev->dev, 4, regs + MPA_GPIO_IN_ADDR, regs + MPA_GPIO_OUT_ADDR, NULL, regs + MPA_GPIO_OE_ADDR, NULL, 0); if (ret) return ret; priv->gc.to_irq = xgene_gpio_sb_to_irq; /* Retrieve start irq pin, use default if property not found */ priv->irq_start = XGENE_DFLT_IRQ_START_PIN; if (!device_property_read_u32(&pdev->dev, XGENE_IRQ_START_PROPERTY, &val32)) priv->irq_start = val32; /* Retrieve number irqs, use default if property not found */ priv->nirq = XGENE_DFLT_MAX_NIRQ; if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32)) priv->nirq = val32; /* Retrieve number gpio, use default if property not found */ priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO; if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32)) priv->gc.ngpio = val32; dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n", priv->gc.ngpio, priv->nirq, 
priv->irq_start); platform_set_drvdata(pdev, priv); priv->irq_domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, pdev->dev.fwnode, &xgene_gpio_sb_domain_ops, priv); if (!priv->irq_domain) return -ENODEV; priv->gc.irq.domain = priv->irq_domain; ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv); if (ret) { dev_err(&pdev->dev, "failed to register X-Gene GPIO Standby driver\n"); irq_domain_remove(priv->irq_domain); return ret; } dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n"); /* Register interrupt handlers for GPIO signaled ACPI Events */ acpi_gpiochip_request_interrupts(&priv->gc); return ret; } static int xgene_gpio_sb_remove(struct platform_device *pdev) { struct xgene_gpio_sb *priv = platform_get_drvdata(pdev); acpi_gpiochip_free_interrupts(&priv->gc); irq_domain_remove(priv->irq_domain); return 0; } static const struct of_device_id xgene_gpio_sb_of_match[] = { {.compatible = "apm,xgene-gpio-sb", }, {}, }; MODULE_DEVICE_TABLE(of, xgene_gpio_sb_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_gpio_sb_acpi_match[] = { {"APMC0D15", 0}, {}, }; MODULE_DEVICE_TABLE(acpi, xgene_gpio_sb_acpi_match); #endif static struct platform_driver xgene_gpio_sb_driver = { .driver = { .name = "xgene-gpio-sb", .of_match_table = xgene_gpio_sb_of_match, .acpi_match_table = ACPI_PTR(xgene_gpio_sb_acpi_match), }, .probe = xgene_gpio_sb_probe, .remove = xgene_gpio_sb_remove, }; module_platform_driver(xgene_gpio_sb_driver); MODULE_AUTHOR("AppliedMicro"); MODULE_DESCRIPTION("APM X-Gene GPIO Standby driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-xgene-sb.c
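The X-Gene standby block only routes interrupts for pins starting at irq_start, so the HWIRQ_TO_GPIO()/GPIO_TO_HWIRQ() macros are a simple offset, and GPIO_MASK() picks the bit within a 32-bit register. A standalone sketch using the driver's default values:

/* Illustrative only; values are the driver's defaults. */
#include <stdio.h>

#define IRQ_START 8 /* XGENE_DFLT_IRQ_START_PIN */
#define HWIRQ_TO_GPIO(hwirq) ((hwirq) + IRQ_START)
#define GPIO_TO_HWIRQ(gpio)  ((gpio) - IRQ_START)
#define GPIO_MASK(x) (1U << ((x) % 32))

int main(void)
{
	/* six parent hwirqs (XGENE_DFLT_MAX_NIRQ) map to pins 8..13 */
	for (unsigned int hwirq = 0; hwirq < 6; hwirq++)
		printf("hwirq %u <-> gpio %u, mask 0x%08x\n", hwirq,
		       HWIRQ_TO_GPIO(hwirq), GPIO_MASK(HWIRQ_TO_GPIO(hwirq)));
	return 0;
}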
// SPDX-License-Identifier: GPL-2.0+ /* * gpiolib support for Wolfson WM835x PMICs * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <[email protected]> * */ #include <linux/gpio/driver.h> #include <linux/kernel.h> #include <linux/mfd/core.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/gpio.h> struct wm8350_gpio_data { struct wm8350 *wm8350; struct gpio_chip gpio_chip; }; static int wm8350_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; return wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << offset); } static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; int ret; ret = wm8350_reg_read(wm8350, WM8350_GPIO_LEVEL); if (ret < 0) return ret; if (ret & (1 << offset)) return 1; else return 0; } static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; if (value) wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); else wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset); } static int wm8350_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; int ret; ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << offset); if (ret < 0) return ret; /* Don't have an atomic direction/value setup */ wm8350_gpio_set(chip, offset, value); return 0; } static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip); struct wm8350 *wm8350 = wm8350_gpio->wm8350; if (!wm8350->irq_base) return -EINVAL; return wm8350->irq_base + WM8350_IRQ_GPIO(offset); } static const struct gpio_chip template_chip = { .label = "wm8350", .owner = THIS_MODULE, .direction_input = wm8350_gpio_direction_in, .get = wm8350_gpio_get, .direction_output = wm8350_gpio_direction_out, .set = wm8350_gpio_set, .to_irq = wm8350_gpio_to_irq, .can_sleep = true, }; static int wm8350_gpio_probe(struct platform_device *pdev) { struct wm8350 *wm8350 = dev_get_drvdata(pdev->dev.parent); struct wm8350_platform_data *pdata = dev_get_platdata(wm8350->dev); struct wm8350_gpio_data *wm8350_gpio; wm8350_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8350_gpio), GFP_KERNEL); if (wm8350_gpio == NULL) return -ENOMEM; wm8350_gpio->wm8350 = wm8350; wm8350_gpio->gpio_chip = template_chip; wm8350_gpio->gpio_chip.ngpio = 13; wm8350_gpio->gpio_chip.parent = &pdev->dev; if (pdata && pdata->gpio_base) wm8350_gpio->gpio_chip.base = pdata->gpio_base; else wm8350_gpio->gpio_chip.base = -1; return devm_gpiochip_add_data(&pdev->dev, &wm8350_gpio->gpio_chip, wm8350_gpio); } static struct platform_driver wm8350_gpio_driver = { .driver.name = "wm8350-gpio", .probe = wm8350_gpio_probe, }; static int __init wm8350_gpio_init(void) { return platform_driver_register(&wm8350_gpio_driver); } subsys_initcall(wm8350_gpio_init); static void __exit wm8350_gpio_exit(void) { platform_driver_unregister(&wm8350_gpio_driver); } module_exit(wm8350_gpio_exit); MODULE_AUTHOR("Mark Brown <[email protected]>"); MODULE_DESCRIPTION("GPIO interface for WM8350 PMICs"); 
MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8350-gpio");
linux-master
drivers/gpio/gpio-wm8350.c
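wm8350_gpio_direction_out() notes that the chip has no atomic direction/value setup: direction and level live in separate registers, so the pin is switched to output first and only then driven to the requested level. A standalone sketch of that ordering over shadow registers, showing where the glitch window sits:

/* Illustrative only; the two registers are modeled as plain variables. */
#include <stdint.h>
#include <stdio.h>

static uint16_t dir_reg;   /* 1 = input, like WM8350_GPIO_CONFIGURATION_I_O */
static uint16_t level_reg; /* like WM8350_GPIO_LEVEL */

static void direction_output(unsigned int offset, int value)
{
	dir_reg &= ~(1u << offset); /* step 1: pin becomes an output... */
	/* ...window here: the pin drives whatever level_reg last held */
	if (value)
		level_reg |= 1u << offset;
	else
		level_reg &= ~(1u << offset);
}

int main(void)
{
	dir_reg = 0xffff;   /* all pins inputs after reset */
	level_reg = 0x0001; /* stale high latched on pin 0 */
	direction_output(0, 0); /* briefly drives high before going low */
	printf("dir=0x%04x level=0x%04x\n", dir_reg, level_reg);
	return 0;
}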
// SPDX-License-Identifier: GPL-2.0-only /* * Atheros AR71XX/AR724X/AR913X GPIO API support * * Copyright (C) 2015 Alban Bedel <[email protected]> * Copyright (C) 2010-2011 Jaiganesh Narayanan <[email protected]> * Copyright (C) 2008-2011 Gabor Juhos <[email protected]> * Copyright (C) 2008 Imre Kaloz <[email protected]> */ #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/platform_data/gpio-ath79.h> #include <linux/of.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/irq.h> #define AR71XX_GPIO_REG_OE 0x00 #define AR71XX_GPIO_REG_IN 0x04 #define AR71XX_GPIO_REG_SET 0x0c #define AR71XX_GPIO_REG_CLEAR 0x10 #define AR71XX_GPIO_REG_INT_ENABLE 0x14 #define AR71XX_GPIO_REG_INT_TYPE 0x18 #define AR71XX_GPIO_REG_INT_POLARITY 0x1c #define AR71XX_GPIO_REG_INT_PENDING 0x20 #define AR71XX_GPIO_REG_INT_MASK 0x24 struct ath79_gpio_ctrl { struct gpio_chip gc; void __iomem *base; raw_spinlock_t lock; unsigned long both_edges; }; static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); return container_of(gc, struct ath79_gpio_ctrl, gc); } static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg) { return readl(ctrl->base + reg); } static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl, unsigned reg, u32 val) { writel(val, ctrl->base + reg); } static bool ath79_gpio_update_bits( struct ath79_gpio_ctrl *ctrl, unsigned reg, u32 mask, u32 bits) { u32 old_val, new_val; old_val = ath79_gpio_read(ctrl, reg); new_val = (old_val & ~mask) | (bits & mask); if (new_val != old_val) ath79_gpio_write(ctrl, reg, new_val); return new_val != old_val; } static void ath79_gpio_irq_unmask(struct irq_data *data) { struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data); u32 mask = BIT(irqd_to_hwirq(data)); unsigned long flags; gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data)); raw_spin_lock_irqsave(&ctrl->lock, flags); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask); raw_spin_unlock_irqrestore(&ctrl->lock, flags); } static void ath79_gpio_irq_mask(struct irq_data *data) { struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data); u32 mask = BIT(irqd_to_hwirq(data)); unsigned long flags; raw_spin_lock_irqsave(&ctrl->lock, flags); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0); raw_spin_unlock_irqrestore(&ctrl->lock, flags); gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data)); } static void ath79_gpio_irq_enable(struct irq_data *data) { struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data); u32 mask = BIT(irqd_to_hwirq(data)); unsigned long flags; raw_spin_lock_irqsave(&ctrl->lock, flags); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask); raw_spin_unlock_irqrestore(&ctrl->lock, flags); } static void ath79_gpio_irq_disable(struct irq_data *data) { struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data); u32 mask = BIT(irqd_to_hwirq(data)); unsigned long flags; raw_spin_lock_irqsave(&ctrl->lock, flags); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0); raw_spin_unlock_irqrestore(&ctrl->lock, flags); } static int ath79_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data); u32 mask = BIT(irqd_to_hwirq(data)); u32 type = 0, polarity = 0; unsigned long flags; bool disabled; switch (flow_type) 
{ case IRQ_TYPE_EDGE_RISING: polarity |= mask; fallthrough; case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: break; case IRQ_TYPE_LEVEL_HIGH: polarity |= mask; fallthrough; case IRQ_TYPE_LEVEL_LOW: type |= mask; break; default: return -EINVAL; } raw_spin_lock_irqsave(&ctrl->lock, flags); if (flow_type == IRQ_TYPE_EDGE_BOTH) { ctrl->both_edges |= mask; polarity = ~ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN); } else { ctrl->both_edges &= ~mask; } /* As the IRQ configuration can't be loaded atomically we * have to disable the interrupt while the configuration state * is invalid. */ disabled = ath79_gpio_update_bits( ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0); ath79_gpio_update_bits( ctrl, AR71XX_GPIO_REG_INT_TYPE, mask, type); ath79_gpio_update_bits( ctrl, AR71XX_GPIO_REG_INT_POLARITY, mask, polarity); if (disabled) ath79_gpio_update_bits( ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask); raw_spin_unlock_irqrestore(&ctrl->lock, flags); return 0; } static const struct irq_chip ath79_gpio_irqchip = { .name = "gpio-ath79", .irq_enable = ath79_gpio_irq_enable, .irq_disable = ath79_gpio_irq_disable, .irq_mask = ath79_gpio_irq_mask, .irq_unmask = ath79_gpio_irq_unmask, .irq_set_type = ath79_gpio_irq_set_type, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void ath79_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); struct ath79_gpio_ctrl *ctrl = container_of(gc, struct ath79_gpio_ctrl, gc); unsigned long flags, pending; u32 both_edges, state; int irq; chained_irq_enter(irqchip, desc); raw_spin_lock_irqsave(&ctrl->lock, flags); pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING); /* Update the polarity of the both edges irqs */ both_edges = ctrl->both_edges & pending; if (both_edges) { state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN); ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY, both_edges, ~state); } raw_spin_unlock_irqrestore(&ctrl->lock, flags); for_each_set_bit(irq, &pending, gc->ngpio) generic_handle_domain_irq(gc->irq.domain, irq); chained_irq_exit(irqchip, desc); } static const struct of_device_id ath79_gpio_of_match[] = { { .compatible = "qca,ar7100-gpio" }, { .compatible = "qca,ar9340-gpio" }, {}, }; MODULE_DEVICE_TABLE(of, ath79_gpio_of_match); static int ath79_gpio_probe(struct platform_device *pdev) { struct ath79_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct ath79_gpio_ctrl *ctrl; struct gpio_irq_chip *girq; u32 ath79_gpio_count; bool oe_inverted; int err; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; if (np) { err = of_property_read_u32(np, "ngpios", &ath79_gpio_count); if (err) { dev_err(dev, "ngpios property is not valid\n"); return err; } oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio"); } else if (pdata) { ath79_gpio_count = pdata->ngpios; oe_inverted = pdata->oe_inverted; } else { dev_err(dev, "No DT node or platform data found\n"); return -EINVAL; } if (ath79_gpio_count >= 32) { dev_err(dev, "ngpios must be less than 32\n"); return -EINVAL; } ctrl->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctrl->base)) return PTR_ERR(ctrl->base); raw_spin_lock_init(&ctrl->lock); err = bgpio_init(&ctrl->gc, dev, 4, ctrl->base + AR71XX_GPIO_REG_IN, ctrl->base + AR71XX_GPIO_REG_SET, ctrl->base + AR71XX_GPIO_REG_CLEAR, oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE, oe_inverted ? 
ctrl->base + AR71XX_GPIO_REG_OE : NULL, 0); if (err) { dev_err(dev, "bgpio_init failed\n"); return err; } /* Use base 0 to stay compatible with legacy platforms */ ctrl->gc.base = 0; /* Optional interrupt setup */ if (!np || of_property_read_bool(np, "interrupt-controller")) { girq = &ctrl->gc.irq; gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip); girq->parent_handler = ath79_gpio_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; girq->parents[0] = platform_get_irq(pdev, 0); girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; } return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl); } static struct platform_driver ath79_gpio_driver = { .driver = { .name = "ath79-gpio", .of_match_table = ath79_gpio_of_match, }, .probe = ath79_gpio_probe, }; module_platform_driver(ath79_gpio_driver); MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-ath79.c
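The ath79 driver above emulates both-edge triggering in software: whenever a both-edge line fires, the handler reprograms INT_POLARITY to the complement of the current input level so the next opposite edge is caught. Below is a minimal, self-contained userspace sketch of that rearming arithmetic; the register variables, helper names, and the choice of line 3 are illustrative stand-ins, not kernel API.

/* Hypothetical userspace model - not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_in;                      /* stands in for the IN register */
static uint32_t reg_polarity;                /* stands in for INT_POLARITY */
static const uint32_t both_edges = 1u << 3;  /* pretend line 3 is both-edge */

/* Mirrors the driver's read-modify-write helper: returns nonzero when
 * the register content actually changed. */
static int update_bits(uint32_t *reg, uint32_t mask, uint32_t bits)
{
	uint32_t old = *reg;
	uint32_t val = (old & ~mask) | (bits & mask);

	*reg = val;
	return val != old;
}

/* On a pending both-edge interrupt, rearm for the opposite edge by
 * writing the complement of the current input level into POLARITY. */
static void handle_pending(uint32_t pending)
{
	uint32_t be = both_edges & pending;

	if (be)
		update_bits(&reg_polarity, be, ~reg_in);
}

int main(void)
{
	update_bits(&reg_polarity, both_edges, ~reg_in); /* line low: arm rising */
	reg_in |= 1u << 3;        /* rising edge happens */
	handle_pending(1u << 3);  /* handler flips polarity */
	printf("polarity bit 3 is now %u (0 = wait for falling edge)\n",
	       (unsigned)((reg_polarity >> 3) & 1u));
	return 0;
}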
// SPDX-License-Identifier: GPL-2.0-only /* * MC33880 high-side/low-side switch GPIO driver * Copyright (c) 2009 Intel Corporation */ /* Supports: * Freescale MC33880 high-side/low-side switch */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/spi/spi.h> #include <linux/spi/mc33880.h> #include <linux/gpio/driver.h> #include <linux/slab.h> #include <linux/module.h> #define DRIVER_NAME "mc33880" /* * Pin configurations, see MAX7301 datasheet page 6 */ #define PIN_CONFIG_MASK 0x03 #define PIN_CONFIG_IN_PULLUP 0x03 #define PIN_CONFIG_IN_WO_PULLUP 0x02 #define PIN_CONFIG_OUT 0x01 #define PIN_NUMBER 8 /* * Some registers must be read back to modify. * To save time we cache them here in memory */ struct mc33880 { struct mutex lock; /* protect from simultaneous accesses */ u8 port_config; struct gpio_chip chip; struct spi_device *spi; }; static int mc33880_write_config(struct mc33880 *mc) { return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config)); } static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value) { if (value) mc->port_config |= 1 << offset; else mc->port_config &= ~(1 << offset); return mc33880_write_config(mc); } static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value) { struct mc33880 *mc = gpiochip_get_data(chip); mutex_lock(&mc->lock); __mc33880_set(mc, offset, value); mutex_unlock(&mc->lock); } static int mc33880_probe(struct spi_device *spi) { struct mc33880 *mc; struct mc33880_platform_data *pdata; int ret; pdata = dev_get_platdata(&spi->dev); if (!pdata || !pdata->base) { dev_dbg(&spi->dev, "incorrect or missing platform data\n"); return -EINVAL; } /* * bits_per_word cannot be configured in platform data */ spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) return ret; mc = devm_kzalloc(&spi->dev, sizeof(struct mc33880), GFP_KERNEL); if (!mc) return -ENOMEM; mutex_init(&mc->lock); spi_set_drvdata(spi, mc); mc->spi = spi; mc->chip.label = DRIVER_NAME, mc->chip.set = mc33880_set; mc->chip.base = pdata->base; mc->chip.ngpio = PIN_NUMBER; mc->chip.can_sleep = true; mc->chip.parent = &spi->dev; mc->chip.owner = THIS_MODULE; mc->port_config = 0x00; /* write twice, because during initialisation the first setting * is just for testing SPI communication, and the second is the * "real" configuration */ ret = mc33880_write_config(mc); mc->port_config = 0x00; if (!ret) ret = mc33880_write_config(mc); if (ret) { dev_err(&spi->dev, "Failed writing to " DRIVER_NAME ": %d\n", ret); goto exit_destroy; } ret = gpiochip_add_data(&mc->chip, mc); if (ret) goto exit_destroy; return ret; exit_destroy: mutex_destroy(&mc->lock); return ret; } static void mc33880_remove(struct spi_device *spi) { struct mc33880 *mc; mc = spi_get_drvdata(spi); gpiochip_remove(&mc->chip); mutex_destroy(&mc->lock); } static struct spi_driver mc33880_driver = { .driver = { .name = DRIVER_NAME, }, .probe = mc33880_probe, .remove = mc33880_remove, }; static int __init mc33880_init(void) { return spi_register_driver(&mc33880_driver); } /* register after spi postcore initcall and before * subsys initcalls that may rely on these GPIOs */ subsys_initcall(mc33880_init); static void __exit mc33880_exit(void) { spi_unregister_driver(&mc33880_driver); } module_exit(mc33880_exit); MODULE_AUTHOR("Mocean Laboratories <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-mc33880.c
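Because the MC33880 is write-only over SPI, the driver above keeps a shadow copy (port_config) and flushes the whole byte on every pin change. The following userspace sketch models just that caching pattern; bus_write() and the struct are hypothetical stand-ins for the SPI transfer and driver state.

/* Hypothetical userspace model of the shadow-register pattern - not
 * kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint8_t wire_byte;		/* last byte "seen" by the fake bus */

static int bus_write(uint8_t val)	/* stand-in for spi_write() */
{
	wire_byte = val;
	return 0;
}

struct sw_chip {
	uint8_t port_config;	/* cached state: the chip is write-only */
};

/* Update one bit in the cache, then flush the whole byte to the bus,
 * the same shape as __mc33880_set(). */
static int sw_set(struct sw_chip *c, unsigned int offset, int value)
{
	if (value)
		c->port_config |= 1u << offset;
	else
		c->port_config &= ~(1u << offset);
	return bus_write(c->port_config);
}

int main(void)
{
	struct sw_chip c = { .port_config = 0 };

	sw_set(&c, 2, 1);
	sw_set(&c, 5, 1);
	sw_set(&c, 2, 0);
	printf("wire sees 0x%02x\n", (unsigned)wire_byte);	/* 0x20 */
	return 0;
}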
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES */ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/device.h> #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/resource.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/types.h> /* * There are 3 YU GPIO blocks: * gpio[0]: HOST_GPIO0->HOST_GPIO31 * gpio[1]: HOST_GPIO32->HOST_GPIO63 * gpio[2]: HOST_GPIO64->HOST_GPIO69 */ #define MLXBF2_GPIO_MAX_PINS_PER_BLOCK 32 /* * arm_gpio_lock register: * bit[31] lock status: active if set * bit[15:0] set lock * The lock is enabled only if 0xd42f is written to this field */ #define YU_ARM_GPIO_LOCK_ADDR 0x2801088 #define YU_ARM_GPIO_LOCK_SIZE 0x8 #define YU_LOCK_ACTIVE_BIT(val) (val >> 31) #define YU_ARM_GPIO_LOCK_ACQUIRE 0xd42f #define YU_ARM_GPIO_LOCK_RELEASE 0x0 /* * gpio[x] block registers and their offset */ #define YU_GPIO_DATAIN 0x04 #define YU_GPIO_MODE1 0x08 #define YU_GPIO_MODE0 0x0c #define YU_GPIO_DATASET 0x14 #define YU_GPIO_DATACLEAR 0x18 #define YU_GPIO_CAUSE_RISE_EN 0x44 #define YU_GPIO_CAUSE_FALL_EN 0x48 #define YU_GPIO_MODE1_CLEAR 0x50 #define YU_GPIO_MODE0_SET 0x54 #define YU_GPIO_MODE0_CLEAR 0x58 #define YU_GPIO_CAUSE_OR_CAUSE_EVTEN0 0x80 #define YU_GPIO_CAUSE_OR_EVTEN0 0x94 #define YU_GPIO_CAUSE_OR_CLRCAUSE 0x98 struct mlxbf2_gpio_context_save_regs { u32 gpio_mode0; u32 gpio_mode1; }; /* BlueField-2 gpio block context structure. */ struct mlxbf2_gpio_context { struct gpio_chip gc; /* YU GPIO blocks address */ void __iomem *gpio_io; struct device *dev; struct mlxbf2_gpio_context_save_regs *csave_regs; }; /* BlueField-2 gpio shared structure. */ struct mlxbf2_gpio_param { void __iomem *io; struct resource *res; struct mutex *lock; }; static struct resource yu_arm_gpio_lock_res = DEFINE_RES_MEM_NAMED(YU_ARM_GPIO_LOCK_ADDR, YU_ARM_GPIO_LOCK_SIZE, "YU_ARM_GPIO_LOCK"); static DEFINE_MUTEX(yu_arm_gpio_lock_mutex); static struct mlxbf2_gpio_param yu_arm_gpio_lock_param = { .res = &yu_arm_gpio_lock_res, .lock = &yu_arm_gpio_lock_mutex, }; /* Request memory region and map yu_arm_gpio_lock resource */ static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; resource_size_t size; int ret = 0; mutex_lock(yu_arm_gpio_lock_param.lock); /* Check if the memory map already exists */ if (yu_arm_gpio_lock_param.io) goto exit; res = yu_arm_gpio_lock_param.res; size = resource_size(res); if (!devm_request_mem_region(dev, res->start, size, res->name)) { ret = -EFAULT; goto exit; } yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size); if (!yu_arm_gpio_lock_param.io) ret = -ENOMEM; exit: mutex_unlock(yu_arm_gpio_lock_param.lock); return ret; } /* * Acquire the YU arm_gpio_lock to be able to change the direction * mode. If the lock_active bit is already set, return an error. 
*/ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) { u32 arm_gpio_lock_val; mutex_lock(yu_arm_gpio_lock_param.lock); raw_spin_lock(&gs->gc.bgpio_lock); arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); /* * When lock active bit[31] is set, ModeX is write enabled */ if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { raw_spin_unlock(&gs->gc.bgpio_lock); mutex_unlock(yu_arm_gpio_lock_param.lock); return -EINVAL; } writel(YU_ARM_GPIO_LOCK_ACQUIRE, yu_arm_gpio_lock_param.io); return 0; } /* * Release the YU arm_gpio_lock after changing the direction mode. */ static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) __releases(&gs->gc.bgpio_lock) __releases(yu_arm_gpio_lock_param.lock) { writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); raw_spin_unlock(&gs->gc.bgpio_lock); mutex_unlock(yu_arm_gpio_lock_param.lock); } /* * mode0 and mode1 are both locked by the gpio_lock field. * * Together, mode0 and mode1 define the gpio Mode depending also * on Reg_DataOut. * * {mode1,mode0}:{Reg_DataOut=0,Reg_DataOut=1}->{DataOut=0,DataOut=1} * * {0,0}:Reg_DataOut{0,1}->{Z,Z} Input PAD * {0,1}:Reg_DataOut{0,1}->{0,1} Full drive Output PAD * {1,0}:Reg_DataOut{0,1}->{0,Z} 0-set PAD to low, 1-float * {1,1}:Reg_DataOut{0,1}->{Z,1} 0-float, 1-set PAD to high */ /* * Set input direction: * {mode1,mode0} = {0,0} */ static int mlxbf2_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip); int ret; /* * Although the arm_gpio_lock was set in the probe function, check again * if it is still enabled to be able to write to the ModeX registers. */ ret = mlxbf2_gpio_lock_acquire(gs); if (ret < 0) return ret; writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_CLEAR); writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR); mlxbf2_gpio_lock_release(gs); return ret; } /* * Set output direction: * {mode1,mode0} = {0,1} */ static int mlxbf2_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip); int ret = 0; /* * Although the arm_gpio_lock was set in the probe function, * check again that it is still enabled to be able to write to the * ModeX registers.
*/ ret = mlxbf2_gpio_lock_acquire(gs); if (ret < 0) return ret; writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR); writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_SET); mlxbf2_gpio_lock_release(gs); return ret; } static void mlxbf2_gpio_irq_enable(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc); int offset = irqd_to_hwirq(irqd); unsigned long flags; u32 val; gpiochip_enable_irq(gc, irqd_to_hwirq(irqd)); raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags); val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE); val |= BIT(offset); writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE); val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0); val |= BIT(offset); writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0); raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags); } static void mlxbf2_gpio_irq_disable(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc); int offset = irqd_to_hwirq(irqd); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags); val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0); val &= ~BIT(offset); writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0); raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags); gpiochip_disable_irq(gc, irqd_to_hwirq(irqd)); } static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr) { struct mlxbf2_gpio_context *gs = ptr; struct gpio_chip *gc = &gs->gc; unsigned long pending; u32 level; pending = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CAUSE_EVTEN0); writel(pending, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE); for_each_set_bit(level, &pending, gc->ngpio) generic_handle_domain_irq_safe(gc->irq.domain, level); return IRQ_RETVAL(pending); } static int mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc); int offset = irqd_to_hwirq(irqd); unsigned long flags; bool fall = false; bool rise = false; u32 val; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: fall = true; rise = true; break; case IRQ_TYPE_EDGE_RISING: rise = true; break; case IRQ_TYPE_EDGE_FALLING: fall = true; break; default: return -EINVAL; } raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags); if (fall) { val = readl(gs->gpio_io + YU_GPIO_CAUSE_FALL_EN); val |= BIT(offset); writel(val, gs->gpio_io + YU_GPIO_CAUSE_FALL_EN); } if (rise) { val = readl(gs->gpio_io + YU_GPIO_CAUSE_RISE_EN); val |= BIT(offset); writel(val, gs->gpio_io + YU_GPIO_CAUSE_RISE_EN); } raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags); return 0; } static void mlxbf2_gpio_irq_print_chip(struct irq_data *irqd, struct seq_file *p) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc); seq_printf(p, dev_name(gs->dev)); } static const struct irq_chip mlxbf2_gpio_irq_chip = { .irq_set_type = mlxbf2_gpio_irq_set_type, .irq_enable = mlxbf2_gpio_irq_enable, .irq_disable = mlxbf2_gpio_irq_disable, .irq_print_chip = mlxbf2_gpio_irq_print_chip, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; /* BlueField-2 GPIO driver initialization routine. 
*/ static int mlxbf2_gpio_probe(struct platform_device *pdev) { struct mlxbf2_gpio_context *gs; struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; struct gpio_chip *gc; unsigned int npins; const char *name; int ret, irq; name = dev_name(dev); gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); if (!gs) return -ENOMEM; gs->dev = dev; /* YU GPIO block address */ gs->gpio_io = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gs->gpio_io)) return PTR_ERR(gs->gpio_io); ret = mlxbf2_gpio_get_lock_res(pdev); if (ret) { dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n"); return ret; } if (device_property_read_u32(dev, "npins", &npins)) npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK; gc = &gs->gc; ret = bgpio_init(gc, dev, 4, gs->gpio_io + YU_GPIO_DATAIN, gs->gpio_io + YU_GPIO_DATASET, gs->gpio_io + YU_GPIO_DATACLEAR, NULL, NULL, 0); if (ret) { dev_err(dev, "bgpio_init failed\n"); return ret; } gc->direction_input = mlxbf2_gpio_direction_input; gc->direction_output = mlxbf2_gpio_direction_output; gc->ngpio = npins; gc->owner = THIS_MODULE; irq = platform_get_irq(pdev, 0); if (irq >= 0) { girq = &gs->gc.irq; gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip); girq->handler = handle_simple_irq; girq->default_type = IRQ_TYPE_NONE; /* This will let us handle the parent IRQ in the driver */ girq->num_parents = 0; girq->parents = NULL; girq->parent_handler = NULL; /* * Directly request the irq here instead of passing * a flow-handler because the irq is shared. */ ret = devm_request_irq(dev, irq, mlxbf2_gpio_irq_handler, IRQF_SHARED, name, gs); if (ret) { dev_err(dev, "failed to request IRQ"); return ret; } } platform_set_drvdata(pdev, gs); ret = devm_gpiochip_add_data(dev, &gs->gc, gs); if (ret) { dev_err(dev, "Failed adding memory mapped gpiochip\n"); return ret; } return 0; } static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev) { struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev); gs->csave_regs->gpio_mode0 = readl(gs->gpio_io + YU_GPIO_MODE0); gs->csave_regs->gpio_mode1 = readl(gs->gpio_io + YU_GPIO_MODE1); return 0; } static int __maybe_unused mlxbf2_gpio_resume(struct device *dev) { struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev); writel(gs->csave_regs->gpio_mode0, gs->gpio_io + YU_GPIO_MODE0); writel(gs->csave_regs->gpio_mode1, gs->gpio_io + YU_GPIO_MODE1); return 0; } static SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume); static const struct acpi_device_id __maybe_unused mlxbf2_gpio_acpi_match[] = { { "MLNXBF22", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, mlxbf2_gpio_acpi_match); static struct platform_driver mlxbf2_gpio_driver = { .driver = { .name = "mlxbf2_gpio", .acpi_match_table = mlxbf2_gpio_acpi_match, .pm = &mlxbf2_pm_ops, }, .probe = mlxbf2_gpio_probe, }; module_platform_driver(mlxbf2_gpio_driver); MODULE_DESCRIPTION("Mellanox BlueField-2 GPIO Driver"); MODULE_AUTHOR("Asmaa Mnebhi <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-mlxbf2.c
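A notable detail in gpio-mlxbf2 is the hardware handshake around the shared arm_gpio_lock word: bit 31 reports whether the lock is held, and writing the magic value 0xd42f takes it. The sketch below models the acquire/release flow in plain userspace C; the constants come from the driver above, while the variables and the behavior of the "hardware" are simplifying assumptions.

/* Userspace sketch of the arm_gpio_lock handshake - not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define LOCK_ACQUIRE	0xd42f
#define LOCK_RELEASE	0x0
#define LOCK_ACTIVE(v)	((v) >> 31)

static uint32_t lock_reg;	/* stands in for the MMIO lock word */

static int lock_acquire(void)
{
	if (LOCK_ACTIVE(lock_reg))
		return -1;	/* held elsewhere: the driver returns -EINVAL */
	lock_reg = LOCK_ACQUIRE;
	lock_reg |= 1u << 31;	/* assume hardware raises the active bit */
	return 0;
}

static void lock_release(void)
{
	lock_reg = LOCK_RELEASE;
}

int main(void)
{
	if (lock_acquire() == 0) {
		/* ModeX registers are writable only while the lock is held */
		printf("lock held, reg=0x%08x\n", (unsigned)lock_reg);
		lock_release();
	}
	return 0;
}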
// SPDX-License-Identifier: GPL-2.0+ /* * gpiolib support for Wolfson WM831x PMICs * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <[email protected]> * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/mfd/core.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/gpio.h> #include <linux/mfd/wm831x/irq.h> struct wm831x_gpio { struct wm831x *wm831x; struct gpio_chip gpio_chip; }; static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; int val = WM831X_GPN_DIR; if (wm831x->has_gpio_ena) val |= WM831X_GPN_TRI; return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset, WM831X_GPN_DIR | WM831X_GPN_TRI | WM831X_GPN_FN_MASK, val); } static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; int ret; ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); if (ret < 0) return ret; if (ret & 1 << offset) return 1; else return 0; } static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset, value << offset); } static int wm831x_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; int val = 0; int ret; if (wm831x->has_gpio_ena) val |= WM831X_GPN_TRI; ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset, WM831X_GPN_DIR | WM831X_GPN_TRI | WM831X_GPN_FN_MASK, val); if (ret < 0) return ret; /* Can only set GPIO state once it's in output mode */ wm831x_gpio_set(chip, offset, value); return 0; } static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; return irq_create_mapping(wm831x->irq_domain, WM831X_IRQ_GPIO_1 + offset); } static int wm831x_gpio_set_debounce(struct wm831x *wm831x, unsigned offset, unsigned debounce) { int reg = WM831X_GPIO1_CONTROL + offset; int ret, fn; ret = wm831x_reg_read(wm831x, reg); if (ret < 0) return ret; switch (ret & WM831X_GPN_FN_MASK) { case 0: case 1: break; default: /* Not in GPIO mode */ return -EBUSY; } if (debounce >= 32 && debounce <= 64) fn = 0; else if (debounce >= 4000 && debounce <= 8000) fn = 1; else return -EINVAL; return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn); } static int wm831x_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; int reg = WM831X_GPIO1_CONTROL + offset; switch (pinconf_to_config_param(config)) { case PIN_CONFIG_DRIVE_OPEN_DRAIN: return wm831x_set_bits(wm831x, reg, WM831X_GPN_OD_MASK, WM831X_GPN_OD); case PIN_CONFIG_DRIVE_PUSH_PULL: return wm831x_set_bits(wm831x, reg, WM831X_GPN_OD_MASK, 0); case PIN_CONFIG_INPUT_DEBOUNCE: return wm831x_gpio_set_debounce(wm831x, offset, pinconf_to_config_argument(config)); default: break; } return -ENOTSUPP; } #ifdef CONFIG_DEBUG_FS static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip 
*chip) { struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip); struct wm831x *wm831x = wm831x_gpio->wm831x; int i, tristated; for (i = 0; i < chip->ngpio; i++) { int gpio = i + chip->base; int reg; const char *label, *pull, *powerdomain; /* We report the GPIO even if it's not requested since * we're also reporting things like alternate * functions which apply even when the GPIO is not in * use as a GPIO. */ label = gpiochip_is_requested(chip, i); if (!label) label = "Unrequested"; seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label); reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i); if (reg < 0) { dev_err(wm831x->dev, "GPIO control %d read failed: %d\n", gpio, reg); seq_putc(s, '\n'); continue; } switch (reg & WM831X_GPN_PULL_MASK) { case WM831X_GPIO_PULL_NONE: pull = "nopull"; break; case WM831X_GPIO_PULL_DOWN: pull = "pulldown"; break; case WM831X_GPIO_PULL_UP: pull = "pullup"; break; default: pull = "INVALID PULL"; break; } switch (i + 1) { case 1 ... 3: case 7 ... 9: if (reg & WM831X_GPN_PWR_DOM) powerdomain = "VPMIC"; else powerdomain = "DBVDD"; break; case 4 ... 6: case 10 ... 12: if (reg & WM831X_GPN_PWR_DOM) powerdomain = "SYSVDD"; else powerdomain = "DBVDD"; break; case 13 ... 16: powerdomain = "TPVDD"; break; default: BUG(); break; } tristated = reg & WM831X_GPN_TRI; if (wm831x->has_gpio_ena) tristated = !tristated; seq_printf(s, " %s %s %s %s%s\n" " %s%s (0x%4x)\n", reg & WM831X_GPN_DIR ? "in" : "out", wm831x_gpio_get(chip, i) ? "high" : "low", pull, powerdomain, reg & WM831X_GPN_POL ? "" : " inverted", reg & WM831X_GPN_OD ? "open-drain" : "push-pull", tristated ? " tristated" : "", reg); } } #else #define wm831x_gpio_dbg_show NULL #endif static const struct gpio_chip template_chip = { .label = "wm831x", .owner = THIS_MODULE, .direction_input = wm831x_gpio_direction_in, .get = wm831x_gpio_get, .direction_output = wm831x_gpio_direction_out, .set = wm831x_gpio_set, .to_irq = wm831x_gpio_to_irq, .set_config = wm831x_set_config, .dbg_show = wm831x_gpio_dbg_show, .can_sleep = true, }; static int wm831x_gpio_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = &wm831x->pdata; struct wm831x_gpio *wm831x_gpio; device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent)); wm831x_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm831x_gpio), GFP_KERNEL); if (wm831x_gpio == NULL) return -ENOMEM; wm831x_gpio->wm831x = wm831x; wm831x_gpio->gpio_chip = template_chip; wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio; wm831x_gpio->gpio_chip.parent = &pdev->dev; if (pdata && pdata->gpio_base) wm831x_gpio->gpio_chip.base = pdata->gpio_base; else wm831x_gpio->gpio_chip.base = -1; return devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip, wm831x_gpio); } static struct platform_driver wm831x_gpio_driver = { .driver.name = "wm831x-gpio", .probe = wm831x_gpio_probe, }; static int __init wm831x_gpio_init(void) { return platform_driver_register(&wm831x_gpio_driver); } subsys_initcall(wm831x_gpio_init); static void __exit wm831x_gpio_exit(void) { platform_driver_unregister(&wm831x_gpio_driver); } module_exit(wm831x_gpio_exit); MODULE_AUTHOR("Mark Brown <[email protected]>"); MODULE_DESCRIPTION("GPIO interface for WM831x PMICs"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-gpio");
linux-master
drivers/gpio/gpio-wm831x.c
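The WM831x debounce hardware offers only two windows, so wm831x_gpio_set_debounce() maps a requested time in microseconds onto a function-select value or rejects the request. Here is a tiny userspace model of that mapping; the ranges are taken from the driver, while the function name and return convention are illustrative.

/* Sketch of the two debounce windows handled above. */
#include <stdio.h>

/* Map a requested debounce time (microseconds) to a GPn_FN selector,
 * or -1 where the driver would return -EINVAL. */
static int debounce_to_fn(unsigned int debounce_us)
{
	if (debounce_us >= 32 && debounce_us <= 64)
		return 0;		/* short hardware window */
	if (debounce_us >= 4000 && debounce_us <= 8000)
		return 1;		/* long hardware window */
	return -1;			/* no matching window */
}

int main(void)
{
	printf("%d %d %d\n", debounce_to_fn(50), debounce_to_fn(5000),
	       debounce_to_fn(100));	/* prints: 0 1 -1 */
	return 0;
}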
// SPDX-License-Identifier: GPL-2.0 // // IXP4 GPIO driver // Copyright (C) 2019 Linus Walleij <[email protected]> // // based on previous work and know-how from: // Deepak Saxena <[email protected]> #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/irqchip.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/bitops.h> #define IXP4XX_REG_GPOUT 0x00 #define IXP4XX_REG_GPOE 0x04 #define IXP4XX_REG_GPIN 0x08 #define IXP4XX_REG_GPIS 0x0C #define IXP4XX_REG_GPIT1 0x10 #define IXP4XX_REG_GPIT2 0x14 #define IXP4XX_REG_GPCLK 0x18 #define IXP4XX_REG_GPDBSEL 0x1C /* * The hardware uses 3 bits to indicate interrupt "style". * we clear and set these three bits accordingly. The lower 24 * bits in two registers (GPIT1 and GPIT2) are used to set up * the style for 8 lines each for a total of 16 GPIO lines. */ #define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0 #define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1 #define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2 #define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3 #define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4 #define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0) #define IXP4XX_GPIO_STYLE_SIZE 3 /** * struct ixp4xx_gpio - IXP4 GPIO state container * @dev: containing device for this instance * @fwnode: the fwnode for this GPIO chip * @gc: gpiochip for this instance * @base: remapped I/O-memory base * @irq_edge: Each bit represents an IRQ: 1: edge-triggered, * 0: level triggered */ struct ixp4xx_gpio { struct device *dev; struct fwnode_handle *fwnode; struct gpio_chip gc; void __iomem *base; unsigned long long irq_edge; }; static void ixp4xx_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ixp4xx_gpio *g = gpiochip_get_data(gc); __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS); } static void ixp4xx_gpio_mask_irq(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); irq_chip_mask_parent(d); gpiochip_disable_irq(gc, d->hwirq); } static void ixp4xx_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ixp4xx_gpio *g = gpiochip_get_data(gc); /* ACK when unmasking if not edge-triggered */ if (!(g->irq_edge & BIT(d->hwirq))) ixp4xx_gpio_irq_ack(d); gpiochip_enable_irq(gc, d->hwirq); irq_chip_unmask_parent(d); } static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ixp4xx_gpio *g = gpiochip_get_data(gc); int line = d->hwirq; unsigned long flags; u32 int_style; u32 int_reg; u32 val; switch (type) { case IRQ_TYPE_EDGE_BOTH: irq_set_handler_locked(d, handle_edge_irq); int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL; g->irq_edge |= BIT(d->hwirq); break; case IRQ_TYPE_EDGE_RISING: irq_set_handler_locked(d, handle_edge_irq); int_style = IXP4XX_GPIO_STYLE_RISING_EDGE; g->irq_edge |= BIT(d->hwirq); break; case IRQ_TYPE_EDGE_FALLING: irq_set_handler_locked(d, handle_edge_irq); int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE; g->irq_edge |= BIT(d->hwirq); break; case IRQ_TYPE_LEVEL_HIGH: irq_set_handler_locked(d, handle_level_irq); int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; g->irq_edge &= ~BIT(d->hwirq); break; case IRQ_TYPE_LEVEL_LOW: irq_set_handler_locked(d, handle_level_irq); int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; g->irq_edge &= ~BIT(d->hwirq); break; default: return -EINVAL; } if (line >= 8) { /* pins 8-15 */ line -= 8; int_reg = IXP4XX_REG_GPIT2; } else { /* pins 0-7 */ int_reg = IXP4XX_REG_GPIT1; } 
raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags); /* Clear the style for the appropriate pin */ val = __raw_readl(g->base + int_reg); val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE)); __raw_writel(val, g->base + int_reg); __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS); /* Set the new style */ val = __raw_readl(g->base + int_reg); val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); __raw_writel(val, g->base + int_reg); /* Force-configure this line as an input */ val = __raw_readl(g->base + IXP4XX_REG_GPOE); val |= BIT(d->hwirq); __raw_writel(val, g->base + IXP4XX_REG_GPOE); raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags); /* This parent only accept level high (asserted) */ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); } static const struct irq_chip ixp4xx_gpio_irqchip = { .name = "IXP4GPIO", .irq_ack = ixp4xx_gpio_irq_ack, .irq_mask = ixp4xx_gpio_mask_irq, .irq_unmask = ixp4xx_gpio_irq_unmask, .irq_set_type = ixp4xx_gpio_irq_set_type, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, unsigned int child, unsigned int child_type, unsigned int *parent, unsigned int *parent_type) { /* All these interrupts are level high in the CPU */ *parent_type = IRQ_TYPE_LEVEL_HIGH; /* GPIO lines 0..12 have dedicated IRQs */ if (child == 0) { *parent = 6; return 0; } if (child == 1) { *parent = 7; return 0; } if (child >= 2 && child <= 12) { *parent = child + 17; return 0; } return -EINVAL; } static int ixp4xx_gpio_probe(struct platform_device *pdev) { unsigned long flags; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct irq_domain *parent; struct ixp4xx_gpio *g; struct gpio_irq_chip *girq; struct device_node *irq_parent; int ret; g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL); if (!g) return -ENOMEM; g->dev = dev; g->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(g->base)) return PTR_ERR(g->base); irq_parent = of_irq_find_parent(np); if (!irq_parent) { dev_err(dev, "no IRQ parent node\n"); return -ENODEV; } parent = irq_find_host(irq_parent); if (!parent) { dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; } g->fwnode = of_node_to_fwnode(np); /* * Make sure GPIO 14 and 15 are NOT used as clocks but GPIO on * specific machines. */ if (of_machine_is_compatible("dlink,dsm-g600-a") || of_machine_is_compatible("iom,nas-100d")) __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK); /* * This is a very special big-endian ARM issue: when the IXP4xx is * run in big endian mode, all registers in the machine are switched * around to the CPU-native endianness. As you see mostly in the * driver we use __raw_readl()/__raw_writel() to access the registers * in the appropriate order. With the GPIO library we need to specify * byte order explicitly, so this flag needs to be set when compiling * for big endian. */ #if defined(CONFIG_CPU_BIG_ENDIAN) flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER; #else flags = 0; #endif /* Populate and register gpio chip */ ret = bgpio_init(&g->gc, dev, 4, g->base + IXP4XX_REG_GPIN, g->base + IXP4XX_REG_GPOUT, NULL, NULL, g->base + IXP4XX_REG_GPOE, flags); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); return ret; } g->gc.ngpio = 16; g->gc.label = "IXP4XX_GPIO_CHIP"; /* * TODO: when we have migrated to device tree and all GPIOs * are fetched using phandles, set this to -1 to get rid of * the fixed gpiochip base. 
*/ g->gc.base = 0; g->gc.parent = &pdev->dev; g->gc.owner = THIS_MODULE; girq = &g->gc.irq; gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip); girq->fwnode = g->fwnode; girq->parent_domain = parent; girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq; girq->handler = handle_bad_irq; girq->default_type = IRQ_TYPE_NONE; ret = devm_gpiochip_add_data(dev, &g->gc, g); if (ret) { dev_err(dev, "failed to add SoC gpiochip\n"); return ret; } platform_set_drvdata(pdev, g); dev_info(dev, "IXP4 GPIO registered\n"); return 0; } static const struct of_device_id ixp4xx_gpio_of_match[] = { { .compatible = "intel,ixp4xx-gpio", }, {}, }; static struct platform_driver ixp4xx_gpio_driver = { .driver = { .name = "ixp4xx-gpio", .of_match_table = ixp4xx_gpio_of_match, }, .probe = ixp4xx_gpio_probe, }; builtin_platform_driver(ixp4xx_gpio_driver);
linux-master
drivers/gpio/gpio-ixp4xx.c
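In the hierarchical IRQ setup above, only GPIO lines 0..12 own dedicated parent interrupts (6, 7, then child + 17). The short standalone program below reproduces that translation so the gaps are easy to see; it is a model of the mapping only, not of the irqdomain machinery.

/* Standalone model of the child-to-parent hwirq translation above. */
#include <stdio.h>

static int child_to_parent(unsigned int child, unsigned int *parent)
{
	if (child == 0) {
		*parent = 6;
		return 0;
	}
	if (child == 1) {
		*parent = 7;
		return 0;
	}
	if (child >= 2 && child <= 12) {
		*parent = child + 17;
		return 0;
	}
	return -1;	/* lines 13..15 have no dedicated parent IRQ */
}

int main(void)
{
	unsigned int c, p;

	for (c = 0; c < 16; c++) {
		if (child_to_parent(c, &p) == 0)
			printf("GPIO %u -> parent hwirq %u\n", c, p);
		else
			printf("GPIO %u -> no dedicated IRQ\n", c);
	}
	return 0;
}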
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2012 John Crispin <[email protected]> */ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/of.h> #include <linux/mutex.h> #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> /* * The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a * peripheral controller used to drive external shift register cascades. At most * 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem * to drive the 2 LSBs of the cascade automatically. */ /* control register 0 */ #define XWAY_STP_CON0 0x00 /* control register 1 */ #define XWAY_STP_CON1 0x04 /* data register 0 */ #define XWAY_STP_CPU0 0x08 /* data register 1 */ #define XWAY_STP_CPU1 0x0C /* access register */ #define XWAY_STP_AR 0x10 /* software or hardware update select bit */ #define XWAY_STP_CON_SWU BIT(31) /* automatic update rates */ #define XWAY_STP_2HZ 0 #define XWAY_STP_4HZ BIT(23) #define XWAY_STP_8HZ BIT(24) #define XWAY_STP_10HZ (BIT(24) | BIT(23)) #define XWAY_STP_SPEED_MASK (BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) #define XWAY_STP_FPIS_VALUE BIT(21) #define XWAY_STP_FPIS_MASK (BIT(20) | BIT(21)) /* clock source for automatic update */ #define XWAY_STP_UPD_FPI BIT(31) #define XWAY_STP_UPD_MASK (BIT(31) | BIT(30)) /* let the adsl core drive the 2 LSBs */ #define XWAY_STP_ADSL_SHIFT 24 #define XWAY_STP_ADSL_MASK 0x3 /* 2 groups of 3 bits can be driven by the phys */ #define XWAY_STP_PHY_MASK 0x7 #define XWAY_STP_PHY1_SHIFT 27 #define XWAY_STP_PHY2_SHIFT 3 #define XWAY_STP_PHY3_SHIFT 6 #define XWAY_STP_PHY4_SHIFT 15 /* STP has 3 groups of 8 bits */ #define XWAY_STP_GROUP0 BIT(0) #define XWAY_STP_GROUP1 BIT(1) #define XWAY_STP_GROUP2 BIT(2) #define XWAY_STP_GROUP_MASK (0x7) /* Edge configuration bits */ #define XWAY_STP_FALLING BIT(26) #define XWAY_STP_EDGE_MASK BIT(26) #define xway_stp_r32(m, reg) __raw_readl(m + reg) #define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg) #define xway_stp_w32_mask(m, clear, set, reg) \ xway_stp_w32(m, (xway_stp_r32(m, reg) & ~(clear)) | (set), reg) struct xway_stp { struct gpio_chip gc; void __iomem *virt; u32 edge; /* rising or falling edge triggered shift register */ u32 shadow; /* shadow the shift registers state */ u8 groups; /* we can drive 1-3 groups of 8bit each */ u8 dsl; /* the 2 LSBs can be driven by the dsl core */ u8 phy1; /* 3 bits can be driven by phy1 */ u8 phy2; /* 3 bits can be driven by phy2 */ u8 phy3; /* 3 bits can be driven by phy3 */ u8 phy4; /* 3 bits can be driven by phy4 */ u8 reserved; /* mask out the hw driven bits in gpio_request */ }; /** * xway_stp_get() - gpio_chip->get - get gpios. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * Gets the shadow value. */ static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio) { struct xway_stp *chip = gpiochip_get_data(gc); return (xway_stp_r32(chip->virt, XWAY_STP_CPU0) & BIT(gpio)); } /** * xway_stp_set() - gpio_chip->set - set gpios. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * * Set the shadow value and call ltq_ebu_apply. 
*/ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val) { struct xway_stp *chip = gpiochip_get_data(gc); if (val) chip->shadow |= BIT(gpio); else chip->shadow &= ~BIT(gpio); xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0); if (!chip->reserved) xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0); } /** * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * @val: Value to be written to specified signal. * * Same as xway_stp_set, always returns 0. */ static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val) { xway_stp_set(gc, gpio, val); return 0; } /** * xway_stp_request() - gpio_chip->request * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. * * We mask out the HW driven pins */ static int xway_stp_request(struct gpio_chip *gc, unsigned gpio) { struct xway_stp *chip = gpiochip_get_data(gc); if ((gpio < 8) && (chip->reserved & BIT(gpio))) { dev_err(gc->parent, "GPIO %d is driven by hardware\n", gpio); return -ENODEV; } return 0; } /** * xway_stp_hw_init() - Configure the STP unit and enable the clock gate * @chip: Pointer to the xway_stp chip structure */ static void xway_stp_hw_init(struct xway_stp *chip) { /* sane defaults */ xway_stp_w32(chip->virt, 0, XWAY_STP_AR); xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0); xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1); xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0); xway_stp_w32(chip->virt, 0, XWAY_STP_CON1); /* apply edge trigger settings for the shift register */ xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK, chip->edge, XWAY_STP_CON0); /* apply led group settings */ xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK, chip->groups, XWAY_STP_CON1); /* tell the hardware which pins are controlled by the dsl modem */ xway_stp_w32_mask(chip->virt, XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT, chip->dsl << XWAY_STP_ADSL_SHIFT, XWAY_STP_CON0); /* tell the hardware which pins are controlled by the phys */ xway_stp_w32_mask(chip->virt, XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT, chip->phy1 << XWAY_STP_PHY1_SHIFT, XWAY_STP_CON0); xway_stp_w32_mask(chip->virt, XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT, chip->phy2 << XWAY_STP_PHY2_SHIFT, XWAY_STP_CON1); if (of_machine_is_compatible("lantiq,grx390") || of_machine_is_compatible("lantiq,ar10")) { xway_stp_w32_mask(chip->virt, XWAY_STP_PHY_MASK << XWAY_STP_PHY3_SHIFT, chip->phy3 << XWAY_STP_PHY3_SHIFT, XWAY_STP_CON1); } if (of_machine_is_compatible("lantiq,grx390")) { xway_stp_w32_mask(chip->virt, XWAY_STP_PHY_MASK << XWAY_STP_PHY4_SHIFT, chip->phy4 << XWAY_STP_PHY4_SHIFT, XWAY_STP_CON1); } /* mask out the hw driven bits in gpio_request */ chip->reserved = (chip->phy4 << 11) | (chip->phy3 << 8) | (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl; /* * if we have pins that are driven by hw, we need to tell the stp what * clock to use as a timer. 
*/ if (chip->reserved) { xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK, XWAY_STP_UPD_FPI, XWAY_STP_CON1); xway_stp_w32_mask(chip->virt, XWAY_STP_SPEED_MASK, XWAY_STP_10HZ, XWAY_STP_CON1); xway_stp_w32_mask(chip->virt, XWAY_STP_FPIS_MASK, XWAY_STP_FPIS_VALUE, XWAY_STP_CON1); } } static int xway_stp_probe(struct platform_device *pdev) { u32 shadow, groups, dsl, phy; struct xway_stp *chip; struct clk *clk; int ret = 0; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->virt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->virt)) return PTR_ERR(chip->virt); chip->gc.parent = &pdev->dev; chip->gc.label = "stp-xway"; chip->gc.direction_output = xway_stp_dir_out; chip->gc.get = xway_stp_get; chip->gc.set = xway_stp_set; chip->gc.request = xway_stp_request; chip->gc.base = -1; chip->gc.owner = THIS_MODULE; /* store the shadow value if one was passed by the devicetree */ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow)) chip->shadow = shadow; /* find out which gpio groups should be enabled */ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,groups", &groups)) chip->groups = groups & XWAY_STP_GROUP_MASK; else chip->groups = XWAY_STP_GROUP0; chip->gc.ngpio = fls(chip->groups) * 8; /* find out which gpios are controlled by the dsl core */ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,dsl", &dsl)) chip->dsl = dsl & XWAY_STP_ADSL_MASK; /* find out which gpios are controlled by the phys */ if (of_machine_is_compatible("lantiq,ar9") || of_machine_is_compatible("lantiq,gr9") || of_machine_is_compatible("lantiq,vr9") || of_machine_is_compatible("lantiq,ar10") || of_machine_is_compatible("lantiq,grx390")) { if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy1", &phy)) chip->phy1 = phy & XWAY_STP_PHY_MASK; if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy2", &phy)) chip->phy2 = phy & XWAY_STP_PHY_MASK; } if (of_machine_is_compatible("lantiq,ar10") || of_machine_is_compatible("lantiq,grx390")) { if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy3", &phy)) chip->phy3 = phy & XWAY_STP_PHY_MASK; } if (of_machine_is_compatible("lantiq,grx390")) { if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy4", &phy)) chip->phy4 = phy & XWAY_STP_PHY_MASK; } /* check which edge trigger we should use, default to a falling edge */ if (!of_property_read_bool(pdev->dev.of_node, "lantiq,rising")) chip->edge = XWAY_STP_FALLING; clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); return PTR_ERR(clk); } ret = clk_prepare_enable(clk); if (ret) return ret; xway_stp_hw_init(chip); ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip); if (ret) { clk_disable_unprepare(clk); return ret; } dev_info(&pdev->dev, "Init done\n"); return 0; } static const struct of_device_id xway_stp_match[] = { { .compatible = "lantiq,gpio-stp-xway" }, {}, }; MODULE_DEVICE_TABLE(of, xway_stp_match); static struct platform_driver xway_stp_driver = { .probe = xway_stp_probe, .driver = { .name = "gpio-stp-xway", .of_match_table = xway_stp_match, }, }; static int __init xway_stp_init(void) { return platform_driver_register(&xway_stp_driver); } subsys_initcall(xway_stp_init);
linux-master
drivers/gpio/gpio-stp-xway.c
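Most register accesses in the STP driver go through xway_stp_w32_mask(), a clear-then-set read-modify-write macro. The following userspace sketch restates that helper as plain C, with an array standing in for the MMIO block; the offsets and values in main() are arbitrary demonstration data.

/* Userspace restatement of the clear/set masking helper - not kernel
 * code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[8];	/* fake register file, 4 bytes apart */

static uint32_t r32(unsigned int reg)
{
	return regs[reg / 4];
}

static void w32(uint32_t val, unsigned int reg)
{
	regs[reg / 4] = val;
}

/* Same shape as xway_stp_w32_mask(): clear some bits and set others
 * in a single read-modify-write. */
static void w32_mask(uint32_t clear, uint32_t set, unsigned int reg)
{
	w32((r32(reg) & ~clear) | set, reg);
}

int main(void)
{
	w32(0xff, 0x04);
	w32_mask(0x0f, 0x05, 0x04);	/* drop the low nibble, set bits 0 and 2 */
	printf("0x%02x\n", (unsigned)r32(0x04));	/* 0xf5 */
	return 0;
}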
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2017-2018 Cadence * * Authors: * Jan Kotas <[email protected]> * Boris Brezillon <[email protected]> */ #include <linux/gpio/driver.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #define CDNS_GPIO_BYPASS_MODE 0x00 #define CDNS_GPIO_DIRECTION_MODE 0x04 #define CDNS_GPIO_OUTPUT_EN 0x08 #define CDNS_GPIO_OUTPUT_VALUE 0x0c #define CDNS_GPIO_INPUT_VALUE 0x10 #define CDNS_GPIO_IRQ_MASK 0x14 #define CDNS_GPIO_IRQ_EN 0x18 #define CDNS_GPIO_IRQ_DIS 0x1c #define CDNS_GPIO_IRQ_STATUS 0x20 #define CDNS_GPIO_IRQ_TYPE 0x24 #define CDNS_GPIO_IRQ_VALUE 0x28 #define CDNS_GPIO_IRQ_ANY_EDGE 0x2c struct cdns_gpio_chip { struct gpio_chip gc; struct clk *pclk; void __iomem *regs; u32 bypass_orig; }; static int cdns_gpio_request(struct gpio_chip *chip, unsigned int offset) { struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); unsigned long flags; raw_spin_lock_irqsave(&chip->bgpio_lock, flags); iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset), cgpio->regs + CDNS_GPIO_BYPASS_MODE); raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); return 0; } static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); unsigned long flags; raw_spin_lock_irqsave(&chip->bgpio_lock, flags); iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) | (BIT(offset) & cgpio->bypass_orig), cgpio->regs + CDNS_GPIO_BYPASS_MODE); raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); } static void cdns_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); iowrite32(BIT(d->hwirq), cgpio->regs + CDNS_GPIO_IRQ_DIS); gpiochip_disable_irq(chip, irqd_to_hwirq(d)); } static void cdns_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); gpiochip_enable_irq(chip, irqd_to_hwirq(d)); iowrite32(BIT(d->hwirq), cgpio->regs + CDNS_GPIO_IRQ_EN); } static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); unsigned long flags; u32 int_value; u32 int_type; u32 mask = BIT(d->hwirq); int ret = 0; raw_spin_lock_irqsave(&chip->bgpio_lock, flags); int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask; int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask; /* * The GPIO controller doesn't have an ACK register. * All interrupt statuses are cleared on a status register read. * Don't support edge interrupts for now. 
*/ if (type == IRQ_TYPE_LEVEL_HIGH) { int_type |= mask; int_value |= mask; } else if (type == IRQ_TYPE_LEVEL_LOW) { int_type |= mask; } else { ret = -EINVAL; goto err_irq_type; } iowrite32(int_value, cgpio->regs + CDNS_GPIO_IRQ_VALUE); iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE); err_irq_type: raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); return ret; } static void cdns_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned long status; int hwirq; chained_irq_enter(irqchip, desc); status = ioread32(cgpio->regs + CDNS_GPIO_IRQ_STATUS) & ~ioread32(cgpio->regs + CDNS_GPIO_IRQ_MASK); for_each_set_bit(hwirq, &status, chip->ngpio) generic_handle_domain_irq(chip->irq.domain, hwirq); chained_irq_exit(irqchip, desc); } static const struct irq_chip cdns_gpio_irqchip = { .name = "cdns-gpio", .irq_mask = cdns_gpio_irq_mask, .irq_unmask = cdns_gpio_irq_unmask, .irq_set_type = cdns_gpio_irq_set_type, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int cdns_gpio_probe(struct platform_device *pdev) { struct cdns_gpio_chip *cgpio; int ret, irq; u32 dir_prev; u32 num_gpios = 32; cgpio = devm_kzalloc(&pdev->dev, sizeof(*cgpio), GFP_KERNEL); if (!cgpio) return -ENOMEM; cgpio->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cgpio->regs)) return PTR_ERR(cgpio->regs); of_property_read_u32(pdev->dev.of_node, "ngpios", &num_gpios); if (num_gpios > 32) { dev_err(&pdev->dev, "ngpios must be less or equal 32\n"); return -EINVAL; } /* * Set all pins as inputs by default, otherwise: * gpiochip_lock_as_irq: * tried to flag a GPIO set as output for IRQ * Generic GPIO driver stores the direction value internally, * so it needs to be changed before bgpio_init() is called. 
*/ dir_prev = ioread32(cgpio->regs + CDNS_GPIO_DIRECTION_MODE); iowrite32(GENMASK(num_gpios - 1, 0), cgpio->regs + CDNS_GPIO_DIRECTION_MODE); ret = bgpio_init(&cgpio->gc, &pdev->dev, 4, cgpio->regs + CDNS_GPIO_INPUT_VALUE, cgpio->regs + CDNS_GPIO_OUTPUT_VALUE, NULL, NULL, cgpio->regs + CDNS_GPIO_DIRECTION_MODE, BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(&pdev->dev, "Failed to register generic gpio, %d\n", ret); goto err_revert_dir; } cgpio->gc.label = dev_name(&pdev->dev); cgpio->gc.ngpio = num_gpios; cgpio->gc.parent = &pdev->dev; cgpio->gc.base = -1; cgpio->gc.owner = THIS_MODULE; cgpio->gc.request = cdns_gpio_request; cgpio->gc.free = cdns_gpio_free; cgpio->pclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(cgpio->pclk)) { ret = PTR_ERR(cgpio->pclk); dev_err(&pdev->dev, "Failed to retrieve peripheral clock, %d\n", ret); goto err_revert_dir; } ret = clk_prepare_enable(cgpio->pclk); if (ret) { dev_err(&pdev->dev, "Failed to enable the peripheral clock, %d\n", ret); goto err_revert_dir; } /* * Optional irq_chip support */ irq = platform_get_irq(pdev, 0); if (irq >= 0) { struct gpio_irq_chip *girq; girq = &cgpio->gc.irq; gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip); girq->parent_handler = cdns_gpio_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { ret = -ENOMEM; goto err_disable_clk; } girq->parents[0] = irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; } ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio); if (ret < 0) { dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret); goto err_disable_clk; } cgpio->bypass_orig = ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE); /* * Enable gpio outputs, ignored for input direction */ iowrite32(GENMASK(num_gpios - 1, 0), cgpio->regs + CDNS_GPIO_OUTPUT_EN); iowrite32(0, cgpio->regs + CDNS_GPIO_BYPASS_MODE); platform_set_drvdata(pdev, cgpio); return 0; err_disable_clk: clk_disable_unprepare(cgpio->pclk); err_revert_dir: iowrite32(dir_prev, cgpio->regs + CDNS_GPIO_DIRECTION_MODE); return ret; } static int cdns_gpio_remove(struct platform_device *pdev) { struct cdns_gpio_chip *cgpio = platform_get_drvdata(pdev); iowrite32(cgpio->bypass_orig, cgpio->regs + CDNS_GPIO_BYPASS_MODE); clk_disable_unprepare(cgpio->pclk); return 0; } static const struct of_device_id cdns_of_ids[] = { { .compatible = "cdns,gpio-r1p02" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, cdns_of_ids); static struct platform_driver cdns_gpio_driver = { .driver = { .name = "cdns-gpio", .of_match_table = cdns_of_ids, }, .probe = cdns_gpio_probe, .remove = cdns_gpio_remove, }; module_platform_driver(cdns_gpio_driver); MODULE_AUTHOR("Jan Kotas <[email protected]>"); MODULE_DESCRIPTION("Cadence GPIO driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:cdns-gpio");
linux-master
drivers/gpio/gpio-cadence.c
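The Cadence block has no IRQ ack register and therefore supports level triggers only, encoded one bit per line across two registers: IRQ_TYPE selects level mode and IRQ_VALUE selects the active level. A minimal userspace model of that encoding follows; the register names mirror the driver, everything else is illustrative.

/* Minimal model of the level-only trigger encoding - not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t irq_type;	/* stands in for CDNS_GPIO_IRQ_TYPE */
static uint32_t irq_value;	/* stands in for CDNS_GPIO_IRQ_VALUE */

static void set_level_type(unsigned int hwirq, int active_high)
{
	uint32_t mask = 1u << hwirq;

	irq_type |= mask;	/* level-sensitive */
	if (active_high)
		irq_value |= mask;
	else
		irq_value &= ~mask;
}

int main(void)
{
	set_level_type(3, 1);	/* line 3: level high */
	set_level_type(7, 0);	/* line 7: level low */
	printf("type=0x%02x value=0x%02x\n",
	       (unsigned)irq_type, (unsigned)irq_value);	/* 0x88 0x08 */
	return 0;
}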
// SPDX-License-Identifier: GPL-2.0-only /* * RDA Micro GPIO driver * * Copyright (C) 2012 RDA Micro Inc. * Copyright (C) 2019 Manivannan Sadhasivam */ #include <linux/bitops.h> #include <linux/gpio/driver.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #define RDA_GPIO_OEN_VAL 0x00 #define RDA_GPIO_OEN_SET_OUT 0x04 #define RDA_GPIO_OEN_SET_IN 0x08 #define RDA_GPIO_VAL 0x0c #define RDA_GPIO_SET 0x10 #define RDA_GPIO_CLR 0x14 #define RDA_GPIO_INT_CTRL_SET 0x18 #define RDA_GPIO_INT_CTRL_CLR 0x1c #define RDA_GPIO_INT_CLR 0x20 #define RDA_GPIO_INT_STATUS 0x24 #define RDA_GPIO_IRQ_RISE_SHIFT 0 #define RDA_GPIO_IRQ_FALL_SHIFT 8 #define RDA_GPIO_DEBOUCE_SHIFT 16 #define RDA_GPIO_LEVEL_SHIFT 24 #define RDA_GPIO_IRQ_MASK 0xff /* Each bank consists of 32 GPIOs */ #define RDA_GPIO_BANK_NR 32 struct rda_gpio { struct gpio_chip chip; void __iomem *base; spinlock_t lock; int irq; }; static inline void rda_gpio_update(struct gpio_chip *chip, unsigned int offset, u16 reg, int val) { struct rda_gpio *rda_gpio = gpiochip_get_data(chip); void __iomem *base = rda_gpio->base; unsigned long flags; u32 tmp; spin_lock_irqsave(&rda_gpio->lock, flags); tmp = readl_relaxed(base + reg); if (val) tmp |= BIT(offset); else tmp &= ~BIT(offset); writel_relaxed(tmp, base + reg); spin_unlock_irqrestore(&rda_gpio->lock, flags); } static void rda_gpio_irq_mask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); struct rda_gpio *rda_gpio = gpiochip_get_data(chip); void __iomem *base = rda_gpio->base; u32 offset = irqd_to_hwirq(data); u32 value; value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT; value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR); gpiochip_disable_irq(chip, offset); } static void rda_gpio_irq_ack(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); u32 offset = irqd_to_hwirq(data); rda_gpio_update(chip, offset, RDA_GPIO_INT_CLR, 1); } static int rda_gpio_set_irq(struct gpio_chip *chip, u32 offset, unsigned int flow_type) { struct rda_gpio *rda_gpio = gpiochip_get_data(chip); void __iomem *base = rda_gpio->base; u32 value; switch (flow_type) { case IRQ_TYPE_EDGE_RISING: /* Set rising edge trigger */ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET); /* Switch to edge trigger interrupt */ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR); break; case IRQ_TYPE_EDGE_FALLING: /* Set falling edge trigger */ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET); /* Switch to edge trigger interrupt */ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR); break; case IRQ_TYPE_EDGE_BOTH: /* Set both edge trigger */ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT; value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET); /* Switch to edge trigger interrupt */ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR); break; case IRQ_TYPE_LEVEL_HIGH: /* Set high level trigger */ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT; /* Switch to level trigger interrupt */ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET); break; case IRQ_TYPE_LEVEL_LOW: /* Set low level trigger */ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT; /* Switch to 
level trigger interrupt */ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT; writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET); break; default: return -EINVAL; } return 0; } static void rda_gpio_irq_unmask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); u32 offset = irqd_to_hwirq(data); u32 trigger = irqd_get_trigger_type(data); gpiochip_enable_irq(chip, offset); rda_gpio_set_irq(chip, offset, trigger); } static int rda_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); u32 offset = irqd_to_hwirq(data); int ret; ret = rda_gpio_set_irq(chip, offset, flow_type); if (ret) return ret; if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(data, handle_level_irq); else if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) irq_set_handler_locked(data, handle_edge_irq); return 0; } static void rda_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct irq_chip *ic = irq_desc_get_chip(desc); struct rda_gpio *rda_gpio = gpiochip_get_data(chip); unsigned long status; u32 n; chained_irq_enter(ic, desc); status = readl_relaxed(rda_gpio->base + RDA_GPIO_INT_STATUS); /* Only lower 8 bits are capable of generating interrupts */ status &= RDA_GPIO_IRQ_MASK; for_each_set_bit(n, &status, RDA_GPIO_BANK_NR) generic_handle_domain_irq(chip->irq.domain, n); chained_irq_exit(ic, desc); } static const struct irq_chip rda_gpio_irq_chip = { .name = "rda-gpio", .irq_ack = rda_gpio_irq_ack, .irq_mask = rda_gpio_irq_mask, .irq_unmask = rda_gpio_irq_unmask, .irq_set_type = rda_gpio_irq_set_type, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int rda_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; struct rda_gpio *rda_gpio; u32 ngpios; int ret; rda_gpio = devm_kzalloc(dev, sizeof(*rda_gpio), GFP_KERNEL); if (!rda_gpio) return -ENOMEM; ret = device_property_read_u32(dev, "ngpios", &ngpios); if (ret < 0) return ret; /* * Not all ports have interrupt capability. For instance, on * RDA8810PL, GPIOC doesn't support interrupt. So we must handle * those also. 
*/ rda_gpio->irq = platform_get_irq(pdev, 0); rda_gpio->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rda_gpio->base)) return PTR_ERR(rda_gpio->base); spin_lock_init(&rda_gpio->lock); ret = bgpio_init(&rda_gpio->chip, dev, 4, rda_gpio->base + RDA_GPIO_VAL, rda_gpio->base + RDA_GPIO_SET, rda_gpio->base + RDA_GPIO_CLR, rda_gpio->base + RDA_GPIO_OEN_SET_OUT, rda_gpio->base + RDA_GPIO_OEN_SET_IN, BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(dev, "bgpio_init failed\n"); return ret; } rda_gpio->chip.label = dev_name(dev); rda_gpio->chip.ngpio = ngpios; rda_gpio->chip.base = -1; if (rda_gpio->irq >= 0) { girq = &rda_gpio->chip.irq; gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip); girq->handler = handle_bad_irq; girq->default_type = IRQ_TYPE_NONE; girq->parent_handler = rda_gpio_irq_handler; girq->parent_handler_data = rda_gpio; girq->num_parents = 1; girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; girq->parents[0] = rda_gpio->irq; } platform_set_drvdata(pdev, rda_gpio); return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio); } static const struct of_device_id rda_gpio_of_match[] = { { .compatible = "rda,8810pl-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rda_gpio_of_match); static struct platform_driver rda_gpio_driver = { .probe = rda_gpio_probe, .driver = { .name = "rda-gpio", .of_match_table = rda_gpio_of_match, }, }; module_platform_driver(rda_gpio_driver); MODULE_DESCRIPTION("RDA Micro GPIO driver"); MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-rda.c
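The RDA interrupt control registers pack three 8-bit fields into one word: rising-edge enables at bit 0, falling-edge enables at bit 8, and level-mode selects at bit 24. This standalone sketch shows how a both-edge configuration for one line is composed; the shift values are the driver's, the helper function is hypothetical.

/* Sketch of the packed trigger-control word - not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define RISE_SHIFT	0	/* rising-edge enables, bits 7:0 */
#define FALL_SHIFT	8	/* falling-edge enables, bits 15:8 */
#define LEVEL_SHIFT	24	/* level-mode selects, bits 31:24 */

static uint32_t trigger_bits(unsigned int offset, int rise, int fall, int level)
{
	uint32_t v = 0;

	if (rise)
		v |= (1u << offset) << RISE_SHIFT;
	if (fall)
		v |= (1u << offset) << FALL_SHIFT;
	if (level)
		v |= (1u << offset) << LEVEL_SHIFT;
	return v;
}

int main(void)
{
	/* both-edge trigger on line 2: rise and fall set, level cleared */
	printf("0x%08x\n", (unsigned)trigger_bits(2, 1, 1, 0));	/* 0x00000404 */
	return 0;
}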
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2019 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #define LOGICVC_CTRL_REG 0x40 #define LOGICVC_CTRL_GPIO_SHIFT 11 #define LOGICVC_CTRL_GPIO_BITS 5 #define LOGICVC_POWER_CTRL_REG 0x78 #define LOGICVC_POWER_CTRL_GPIO_SHIFT 0 #define LOGICVC_POWER_CTRL_GPIO_BITS 4 struct logicvc_gpio { struct gpio_chip chip; struct regmap *regmap; }; static void logicvc_gpio_offset(struct logicvc_gpio *logicvc, unsigned offset, unsigned int *reg, unsigned int *bit) { if (offset >= LOGICVC_CTRL_GPIO_BITS) { *reg = LOGICVC_POWER_CTRL_REG; /* To the (virtual) power ctrl offset. */ offset -= LOGICVC_CTRL_GPIO_BITS; /* To the actual bit offset in reg. */ offset += LOGICVC_POWER_CTRL_GPIO_SHIFT; } else { *reg = LOGICVC_CTRL_REG; /* To the actual bit offset in reg. */ offset += LOGICVC_CTRL_GPIO_SHIFT; } *bit = BIT(offset); } static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset) { struct logicvc_gpio *logicvc = gpiochip_get_data(chip); unsigned int reg, bit, value; int ret; logicvc_gpio_offset(logicvc, offset, &reg, &bit); ret = regmap_read(logicvc->regmap, reg, &value); if (ret) return ret; return !!(value & bit); } static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct logicvc_gpio *logicvc = gpiochip_get_data(chip); unsigned int reg, bit; logicvc_gpio_offset(logicvc, offset, &reg, &bit); regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0); } static int logicvc_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { /* Pins are always configured as output, so just set the value. */ logicvc_gpio_set(chip, offset, value); return 0; } static struct regmap_config logicvc_gpio_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .name = "logicvc-gpio", }; static int logicvc_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *of_node = dev->of_node; struct logicvc_gpio *logicvc; int ret; logicvc = devm_kzalloc(dev, sizeof(*logicvc), GFP_KERNEL); if (!logicvc) return -ENOMEM; /* Try to get regmap from parent first. */ logicvc->regmap = syscon_node_to_regmap(of_node->parent); /* Grab our own regmap if that fails. 
*/ if (IS_ERR(logicvc->regmap)) { struct resource res; void __iomem *base; ret = of_address_to_resource(of_node, 0, &res); if (ret) { dev_err(dev, "Failed to get resource from address\n"); return ret; } base = devm_ioremap_resource(dev, &res); if (IS_ERR(base)) return PTR_ERR(base); logicvc_gpio_regmap_config.max_register = resource_size(&res) - logicvc_gpio_regmap_config.reg_stride; logicvc->regmap = devm_regmap_init_mmio(dev, base, &logicvc_gpio_regmap_config); if (IS_ERR(logicvc->regmap)) { dev_err(dev, "Failed to create regmap for I/O\n"); return PTR_ERR(logicvc->regmap); } } logicvc->chip.parent = dev; logicvc->chip.owner = THIS_MODULE; logicvc->chip.label = dev_name(dev); logicvc->chip.base = -1; logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS + LOGICVC_POWER_CTRL_GPIO_BITS; logicvc->chip.get = logicvc_gpio_get; logicvc->chip.set = logicvc_gpio_set; logicvc->chip.direction_output = logicvc_gpio_direction_output; return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc); } static const struct of_device_id logicivc_gpio_of_table[] = { { .compatible = "xylon,logicvc-3.02.a-gpio", }, { } }; MODULE_DEVICE_TABLE(of, logicivc_gpio_of_table); static struct platform_driver logicvc_gpio_driver = { .driver = { .name = "gpio-logicvc", .of_match_table = logicivc_gpio_of_table, }, .probe = logicvc_gpio_probe, }; module_platform_driver(logicvc_gpio_driver); MODULE_AUTHOR("Paul Kocialkowski <[email protected]>"); MODULE_DESCRIPTION("Xylon LogiCVC GPIO driver"); MODULE_LICENSE("GPL");
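/*
 * Illustrative sketch, not part of gpio-logicvc.c: the offset math from
 * logicvc_gpio_offset() above as a standalone program. GPIOs 0-4 live in
 * CTRL[15:11] and GPIOs 5-8 in POWER_CTRL[3:0]. Unlike the driver, which
 * hands back a BIT() mask, this prints the bit index for readability.
 */
#include <stdio.h>

#define LOGICVC_CTRL_REG		0x40
#define LOGICVC_CTRL_GPIO_SHIFT		11
#define LOGICVC_CTRL_GPIO_BITS		5
#define LOGICVC_POWER_CTRL_REG		0x78
#define LOGICVC_POWER_CTRL_GPIO_SHIFT	0
#define LOGICVC_POWER_CTRL_GPIO_BITS	4

static void offset_to_reg_bit(unsigned int offset, unsigned int *reg,
			      unsigned int *bit)
{
	if (offset >= LOGICVC_CTRL_GPIO_BITS) {
		*reg = LOGICVC_POWER_CTRL_REG;
		*bit = offset - LOGICVC_CTRL_GPIO_BITS +
		       LOGICVC_POWER_CTRL_GPIO_SHIFT;
	} else {
		*reg = LOGICVC_CTRL_REG;
		*bit = offset + LOGICVC_CTRL_GPIO_SHIFT;
	}
}

int main(void)
{
	unsigned int ngpio = LOGICVC_CTRL_GPIO_BITS + LOGICVC_POWER_CTRL_GPIO_BITS;

	for (unsigned int offset = 0; offset < ngpio; offset++) {
		unsigned int reg, bit;

		offset_to_reg_bit(offset, &reg, &bit);
		printf("gpio %u -> reg 0x%02x, bit %u\n", offset, reg, bit);
	}
	return 0;
}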
linux-master
drivers/gpio/gpio-logicvc.c
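/*
 * Worked example, not from gpio-logicvc.c itself: how its probe() derives
 * max_register for the fallback MMIO regmap. With 32-bit registers
 * (reg_stride = 4), the highest valid register offset is the I/O window
 * size minus one stride. The window size below is a made-up sample value.
 */
#include <stdio.h>

int main(void)
{
	unsigned int reg_stride = 4;	/* 32-bit registers */
	unsigned int res_size = 0x100;	/* hypothetical resource_size(&res) */
	unsigned int max_register = res_size - reg_stride;

	printf("max_register = 0x%02x\n", max_register);	/* 0xfc */
	return 0;
}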
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/plat-pxa/gpio.c * * Generic PXA GPIO handling * * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. */ #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/gpio-pxa.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/io.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/slab.h> /* * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with * one set of registers. The register offsets are organized below: * * GPLR GPDR GPSR GPCR GRER GFER GEDR * BANK 0 - 0x0000 0x000C 0x0018 0x0024 0x0030 0x003C 0x0048 * BANK 1 - 0x0004 0x0010 0x001C 0x0028 0x0034 0x0040 0x004C * BANK 2 - 0x0008 0x0014 0x0020 0x002C 0x0038 0x0044 0x0050 * * BANK 3 - 0x0100 0x010C 0x0118 0x0124 0x0130 0x013C 0x0148 * BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C * BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150 * * BANK 6 - 0x0200 0x020C 0x0218 0x0224 0x0230 0x023C 0x0248 * * NOTE: * BANK 3 is only available on PXA27x and later processors. * BANK 4 and 5 are only available on PXA935, PXA1928 * BANK 6 is only available on PXA1928 */ #define GPLR_OFFSET 0x00 #define GPDR_OFFSET 0x0C #define GPSR_OFFSET 0x18 #define GPCR_OFFSET 0x24 #define GRER_OFFSET 0x30 #define GFER_OFFSET 0x3C #define GEDR_OFFSET 0x48 #define GAFR_OFFSET 0x54 #define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */ #define BANK_OFF(n) (((n) / 3) << 8) + (((n) % 3) << 2) int pxa_last_gpio; static int irq_base; struct pxa_gpio_bank { void __iomem *regbase; unsigned long irq_mask; unsigned long irq_edge_rise; unsigned long irq_edge_fall; #ifdef CONFIG_PM unsigned long saved_gplr; unsigned long saved_gpdr; unsigned long saved_grer; unsigned long saved_gfer; #endif }; struct pxa_gpio_chip { struct device *dev; struct gpio_chip chip; struct pxa_gpio_bank *banks; struct irq_domain *irqdomain; int irq0; int irq1; int (*set_wake)(unsigned int gpio, unsigned int on); }; enum pxa_gpio_type { PXA25X_GPIO = 0, PXA26X_GPIO, PXA27X_GPIO, PXA3XX_GPIO, PXA93X_GPIO, MMP_GPIO = 0x10, MMP2_GPIO, PXA1928_GPIO, }; struct pxa_gpio_id { enum pxa_gpio_type type; int gpio_nums; }; static DEFINE_SPINLOCK(gpio_lock); static struct pxa_gpio_chip *pxa_gpio_chip; static enum pxa_gpio_type gpio_type; static struct pxa_gpio_id pxa25x_id = { .type = PXA25X_GPIO, .gpio_nums = 85, }; static struct pxa_gpio_id pxa26x_id = { .type = PXA26X_GPIO, .gpio_nums = 90, }; static struct pxa_gpio_id pxa27x_id = { .type = PXA27X_GPIO, .gpio_nums = 121, }; static struct pxa_gpio_id pxa3xx_id = { .type = PXA3XX_GPIO, .gpio_nums = 128, }; static struct pxa_gpio_id pxa93x_id = { .type = PXA93X_GPIO, .gpio_nums = 192, }; static struct pxa_gpio_id mmp_id = { .type = MMP_GPIO, .gpio_nums = 128, }; static struct pxa_gpio_id mmp2_id = { .type = MMP2_GPIO, .gpio_nums = 192, }; static struct pxa_gpio_id pxa1928_id = { .type = PXA1928_GPIO, .gpio_nums = 224, }; #define for_each_gpio_bank(i, b, pc) \ for (i = 0, b = pc->banks; i <= pxa_last_gpio; i += 32, b++) static inline struct pxa_gpio_chip *chip_to_pxachip(struct gpio_chip *c) { struct pxa_gpio_chip *pxa_chip = gpiochip_get_data(c); return pxa_chip; } static inline void __iomem *gpio_bank_base(struct gpio_chip *c, int gpio) { struct pxa_gpio_chip *p = 
gpiochip_get_data(c); struct pxa_gpio_bank *bank = p->banks + (gpio / 32); return bank->regbase; } static inline struct pxa_gpio_bank *gpio_to_pxabank(struct gpio_chip *c, unsigned gpio) { return chip_to_pxachip(c)->banks + gpio / 32; } static inline int gpio_is_mmp_type(int type) { return (type & MMP_GPIO) != 0; } /* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2 inverted, * as well as their Alternate Function value being '1' for GPIO in GAFRx. */ static inline int __gpio_is_inverted(int gpio) { if ((gpio_type == PXA26X_GPIO) && (gpio > 85)) return 1; return 0; } /* * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate * function of a GPIO, and GPDRx cannot be altered once configured. It * is attributed as "occupied" here (I know this terminology isn't * accurate, you are welcome to propose a better one :-) */ static inline int __gpio_is_occupied(struct pxa_gpio_chip *pchip, unsigned gpio) { void __iomem *base; unsigned long gafr = 0, gpdr = 0; int ret, af = 0, dir = 0; base = gpio_bank_base(&pchip->chip, gpio); gpdr = readl_relaxed(base + GPDR_OFFSET); switch (gpio_type) { case PXA25X_GPIO: case PXA26X_GPIO: case PXA27X_GPIO: gafr = readl_relaxed(base + GAFR_OFFSET); af = (gafr >> ((gpio & 0xf) * 2)) & 0x3; dir = gpdr & GPIO_bit(gpio); if (__gpio_is_inverted(gpio)) ret = (af != 1) || (dir == 0); else ret = (af != 0) || (dir != 0); break; default: ret = gpdr & GPIO_bit(gpio); break; } return ret; } int pxa_irq_to_gpio(int irq) { struct pxa_gpio_chip *pchip = pxa_gpio_chip; int irq_gpio0; irq_gpio0 = irq_find_mapping(pchip->irqdomain, 0); if (irq_gpio0 > 0) return irq - irq_gpio0; return irq_gpio0; } static bool pxa_gpio_has_pinctrl(void) { switch (gpio_type) { case PXA3XX_GPIO: case MMP2_GPIO: return false; default: return true; } } static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct pxa_gpio_chip *pchip = chip_to_pxachip(chip); return irq_find_mapping(pchip->irqdomain, offset); } static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { void __iomem *base = gpio_bank_base(chip, offset); uint32_t value, mask = GPIO_bit(offset); unsigned long flags; int ret; if (pxa_gpio_has_pinctrl()) { ret = pinctrl_gpio_direction_input(chip->base + offset); if (ret) return ret; } spin_lock_irqsave(&gpio_lock, flags); value = readl_relaxed(base + GPDR_OFFSET); if (__gpio_is_inverted(chip->base + offset)) value |= mask; else value &= ~mask; writel_relaxed(value, base + GPDR_OFFSET); spin_unlock_irqrestore(&gpio_lock, flags); return 0; } static int pxa_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { void __iomem *base = gpio_bank_base(chip, offset); uint32_t tmp, mask = GPIO_bit(offset); unsigned long flags; int ret; writel_relaxed(mask, base + (value ? 
GPSR_OFFSET : GPCR_OFFSET)); if (pxa_gpio_has_pinctrl()) { ret = pinctrl_gpio_direction_output(chip->base + offset); if (ret) return ret; } spin_lock_irqsave(&gpio_lock, flags); tmp = readl_relaxed(base + GPDR_OFFSET); if (__gpio_is_inverted(chip->base + offset)) tmp &= ~mask; else tmp |= mask; writel_relaxed(tmp, base + GPDR_OFFSET); spin_unlock_irqrestore(&gpio_lock, flags); return 0; } static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset) { void __iomem *base = gpio_bank_base(chip, offset); u32 gplr = readl_relaxed(base + GPLR_OFFSET); return !!(gplr & GPIO_bit(offset)); } static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { void __iomem *base = gpio_bank_base(chip, offset); writel_relaxed(GPIO_bit(offset), base + (value ? GPSR_OFFSET : GPCR_OFFSET)); } #ifdef CONFIG_OF_GPIO static int pxa_gpio_of_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags) { if (gpiospec->args[0] > pxa_last_gpio) return -EINVAL; if (flags) *flags = gpiospec->args[1]; return gpiospec->args[0]; } #endif static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iomem *regbase) { int i, gpio, nbanks = DIV_ROUND_UP(ngpio, 32); struct pxa_gpio_bank *bank; pchip->banks = devm_kcalloc(pchip->dev, nbanks, sizeof(*pchip->banks), GFP_KERNEL); if (!pchip->banks) return -ENOMEM; pchip->chip.parent = pchip->dev; pchip->chip.label = "gpio-pxa"; pchip->chip.direction_input = pxa_gpio_direction_input; pchip->chip.direction_output = pxa_gpio_direction_output; pchip->chip.get = pxa_gpio_get; pchip->chip.set = pxa_gpio_set; pchip->chip.to_irq = pxa_gpio_to_irq; pchip->chip.ngpio = ngpio; pchip->chip.request = gpiochip_generic_request; pchip->chip.free = gpiochip_generic_free; #ifdef CONFIG_OF_GPIO pchip->chip.of_xlate = pxa_gpio_of_xlate; pchip->chip.of_gpio_n_cells = 2; #endif for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) { bank = pchip->banks + i; bank->regbase = regbase + BANK_OFF(i); } return gpiochip_add_data(&pchip->chip, pchip); } /* Update only those GRERx and GFERx edge detection register bits if those * bits are set in c->irq_mask */ static inline void update_edge_detect(struct pxa_gpio_bank *c) { uint32_t grer, gfer; grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask; gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask; grer |= c->irq_edge_rise & c->irq_mask; gfer |= c->irq_edge_fall & c->irq_mask; writel_relaxed(grer, c->regbase + GRER_OFFSET); writel_relaxed(gfer, c->regbase + GFER_OFFSET); } static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) { struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d); unsigned int gpio = irqd_to_hwirq(d); struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio); unsigned long gpdr, mask = GPIO_bit(gpio); if (type == IRQ_TYPE_PROBE) { /* Don't mess with enabled GPIOs using preconfigured edges or * GPIOs set to alternate function or to output during probe */ if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio)) return 0; if (__gpio_is_occupied(pchip, gpio)) return 0; type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; } gpdr = readl_relaxed(c->regbase + GPDR_OFFSET); if (__gpio_is_inverted(gpio)) writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET); else writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET); if (type & IRQ_TYPE_EDGE_RISING) c->irq_edge_rise |= mask; else c->irq_edge_rise &= ~mask; if (type & IRQ_TYPE_EDGE_FALLING) c->irq_edge_fall |= mask; else c->irq_edge_fall &= ~mask; update_edge_detect(c); pr_debug("%s: IRQ%d (GPIO%d) - 
edge%s%s\n", __func__, d->irq, gpio, ((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""), ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : "")); return 0; } static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d) { int loop, gpio, n, handled = 0; unsigned long gedr; struct pxa_gpio_chip *pchip = d; struct pxa_gpio_bank *c; do { loop = 0; for_each_gpio_bank(gpio, c, pchip) { gedr = readl_relaxed(c->regbase + GEDR_OFFSET); gedr = gedr & c->irq_mask; writel_relaxed(gedr, c->regbase + GEDR_OFFSET); for_each_set_bit(n, &gedr, BITS_PER_LONG) { loop = 1; generic_handle_domain_irq(pchip->irqdomain, gpio + n); } } handled += loop; } while (loop); return handled ? IRQ_HANDLED : IRQ_NONE; } static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d) { struct pxa_gpio_chip *pchip = d; if (in_irq == pchip->irq0) { generic_handle_domain_irq(pchip->irqdomain, 0); } else if (in_irq == pchip->irq1) { generic_handle_domain_irq(pchip->irqdomain, 1); } else { pr_err("%s() unknown irq %d\n", __func__, in_irq); return IRQ_NONE; } return IRQ_HANDLED; } static void pxa_ack_muxed_gpio(struct irq_data *d) { struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d); unsigned int gpio = irqd_to_hwirq(d); void __iomem *base = gpio_bank_base(&pchip->chip, gpio); writel_relaxed(GPIO_bit(gpio), base + GEDR_OFFSET); } static void pxa_mask_muxed_gpio(struct irq_data *d) { struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d); unsigned int gpio = irqd_to_hwirq(d); struct pxa_gpio_bank *b = gpio_to_pxabank(&pchip->chip, gpio); void __iomem *base = gpio_bank_base(&pchip->chip, gpio); uint32_t grer, gfer; b->irq_mask &= ~GPIO_bit(gpio); grer = readl_relaxed(base + GRER_OFFSET) & ~GPIO_bit(gpio); gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio); writel_relaxed(grer, base + GRER_OFFSET); writel_relaxed(gfer, base + GFER_OFFSET); } static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on) { struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d); unsigned int gpio = irqd_to_hwirq(d); if (pchip->set_wake) return pchip->set_wake(gpio, on); else return 0; } static void pxa_unmask_muxed_gpio(struct irq_data *d) { struct pxa_gpio_chip *pchip = irq_data_get_irq_chip_data(d); unsigned int gpio = irqd_to_hwirq(d); struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio); c->irq_mask |= GPIO_bit(gpio); update_edge_detect(c); } static struct irq_chip pxa_muxed_gpio_chip = { .name = "GPIO", .irq_ack = pxa_ack_muxed_gpio, .irq_mask = pxa_mask_muxed_gpio, .irq_unmask = pxa_unmask_muxed_gpio, .irq_set_type = pxa_gpio_irq_type, .irq_set_wake = pxa_gpio_set_wake, }; static int pxa_gpio_nums(struct platform_device *pdev) { const struct platform_device_id *id = platform_get_device_id(pdev); struct pxa_gpio_id *pxa_id = (struct pxa_gpio_id *)id->driver_data; int count = 0; switch (pxa_id->type) { case PXA25X_GPIO: case PXA26X_GPIO: case PXA27X_GPIO: case PXA3XX_GPIO: case PXA93X_GPIO: case MMP_GPIO: case MMP2_GPIO: case PXA1928_GPIO: gpio_type = pxa_id->type; count = pxa_id->gpio_nums - 1; break; default: count = -EINVAL; break; } return count; } static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, handle_edge_irq); irq_set_chip_data(irq, d->host_data); irq_set_noprobe(irq); return 0; } static const struct irq_domain_ops pxa_irq_domain_ops = { .map = pxa_irq_domain_map, .xlate = irq_domain_xlate_twocell, }; #ifdef CONFIG_OF static const struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "intel,pxa25x-gpio", 
.data = &pxa25x_id, }, { .compatible = "intel,pxa26x-gpio", .data = &pxa26x_id, }, { .compatible = "intel,pxa27x-gpio", .data = &pxa27x_id, }, { .compatible = "intel,pxa3xx-gpio", .data = &pxa3xx_id, }, { .compatible = "marvell,pxa93x-gpio", .data = &pxa93x_id, }, { .compatible = "marvell,mmp-gpio", .data = &mmp_id, }, { .compatible = "marvell,mmp2-gpio", .data = &mmp2_id, }, { .compatible = "marvell,pxa1928-gpio", .data = &pxa1928_id, }, {} }; static int pxa_gpio_probe_dt(struct platform_device *pdev, struct pxa_gpio_chip *pchip) { int nr_gpios; const struct pxa_gpio_id *gpio_id; gpio_id = of_device_get_match_data(&pdev->dev); gpio_type = gpio_id->type; nr_gpios = gpio_id->gpio_nums; pxa_last_gpio = nr_gpios - 1; irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, nr_gpios, 0); if (irq_base < 0) { dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); return irq_base; } return irq_base; } #else #define pxa_gpio_probe_dt(pdev, pchip) (-1) #endif static int pxa_gpio_probe(struct platform_device *pdev) { struct pxa_gpio_chip *pchip; struct pxa_gpio_bank *c; struct clk *clk; struct pxa_gpio_platform_data *info; void __iomem *gpio_reg_base; int gpio, ret; int irq0 = 0, irq1 = 0, irq_mux; pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL); if (!pchip) return -ENOMEM; pchip->dev = &pdev->dev; info = dev_get_platdata(&pdev->dev); if (info) { irq_base = info->irq_base; if (irq_base <= 0) return -EINVAL; pxa_last_gpio = pxa_gpio_nums(pdev); pchip->set_wake = info->gpio_set_wake; } else { irq_base = pxa_gpio_probe_dt(pdev, pchip); if (irq_base < 0) return -EINVAL; } if (!pxa_last_gpio) return -EINVAL; pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node, pxa_last_gpio + 1, irq_base, 0, &pxa_irq_domain_ops, pchip); if (!pchip->irqdomain) return -ENOMEM; irq0 = platform_get_irq_byname_optional(pdev, "gpio0"); irq1 = platform_get_irq_byname_optional(pdev, "gpio1"); irq_mux = platform_get_irq_byname(pdev, "gpio_mux"); if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0) || (irq_mux <= 0)) return -EINVAL; pchip->irq0 = irq0; pchip->irq1 = irq1; gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio_reg_base)) return PTR_ERR(gpio_reg_base); clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Error %ld to get gpio clock\n", PTR_ERR(clk)); return PTR_ERR(clk); } /* Initialize GPIO chips */ ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base); if (ret) return ret; /* clear all GPIO edge detects */ for_each_gpio_bank(gpio, c, pchip) { writel_relaxed(0, c->regbase + GFER_OFFSET); writel_relaxed(0, c->regbase + GRER_OFFSET); writel_relaxed(~0, c->regbase + GEDR_OFFSET); /* unmask GPIO edge detect for AP side */ if (gpio_is_mmp_type(gpio_type)) writel_relaxed(~0, c->regbase + ED_MASK_OFFSET); } if (irq0 > 0) { ret = devm_request_irq(&pdev->dev, irq0, pxa_gpio_direct_handler, 0, "gpio-0", pchip); if (ret) dev_err(&pdev->dev, "request of gpio0 irq failed: %d\n", ret); } if (irq1 > 0) { ret = devm_request_irq(&pdev->dev, irq1, pxa_gpio_direct_handler, 0, "gpio-1", pchip); if (ret) dev_err(&pdev->dev, "request of gpio1 irq failed: %d\n", ret); } ret = devm_request_irq(&pdev->dev, irq_mux, pxa_gpio_demux_handler, 0, "gpio-mux", pchip); if (ret) dev_err(&pdev->dev, "request of gpio-mux irq failed: %d\n", ret); pxa_gpio_chip = pchip; return 0; } static const struct platform_device_id gpio_id_table[] = { { "pxa25x-gpio", (unsigned long)&pxa25x_id }, { "pxa26x-gpio", (unsigned long)&pxa26x_id }, { "pxa27x-gpio", (unsigned long)&pxa27x_id 
}, { "pxa3xx-gpio", (unsigned long)&pxa3xx_id }, { "pxa93x-gpio", (unsigned long)&pxa93x_id }, { "mmp-gpio", (unsigned long)&mmp_id }, { "mmp2-gpio", (unsigned long)&mmp2_id }, { "pxa1928-gpio", (unsigned long)&pxa1928_id }, { }, }; static struct platform_driver pxa_gpio_driver = { .probe = pxa_gpio_probe, .driver = { .name = "pxa-gpio", .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, .id_table = gpio_id_table, }; static int __init pxa_gpio_legacy_init(void) { if (of_have_populated_dt()) return 0; return platform_driver_register(&pxa_gpio_driver); } postcore_initcall(pxa_gpio_legacy_init); static int __init pxa_gpio_dt_init(void) { if (of_have_populated_dt()) return platform_driver_register(&pxa_gpio_driver); return 0; } device_initcall(pxa_gpio_dt_init); #ifdef CONFIG_PM static int pxa_gpio_suspend(void) { struct pxa_gpio_chip *pchip = pxa_gpio_chip; struct pxa_gpio_bank *c; int gpio; if (!pchip) return 0; for_each_gpio_bank(gpio, c, pchip) { c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET); c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET); c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET); c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET); /* Clear GPIO transition detect bits */ writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET); } return 0; } static void pxa_gpio_resume(void) { struct pxa_gpio_chip *pchip = pxa_gpio_chip; struct pxa_gpio_bank *c; int gpio; if (!pchip) return; for_each_gpio_bank(gpio, c, pchip) { /* restore level with set/clear */ writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET); writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET); writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET); writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET); writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET); } } #else #define pxa_gpio_suspend NULL #define pxa_gpio_resume NULL #endif static struct syscore_ops pxa_gpio_syscore_ops = { .suspend = pxa_gpio_suspend, .resume = pxa_gpio_resume, }; static int __init pxa_gpio_sysinit(void) { register_syscore_ops(&pxa_gpio_syscore_ops); return 0; } postcore_initcall(pxa_gpio_sysinit);
linux-master
drivers/gpio/gpio-pxa.c
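/*
 * Illustrative sketch, not part of gpio-pxa.c: the bit algebra used by
 * its update_edge_detect(). For lines whose interrupt is unmasked
 * (irq_mask bit set), GRER/GFER follow the requested edge bits; all
 * other lines keep their current hardware value. The sample values
 * below are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t apply_edges(uint32_t hw, uint32_t requested, uint32_t unmasked)
{
	return (hw & ~unmasked) | (requested & unmasked);
}

int main(void)
{
	uint32_t grer_hw = 0x0000000F;	/* current GRER contents */
	uint32_t rise    = 0x00000105;	/* irq_edge_rise */
	uint32_t enabled = 0x00000101;	/* irq_mask (unmasked lines) */

	/* bits 0 and 8 follow 'rise'; bits 1-3 keep their old value */
	printf("new GRER = 0x%08X\n",
	       (unsigned int)apply_edges(grer_hw, rise, enabled));	/* 0x0000010F */
	return 0;
}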
// SPDX-License-Identifier: GPL-2.0-or-later /* * GPIO driver for the SMSC SCH311x Super-I/O chips * * Copyright (C) 2013 Bruno Randolf <[email protected]> * * SuperIO functions and chip detection: * (c) Copyright 2008 Wim Van Sebroeck <[email protected]>. */ #include <linux/ioport.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio/driver.h> #include <linux/bitops.h> #include <linux/io.h> #define DRV_NAME "gpio-sch311x" #define SCH311X_GPIO_CONF_DIR BIT(0) #define SCH311X_GPIO_CONF_INVERT BIT(1) #define SCH311X_GPIO_CONF_OPEN_DRAIN BIT(7) #define SIO_CONFIG_KEY_ENTER 0x55 #define SIO_CONFIG_KEY_EXIT 0xaa #define GP1 0x4b static int sch311x_ioports[] = { 0x2e, 0x4e, 0x162e, 0x164e }; static struct platform_device *sch311x_gpio_pdev; struct sch311x_pdev_data { /* platform device data */ unsigned short runtime_reg; /* runtime register base address */ }; struct sch311x_gpio_block { /* one GPIO block runtime data */ struct gpio_chip chip; unsigned short data_reg; /* from definition below */ unsigned short *config_regs; /* pointer to definition below */ unsigned short runtime_reg; /* runtime register */ spinlock_t lock; /* lock for this GPIO block */ }; struct sch311x_gpio_priv { /* driver private data */ struct sch311x_gpio_block blocks[6]; }; struct sch311x_gpio_block_def { /* register address definitions */ unsigned short data_reg; unsigned short config_regs[8]; unsigned short base; }; /* Note: some GPIOs are not available, these are marked with 0x00 */ static struct sch311x_gpio_block_def sch311x_gpio_blocks[] = { { .data_reg = 0x4b, /* GP1 */ .config_regs = {0x23, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2a, 0x2b}, .base = 10, }, { .data_reg = 0x4c, /* GP2 */ .config_regs = {0x00, 0x2c, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x32}, .base = 20, }, { .data_reg = 0x4d, /* GP3 */ .config_regs = {0x33, 0x34, 0x35, 0x36, 0x37, 0x00, 0x39, 0x3a}, .base = 30, }, { .data_reg = 0x4e, /* GP4 */ .config_regs = {0x3b, 0x00, 0x3d, 0x00, 0x6e, 0x6f, 0x72, 0x73}, .base = 40, }, { .data_reg = 0x4f, /* GP5 */ .config_regs = {0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46}, .base = 50, }, { .data_reg = 0x50, /* GP6 */ .config_regs = {0x47, 0x48, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59}, .base = 60, }, }; /* * Super-IO functions */ static inline int sch311x_sio_enter(int sio_config_port) { /* Don't step on other drivers' I/O space by accident. 
*/ if (!request_muxed_region(sio_config_port, 2, DRV_NAME)) { pr_err(DRV_NAME ": I/O address 0x%04x already in use\n", sio_config_port); return -EBUSY; } outb(SIO_CONFIG_KEY_ENTER, sio_config_port); return 0; } static inline void sch311x_sio_exit(int sio_config_port) { outb(SIO_CONFIG_KEY_EXIT, sio_config_port); release_region(sio_config_port, 2); } static inline int sch311x_sio_inb(int sio_config_port, int reg) { outb(reg, sio_config_port); return inb(sio_config_port + 1); } static inline void sch311x_sio_outb(int sio_config_port, int reg, int val) { outb(reg, sio_config_port); outb(val, sio_config_port + 1); } /* * GPIO functions */ static int sch311x_gpio_request(struct gpio_chip *chip, unsigned offset) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); if (block->config_regs[offset] == 0) /* GPIO is not available */ return -ENODEV; if (!request_region(block->runtime_reg + block->config_regs[offset], 1, DRV_NAME)) { dev_err(chip->parent, "Failed to request region 0x%04x.\n", block->runtime_reg + block->config_regs[offset]); return -EBUSY; } return 0; } static void sch311x_gpio_free(struct gpio_chip *chip, unsigned offset) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); if (block->config_regs[offset] == 0) /* GPIO is not available */ return; release_region(block->runtime_reg + block->config_regs[offset], 1); } static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); u8 data; spin_lock(&block->lock); data = inb(block->runtime_reg + block->data_reg); spin_unlock(&block->lock); return !!(data & BIT(offset)); } static void __sch311x_gpio_set(struct sch311x_gpio_block *block, unsigned offset, int value) { u8 data = inb(block->runtime_reg + block->data_reg); if (value) data |= BIT(offset); else data &= ~BIT(offset); outb(data, block->runtime_reg + block->data_reg); } static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); spin_lock(&block->lock); __sch311x_gpio_set(block, offset, value); spin_unlock(&block->lock); } static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); u8 data; spin_lock(&block->lock); data = inb(block->runtime_reg + block->config_regs[offset]); data |= SCH311X_GPIO_CONF_DIR; outb(data, block->runtime_reg + block->config_regs[offset]); spin_unlock(&block->lock); return 0; } static int sch311x_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); u8 data; spin_lock(&block->lock); data = inb(block->runtime_reg + block->config_regs[offset]); data &= ~SCH311X_GPIO_CONF_DIR; outb(data, block->runtime_reg + block->config_regs[offset]); __sch311x_gpio_set(block, offset, value); spin_unlock(&block->lock); return 0; } static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); u8 data; spin_lock(&block->lock); data = inb(block->runtime_reg + block->config_regs[offset]); spin_unlock(&block->lock); if (data & SCH311X_GPIO_CONF_DIR) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset, unsigned long config) { struct sch311x_gpio_block *block = gpiochip_get_data(chip); enum pin_config_param param = pinconf_to_config_param(config); u8 data; switch (param) { case
PIN_CONFIG_DRIVE_OPEN_DRAIN: spin_lock(&block->lock); data = inb(block->runtime_reg + block->config_regs[offset]); data |= SCH311X_GPIO_CONF_OPEN_DRAIN; outb(data, block->runtime_reg + block->config_regs[offset]); spin_unlock(&block->lock); return 0; case PIN_CONFIG_DRIVE_PUSH_PULL: spin_lock(&block->lock); data = inb(block->runtime_reg + block->config_regs[offset]); data &= ~SCH311X_GPIO_CONF_OPEN_DRAIN; outb(data, block->runtime_reg + block->config_regs[offset]); spin_unlock(&block->lock); return 0; default: break; } return -ENOTSUPP; } static int sch311x_gpio_probe(struct platform_device *pdev) { struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev); struct sch311x_gpio_priv *priv; struct sch311x_gpio_block *block; int err, i; /* we can register all GPIO data registers at once */ if (!devm_request_region(&pdev->dev, pdata->runtime_reg + GP1, 6, DRV_NAME)) { dev_err(&pdev->dev, "Failed to request region 0x%04x-0x%04x.\n", pdata->runtime_reg + GP1, pdata->runtime_reg + GP1 + 5); return -EBUSY; } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) { block = &priv->blocks[i]; spin_lock_init(&block->lock); block->chip.label = DRV_NAME; block->chip.owner = THIS_MODULE; block->chip.request = sch311x_gpio_request; block->chip.free = sch311x_gpio_free; block->chip.direction_input = sch311x_gpio_direction_in; block->chip.direction_output = sch311x_gpio_direction_out; block->chip.get_direction = sch311x_gpio_get_direction; block->chip.set_config = sch311x_gpio_set_config; block->chip.get = sch311x_gpio_get; block->chip.set = sch311x_gpio_set; block->chip.ngpio = 8; block->chip.parent = &pdev->dev; block->chip.base = sch311x_gpio_blocks[i].base; block->config_regs = sch311x_gpio_blocks[i].config_regs; block->data_reg = sch311x_gpio_blocks[i].data_reg; block->runtime_reg = pdata->runtime_reg; err = devm_gpiochip_add_data(&pdev->dev, &block->chip, block); if (err < 0) { dev_err(&pdev->dev, "Could not register gpiochip, %d\n", err); return err; } dev_info(&pdev->dev, "SMSC SCH311x GPIO block %d registered.\n", i); } return 0; } static struct platform_driver sch311x_gpio_driver = { .driver.name = DRV_NAME, .probe = sch311x_gpio_probe, }; /* * Init & exit routines */ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) { int err = 0, reg; unsigned short base_addr; u8 dev_id; err = sch311x_sio_enter(sio_config_port); if (err) return err; /* Check device ID. 
*/ reg = sch311x_sio_inb(sio_config_port, 0x20); switch (reg) { case 0x7c: /* SCH3112 */ dev_id = 2; break; case 0x7d: /* SCH3114 */ dev_id = 4; break; case 0x7f: /* SCH3116 */ dev_id = 6; break; default: err = -ENODEV; goto exit; } /* Select logical device A (runtime registers) */ sch311x_sio_outb(sio_config_port, 0x07, 0x0a); /* Check if Logical Device Register is currently active */ if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0) pr_info("Seems that LDN 0x0a is not active...\n"); /* Get the base address of the runtime registers */ base_addr = (sch311x_sio_inb(sio_config_port, 0x60) << 8) | sch311x_sio_inb(sio_config_port, 0x61); if (!base_addr) { pr_err("Base address not set\n"); err = -ENODEV; goto exit; } *addr = base_addr; pr_info("Found an SMSC SCH311%d chip at 0x%04x\n", dev_id, base_addr); exit: sch311x_sio_exit(sio_config_port); return err; } static int __init sch311x_gpio_pdev_add(const unsigned short addr) { struct sch311x_pdev_data pdata; int err; pdata.runtime_reg = addr; sch311x_gpio_pdev = platform_device_alloc(DRV_NAME, -1); if (!sch311x_gpio_pdev) return -ENOMEM; err = platform_device_add_data(sch311x_gpio_pdev, &pdata, sizeof(pdata)); if (err) { pr_err(DRV_NAME ": Platform data allocation failed\n"); goto err; } err = platform_device_add(sch311x_gpio_pdev); if (err) { pr_err(DRV_NAME ": Device addition failed\n"); goto err; } return 0; err: platform_device_put(sch311x_gpio_pdev); return err; } static int __init sch311x_gpio_init(void) { int err, i; unsigned short addr = 0; for (i = 0; i < ARRAY_SIZE(sch311x_ioports); i++) if (sch311x_detect(sch311x_ioports[i], &addr) == 0) break; if (!addr) return -ENODEV; err = platform_driver_register(&sch311x_gpio_driver); if (err) return err; err = sch311x_gpio_pdev_add(addr); if (err) goto unreg_platform_driver; return 0; unreg_platform_driver: platform_driver_unregister(&sch311x_gpio_driver); return err; } static void __exit sch311x_gpio_exit(void) { platform_device_unregister(sch311x_gpio_pdev); platform_driver_unregister(&sch311x_gpio_driver); } module_init(sch311x_gpio_init); module_exit(sch311x_gpio_exit); MODULE_AUTHOR("Bruno Randolf <[email protected]>"); MODULE_DESCRIPTION("SMSC SCH311x GPIO Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gpio-sch311x");
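/*
 * Illustrative sketch, not part of gpio-sch311x.c: the index/data access
 * pattern behind sch311x_sio_inb()/sch311x_sio_outb(). After unlocking
 * the chip with the 0x55 enter key, a configuration register is reached
 * by writing its index to the config port and then reading or writing
 * the data port at config port + 1. The simulated register file and
 * helper names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t sio_regs[256];
static uint8_t index_latch;

static void sim_outb(uint8_t val, int port)
{
	if (port == 0x2e)		/* index port: latch register number */
		index_latch = val;
	else if (port == 0x2f)		/* data port: write latched register */
		sio_regs[index_latch] = val;
}

static uint8_t sim_inb(int port)
{
	return port == 0x2f ? sio_regs[index_latch] : 0xff;
}

int main(void)
{
	sio_regs[0x20] = 0x7c;		/* pretend a SCH3112 answers */
	sim_outb(0x20, 0x2e);		/* select the device-ID register */
	printf("device id: 0x%02x\n", sim_inb(0x2f));
	return 0;
}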
linux-master
drivers/gpio/gpio-sch311x.c
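/*
 * Worked example, not from gpio-sch311x.c itself: sch311x_detect()
 * assembles the runtime register base from two 8-bit configuration
 * registers, high byte at index 0x60 and low byte at index 0x61. The
 * byte values below are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hi = 0x0a;	/* hypothetical value read from reg 0x60 */
	unsigned int lo = 0x00;	/* hypothetical value read from reg 0x61 */
	unsigned short base_addr = (hi << 8) | lo;

	printf("runtime register base: 0x%04x\n", base_addr);	/* 0x0a00 */
	return 0;
}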
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011 Jamie Iles * * All enquiries to [email protected] */ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/reset.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "gpiolib.h" #include "gpiolib-acpi.h" #define GPIO_SWPORTA_DR 0x00 #define GPIO_SWPORTA_DDR 0x04 #define GPIO_SWPORTB_DR 0x0c #define GPIO_SWPORTB_DDR 0x10 #define GPIO_SWPORTC_DR 0x18 #define GPIO_SWPORTC_DDR 0x1c #define GPIO_SWPORTD_DR 0x24 #define GPIO_SWPORTD_DDR 0x28 #define GPIO_INTEN 0x30 #define GPIO_INTMASK 0x34 #define GPIO_INTTYPE_LEVEL 0x38 #define GPIO_INT_POLARITY 0x3c #define GPIO_INTSTATUS 0x40 #define GPIO_PORTA_DEBOUNCE 0x48 #define GPIO_PORTA_EOI 0x4c #define GPIO_EXT_PORTA 0x50 #define GPIO_EXT_PORTB 0x54 #define GPIO_EXT_PORTC 0x58 #define GPIO_EXT_PORTD 0x5c #define DWAPB_DRIVER_NAME "gpio-dwapb" #define DWAPB_MAX_PORTS 4 #define DWAPB_MAX_GPIOS 32 #define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ #define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ #define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ #define GPIO_REG_OFFSET_V1 0 #define GPIO_REG_OFFSET_V2 1 #define GPIO_REG_OFFSET_MASK BIT(0) #define GPIO_INTMASK_V2 0x44 #define GPIO_INTTYPE_LEVEL_V2 0x34 #define GPIO_INT_POLARITY_V2 0x38 #define GPIO_INTSTATUS_V2 0x3c #define GPIO_PORTA_EOI_V2 0x40 #define DWAPB_NR_CLOCKS 2 struct dwapb_gpio; struct dwapb_port_property { struct fwnode_handle *fwnode; unsigned int idx; unsigned int ngpio; unsigned int gpio_base; int irq[DWAPB_MAX_GPIOS]; }; struct dwapb_platform_data { struct dwapb_port_property *properties; unsigned int nports; }; #ifdef CONFIG_PM_SLEEP /* Store GPIO context across system-wide suspend/resume transitions */ struct dwapb_context { u32 data; u32 dir; u32 ext; u32 int_en; u32 int_mask; u32 int_type; u32 int_pol; u32 int_deb; u32 wake_en; }; #endif struct dwapb_gpio_port_irqchip { unsigned int nr_irqs; unsigned int irq[DWAPB_MAX_GPIOS]; }; struct dwapb_gpio_port { struct gpio_chip gc; struct dwapb_gpio_port_irqchip *pirq; struct dwapb_gpio *gpio; #ifdef CONFIG_PM_SLEEP struct dwapb_context *ctx; #endif unsigned int idx; }; #define to_dwapb_gpio(_gc) \ (container_of(_gc, struct dwapb_gpio_port, gc)->gpio) struct dwapb_gpio { struct device *dev; void __iomem *regs; struct dwapb_gpio_port *ports; unsigned int nr_ports; unsigned int flags; struct reset_control *rst; struct clk_bulk_data clks[DWAPB_NR_CLOCKS]; }; static inline u32 gpio_reg_v2_convert(unsigned int offset) { switch (offset) { case GPIO_INTMASK: return GPIO_INTMASK_V2; case GPIO_INTTYPE_LEVEL: return GPIO_INTTYPE_LEVEL_V2; case GPIO_INT_POLARITY: return GPIO_INT_POLARITY_V2; case GPIO_INTSTATUS: return GPIO_INTSTATUS_V2; case GPIO_PORTA_EOI: return GPIO_PORTA_EOI_V2; } return offset; } static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset) { if ((gpio->flags & GPIO_REG_OFFSET_MASK) == GPIO_REG_OFFSET_V2) return gpio_reg_v2_convert(offset); return offset; } static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset) { struct gpio_chip *gc = &gpio->ports[0].gc; void __iomem *reg_base = gpio->regs; return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset)); } static 
inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset, u32 val) { struct gpio_chip *gc = &gpio->ports[0].gc; void __iomem *reg_base = gpio->regs; gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val); } static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs) { struct dwapb_gpio_port *port; int i; for (i = 0; i < gpio->nr_ports; i++) { port = &gpio->ports[i]; if (port->idx == offs / DWAPB_MAX_GPIOS) return port; } return NULL; } static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs) { struct dwapb_gpio_port *port = dwapb_offs_to_port(gpio, offs); struct gpio_chip *gc; u32 pol; int val; if (!port) return; gc = &port->gc; pol = dwapb_read(gpio, GPIO_INT_POLARITY); /* Just read the current value right out of the data register */ val = gc->get(gc, offs % DWAPB_MAX_GPIOS); if (val) pol &= ~BIT(offs); else pol |= BIT(offs); dwapb_write(gpio, GPIO_INT_POLARITY, pol); } static u32 dwapb_do_irq(struct dwapb_gpio *gpio) { struct gpio_chip *gc = &gpio->ports[0].gc; unsigned long irq_status; irq_hw_number_t hwirq; irq_status = dwapb_read(gpio, GPIO_INTSTATUS); for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) { int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq); u32 irq_type = irq_get_trigger_type(gpio_irq); generic_handle_irq(gpio_irq); if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) dwapb_toggle_trigger(gpio, hwirq); } return irq_status; } static void dwapb_irq_handler(struct irq_desc *desc) { struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); dwapb_do_irq(gpio); chained_irq_exit(chip, desc); } static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id) { return IRQ_RETVAL(dwapb_do_irq(dev_id)); } static void dwapb_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); u32 val = BIT(irqd_to_hwirq(d)); unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); dwapb_write(gpio, GPIO_PORTA_EOI, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void dwapb_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq); dwapb_write(gpio, GPIO_INTMASK, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); gpiochip_disable_irq(gc, hwirq); } static void dwapb_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 val; gpiochip_enable_irq(gc, hwirq); raw_spin_lock_irqsave(&gc->bgpio_lock, flags); val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq); dwapb_write(gpio, GPIO_INTMASK, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void dwapb_irq_enable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); val = dwapb_read(gpio, GPIO_INTEN); val |= BIT(irqd_to_hwirq(d)); dwapb_write(gpio, GPIO_INTEN, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void dwapb_irq_disable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = 
to_dwapb_gpio(gc); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); val = dwapb_read(gpio, GPIO_INTEN); val &= ~BIT(irqd_to_hwirq(d)); dwapb_write(gpio, GPIO_INTEN, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static int dwapb_irq_set_type(struct irq_data *d, u32 type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); irq_hw_number_t bit = irqd_to_hwirq(d); unsigned long level, polarity, flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL); polarity = dwapb_read(gpio, GPIO_INT_POLARITY); switch (type) { case IRQ_TYPE_EDGE_BOTH: level |= BIT(bit); dwapb_toggle_trigger(gpio, bit); break; case IRQ_TYPE_EDGE_RISING: level |= BIT(bit); polarity |= BIT(bit); break; case IRQ_TYPE_EDGE_FALLING: level |= BIT(bit); polarity &= ~BIT(bit); break; case IRQ_TYPE_LEVEL_HIGH: level &= ~BIT(bit); polarity |= BIT(bit); break; case IRQ_TYPE_LEVEL_LOW: level &= ~BIT(bit); polarity &= ~BIT(bit); break; } if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); else if (type & IRQ_TYPE_EDGE_BOTH) irq_set_handler_locked(d, handle_edge_irq); dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level); if (type != IRQ_TYPE_EDGE_BOTH) dwapb_write(gpio, GPIO_INT_POLARITY, polarity); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); return 0; } #ifdef CONFIG_PM_SLEEP static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); struct dwapb_context *ctx = gpio->ports[0].ctx; irq_hw_number_t bit = irqd_to_hwirq(d); if (enable) ctx->wake_en |= BIT(bit); else ctx->wake_en &= ~BIT(bit); return 0; } #else #define dwapb_irq_set_wake NULL #endif static const struct irq_chip dwapb_irq_chip = { .name = DWAPB_DRIVER_NAME, .irq_ack = dwapb_irq_ack, .irq_mask = dwapb_irq_mask, .irq_unmask = dwapb_irq_unmask, .irq_set_type = dwapb_irq_set_type, .irq_enable = dwapb_irq_enable, .irq_disable = dwapb_irq_disable, .irq_set_wake = dwapb_irq_set_wake, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int dwapb_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, unsigned debounce) { struct dwapb_gpio_port *port = gpiochip_get_data(gc); struct dwapb_gpio *gpio = port->gpio; unsigned long flags, val_deb; unsigned long mask = BIT(offset); raw_spin_lock_irqsave(&gc->bgpio_lock, flags); val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE); if (debounce) val_deb |= mask; else val_deb &= ~mask; dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); return 0; } static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset, unsigned long config) { u32 debounce; if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) return -ENOTSUPP; debounce = pinconf_to_config_argument(config); return dwapb_gpio_set_debounce(gc, offset, debounce); } static int dwapb_convert_irqs(struct dwapb_gpio_port_irqchip *pirq, struct dwapb_port_property *pp) { int i; /* Group all available IRQs into an array of parental IRQs. */ for (i = 0; i < pp->ngpio; ++i) { if (!pp->irq[i]) continue; pirq->irq[pirq->nr_irqs++] = pp->irq[i]; } return pirq->nr_irqs ? 
0 : -ENOENT; } static void dwapb_configure_irqs(struct dwapb_gpio *gpio, struct dwapb_gpio_port *port, struct dwapb_port_property *pp) { struct dwapb_gpio_port_irqchip *pirq; struct gpio_chip *gc = &port->gc; struct gpio_irq_chip *girq; int err; pirq = devm_kzalloc(gpio->dev, sizeof(*pirq), GFP_KERNEL); if (!pirq) return; if (dwapb_convert_irqs(pirq, pp)) { dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx); goto err_kfree_pirq; } girq = &gc->irq; girq->handler = handle_bad_irq; girq->default_type = IRQ_TYPE_NONE; port->pirq = pirq; /* * Intel ACPI-based platforms mostly have the DesignWare APB GPIO * IRQ lane shared between several devices. In that case the parental * IRQ has to be handled in the shared way so to be properly delivered * to all the connected devices. */ if (has_acpi_companion(gpio->dev)) { girq->num_parents = 0; girq->parents = NULL; girq->parent_handler = NULL; err = devm_request_irq(gpio->dev, pp->irq[0], dwapb_irq_handler_mfd, IRQF_SHARED, DWAPB_DRIVER_NAME, gpio); if (err) { dev_err(gpio->dev, "error requesting IRQ\n"); goto err_kfree_pirq; } } else { girq->num_parents = pirq->nr_irqs; girq->parents = pirq->irq; girq->parent_handler_data = gpio; girq->parent_handler = dwapb_irq_handler; } gpio_irq_chip_set_chip(girq, &dwapb_irq_chip); return; err_kfree_pirq: devm_kfree(gpio->dev, pirq); } static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, struct dwapb_port_property *pp, unsigned int offs) { struct dwapb_gpio_port *port; void __iomem *dat, *set, *dirout; int err; port = &gpio->ports[offs]; port->gpio = gpio; port->idx = pp->idx; #ifdef CONFIG_PM_SLEEP port->ctx = devm_kzalloc(gpio->dev, sizeof(*port->ctx), GFP_KERNEL); if (!port->ctx) return -ENOMEM; #endif dat = gpio->regs + GPIO_EXT_PORTA + pp->idx * GPIO_EXT_PORT_STRIDE; set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE; dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE; /* This registers 32 GPIO lines per port */ err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout, NULL, 0); if (err) { dev_err(gpio->dev, "failed to init gpio chip for port%d\n", port->idx); return err; } port->gc.fwnode = pp->fwnode; port->gc.ngpio = pp->ngpio; port->gc.base = pp->gpio_base; /* Only port A support debounce */ if (pp->idx == 0) port->gc.set_config = dwapb_gpio_set_config; /* Only port A can provide interrupts in all configurations of the IP */ if (pp->idx == 0) dwapb_configure_irqs(gpio, port, pp); err = devm_gpiochip_add_data(gpio->dev, &port->gc, port); if (err) { dev_err(gpio->dev, "failed to register gpiochip for port%d\n", port->idx); return err; } return 0; } static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode, struct dwapb_port_property *pp) { int irq, j; for (j = 0; j < pp->ngpio; j++) { if (has_acpi_companion(dev)) irq = platform_get_irq_optional(to_platform_device(dev), j); else irq = fwnode_irq_get(fwnode, j); if (irq > 0) pp->irq[j] = irq; } } static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev) { struct fwnode_handle *fwnode; struct dwapb_platform_data *pdata; struct dwapb_port_property *pp; int nports; int i; nports = device_get_child_node_count(dev); if (nports == 0) return ERR_PTR(-ENODEV); pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL); if (!pdata->properties) return ERR_PTR(-ENOMEM); pdata->nports = nports; i = 0; device_for_each_child_node(dev, fwnode) { pp = &pdata->properties[i++]; pp->fwnode = 
fwnode; if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) || pp->idx >= DWAPB_MAX_PORTS) { dev_err(dev, "missing/invalid port index for port%d\n", i); fwnode_handle_put(fwnode); return ERR_PTR(-EINVAL); } if (fwnode_property_read_u32(fwnode, "ngpios", &pp->ngpio) && fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) { dev_info(dev, "failed to get number of gpios for port%d\n", i); pp->ngpio = DWAPB_MAX_GPIOS; } pp->gpio_base = -1; /* For internal use only, new platforms mustn't exercise this */ if (is_software_node(fwnode)) fwnode_property_read_u32(fwnode, "gpio-base", &pp->gpio_base); /* * Only port A can provide interrupts in all configurations of * the IP. */ if (pp->idx == 0) dwapb_get_irq(dev, fwnode, pp); } return pdata; } static void dwapb_assert_reset(void *data) { struct dwapb_gpio *gpio = data; reset_control_assert(gpio->rst); } static int dwapb_get_reset(struct dwapb_gpio *gpio) { int err; gpio->rst = devm_reset_control_get_optional_shared(gpio->dev, NULL); if (IS_ERR(gpio->rst)) return dev_err_probe(gpio->dev, PTR_ERR(gpio->rst), "Cannot get reset descriptor\n"); err = reset_control_deassert(gpio->rst); if (err) { dev_err(gpio->dev, "Cannot deassert reset lane\n"); return err; } return devm_add_action_or_reset(gpio->dev, dwapb_assert_reset, gpio); } static void dwapb_disable_clks(void *data) { struct dwapb_gpio *gpio = data; clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks); } static int dwapb_get_clks(struct dwapb_gpio *gpio) { int err; /* Optional bus and debounce clocks */ gpio->clks[0].id = "bus"; gpio->clks[1].id = "db"; err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS, gpio->clks); if (err) return dev_err_probe(gpio->dev, err, "Cannot get APB/Debounce clocks\n"); err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); if (err) { dev_err(gpio->dev, "Cannot enable APB/Debounce clocks\n"); return err; } return devm_add_action_or_reset(gpio->dev, dwapb_disable_clks, gpio); } static const struct of_device_id dwapb_of_match[] = { { .compatible = "snps,dw-apb-gpio", .data = (void *)GPIO_REG_OFFSET_V1}, { .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2}, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, dwapb_of_match); static const struct acpi_device_id dwapb_acpi_match[] = { {"HISI0181", GPIO_REG_OFFSET_V1}, {"APMC0D07", GPIO_REG_OFFSET_V1}, {"APMC0D81", GPIO_REG_OFFSET_V2}, { } }; MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match); static int dwapb_gpio_probe(struct platform_device *pdev) { unsigned int i; struct dwapb_gpio *gpio; int err; struct dwapb_platform_data *pdata; struct device *dev = &pdev->dev; pdata = dwapb_gpio_get_pdata(dev); if (IS_ERR(pdata)) return PTR_ERR(pdata); gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; gpio->dev = &pdev->dev; gpio->nr_ports = pdata->nports; err = dwapb_get_reset(gpio); if (err) return err; gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports, sizeof(*gpio->ports), GFP_KERNEL); if (!gpio->ports) return -ENOMEM; gpio->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio->regs)) return PTR_ERR(gpio->regs); err = dwapb_get_clks(gpio); if (err) return err; gpio->flags = (uintptr_t)device_get_match_data(dev); for (i = 0; i < gpio->nr_ports; i++) { err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i); if (err) return err; } platform_set_drvdata(pdev, gpio); return 0; } #ifdef CONFIG_PM_SLEEP static int dwapb_gpio_suspend(struct device *dev) { struct dwapb_gpio *gpio = dev_get_drvdata(dev); struct gpio_chip *gc = &gpio->ports[0].gc; 
unsigned long flags; int i; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); for (i = 0; i < gpio->nr_ports; i++) { unsigned int offset; unsigned int idx = gpio->ports[i].idx; struct dwapb_context *ctx = gpio->ports[i].ctx; offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; ctx->dir = dwapb_read(gpio, offset); offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; ctx->data = dwapb_read(gpio, offset); offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; ctx->ext = dwapb_read(gpio, offset); /* Only port A can provide interrupts */ if (idx == 0) { ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK); ctx->int_en = dwapb_read(gpio, GPIO_INTEN); ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY); ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL); ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE); /* Mask out interrupts */ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en); } } raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks); return 0; } static int dwapb_gpio_resume(struct device *dev) { struct dwapb_gpio *gpio = dev_get_drvdata(dev); struct gpio_chip *gc = &gpio->ports[0].gc; unsigned long flags; int i, err; err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); if (err) { dev_err(gpio->dev, "Cannot reenable APB/Debounce clocks\n"); return err; } raw_spin_lock_irqsave(&gc->bgpio_lock, flags); for (i = 0; i < gpio->nr_ports; i++) { unsigned int offset; unsigned int idx = gpio->ports[i].idx; struct dwapb_context *ctx = gpio->ports[i].ctx; offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; dwapb_write(gpio, offset, ctx->data); offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; dwapb_write(gpio, offset, ctx->dir); offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; dwapb_write(gpio, offset, ctx->ext); /* Only port A can provide interrupts */ if (idx == 0) { dwapb_write(gpio, GPIO_INTTYPE_LEVEL, ctx->int_type); dwapb_write(gpio, GPIO_INT_POLARITY, ctx->int_pol); dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb); dwapb_write(gpio, GPIO_INTEN, ctx->int_en); dwapb_write(gpio, GPIO_INTMASK, ctx->int_mask); /* Clear out spurious interrupts */ dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff); } } raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); return 0; } #endif static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend, dwapb_gpio_resume); static struct platform_driver dwapb_gpio_driver = { .driver = { .name = DWAPB_DRIVER_NAME, .pm = &dwapb_gpio_pm_ops, .of_match_table = dwapb_of_match, .acpi_match_table = dwapb_acpi_match, }, .probe = dwapb_gpio_probe, }; module_platform_driver(dwapb_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver"); MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME);
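/*
 * Illustrative check, not part of gpio-dwapb.c: dwapb_gpio_add_port()
 * computes each port's data/direction/input register addresses from the
 * port-A base plus a per-port stride. This reproduces the SWPORTB/C/D
 * and EXT_PORTB/C/D offsets defined at the top of the driver.
 */
#include <stdio.h>

#define GPIO_SWPORTA_DR		0x00
#define GPIO_SWPORTA_DDR	0x04
#define GPIO_EXT_PORTA		0x50
#define GPIO_EXT_PORT_STRIDE	0x04	/* 32 bits */
#define GPIO_SWPORT_DR_STRIDE	0x0c	/* 3 * 32 bits */
#define GPIO_SWPORT_DDR_STRIDE	0x0c	/* 3 * 32 bits */

int main(void)
{
	for (unsigned int idx = 0; idx < 4; idx++)
		printf("port %c: dr 0x%02x, ddr 0x%02x, ext 0x%02x\n",
		       (int)('A' + idx),
		       GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE,
		       GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE,
		       GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE);
	return 0;
}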
linux-master
drivers/gpio/gpio-dwapb.c
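/*
 * Illustrative sketch, not part of gpio-dwapb.c: the IP has no native
 * both-edges mode, so dwapb_toggle_trigger() emulates IRQ_TYPE_EDGE_BOTH
 * by re-arming the polarity register after every interrupt - if the line
 * now reads high, wait for a falling edge, otherwise wait for a rising
 * edge. The helper below mirrors that decision on a plain integer.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rearm_polarity(uint32_t polarity, unsigned int bit, int level)
{
	if (level)
		polarity &= ~(1u << bit);	/* line high: catch falling edge */
	else
		polarity |= 1u << bit;		/* line low: catch rising edge */
	return polarity;
}

int main(void)
{
	uint32_t pol = 0;

	/* line 5 toggles; after each interrupt the opposite edge is armed */
	pol = rearm_polarity(pol, 5, 1);
	printf("after high level: 0x%08x\n", (unsigned int)pol);	/* bit 5 clear */
	pol = rearm_polarity(pol, 5, 0);
	printf("after low level:  0x%08x\n", (unsigned int)pol);	/* bit 5 set */
	return 0;
}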
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for the ACCES PCIe-IDIO-24 family * Copyright (C) 2018 William Breathitt Gray * * This driver supports the following ACCES devices: PCIe-IDIO-24, * PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12. */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio/regmap.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/regmap.h> #include <linux/spinlock.h> #include <linux/types.h> /* * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status * * Bit: Description * 0: Enable Interrupt Sources (Bit 0) * 1: Enable Interrupt Sources (Bit 1) * 2: Generate Internal PCI Bus Internal SERR# Interrupt * 3: Mailbox Interrupt Enable * 4: Power Management Interrupt Enable * 5: Power Management Interrupt * 6: Slave Read Local Data Parity Check Error Enable * 7: Slave Read Local Data Parity Check Error Status * 8: Internal PCI Wire Interrupt Enable * 9: PCI Express Doorbell Interrupt Enable * 10: PCI Abort Interrupt Enable * 11: Local Interrupt Input Enable * 12: Retry Abort Enable * 13: PCI Express Doorbell Interrupt Active * 14: PCI Abort Interrupt Active * 15: Local Interrupt Input Active * 16: Local Interrupt Output Enable * 17: Local Doorbell Interrupt Enable * 18: DMA Channel 0 Interrupt Enable * 19: DMA Channel 1 Interrupt Enable * 20: Local Doorbell Interrupt Active * 21: DMA Channel 0 Interrupt Active * 22: DMA Channel 1 Interrupt Active * 23: Built-In Self-Test (BIST) Interrupt Active * 24: Direct Master was the Bus Master during a Master or Target Abort * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort * 27: Target Abort after internal 256 consecutive Master Retrys * 28: PCI Bus wrote data to LCS_MBOX0 * 29: PCI Bus wrote data to LCS_MBOX1 * 30: PCI Bus wrote data to LCS_MBOX2 * 31: PCI Bus wrote data to LCS_MBOX3 */ #define PLX_PEX8311_PCI_LCS_INTCSR 0x68 #define INTCSR_INTERNAL_PCI_WIRE BIT(8) #define INTCSR_LOCAL_INPUT BIT(11) #define IDIO_24_ENABLE_IRQ (INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) #define IDIO_24_OUT_BASE 0x0 #define IDIO_24_TTLCMOS_OUT_REG 0x3 #define IDIO_24_IN_BASE 0x4 #define IDIO_24_TTLCMOS_IN_REG 0x7 #define IDIO_24_COS_STATUS_BASE 0x8 #define IDIO_24_CONTROL_REG 0xC #define IDIO_24_COS_ENABLE 0xE #define IDIO_24_SOFT_RESET 0xF #define CONTROL_REG_OUT_MODE BIT(1) #define COS_ENABLE_RISING BIT(1) #define COS_ENABLE_FALLING BIT(4) #define COS_ENABLE_BOTH (COS_ENABLE_RISING | COS_ENABLE_FALLING) static const struct regmap_config pex8311_intcsr_regmap_config = { .name = "pex8311_intcsr", .reg_bits = 32, .reg_stride = 1, .reg_base = PLX_PEX8311_PCI_LCS_INTCSR, .val_bits = 32, .io_port = true, }; static const struct regmap_range idio_24_wr_ranges[] = { regmap_reg_range(0x0, 0x3), regmap_reg_range(0x8, 0xC), regmap_reg_range(0xE, 0xF), }; static const struct regmap_range idio_24_rd_ranges[] = { regmap_reg_range(0x0, 0xC), regmap_reg_range(0xE, 0xF), }; static const struct regmap_range idio_24_volatile_ranges[] = { regmap_reg_range(0x4, 0xB), regmap_reg_range(0xF, 0xF), }; static const struct regmap_access_table idio_24_wr_table = { .yes_ranges = idio_24_wr_ranges, .n_yes_ranges = ARRAY_SIZE(idio_24_wr_ranges), }; static const struct regmap_access_table idio_24_rd_table = { .yes_ranges = idio_24_rd_ranges, .n_yes_ranges = ARRAY_SIZE(idio_24_rd_ranges), }; static const struct regmap_access_table idio_24_volatile_table = { .yes_ranges = 
idio_24_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(idio_24_volatile_ranges), }; static const struct regmap_config idio_24_regmap_config = { .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .io_port = true, .wr_table = &idio_24_wr_table, .rd_table = &idio_24_rd_table, .volatile_table = &idio_24_volatile_table, .cache_type = REGCACHE_FLAT, .use_raw_spinlock = true, }; #define IDIO_24_NGPIO_PER_REG 8 #define IDIO_24_REGMAP_IRQ(_id) \ [24 + _id] = { \ .reg_offset = (_id) / IDIO_24_NGPIO_PER_REG, \ .mask = BIT((_id) % IDIO_24_NGPIO_PER_REG), \ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \ } #define IDIO_24_IIN_IRQ(_id) IDIO_24_REGMAP_IRQ(_id) #define IDIO_24_TTL_IRQ(_id) IDIO_24_REGMAP_IRQ(24 + _id) static const struct regmap_irq idio_24_regmap_irqs[] = { IDIO_24_IIN_IRQ(0), IDIO_24_IIN_IRQ(1), IDIO_24_IIN_IRQ(2), /* IIN 0-2 */ IDIO_24_IIN_IRQ(3), IDIO_24_IIN_IRQ(4), IDIO_24_IIN_IRQ(5), /* IIN 3-5 */ IDIO_24_IIN_IRQ(6), IDIO_24_IIN_IRQ(7), IDIO_24_IIN_IRQ(8), /* IIN 6-8 */ IDIO_24_IIN_IRQ(9), IDIO_24_IIN_IRQ(10), IDIO_24_IIN_IRQ(11), /* IIN 9-11 */ IDIO_24_IIN_IRQ(12), IDIO_24_IIN_IRQ(13), IDIO_24_IIN_IRQ(14), /* IIN 12-14 */ IDIO_24_IIN_IRQ(15), IDIO_24_IIN_IRQ(16), IDIO_24_IIN_IRQ(17), /* IIN 15-17 */ IDIO_24_IIN_IRQ(18), IDIO_24_IIN_IRQ(19), IDIO_24_IIN_IRQ(20), /* IIN 18-20 */ IDIO_24_IIN_IRQ(21), IDIO_24_IIN_IRQ(22), IDIO_24_IIN_IRQ(23), /* IIN 21-23 */ IDIO_24_TTL_IRQ(0), IDIO_24_TTL_IRQ(1), IDIO_24_TTL_IRQ(2), /* TTL 0-2 */ IDIO_24_TTL_IRQ(3), IDIO_24_TTL_IRQ(4), IDIO_24_TTL_IRQ(5), /* TTL 3-5 */ IDIO_24_TTL_IRQ(6), IDIO_24_TTL_IRQ(7), /* TTL 6-7 */ }; /** * struct idio_24_gpio - GPIO device private data structure * @map: regmap for the device * @lock: synchronization lock to prevent I/O race conditions * @irq_type: type configuration for IRQs */ struct idio_24_gpio { struct regmap *map; raw_spinlock_t lock; u8 irq_type; }; static int idio_24_handle_mask_sync(const int index, const unsigned int mask_buf_def, const unsigned int mask_buf, void *const irq_drv_data) { const unsigned int type_mask = COS_ENABLE_BOTH << index; struct idio_24_gpio *const idio24gpio = irq_drv_data; u8 type; int ret; raw_spin_lock(&idio24gpio->lock); /* if all are masked, then disable interrupts, else set to type */ type = (mask_buf == mask_buf_def) ? 
~type_mask : idio24gpio->irq_type; ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, type_mask, type); raw_spin_unlock(&idio24gpio->lock); return ret; } static int idio_24_set_type_config(unsigned int **const buf, const unsigned int type, const struct regmap_irq *const irq_data, const int idx, void *const irq_drv_data) { const unsigned int offset = irq_data->reg_offset; const unsigned int rising = COS_ENABLE_RISING << offset; const unsigned int falling = COS_ENABLE_FALLING << offset; const unsigned int mask = COS_ENABLE_BOTH << offset; struct idio_24_gpio *const idio24gpio = irq_drv_data; unsigned int new; unsigned int cos_enable; int ret; switch (type) { case IRQ_TYPE_EDGE_RISING: new = rising; break; case IRQ_TYPE_EDGE_FALLING: new = falling; break; case IRQ_TYPE_EDGE_BOTH: new = mask; break; default: return -EINVAL; } raw_spin_lock(&idio24gpio->lock); /* replace old bitmap with new bitmap */ idio24gpio->irq_type = (idio24gpio->irq_type & ~mask) | (new & mask); ret = regmap_read(idio24gpio->map, IDIO_24_COS_ENABLE, &cos_enable); if (ret) goto exit_unlock; /* if COS is currently enabled then update the edge type */ if (cos_enable & mask) { ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, mask, idio24gpio->irq_type); if (ret) goto exit_unlock; } exit_unlock: raw_spin_unlock(&idio24gpio->lock); return ret; } static int idio_24_reg_mask_xlate(struct gpio_regmap *const gpio, const unsigned int base, const unsigned int offset, unsigned int *const reg, unsigned int *const mask) { const unsigned int out_stride = offset / IDIO_24_NGPIO_PER_REG; const unsigned int in_stride = (offset - 24) / IDIO_24_NGPIO_PER_REG; struct regmap *const map = gpio_regmap_get_drvdata(gpio); int err; unsigned int ctrl_reg; switch (base) { case IDIO_24_OUT_BASE: *mask = BIT(offset % IDIO_24_NGPIO_PER_REG); /* FET Outputs */ if (offset < 24) { *reg = IDIO_24_OUT_BASE + out_stride; return 0; } /* Isolated Inputs */ if (offset < 48) { *reg = IDIO_24_IN_BASE + in_stride; return 0; } err = regmap_read(map, IDIO_24_CONTROL_REG, &ctrl_reg); if (err) return err; /* TTL/CMOS Outputs */ if (ctrl_reg & CONTROL_REG_OUT_MODE) { *reg = IDIO_24_TTLCMOS_OUT_REG; return 0; } /* TTL/CMOS Inputs */ *reg = IDIO_24_TTLCMOS_IN_REG; return 0; case IDIO_24_CONTROL_REG: /* We can only set direction for TTL/CMOS lines */ if (offset < 48) return -EOPNOTSUPP; *reg = IDIO_24_CONTROL_REG; *mask = CONTROL_REG_OUT_MODE; return 0; default: /* Should never reach this path */ return -EINVAL; } } #define IDIO_24_NGPIO 56 static const char *idio_24_names[IDIO_24_NGPIO] = { "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7", "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15", "OUT16", "OUT17", "OUT18", "OUT19", "OUT20", "OUT21", "OUT22", "OUT23", "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7", "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15", "IIN16", "IIN17", "IIN18", "IIN19", "IIN20", "IIN21", "IIN22", "IIN23", "TTL0", "TTL1", "TTL2", "TTL3", "TTL4", "TTL5", "TTL6", "TTL7" }; static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *const dev = &pdev->dev; struct idio_24_gpio *idio24gpio; int err; const size_t pci_plx_bar_index = 1; const size_t pci_bar_index = 2; const char *const name = pci_name(pdev); struct gpio_regmap_config gpio_config = {}; void __iomem *pex8311_regs; void __iomem *idio_24_regs; struct regmap *intcsr_map; struct regmap_irq_chip *chip; struct regmap_irq_chip_data *chip_data; err = 
pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device (%d)\n", err); return err; } err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name); if (err) { dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err); return err; } pex8311_regs = pcim_iomap_table(pdev)[pci_plx_bar_index]; idio_24_regs = pcim_iomap_table(pdev)[pci_bar_index]; intcsr_map = devm_regmap_init_mmio(dev, pex8311_regs, &pex8311_intcsr_regmap_config); if (IS_ERR(intcsr_map)) return dev_err_probe(dev, PTR_ERR(intcsr_map), "Unable to initialize PEX8311 register map\n"); idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL); if (!idio24gpio) return -ENOMEM; idio24gpio->map = devm_regmap_init_mmio(dev, idio_24_regs, &idio_24_regmap_config); if (IS_ERR(idio24gpio->map)) return dev_err_probe(dev, PTR_ERR(idio24gpio->map), "Unable to initialize register map\n"); raw_spin_lock_init(&idio24gpio->lock); /* Initialize all IRQ type configuration to IRQ_TYPE_EDGE_BOTH */ idio24gpio->irq_type = GENMASK(7, 0); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->name = name; chip->status_base = IDIO_24_COS_STATUS_BASE; chip->mask_base = IDIO_24_COS_ENABLE; chip->ack_base = IDIO_24_COS_STATUS_BASE; chip->num_regs = 4; chip->irqs = idio_24_regmap_irqs; chip->num_irqs = ARRAY_SIZE(idio_24_regmap_irqs); chip->handle_mask_sync = idio_24_handle_mask_sync; chip->set_type_config = idio_24_set_type_config; chip->irq_drv_data = idio24gpio; /* Software board reset */ err = regmap_write(idio24gpio->map, IDIO_24_SOFT_RESET, 0); if (err) return err; /* * enable PLX PEX8311 internal PCI wire interrupt and local interrupt * input */ err = regmap_update_bits(intcsr_map, 0x0, IDIO_24_ENABLE_IRQ, IDIO_24_ENABLE_IRQ); if (err) return err; err = devm_regmap_add_irq_chip(dev, idio24gpio->map, pdev->irq, 0, 0, chip, &chip_data); if (err) return dev_err_probe(dev, err, "IRQ registration failed\n"); gpio_config.parent = dev; gpio_config.regmap = idio24gpio->map; gpio_config.ngpio = IDIO_24_NGPIO; gpio_config.names = idio_24_names; gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(IDIO_24_OUT_BASE); gpio_config.reg_set_base = GPIO_REGMAP_ADDR(IDIO_24_OUT_BASE); gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(IDIO_24_CONTROL_REG); gpio_config.ngpio_per_reg = IDIO_24_NGPIO_PER_REG; gpio_config.irq_domain = regmap_irq_get_domain(chip_data); gpio_config.reg_mask_xlate = idio_24_reg_mask_xlate; gpio_config.drvdata = idio24gpio->map; return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config)); } static const struct pci_device_id idio_24_pci_dev_id[] = { { PCI_DEVICE(0x494F, 0x0FD0) }, { PCI_DEVICE(0x494F, 0x0BD0) }, { PCI_DEVICE(0x494F, 0x07D0) }, { PCI_DEVICE(0x494F, 0x0FC0) }, { 0 } }; MODULE_DEVICE_TABLE(pci, idio_24_pci_dev_id); static struct pci_driver idio_24_driver = { .name = "pcie-idio-24", .id_table = idio_24_pci_dev_id, .probe = idio_24_probe }; module_pci_driver(idio_24_driver); MODULE_AUTHOR("William Breathitt Gray <[email protected]>"); MODULE_DESCRIPTION("ACCES PCIe-IDIO-24 GPIO driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-pcie-idio-24.c
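The offset translation in idio_24_reg_mask_xlate() above folds three line banks (24 FET outputs, 24 isolated inputs, 8 TTL/CMOS lines) into one flat GPIO numbering. Below is a minimal host-side sketch of that arithmetic only, with register offsets taken from the driver's macros and a ttl_out flag standing in for the CONTROL_REG_OUT_MODE read; it is not kernel code.

/*
 * Host-side sketch of the offset-to-register mapping in
 * idio_24_reg_mask_xlate(): offsets 0-23 hit the FET output registers,
 * 24-47 the isolated input registers, 48-55 a single TTL/CMOS register
 * whose address depends on the direction bit.
 */
#include <stdio.h>

#define OUT_BASE        0x0
#define IN_BASE         0x4
#define TTLCMOS_OUT_REG 0x3
#define TTLCMOS_IN_REG  0x7
#define NGPIO_PER_REG   8

static void xlate(unsigned int offset, int ttl_out,
		  unsigned int *reg, unsigned int *mask)
{
	*mask = 1U << (offset % NGPIO_PER_REG);

	if (offset < 24)                /* FET outputs */
		*reg = OUT_BASE + offset / NGPIO_PER_REG;
	else if (offset < 48)           /* isolated inputs */
		*reg = IN_BASE + (offset - 24) / NGPIO_PER_REG;
	else                            /* TTL/CMOS, direction-dependent */
		*reg = ttl_out ? TTLCMOS_OUT_REG : TTLCMOS_IN_REG;
}

int main(void)
{
	unsigned int reg, mask;

	xlate(30, 0, &reg, &mask);      /* IIN6: reg 0x4, mask 0x40 */
	printf("offset 30 -> reg 0x%x mask 0x%02x\n", reg, mask);
	xlate(50, 1, &reg, &mask);      /* TTL2 as output: reg 0x3 */
	printf("offset 50 -> reg 0x%x mask 0x%02x\n", reg, mask);
	return 0;
}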
// SPDX-License-Identifier: GPL-2.0-or-later /* bt8xx GPIO abuser Copyright (C) 2008 Michael Buesch <[email protected]> Please do _only_ contact the people listed _above_ with issues related to this driver. All the other people listed below are not related to this driver. Their names are only here, because this driver is derived from the bt848 driver. Derived from the bt848 driver: Copyright (C) 1996,97,98 Ralph Metzler & Marcus Metzler (c) 1999-2002 Gerd Knorr some v4l2 code lines are taken from Justin's bttv2 driver which is (c) 2000 Justin Schoeman V4L1 removal from: (c) 2005-2006 Nickolay V. Shmyrev Fixes to be fully V4L2 compliant by (c) 2006 Mauro Carvalho Chehab Cropping and overscan support Copyright (C) 2005, 2006 Michael H. Schimek Sponsored by OPQ Systems AB */ #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/gpio/driver.h> #include <linux/slab.h> /* Steal the hardware definitions from the bttv driver. */ #include "../media/pci/bt8xx/bt848.h" #define BT8XXGPIO_NR_GPIOS 24 /* We have 24 GPIO pins */ struct bt8xxgpio { spinlock_t lock; void __iomem *mmio; struct pci_dev *pdev; struct gpio_chip gpio; #ifdef CONFIG_PM u32 saved_outen; u32 saved_data; #endif }; #define bgwrite(dat, adr) writel((dat), bg->mmio+(adr)) #define bgread(adr) readl(bg->mmio+(adr)) static int modparam_gpiobase = -1/* dynamic */; module_param_named(gpiobase, modparam_gpiobase, int, 0444); MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the default."); static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) { struct bt8xxgpio *bg = gpiochip_get_data(gpio); unsigned long flags; u32 outen, data; spin_lock_irqsave(&bg->lock, flags); data = bgread(BT848_GPIO_DATA); data &= ~(1 << nr); bgwrite(data, BT848_GPIO_DATA); outen = bgread(BT848_GPIO_OUT_EN); outen &= ~(1 << nr); bgwrite(outen, BT848_GPIO_OUT_EN); spin_unlock_irqrestore(&bg->lock, flags); return 0; } static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr) { struct bt8xxgpio *bg = gpiochip_get_data(gpio); unsigned long flags; u32 val; spin_lock_irqsave(&bg->lock, flags); val = bgread(BT848_GPIO_DATA); spin_unlock_irqrestore(&bg->lock, flags); return !!(val & (1 << nr)); } static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, int val) { struct bt8xxgpio *bg = gpiochip_get_data(gpio); unsigned long flags; u32 outen, data; spin_lock_irqsave(&bg->lock, flags); outen = bgread(BT848_GPIO_OUT_EN); outen |= (1 << nr); bgwrite(outen, BT848_GPIO_OUT_EN); data = bgread(BT848_GPIO_DATA); if (val) data |= (1 << nr); else data &= ~(1 << nr); bgwrite(data, BT848_GPIO_DATA); spin_unlock_irqrestore(&bg->lock, flags); return 0; } static void bt8xxgpio_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) { struct bt8xxgpio *bg = gpiochip_get_data(gpio); unsigned long flags; u32 data; spin_lock_irqsave(&bg->lock, flags); data = bgread(BT848_GPIO_DATA); if (val) data |= (1 << nr); else data &= ~(1 << nr); bgwrite(data, BT848_GPIO_DATA); spin_unlock_irqrestore(&bg->lock, flags); } static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg) { struct gpio_chip *c = &bg->gpio; c->label = dev_name(&bg->pdev->dev); c->owner = THIS_MODULE; c->direction_input = bt8xxgpio_gpio_direction_input; c->get = bt8xxgpio_gpio_get; c->direction_output = bt8xxgpio_gpio_direction_output; c->set = bt8xxgpio_gpio_set; c->dbg_show = NULL; c->base = modparam_gpiobase; c->ngpio = BT8XXGPIO_NR_GPIOS; c->can_sleep = false; } static int bt8xxgpio_probe(struct pci_dev *dev, const 
struct pci_device_id *pci_id) { struct bt8xxgpio *bg; int err; bg = devm_kzalloc(&dev->dev, sizeof(struct bt8xxgpio), GFP_KERNEL); if (!bg) return -ENOMEM; bg->pdev = dev; spin_lock_init(&bg->lock); err = pci_enable_device(dev); if (err) { dev_err(&dev->dev, "can't enable device.\n"); return err; } if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0), pci_resource_len(dev, 0), "bt8xxgpio")) { dev_warn(&dev->dev, "can't request iomem (0x%llx).\n", (unsigned long long)pci_resource_start(dev, 0)); err = -EBUSY; goto err_disable; } pci_set_master(dev); pci_set_drvdata(dev, bg); bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000); if (!bg->mmio) { dev_err(&dev->dev, "ioremap() failed\n"); err = -EIO; goto err_disable; } /* Disable interrupts */ bgwrite(0, BT848_INT_MASK); /* gpio init */ bgwrite(0, BT848_GPIO_DMA_CTL); bgwrite(0, BT848_GPIO_REG_INP); bgwrite(0, BT848_GPIO_OUT_EN); bt8xxgpio_gpio_setup(bg); err = gpiochip_add_data(&bg->gpio, bg); if (err) { dev_err(&dev->dev, "failed to register GPIOs\n"); goto err_disable; } return 0; err_disable: pci_disable_device(dev); return err; } static void bt8xxgpio_remove(struct pci_dev *pdev) { struct bt8xxgpio *bg = pci_get_drvdata(pdev); gpiochip_remove(&bg->gpio); bgwrite(0, BT848_INT_MASK); bgwrite(~0x0, BT848_INT_STAT); bgwrite(0x0, BT848_GPIO_OUT_EN); pci_disable_device(pdev); } #ifdef CONFIG_PM static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state) { struct bt8xxgpio *bg = pci_get_drvdata(pdev); unsigned long flags; spin_lock_irqsave(&bg->lock, flags); bg->saved_outen = bgread(BT848_GPIO_OUT_EN); bg->saved_data = bgread(BT848_GPIO_DATA); bgwrite(0, BT848_INT_MASK); bgwrite(~0x0, BT848_INT_STAT); bgwrite(0x0, BT848_GPIO_OUT_EN); spin_unlock_irqrestore(&bg->lock, flags); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int bt8xxgpio_resume(struct pci_dev *pdev) { struct bt8xxgpio *bg = pci_get_drvdata(pdev); unsigned long flags; int err; pci_set_power_state(pdev, PCI_D0); err = pci_enable_device(pdev); if (err) return err; pci_restore_state(pdev); spin_lock_irqsave(&bg->lock, flags); bgwrite(0, BT848_INT_MASK); bgwrite(0, BT848_GPIO_DMA_CTL); bgwrite(0, BT848_GPIO_REG_INP); bgwrite(bg->saved_outen, BT848_GPIO_OUT_EN); bgwrite(bg->saved_data & bg->saved_outen, BT848_GPIO_DATA); spin_unlock_irqrestore(&bg->lock, flags); return 0; } #else #define bt8xxgpio_suspend NULL #define bt8xxgpio_resume NULL #endif /* CONFIG_PM */ static const struct pci_device_id bt8xxgpio_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) }, { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849) }, { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878) }, { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT879) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, bt8xxgpio_pci_tbl); static struct pci_driver bt8xxgpio_pci_driver = { .name = "bt8xxgpio", .id_table = bt8xxgpio_pci_tbl, .probe = bt8xxgpio_probe, .remove = bt8xxgpio_remove, .suspend = bt8xxgpio_suspend, .resume = bt8xxgpio_resume, }; module_pci_driver(bt8xxgpio_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Buesch"); MODULE_DESCRIPTION("Abuse a BT8xx framegrabber card as generic GPIO card");
linux-master
drivers/gpio/gpio-bt8xx.c
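bt8xxgpio serializes every access to the shared BT848_GPIO_DATA and BT848_GPIO_OUT_EN registers behind a spinlock, because each bit change is a read-modify-write of a register shared by all 24 lines. A host-side sketch of that pattern follows, with a pthread mutex and a plain variable assumed in place of the spinlock and MMIO register.

/*
 * Sketch of the locked read-modify-write pattern used by the driver's
 * set/direction callbacks: every bit flip rereads the shared register
 * under the lock so concurrent callers cannot lose each other's updates.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_gpio_data;          /* stands in for BT848_GPIO_DATA */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void gpio_set_bit(unsigned int nr, int val)
{
	pthread_mutex_lock(&lock);
	if (val)
		fake_gpio_data |= 1U << nr;
	else
		fake_gpio_data &= ~(1U << nr);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	gpio_set_bit(3, 1);
	gpio_set_bit(5, 1);
	gpio_set_bit(3, 0);
	printf("data = 0x%08x\n", fake_gpio_data);   /* prints 0x00000020 */
	return 0;
}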
// SPDX-License-Identifier: GPL-2.0+ /* * Access to GPIOs on TWL4030/TPS659x0 chips * * Copyright (C) 2006-2007 Texas Instruments, Inc. * Copyright (C) 2006 MontaVista Software, Inc. * * Code re-arranged and cleaned up by: * Syed Mohammed Khasim <[email protected]> * * Initial Code: * Andy Lowe / Nishanth Menon */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/irq.h> #include <linux/gpio/machine.h> #include <linux/gpio/driver.h> #include <linux/gpio/consumer.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/irqdomain.h> #include <linux/mfd/twl.h> /* * The GPIO "subchip" supports 18 GPIOs which can be configured as * inputs or outputs, with pullups or pulldowns on each pin. Each * GPIO can trigger interrupts on either or both edges. * * GPIO interrupts can be fed to either of two IRQ lines; this is * intended to support multiple hosts. * * There are also two LED pins used sometimes as output-only GPIOs. */ /* genirq interfaces are not available to modules */ #ifdef MODULE #define is_module() true #else #define is_module() false #endif /* GPIO_CTRL Fields */ #define MASK_GPIO_CTRL_GPIO0CD1 BIT(0) #define MASK_GPIO_CTRL_GPIO1CD2 BIT(1) #define MASK_GPIO_CTRL_GPIO_ON BIT(2) /* Mask for GPIO registers when aggregated into a 32-bit integer */ #define GPIO_32_MASK 0x0003ffff struct gpio_twl4030_priv { struct gpio_chip gpio_chip; struct mutex mutex; int irq_base; /* Bitfields for state caching */ unsigned int usage_count; unsigned int direction; unsigned int out_state; }; /*----------------------------------------------------------------------*/ /* * To configure TWL4030 GPIO module registers */ static inline int gpio_twl4030_write(u8 address, u8 data) { return twl_i2c_write_u8(TWL4030_MODULE_GPIO, data, address); } /*----------------------------------------------------------------------*/ /* * LED register offsets from TWL_MODULE_LED base * PWMs A and B are dedicated to LEDs A and B, respectively. */ #define TWL4030_LED_LEDEN_REG 0x00 #define TWL4030_PWMAON_REG 0x01 #define TWL4030_PWMAOFF_REG 0x02 #define TWL4030_PWMBON_REG 0x03 #define TWL4030_PWMBOFF_REG 0x04 /* LEDEN bits */ #define LEDEN_LEDAON BIT(0) #define LEDEN_LEDBON BIT(1) #define LEDEN_LEDAEXT BIT(2) #define LEDEN_LEDBEXT BIT(3) #define LEDEN_LEDAPWM BIT(4) #define LEDEN_LEDBPWM BIT(5) #define LEDEN_PWM_LENGTHA BIT(6) #define LEDEN_PWM_LENGTHB BIT(7) #define PWMxON_LENGTH BIT(7) /*----------------------------------------------------------------------*/ /* * To read a TWL4030 GPIO module register */ static inline int gpio_twl4030_read(u8 address) { u8 data; int ret = 0; ret = twl_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address); return (ret < 0) ? ret : data; } /*----------------------------------------------------------------------*/ static u8 cached_leden; /* The LED lines are open drain outputs ... a FET pulls to GND, so an * external pullup is needed. We could also expose the integrated PWM * as a LED brightness control; we initialize it as "always on". 
*/ static void twl4030_led_set_value(int led, int value) { u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM; if (led) mask <<= 1; if (value) cached_leden &= ~mask; else cached_leden |= mask; WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, TWL4030_LED_LEDEN_REG)); } static int twl4030_set_gpio_direction(int gpio, int is_input) { u8 d_bnk = gpio >> 3; u8 d_msk = BIT(gpio & 0x7); u8 reg = 0; u8 base = REG_GPIODATADIR1 + d_bnk; int ret = 0; ret = gpio_twl4030_read(base); if (ret >= 0) { if (is_input) reg = ret & ~d_msk; else reg = ret | d_msk; ret = gpio_twl4030_write(base, reg); } return ret; } static int twl4030_get_gpio_direction(int gpio) { u8 d_bnk = gpio >> 3; u8 d_msk = BIT(gpio & 0x7); u8 base = REG_GPIODATADIR1 + d_bnk; int ret = 0; ret = gpio_twl4030_read(base); if (ret < 0) return ret; if (ret & d_msk) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int twl4030_set_gpio_dataout(int gpio, int enable) { u8 d_bnk = gpio >> 3; u8 d_msk = BIT(gpio & 0x7); u8 base = 0; if (enable) base = REG_SETGPIODATAOUT1 + d_bnk; else base = REG_CLEARGPIODATAOUT1 + d_bnk; return gpio_twl4030_write(base, d_msk); } static int twl4030_get_gpio_datain(int gpio) { u8 d_bnk = gpio >> 3; u8 d_off = gpio & 0x7; u8 base = 0; int ret = 0; base = REG_GPIODATAIN1 + d_bnk; ret = gpio_twl4030_read(base); if (ret > 0) ret = (ret >> d_off) & 0x1; return ret; } /*----------------------------------------------------------------------*/ static int twl_request(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); int status = 0; mutex_lock(&priv->mutex); /* Support the two LED outputs as output-only GPIOs. */ if (offset >= TWL4030_GPIO_MAX) { u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT | LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA; u8 reg = TWL4030_PWMAON_REG; offset -= TWL4030_GPIO_MAX; if (offset) { ledclr_mask <<= 1; reg = TWL4030_PWMBON_REG; } /* initialize PWM to always-drive */ /* Configure PWM OFF register first */ status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg + 1); if (status < 0) goto done; /* Followed by PWM ON register */ status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg); if (status < 0) goto done; /* init LED to not-driven (high) */ status = twl_i2c_read_u8(TWL4030_MODULE_LED, &cached_leden, TWL4030_LED_LEDEN_REG); if (status < 0) goto done; cached_leden &= ~ledclr_mask; status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, TWL4030_LED_LEDEN_REG); if (status < 0) goto done; status = 0; goto done; } /* on first use, turn GPIO module "on" */ if (!priv->usage_count) { struct twl4030_gpio_platform_data *pdata; u8 value = MASK_GPIO_CTRL_GPIO_ON; /* optionally have the first two GPIOs switch vMMC1 * and vMMC2 power supplies based on card presence. 
*/ pdata = dev_get_platdata(chip->parent); if (pdata) value |= pdata->mmc_cd & 0x03; status = gpio_twl4030_write(REG_GPIO_CTRL, value); } done: if (!status) priv->usage_count |= BIT(offset); mutex_unlock(&priv->mutex); return status; } static void twl_free(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); mutex_lock(&priv->mutex); if (offset >= TWL4030_GPIO_MAX) { twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1); goto out; } priv->usage_count &= ~BIT(offset); /* on last use, switch off GPIO module */ if (!priv->usage_count) gpio_twl4030_write(REG_GPIO_CTRL, 0x0); out: mutex_unlock(&priv->mutex); } static int twl_direction_in(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); int ret; mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) ret = twl4030_set_gpio_direction(offset, 1); else ret = -EINVAL; /* LED outputs can't be set as input */ if (!ret) priv->direction &= ~BIT(offset); mutex_unlock(&priv->mutex); return ret; } static int twl_get(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); int ret; int status = 0; mutex_lock(&priv->mutex); if (!(priv->usage_count & BIT(offset))) { ret = -EPERM; goto out; } if (priv->direction & BIT(offset)) status = priv->out_state & BIT(offset); else status = twl4030_get_gpio_datain(offset); ret = (status < 0) ? status : !!status; out: mutex_unlock(&priv->mutex); return ret; } static void twl_set(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) twl4030_set_gpio_dataout(offset, value); else twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value); if (value) priv->out_state |= BIT(offset); else priv->out_state &= ~BIT(offset); mutex_unlock(&priv->mutex); } static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); int ret = 0; mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) { ret = twl4030_set_gpio_direction(offset, 0); if (ret) { mutex_unlock(&priv->mutex); return ret; } } /* * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output */ priv->direction |= BIT(offset); mutex_unlock(&priv->mutex); twl_set(chip, offset, value); return ret; } static int twl_get_direction(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); /* * Default GPIO_LINE_DIRECTION_OUT * LED GPIOs >= TWL4030_GPIO_MAX are always output */ int ret = GPIO_LINE_DIRECTION_OUT; mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) { ret = twl4030_get_gpio_direction(offset); if (ret) { mutex_unlock(&priv->mutex); return ret; } } mutex_unlock(&priv->mutex); return ret; } static int twl_to_irq(struct gpio_chip *chip, unsigned offset) { struct gpio_twl4030_priv *priv = gpiochip_get_data(chip); return (priv->irq_base && (offset < TWL4030_GPIO_MAX)) ? 
(priv->irq_base + offset) : -EINVAL; } static const struct gpio_chip template_chip = { .label = "twl4030", .owner = THIS_MODULE, .request = twl_request, .free = twl_free, .direction_input = twl_direction_in, .direction_output = twl_direction_out, .get_direction = twl_get_direction, .get = twl_get, .set = twl_set, .to_irq = twl_to_irq, .can_sleep = true, }; /*----------------------------------------------------------------------*/ static int gpio_twl4030_pulls(u32 ups, u32 downs) { u8 message[5]; unsigned i, gpio_bit; /* For most pins, a pulldown was enabled by default. * We should have data that's specific to this board. */ for (gpio_bit = 1, i = 0; i < 5; i++) { u8 bit_mask; unsigned j; for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) { if (ups & gpio_bit) bit_mask |= 1 << (j + 1); else if (downs & gpio_bit) bit_mask |= 1 << (j + 0); } message[i] = bit_mask; } return twl_i2c_write(TWL4030_MODULE_GPIO, message, REG_GPIOPUPDCTR1, 5); } static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd) { u8 message[3]; /* 30 msec of debouncing is always used for MMC card detect, * and is optional for everything else. */ message[0] = (debounce & 0xff) | (mmc_cd & 0x03); debounce >>= 8; message[1] = (debounce & 0xff); debounce >>= 8; message[2] = (debounce & 0x03); return twl_i2c_write(TWL4030_MODULE_GPIO, message, REG_GPIO_DEBEN1, 3); } static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev) { struct twl4030_gpio_platform_data *omap_twl_info; omap_twl_info = devm_kzalloc(dev, sizeof(*omap_twl_info), GFP_KERNEL); if (!omap_twl_info) return NULL; omap_twl_info->use_leds = of_property_read_bool(dev->of_node, "ti,use-leds"); of_property_read_u32(dev->of_node, "ti,debounce", &omap_twl_info->debounce); of_property_read_u32(dev->of_node, "ti,mmc-cd", (u32 *)&omap_twl_info->mmc_cd); of_property_read_u32(dev->of_node, "ti,pullups", &omap_twl_info->pullups); of_property_read_u32(dev->of_node, "ti,pulldowns", &omap_twl_info->pulldowns); return omap_twl_info; } /* Called from the registered devm action */ static void gpio_twl4030_power_off_action(void *data) { struct gpio_desc *d = data; gpiod_unexport(d); gpiochip_free_own_desc(d); } static int gpio_twl4030_probe(struct platform_device *pdev) { struct twl4030_gpio_platform_data *pdata; struct device_node *node = pdev->dev.of_node; struct gpio_twl4030_priv *priv; int ret, irq_base; priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_twl4030_priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* maybe setup IRQs */ if (is_module()) { dev_err(&pdev->dev, "can't dispatch IRQs from modules\n"); goto no_irqs; } irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, TWL4030_GPIO_MAX, 0); if (irq_base < 0) { dev_err(&pdev->dev, "Failed to alloc irq_descs\n"); return irq_base; } irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0, &irq_domain_simple_ops, NULL); ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base); if (ret < 0) return ret; priv->irq_base = irq_base; no_irqs: priv->gpio_chip = template_chip; priv->gpio_chip.base = -1; priv->gpio_chip.ngpio = TWL4030_GPIO_MAX; priv->gpio_chip.parent = &pdev->dev; mutex_init(&priv->mutex); pdata = of_gpio_twl4030(&pdev->dev); if (pdata == NULL) { dev_err(&pdev->dev, "Platform data is missing\n"); return -ENXIO; } /* * NOTE: boards may waste power if they don't set pullups * and pulldowns correctly ... default for non-ULPI pins is * pulldown, and some other pins may have external pullups * or pulldowns. Careful! 
*/ ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns); if (ret) dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n", pdata->pullups, pdata->pulldowns, ret); ret = gpio_twl4030_debounce(pdata->debounce, pdata->mmc_cd); if (ret) dev_dbg(&pdev->dev, "debounce %.03x %.01x --> %d\n", pdata->debounce, pdata->mmc_cd, ret); /* * NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE, * is (still) clear if use_leds is set. */ if (pdata->use_leds) priv->gpio_chip.ngpio += 2; ret = devm_gpiochip_add_data(&pdev->dev, &priv->gpio_chip, priv); if (ret < 0) { dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret); priv->gpio_chip.ngpio = 0; return ret; } /* * Special quirk for the OMAP3 to hog and export a WLAN power * GPIO. */ if (IS_ENABLED(CONFIG_ARCH_OMAP3) && of_machine_is_compatible("compulab,omap3-sbc-t3730")) { struct gpio_desc *d; d = gpiochip_request_own_desc(&priv->gpio_chip, 2, "wlan pwr", GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH); if (IS_ERR(d)) return dev_err_probe(&pdev->dev, PTR_ERR(d), "unable to hog wlan pwr GPIO\n"); gpiod_export(d, 0); ret = devm_add_action_or_reset(&pdev->dev, gpio_twl4030_power_off_action, d); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to install power off handler\n"); } return 0; } static const struct of_device_id twl_gpio_match[] = { { .compatible = "ti,twl4030-gpio", }, { }, }; MODULE_DEVICE_TABLE(of, twl_gpio_match); /* Note: this hardware lives inside an I2C-based multi-function device. */ MODULE_ALIAS("platform:twl4030_gpio"); static struct platform_driver gpio_twl4030_driver = { .driver = { .name = "twl4030_gpio", .of_match_table = twl_gpio_match, }, .probe = gpio_twl4030_probe, }; static int __init gpio_twl4030_init(void) { return platform_driver_register(&gpio_twl4030_driver); } subsys_initcall(gpio_twl4030_init); static void __exit gpio_twl4030_exit(void) { platform_driver_unregister(&gpio_twl4030_driver); } module_exit(gpio_twl4030_exit); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_DESCRIPTION("GPIO interface for TWL4030"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-twl4030.c
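gpio_twl4030_pulls() packs the board's pull choices into the five-byte REG_GPIOPUPDCTR1 message: each GPIO owns a 2-bit field (the even bit enables the pulldown, the odd bit the pullup), four GPIOs per byte. A standalone sketch of that packing, with made-up bitmap values in the example call:

/*
 * Host-side sketch of the pull-configuration packing in
 * gpio_twl4030_pulls(): 2 bits per GPIO, 4 GPIOs per byte, 5 bytes
 * covering the chip's 18 GPIO lines.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_pulls(uint32_t ups, uint32_t downs, uint8_t msg[5])
{
	uint32_t gpio_bit = 1;

	for (int i = 0; i < 5; i++) {
		uint8_t byte = 0;

		for (int j = 0; j < 8; j += 2, gpio_bit <<= 1) {
			if (ups & gpio_bit)
				byte |= 1 << (j + 1);   /* pullup bit */
			else if (downs & gpio_bit)
				byte |= 1 << j;         /* pulldown bit */
		}
		msg[i] = byte;
	}
}

int main(void)
{
	uint8_t msg[5];

	/* pull up GPIO0, pull down GPIO1 and GPIO4 (illustrative masks) */
	pack_pulls(0x00001, 0x00012, msg);
	for (int i = 0; i < 5; i++)
		printf("byte %d = 0x%02x\n", i, msg[i]);   /* 0x06, 0x01, ... */
	return 0;
}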
// SPDX-License-Identifier: GPL-2.0 /* * Faraday Technolog FTGPIO010 gpiochip and interrupt routines * Copyright (C) 2017 Linus Walleij <[email protected]> * * Based on arch/arm/mach-gemini/gpio.c: * Copyright (C) 2008-2009 Paulius Zaleckas <[email protected]> * * Based on plat-mxc/gpio.c: * MXC GPIO support. (c) 2008 Daniel Mack <[email protected]> * Copyright 2008 Juergen Beisert, [email protected] */ #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <linux/clk.h> /* GPIO registers definition */ #define GPIO_DATA_OUT 0x00 #define GPIO_DATA_IN 0x04 #define GPIO_DIR 0x08 #define GPIO_BYPASS_IN 0x0C #define GPIO_DATA_SET 0x10 #define GPIO_DATA_CLR 0x14 #define GPIO_PULL_EN 0x18 #define GPIO_PULL_TYPE 0x1C #define GPIO_INT_EN 0x20 #define GPIO_INT_STAT_RAW 0x24 #define GPIO_INT_STAT_MASKED 0x28 #define GPIO_INT_MASK 0x2C #define GPIO_INT_CLR 0x30 #define GPIO_INT_TYPE 0x34 #define GPIO_INT_BOTH_EDGE 0x38 #define GPIO_INT_LEVEL 0x3C #define GPIO_DEBOUNCE_EN 0x40 #define GPIO_DEBOUNCE_PRESCALE 0x44 /** * struct ftgpio_gpio - Gemini GPIO state container * @dev: containing device for this instance * @gc: gpiochip for this instance * @base: remapped I/O-memory base * @clk: silicon clock */ struct ftgpio_gpio { struct device *dev; struct gpio_chip gc; void __iomem *base; struct clk *clk; }; static void ftgpio_gpio_ack_irq(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ftgpio_gpio *g = gpiochip_get_data(gc); writel(BIT(irqd_to_hwirq(d)), g->base + GPIO_INT_CLR); } static void ftgpio_gpio_mask_irq(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ftgpio_gpio *g = gpiochip_get_data(gc); u32 val; val = readl(g->base + GPIO_INT_EN); val &= ~BIT(irqd_to_hwirq(d)); writel(val, g->base + GPIO_INT_EN); gpiochip_disable_irq(gc, irqd_to_hwirq(d)); } static void ftgpio_gpio_unmask_irq(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ftgpio_gpio *g = gpiochip_get_data(gc); u32 val; gpiochip_enable_irq(gc, irqd_to_hwirq(d)); val = readl(g->base + GPIO_INT_EN); val |= BIT(irqd_to_hwirq(d)); writel(val, g->base + GPIO_INT_EN); } static int ftgpio_gpio_set_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ftgpio_gpio *g = gpiochip_get_data(gc); u32 mask = BIT(irqd_to_hwirq(d)); u32 reg_both, reg_level, reg_type; reg_type = readl(g->base + GPIO_INT_TYPE); reg_level = readl(g->base + GPIO_INT_LEVEL); reg_both = readl(g->base + GPIO_INT_BOTH_EDGE); switch (type) { case IRQ_TYPE_EDGE_BOTH: irq_set_handler_locked(d, handle_edge_irq); reg_type &= ~mask; reg_both |= mask; break; case IRQ_TYPE_EDGE_RISING: irq_set_handler_locked(d, handle_edge_irq); reg_type &= ~mask; reg_both &= ~mask; reg_level &= ~mask; break; case IRQ_TYPE_EDGE_FALLING: irq_set_handler_locked(d, handle_edge_irq); reg_type &= ~mask; reg_both &= ~mask; reg_level |= mask; break; case IRQ_TYPE_LEVEL_HIGH: irq_set_handler_locked(d, handle_level_irq); reg_type |= mask; reg_level &= ~mask; break; case IRQ_TYPE_LEVEL_LOW: irq_set_handler_locked(d, handle_level_irq); reg_type |= mask; reg_level |= mask; break; default: irq_set_handler_locked(d, handle_bad_irq); return -EINVAL; } writel(reg_type, g->base + GPIO_INT_TYPE); writel(reg_level, g->base + GPIO_INT_LEVEL); writel(reg_both, g->base + GPIO_INT_BOTH_EDGE); ftgpio_gpio_ack_irq(d); return 0; } static void ftgpio_gpio_irq_handler(struct irq_desc 
*desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct ftgpio_gpio *g = gpiochip_get_data(gc); struct irq_chip *irqchip = irq_desc_get_chip(desc); int offset; unsigned long stat; chained_irq_enter(irqchip, desc); stat = readl(g->base + GPIO_INT_STAT_RAW); if (stat) for_each_set_bit(offset, &stat, gc->ngpio) generic_handle_domain_irq(gc->irq.domain, offset); chained_irq_exit(irqchip, desc); } static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) { enum pin_config_param param = pinconf_to_config_param(config); u32 arg = pinconf_to_config_argument(config); struct ftgpio_gpio *g = gpiochip_get_data(gc); unsigned long pclk_freq; u32 deb_div; u32 val; if (param != PIN_CONFIG_INPUT_DEBOUNCE) return -ENOTSUPP; /* * Debounce only works if interrupts are enabled. The manual * states that if PCLK is 66 MHz, and this is set to 0x7D0, then * PCLK is divided down to 33 kHz for the debounce timer. 0x7D0 is * 2000 decimal, so what they mean is simply that the PCLK is * divided by this value. * * As we get a debounce setting in microseconds, we calculate the * desired period time and see if we can get a suitable debounce * time. */ pclk_freq = clk_get_rate(g->clk); deb_div = DIV_ROUND_CLOSEST(pclk_freq, arg); /* This register is only 24 bits wide */ if (deb_div > (1 << 24)) return -ENOTSUPP; dev_dbg(g->dev, "prescale divisor: %08x, resulting frequency %lu Hz\n", deb_div, (pclk_freq/deb_div)); val = readl(g->base + GPIO_DEBOUNCE_PRESCALE); if (val == deb_div) { /* * The debounce timer happens to already be set to the * desirable value, what a coincidence! We can just enable * debounce on this GPIO line and return. This happens more * often than you think, for example when all GPIO keys * on a system are requesting the same debounce interval. */ val = readl(g->base + GPIO_DEBOUNCE_EN); val |= BIT(offset); writel(val, g->base + GPIO_DEBOUNCE_EN); return 0; } val = readl(g->base + GPIO_DEBOUNCE_EN); if (val) { /* * Oh no! Someone is already using the debounce with * another setting than what we need. Bummer. */ return -ENOTSUPP; } /* First come, first serve */ writel(deb_div, g->base + GPIO_DEBOUNCE_PRESCALE); /* Enable debounce */ val |= BIT(offset); writel(val, g->base + GPIO_DEBOUNCE_EN); return 0; } static const struct irq_chip ftgpio_irq_chip = { .name = "FTGPIO010", .irq_ack = ftgpio_gpio_ack_irq, .irq_mask = ftgpio_gpio_mask_irq, .irq_unmask = ftgpio_gpio_unmask_irq, .irq_set_type = ftgpio_gpio_set_irq_type, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int ftgpio_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ftgpio_gpio *g; struct gpio_irq_chip *girq; int irq; int ret; g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL); if (!g) return -ENOMEM; g->dev = dev; g->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(g->base)) return PTR_ERR(g->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; g->clk = devm_clk_get(dev, NULL); if (!IS_ERR(g->clk)) { ret = clk_prepare_enable(g->clk); if (ret) return ret; } else if (PTR_ERR(g->clk) == -EPROBE_DEFER) { /* * Percolate deferrals, for anything else, * just live without the clocking. 
*/ return PTR_ERR(g->clk); } ret = bgpio_init(&g->gc, dev, 4, g->base + GPIO_DATA_IN, g->base + GPIO_DATA_SET, g->base + GPIO_DATA_CLR, g->base + GPIO_DIR, NULL, 0); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); goto dis_clk; } g->gc.label = dev_name(dev); g->gc.base = -1; g->gc.parent = dev; g->gc.owner = THIS_MODULE; /* ngpio is set by bgpio_init() */ /* We need a silicon clock to do debounce */ if (!IS_ERR(g->clk)) g->gc.set_config = ftgpio_gpio_set_config; girq = &g->gc.irq; gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip); girq->parent_handler = ftgpio_gpio_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { ret = -ENOMEM; goto dis_clk; } girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parents[0] = irq; /* Disable, unmask and clear all interrupts */ writel(0x0, g->base + GPIO_INT_EN); writel(0x0, g->base + GPIO_INT_MASK); writel(~0x0, g->base + GPIO_INT_CLR); /* Clear any use of debounce */ writel(0x0, g->base + GPIO_DEBOUNCE_EN); ret = devm_gpiochip_add_data(dev, &g->gc, g); if (ret) goto dis_clk; platform_set_drvdata(pdev, g); dev_info(dev, "FTGPIO010 @%p registered\n", g->base); return 0; dis_clk: clk_disable_unprepare(g->clk); return ret; } static int ftgpio_gpio_remove(struct platform_device *pdev) { struct ftgpio_gpio *g = platform_get_drvdata(pdev); clk_disable_unprepare(g->clk); return 0; } static const struct of_device_id ftgpio_gpio_of_match[] = { { .compatible = "cortina,gemini-gpio", }, { .compatible = "moxa,moxart-gpio", }, { .compatible = "faraday,ftgpio010", }, {}, }; static struct platform_driver ftgpio_gpio_driver = { .driver = { .name = "ftgpio010-gpio", .of_match_table = ftgpio_gpio_of_match, }, .probe = ftgpio_gpio_probe, .remove = ftgpio_gpio_remove, }; builtin_platform_driver(ftgpio_gpio_driver);
linux-master
drivers/gpio/gpio-ftgpio010.c
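The debounce path in ftgpio_gpio_set_config() turns the requested setting into a PCLK prescale divisor with round-to-closest division and rejects divisors that do not fit the 24-bit GPIO_DEBOUNCE_PRESCALE register. A host-side sketch of that math, reproducing the 66 MHz / 0x7D0 -> 33 kHz example from the driver comment:

/*
 * Sketch of the debounce prescaler calculation: divide the PCLK rate by
 * the requested value, round to closest, and check the 24-bit limit.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long pclk = 66000000;   /* 66 MHz PCLK, as in the manual */
	unsigned int arg = 33000;        /* requested setting */
	unsigned int div = DIV_ROUND_CLOSEST(pclk, arg);

	if (div > (1 << 24)) {           /* register is only 24 bits wide */
		fprintf(stderr, "divisor %u does not fit\n", div);
		return 1;
	}
	printf("prescale divisor 0x%x -> %lu Hz\n", div, pclk / div);
	return 0;                        /* prints 0x7d0 -> 33000 Hz */
}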
// SPDX-License-Identifier: GPL-2.0-or-later /* * GPIO Testing Device Driver * * Copyright (C) 2014 Kamlakant Patel <[email protected]> * Copyright (C) 2015-2016 Bamvor Jian Zhang <[email protected]> * Copyright (C) 2017 Bartosz Golaszewski <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/debugfs.h> #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irq_sim.h> #include <linux/irqdomain.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <linux/uaccess.h> #include "gpiolib.h" #define GPIO_MOCKUP_MAX_GC 10 /* * We're storing two values per chip: the GPIO base and the number * of GPIO lines. */ #define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2) /* Maximum of four properties + the sentinel. */ #define GPIO_MOCKUP_MAX_PROP 5 /* * struct gpio_pin_status - structure describing a GPIO status * @dir: Configures direction of gpio as "in" or "out" * @value: Configures status of the gpio as 0(low) or 1(high) */ struct gpio_mockup_line_status { int dir; int value; int pull; }; struct gpio_mockup_chip { struct gpio_chip gc; struct gpio_mockup_line_status *lines; struct irq_domain *irq_sim_domain; struct dentry *dbg_dir; struct mutex lock; }; struct gpio_mockup_dbgfs_private { struct gpio_mockup_chip *chip; struct gpio_desc *desc; unsigned int offset; }; static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES]; static int gpio_mockup_num_ranges; module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400); static bool gpio_mockup_named_lines; module_param_named(gpio_mockup_named_lines, gpio_mockup_named_lines, bool, 0400); static struct dentry *gpio_mockup_dbg_dir; static int gpio_mockup_range_base(unsigned int index) { return gpio_mockup_ranges[index * 2]; } static int gpio_mockup_range_ngpio(unsigned int index) { return gpio_mockup_ranges[index * 2 + 1]; } static int __gpio_mockup_get(struct gpio_mockup_chip *chip, unsigned int offset) { return chip->lines[offset].value; } static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); int val; mutex_lock(&chip->lock); val = __gpio_mockup_get(chip, offset); mutex_unlock(&chip->lock); return val; } static int gpio_mockup_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); unsigned int bit, val; mutex_lock(&chip->lock); for_each_set_bit(bit, mask, gc->ngpio) { val = __gpio_mockup_get(chip, bit); __assign_bit(bit, bits, val); } mutex_unlock(&chip->lock); return 0; } static void __gpio_mockup_set(struct gpio_mockup_chip *chip, unsigned int offset, int value) { chip->lines[offset].value = !!value; } static void gpio_mockup_set(struct gpio_chip *gc, unsigned int offset, int value) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); mutex_lock(&chip->lock); __gpio_mockup_set(chip, offset, value); mutex_unlock(&chip->lock); } static void gpio_mockup_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); unsigned int bit; mutex_lock(&chip->lock); for_each_set_bit(bit, mask, gc->ngpio) __gpio_mockup_set(chip, bit, test_bit(bit, bits)); mutex_unlock(&chip->lock); } static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, unsigned int offset, int value) { struct 
gpio_chip *gc = &chip->gc; struct gpio_desc *desc = gpiochip_get_desc(gc, offset); int curr, irq, irq_type, ret = 0; mutex_lock(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && !test_bit(FLAG_IS_OUT, &desc->flags)) { curr = __gpio_mockup_get(chip, offset); if (curr == value) goto out; irq = irq_find_mapping(chip->irq_sim_domain, offset); if (!irq) /* * This is fine - it just means, nobody is listening * for interrupts on this line, otherwise * irq_create_mapping() would have been called from * the to_irq() callback. */ goto set_value; irq_type = irq_get_trigger_type(irq); if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) { ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true); if (ret) goto out; } } set_value: /* Change the value unless we're actively driving the line. */ if (!test_bit(FLAG_REQUESTED, &desc->flags) || !test_bit(FLAG_IS_OUT, &desc->flags)) __gpio_mockup_set(chip, offset, value); out: chip->lines[offset].pull = value; mutex_unlock(&chip->lock); return ret; } static int gpio_mockup_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_PULL_UP: return gpio_mockup_apply_pull(chip, offset, 1); case PIN_CONFIG_BIAS_PULL_DOWN: return gpio_mockup_apply_pull(chip, offset, 0); default: break; } return -ENOTSUPP; } static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset, int value) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); mutex_lock(&chip->lock); chip->lines[offset].dir = GPIO_LINE_DIRECTION_OUT; __gpio_mockup_set(chip, offset, value); mutex_unlock(&chip->lock); return 0; } static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); mutex_lock(&chip->lock); chip->lines[offset].dir = GPIO_LINE_DIRECTION_IN; mutex_unlock(&chip->lock); return 0; } static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); int direction; mutex_lock(&chip->lock); direction = chip->lines[offset].dir; mutex_unlock(&chip->lock); return direction; } static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); return irq_create_mapping(chip->irq_sim_domain, offset); } static void gpio_mockup_free(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); __gpio_mockup_set(chip, offset, chip->lines[offset].pull); } static ssize_t gpio_mockup_debugfs_read(struct file *file, char __user *usr_buf, size_t size, loff_t *ppos) { struct gpio_mockup_dbgfs_private *priv; struct gpio_mockup_chip *chip; struct seq_file *sfile; struct gpio_chip *gc; int val, cnt; char buf[3]; if (*ppos != 0) return 0; sfile = file->private_data; priv = sfile->private; chip = priv->chip; gc = &chip->gc; val = gpio_mockup_get(gc, priv->offset); cnt = snprintf(buf, sizeof(buf), "%d\n", val); return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt); } static ssize_t gpio_mockup_debugfs_write(struct file *file, const char __user *usr_buf, size_t size, loff_t *ppos) { struct gpio_mockup_dbgfs_private *priv; int rv, val; struct seq_file *sfile; if (*ppos != 0) return -EINVAL; rv = kstrtoint_from_user(usr_buf, size, 0, &val); if (rv) return rv; if (val != 0 && val != 1) return -EINVAL; sfile = file->private_data; priv = sfile->private; rv = 
gpio_mockup_apply_pull(priv->chip, priv->offset, val); if (rv) return rv; return size; } static int gpio_mockup_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, NULL, inode->i_private); } /* * Each mockup chip is represented by a directory named after the chip's device * name under /sys/kernel/debug/gpio-mockup/. Each line is represented by * a file using the line's offset as the name under the chip's directory. * * Reading from the line's file yields the current *value*, writing to the * line's file changes the current *pull*. Default pull for mockup lines is * down. * * Examples: * - when a line pulled down is requested in output mode and driven high, its * value will return to 0 once it's released * - when the line is requested in output mode and driven high, writing 0 to * the corresponding debugfs file will change the pull to down but the * reported value will still be 1 until the line is released * - line requested in input mode always reports the same value as its pull * configuration * - when the line is requested in input mode and monitored for events, writing * the same value to the debugfs file will be a noop, while writing the * opposite value will generate a dummy interrupt with an appropriate edge */ static const struct file_operations gpio_mockup_debugfs_ops = { .owner = THIS_MODULE, .open = gpio_mockup_debugfs_open, .read = gpio_mockup_debugfs_read, .write = gpio_mockup_debugfs_write, .llseek = no_llseek, .release = single_release, }; static void gpio_mockup_debugfs_setup(struct device *dev, struct gpio_mockup_chip *chip) { struct gpio_mockup_dbgfs_private *priv; struct gpio_chip *gc; const char *devname; char *name; int i; gc = &chip->gc; devname = dev_name(&gc->gpiodev->dev); chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir); for (i = 0; i < gc->ngpio; i++) { name = devm_kasprintf(dev, GFP_KERNEL, "%d", i); if (!name) return; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return; priv->chip = chip; priv->offset = i; priv->desc = gpiochip_get_desc(gc, i); debugfs_create_file(name, 0600, chip->dbg_dir, priv, &gpio_mockup_debugfs_ops); } } static void gpio_mockup_debugfs_cleanup(void *data) { struct gpio_mockup_chip *chip = data; debugfs_remove_recursive(chip->dbg_dir); } static void gpio_mockup_dispose_mappings(void *data) { struct gpio_mockup_chip *chip = data; struct gpio_chip *gc = &chip->gc; int i, irq; for (i = 0; i < gc->ngpio; i++) { irq = irq_find_mapping(chip->irq_sim_domain, i); if (irq) irq_dispose_mapping(irq); } } static int gpio_mockup_probe(struct platform_device *pdev) { struct gpio_mockup_chip *chip; struct gpio_chip *gc; struct device *dev; const char *name; int rv, base, i; u16 ngpio; dev = &pdev->dev; rv = device_property_read_u32(dev, "gpio-base", &base); if (rv) base = -1; rv = device_property_read_u16(dev, "nr-gpios", &ngpio); if (rv) return rv; rv = device_property_read_string(dev, "chip-label", &name); if (rv) name = dev_name(dev); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; mutex_init(&chip->lock); gc = &chip->gc; gc->base = base; gc->ngpio = ngpio; gc->label = name; gc->owner = THIS_MODULE; gc->parent = dev; gc->get = gpio_mockup_get; gc->set = gpio_mockup_set; gc->get_multiple = gpio_mockup_get_multiple; gc->set_multiple = gpio_mockup_set_multiple; gc->direction_output = gpio_mockup_dirout; gc->direction_input = gpio_mockup_dirin; gc->get_direction = gpio_mockup_get_direction; gc->set_config = gpio_mockup_set_config; gc->to_irq = gpio_mockup_to_irq; 
gc->free = gpio_mockup_free; chip->lines = devm_kcalloc(dev, gc->ngpio, sizeof(*chip->lines), GFP_KERNEL); if (!chip->lines) return -ENOMEM; for (i = 0; i < gc->ngpio; i++) chip->lines[i].dir = GPIO_LINE_DIRECTION_IN; chip->irq_sim_domain = devm_irq_domain_create_sim(dev, NULL, gc->ngpio); if (IS_ERR(chip->irq_sim_domain)) return PTR_ERR(chip->irq_sim_domain); rv = devm_add_action_or_reset(dev, gpio_mockup_dispose_mappings, chip); if (rv) return rv; rv = devm_gpiochip_add_data(dev, &chip->gc, chip); if (rv) return rv; gpio_mockup_debugfs_setup(dev, chip); return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip); } static const struct of_device_id gpio_mockup_of_match[] = { { .compatible = "gpio-mockup", }, {}, }; MODULE_DEVICE_TABLE(of, gpio_mockup_of_match); static struct platform_driver gpio_mockup_driver = { .driver = { .name = "gpio-mockup", .of_match_table = gpio_mockup_of_match, }, .probe = gpio_mockup_probe, }; static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC]; static void gpio_mockup_unregister_pdevs(void) { struct platform_device *pdev; struct fwnode_handle *fwnode; int i; for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) { pdev = gpio_mockup_pdevs[i]; if (!pdev) continue; fwnode = dev_fwnode(&pdev->dev); platform_device_unregister(pdev); fwnode_remove_software_node(fwnode); } } static int __init gpio_mockup_register_chip(int idx) { struct property_entry properties[GPIO_MOCKUP_MAX_PROP]; struct platform_device_info pdevinfo; struct platform_device *pdev; struct fwnode_handle *fwnode; char **line_names = NULL; char chip_label[32]; int prop = 0, base; u16 ngpio; memset(properties, 0, sizeof(properties)); memset(&pdevinfo, 0, sizeof(pdevinfo)); snprintf(chip_label, sizeof(chip_label), "gpio-mockup-%c", idx + 'A'); properties[prop++] = PROPERTY_ENTRY_STRING("chip-label", chip_label); base = gpio_mockup_range_base(idx); if (base >= 0) properties[prop++] = PROPERTY_ENTRY_U32("gpio-base", base); ngpio = base < 0 ? gpio_mockup_range_ngpio(idx) : gpio_mockup_range_ngpio(idx) - base; properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio); if (gpio_mockup_named_lines) { line_names = kasprintf_strarray(GFP_KERNEL, chip_label, ngpio); if (!line_names) return -ENOMEM; properties[prop++] = PROPERTY_ENTRY_STRING_ARRAY_LEN( "gpio-line-names", line_names, ngpio); } fwnode = fwnode_create_software_node(properties, NULL); if (IS_ERR(fwnode)) { kfree_strarray(line_names, ngpio); return PTR_ERR(fwnode); } pdevinfo.name = "gpio-mockup"; pdevinfo.id = idx; pdevinfo.fwnode = fwnode; pdev = platform_device_register_full(&pdevinfo); kfree_strarray(line_names, ngpio); if (IS_ERR(pdev)) { fwnode_remove_software_node(fwnode); pr_err("error registering device"); return PTR_ERR(pdev); } gpio_mockup_pdevs[idx] = pdev; return 0; } static int __init gpio_mockup_init(void) { int i, num_chips, err; if ((gpio_mockup_num_ranges % 2) || (gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES)) return -EINVAL; /* Each chip is described by two values. */ num_chips = gpio_mockup_num_ranges / 2; /* * The second value in the <base GPIO - number of GPIOS> pair must * always be greater than 0. 
*/ for (i = 0; i < num_chips; i++) { if (gpio_mockup_range_ngpio(i) < 0) return -EINVAL; } gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup", NULL); err = platform_driver_register(&gpio_mockup_driver); if (err) { pr_err("error registering platform driver\n"); debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } for (i = 0; i < num_chips; i++) { err = gpio_mockup_register_chip(i); if (err) { platform_driver_unregister(&gpio_mockup_driver); gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } } return 0; } static void __exit gpio_mockup_exit(void) { gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); platform_driver_unregister(&gpio_mockup_driver); } module_init(gpio_mockup_init); module_exit(gpio_mockup_exit); MODULE_AUTHOR("Kamlakant Patel <[email protected]>"); MODULE_AUTHOR("Bamvor Jian Zhang <[email protected]>"); MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>"); MODULE_DESCRIPTION("GPIO Testing driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-mockup.c
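The debugfs contract documented in the driver comment above (reading a line's file yields its value, writing 0/1 changes its pull) can be exercised from userspace. A sketch follows; the chip directory name "gpiochip0" is an assumption, since the real name is whatever device name the mockup chip was given on a particular system.

/*
 * Userspace sketch: pull mockup line 0 up via debugfs, then read the
 * value back. Both handlers require the file position to be zero.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/gpio-mockup/gpiochip0/0";
	char buf[4];
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)            /* set the pull up */
		perror("write");
	lseek(fd, 0, SEEK_SET);
	if (read(fd, buf, sizeof(buf)) > 0)    /* expect "1" for an input line */
		printf("value: %c\n", buf[0]);
	close(fd);
	return 0;
}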
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the ps-mode pin configuration. * * Copyright (c) 2021 Xilinx, Inc. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/firmware/xlnx-zynqmp.h> /* 4-bit boot mode pins */ #define MODE_PINS 4 /** * modepin_gpio_get_value - Get the state of the specified pin of GPIO device * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * * This function reads the state of the specified pin of the GPIO device. * * Return: 0 if the pin is low, 1 if pin is high, -EINVAL wrong pin configured * or error value. */ static int modepin_gpio_get_value(struct gpio_chip *chip, unsigned int pin) { u32 regval = 0; int ret; ret = zynqmp_pm_bootmode_read(&regval); if (ret) return ret; /* When [0:3] corresponding bit is set, then read output bit [8:11], * if the bit is clear then read input bit [4:7] for status or value. */ if (regval & BIT(pin)) return !!(regval & BIT(pin + 8)); else return !!(regval & BIT(pin + 4)); } /** * modepin_gpio_set_value - Modify the state of the pin with specified value * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * @state: value used to modify the state of the specified pin * * This function reads the state of the specified pin of the GPIO device, mask * with the capture state of GPIO pin, and update pin of GPIO device. * * Return: None. */ static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin, int state) { u32 bootpin_val = 0; int ret; zynqmp_pm_bootmode_read(&bootpin_val); /* Configure pin as an output by set bit [0:3] */ bootpin_val |= BIT(pin); if (state) bootpin_val |= BIT(pin + 8); else bootpin_val &= ~BIT(pin + 8); /* Configure bootpin value */ ret = zynqmp_pm_bootmode_write(bootpin_val); if (ret) pr_err("modepin: set value error %d for pin %d\n", ret, pin); } /** * modepin_gpio_dir_in - Set the direction of the specified GPIO pin as input * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * * Return: 0 always */ static int modepin_gpio_dir_in(struct gpio_chip *chip, unsigned int pin) { return 0; } /** * modepin_gpio_dir_out - Set the direction of the specified GPIO pin as output * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * @state: value to be written to specified pin * * Return: 0 always */ static int modepin_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int state) { return 0; } /** * modepin_gpio_probe - Initialization method for modepin_gpio * @pdev: platform device instance * * Return: 0 on success, negative error otherwise. 
*/ static int modepin_gpio_probe(struct platform_device *pdev) { struct gpio_chip *chip; int status; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; platform_set_drvdata(pdev, chip); /* configure the gpio chip */ chip->base = -1; chip->ngpio = MODE_PINS; chip->owner = THIS_MODULE; chip->parent = &pdev->dev; chip->get = modepin_gpio_get_value; chip->set = modepin_gpio_set_value; chip->direction_input = modepin_gpio_dir_in; chip->direction_output = modepin_gpio_dir_out; chip->label = dev_name(&pdev->dev); /* modepin gpio registration */ status = devm_gpiochip_add_data(&pdev->dev, chip, chip); if (status) return dev_err_probe(&pdev->dev, status, "Failed to add GPIO chip\n"); return status; } static const struct of_device_id modepin_platform_id[] = { { .compatible = "xlnx,zynqmp-gpio-modepin", }, { } }; static struct platform_driver modepin_platform_driver = { .driver = { .name = "modepin-gpio", .of_match_table = modepin_platform_id, }, .probe = modepin_gpio_probe, }; module_platform_driver(modepin_platform_driver); MODULE_AUTHOR("Piyush Mehta <[email protected]>"); MODULE_DESCRIPTION("ZynqMP Boot PS_MODE Configuration"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-zynqmp-modepin.c
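modepin_gpio_get_value() and modepin_gpio_set_value() above both decode the same layout of the boot-mode register: bits [0:3] enable output mode per pin, [4:7] report the sampled input, and [8:11] hold the driven level. A host-side sketch of that decoding, using a made-up register value in place of the zynqmp_pm_bootmode_read() result:

/*
 * Sketch of the PS_MODE register bit layout: output-enable in [0:3],
 * input status in [4:7], driven output level in [8:11].
 */
#include <stdint.h>
#include <stdio.h>

static int modepin_get(uint32_t reg, unsigned int pin)
{
	if (reg & (1U << pin))                 /* pin driven as output? */
		return !!(reg & (1U << (pin + 8)));
	return !!(reg & (1U << (pin + 4)));    /* else read input status */
}

static uint32_t modepin_set(uint32_t reg, unsigned int pin, int state)
{
	reg |= 1U << pin;                      /* enable output on pin */
	if (state)
		reg |= 1U << (pin + 8);
	else
		reg &= ~(1U << (pin + 8));
	return reg;
}

int main(void)
{
	uint32_t reg = 0x020;                  /* sample: pin 1 input high */

	printf("pin 1 in:  %d\n", modepin_get(reg, 1));
	reg = modepin_set(reg, 1, 0);          /* drive pin 1 low */
	printf("pin 1 out: %d\n", modepin_get(reg, 1));
	return 0;
}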
// SPDX-License-Identifier: GPL-2.0+
/*
 * TI TPS6591x GPIO driver
 *
 * Copyright 2010 Texas Instruments Inc.
 *
 * Author: Graeme Gregory <[email protected]>
 * Author: Jorge Eduardo Candelaria <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
#include <linux/of.h>

struct tps65910_gpio {
	struct gpio_chip gpio_chip;
	struct tps65910 *tps65910;
};

static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
	struct tps65910 *tps65910 = tps65910_gpio->tps65910;
	unsigned int val;

	regmap_read(tps65910->regmap, TPS65910_GPIO0 + offset, &val);

	if (val & GPIO_STS_MASK)
		return 1;

	return 0;
}

static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
			      int value)
{
	struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
	struct tps65910 *tps65910 = tps65910_gpio->tps65910;

	if (value)
		regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
				GPIO_SET_MASK);
	else
		regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
				  GPIO_SET_MASK);
}

static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
				int value)
{
	struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
	struct tps65910 *tps65910 = tps65910_gpio->tps65910;

	/* Set the initial value */
	tps65910_gpio_set(gc, offset, value);

	return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
			       GPIO_CFG_MASK);
}

static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
{
	struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
	struct tps65910 *tps65910 = tps65910_gpio->tps65910;

	return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
				 GPIO_CFG_MASK);
}

#ifdef CONFIG_OF
static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
		struct tps65910 *tps65910, int chip_ngpio)
{
	struct tps65910_board *tps65910_board = tps65910->of_plat_data;
	unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
	int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
	int ret;
	int idx;

	tps65910_board->gpio_base = -1;
	ret = of_property_read_u32_array(tps65910->dev->of_node,
			"ti,en-gpio-sleep", prop_array, ngpio);
	if (ret < 0) {
		dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
		return tps65910_board;
	}

	for (idx = 0; idx < ngpio; idx++)
		tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);

	return tps65910_board;
}
#else
static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
		struct tps65910 *tps65910, int chip_ngpio)
{
	return NULL;
}
#endif

static int tps65910_gpio_probe(struct platform_device *pdev)
{
	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
	struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
	struct tps65910_gpio *tps65910_gpio;
	int ret;
	int i;

	device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));

	tps65910_gpio = devm_kzalloc(&pdev->dev,
				     sizeof(*tps65910_gpio), GFP_KERNEL);
	if (!tps65910_gpio)
		return -ENOMEM;

	tps65910_gpio->tps65910 = tps65910;

	tps65910_gpio->gpio_chip.owner = THIS_MODULE;
	tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;

	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
		break;
	case TPS65911:
		tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
		break;
	default:
		return -EINVAL;
	}
	tps65910_gpio->gpio_chip.can_sleep = true;
	tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
	tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
	tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
	tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
	tps65910_gpio->gpio_chip.parent = &pdev->dev;

	if (pdata && pdata->gpio_base)
		tps65910_gpio->gpio_chip.base = pdata->gpio_base;
	else
		tps65910_gpio->gpio_chip.base = -1;

	if (!pdata && tps65910->dev->of_node)
		pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
					tps65910_gpio->gpio_chip.ngpio);

	if (!pdata)
		goto skip_init;

	/* Configure sleep control for gpios if provided */
	for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
		if (!pdata->en_gpio_sleep[i])
			continue;

		ret = regmap_set_bits(tps65910->regmap,
				      TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
		if (ret < 0)
			dev_warn(tps65910->dev,
				 "GPIO Sleep setting failed with err %d\n",
				 ret);
	}

skip_init:
	return devm_gpiochip_add_data(&pdev->dev, &tps65910_gpio->gpio_chip,
				      tps65910_gpio);
}

static struct platform_driver tps65910_gpio_driver = {
	.driver.name	= "tps65910-gpio",
	.probe		= tps65910_gpio_probe,
};

static int __init tps65910_gpio_init(void)
{
	return platform_driver_register(&tps65910_gpio_driver);
}
subsys_initcall(tps65910_gpio_init);
linux-master
drivers/gpio/gpio-tps65910.c
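A minimal consumer-side sketch of how another driver might use one of these PMIC lines through the standard gpiod API. The device, the "enable" con_id, and the function name are assumptions for illustration; only the gpiod calls themselves are real kernel API.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Illustrative only: not part of the driver above */
static int example_claim_pmic_gpio(struct device *dev)
{
	struct gpio_desc *en;

	/* Resolves through the tps65910 gpio_chip ops registered above */
	en = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(en))
		return PTR_ERR(en);

	/* can_sleep is set (I2C access), so use the _cansleep accessor */
	gpiod_set_value_cansleep(en, 0);

	return 0;
}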
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2023 Texas Instruments Incorporated - https://www.ti.com/
 *	Andrew Davis <[email protected]>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>

#define DEFAULT_NGPIO 8

/**
 * struct pisosr_gpio - GPIO driver data
 * @chip: GPIO controller chip
 * @spi: SPI device pointer
 * @buffer: Buffer for device reads
 * @buffer_size: Size of buffer
 * @load_gpio: GPIO pin used to load input into device
 * @lock: Protects read sequences
 */
struct pisosr_gpio {
	struct gpio_chip chip;
	struct spi_device *spi;
	u8 *buffer;
	size_t buffer_size;
	struct gpio_desc *load_gpio;
	struct mutex lock;
};

static int pisosr_gpio_refresh(struct pisosr_gpio *gpio)
{
	int ret;

	mutex_lock(&gpio->lock);

	if (gpio->load_gpio) {
		gpiod_set_value_cansleep(gpio->load_gpio, 1);
		udelay(1); /* registers load time (~10ns) */
		gpiod_set_value_cansleep(gpio->load_gpio, 0);
		udelay(1); /* registers recovery time (~5ns) */
	}

	ret = spi_read(gpio->spi, gpio->buffer, gpio->buffer_size);

	mutex_unlock(&gpio->lock);

	return ret;
}

static int pisosr_gpio_get_direction(struct gpio_chip *chip,
				     unsigned offset)
{
	/* This device is input only */
	return GPIO_LINE_DIRECTION_IN;
}

static int pisosr_gpio_direction_input(struct gpio_chip *chip,
				       unsigned offset)
{
	/* This device is input only */
	return 0;
}

static int pisosr_gpio_direction_output(struct gpio_chip *chip,
					unsigned offset, int value)
{
	/* This device is input only */
	return -EINVAL;
}

static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct pisosr_gpio *gpio = gpiochip_get_data(chip);

	/* Refresh may not always be needed */
	pisosr_gpio_refresh(gpio);

	return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
}

static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
				    unsigned long *mask, unsigned long *bits)
{
	struct pisosr_gpio *gpio = gpiochip_get_data(chip);
	unsigned long offset;
	unsigned long gpio_mask;
	unsigned long buffer_state;

	pisosr_gpio_refresh(gpio);

	bitmap_zero(bits, chip->ngpio);
	for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
		buffer_state = gpio->buffer[offset / 8] & gpio_mask;
		bitmap_set_value8(bits, buffer_state, offset);
	}

	return 0;
}

static const struct gpio_chip template_chip = {
	.label			= "pisosr-gpio",
	.owner			= THIS_MODULE,
	.get_direction		= pisosr_gpio_get_direction,
	.direction_input	= pisosr_gpio_direction_input,
	.direction_output	= pisosr_gpio_direction_output,
	.get			= pisosr_gpio_get,
	.get_multiple		= pisosr_gpio_get_multiple,
	.base			= -1,
	.ngpio			= DEFAULT_NGPIO,
	.can_sleep		= true,
};

static void pisosr_mutex_destroy(void *lock)
{
	mutex_destroy(lock);
}

static int pisosr_gpio_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct pisosr_gpio *gpio;
	int ret;

	gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;

	gpio->chip = template_chip;
	gpio->chip.parent = dev;
	of_property_read_u16(dev->of_node, "ngpios", &gpio->chip.ngpio);

	gpio->spi = spi;

	gpio->buffer_size = DIV_ROUND_UP(gpio->chip.ngpio, 8);
	gpio->buffer = devm_kzalloc(dev, gpio->buffer_size, GFP_KERNEL);
	if (!gpio->buffer)
		return -ENOMEM;

	gpio->load_gpio = devm_gpiod_get_optional(dev, "load", GPIOD_OUT_LOW);
	if (IS_ERR(gpio->load_gpio))
		return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
				     "Unable to allocate load GPIO\n");

	mutex_init(&gpio->lock);
	ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
	if (ret)
		return ret;

	ret = devm_gpiochip_add_data(dev, &gpio->chip, gpio);
	if (ret < 0) {
		dev_err(dev, "Unable to register gpiochip\n");
		return ret;
	}

	return 0;
}

static const struct spi_device_id pisosr_gpio_id_table[] = {
	{ "pisosr-gpio", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, pisosr_gpio_id_table);

static const struct of_device_id pisosr_gpio_of_match_table[] = {
	{ .compatible = "pisosr-gpio", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pisosr_gpio_of_match_table);

static struct spi_driver pisosr_gpio_driver = {
	.driver = {
		.name = "pisosr-gpio",
		.of_match_table = pisosr_gpio_of_match_table,
	},
	.probe = pisosr_gpio_probe,
	.id_table = pisosr_gpio_id_table,
};
module_spi_driver(pisosr_gpio_driver);

MODULE_AUTHOR("Andrew Davis <[email protected]>");
MODULE_DESCRIPTION("SPI Compatible PISO Shift Register GPIO Driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-pisosr.c
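The get() path above indexes the SPI read buffer with plain byte/bit arithmetic. A self-contained restatement of that mapping, for illustration only, under the same layout assumption (line N is bit N % 8 of buffer[N / 8]):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors pisosr_gpio_get()'s buffer indexing for a given line offset */
static bool pisosr_line_is_high(const uint8_t *buffer, unsigned int offset)
{
	return (buffer[offset / 8] >> (offset % 8)) & 0x1;
}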
// SPDX-License-Identifier: GPL-2.0+
/*
 * Software Node helpers for the GPIO API
 *
 * Copyright 2022 Google LLC
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/property.h>
#include <linux/string.h>

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

#include "gpiolib.h"
#include "gpiolib-swnode.h"

static void swnode_format_propname(const char *con_id, char *propname,
				   size_t max_size)
{
	/*
	 * Note we do not need to try both -gpios and -gpio suffixes,
	 * as, unlike OF and ACPI, we can fix software nodes to conform
	 * to the proper binding.
	 */
	if (con_id)
		snprintf(propname, max_size, "%s-gpios", con_id);
	else
		strscpy(propname, "gpios", max_size);
}

static int swnode_gpiochip_match_name(struct gpio_chip *chip, void *data)
{
	return !strcmp(chip->label, data);
}

static struct gpio_chip *swnode_get_chip(struct fwnode_handle *fwnode)
{
	const struct software_node *chip_node;
	struct gpio_chip *chip;

	chip_node = to_software_node(fwnode);
	if (!chip_node || !chip_node->name)
		return ERR_PTR(-EINVAL);

	chip = gpiochip_find((void *)chip_node->name,
			     swnode_gpiochip_match_name);
	return chip ?: ERR_PTR(-EPROBE_DEFER);
}

struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
				   const char *con_id, unsigned int idx,
				   unsigned long *flags)
{
	const struct software_node *swnode;
	struct fwnode_reference_args args;
	struct gpio_chip *chip;
	struct gpio_desc *desc;
	char propname[32]; /* 32 is max size of property name */
	int error;

	swnode = to_software_node(fwnode);
	if (!swnode)
		return ERR_PTR(-EINVAL);

	swnode_format_propname(con_id, propname, sizeof(propname));

	/*
	 * We expect all swnode-described GPIOs have GPIO number and
	 * polarity arguments, hence nargs is set to 2.
	 */
	error = fwnode_property_get_reference_args(fwnode, propname, NULL, 2,
						   idx, &args);
	if (error) {
		pr_debug("%s: can't parse '%s' property of node '%pfwP[%d]'\n",
			 __func__, propname, fwnode, idx);
		return ERR_PTR(error);
	}

	chip = swnode_get_chip(args.fwnode);
	fwnode_handle_put(args.fwnode);
	if (IS_ERR(chip))
		return ERR_CAST(chip);

	desc = gpiochip_get_desc(chip, args.args[0]);
	*flags = args.args[1]; /* We expect native GPIO flags */

	pr_debug("%s: parsed '%s' property of node '%pfwP[%d]' - status (%d)\n",
		 __func__, propname, fwnode, idx, PTR_ERR_OR_ZERO(desc));

	return desc;
}

/**
 * swnode_gpio_count - count the GPIOs associated with a device / function
 * @fwnode: firmware node of the GPIO consumer, can be %NULL for
 *          system-global GPIOs
 * @con_id: function within the GPIO consumer
 *
 * Return:
 * The number of GPIOs associated with a device / function or %-ENOENT,
 * if no GPIO has been assigned to the requested function.
 */
int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
{
	struct fwnode_reference_args args;
	char propname[32];
	int count;

	swnode_format_propname(con_id, propname, sizeof(propname));

	/*
	 * This is not very efficient, but GPIO lists usually have only
	 * 1 or 2 entries.
	 */
	count = 0;
	while (fwnode_property_get_reference_args(fwnode, propname, NULL, 0,
						  count, &args) == 0) {
		fwnode_handle_put(args.fwnode);
		count++;
	}

	return count ?: -ENOENT;
}
linux-master
drivers/gpio/gpiolib-swnode.c
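For reference, a hedged sketch of the producer side this lookup expects: a software node whose name matches the target gpio_chip's label, plus a two-argument reference (line index, polarity) matching the nargs == 2 expectation in swnode_find_gpio(). All names below are made up for illustration.

#include <linux/property.h>
#include <dt-bindings/gpio/gpio.h>

/* The node name must equal the gpio_chip's label for swnode_get_chip() */
static const struct software_node example_chip_node = {
	.name = "example-gpio-bank",
};

/* "reset-gpios" is what swnode_format_propname() builds for con_id "reset" */
static const struct property_entry example_consumer_props[] = {
	PROPERTY_ENTRY_REF("reset-gpios", &example_chip_node,
			   17, GPIO_ACTIVE_LOW),
	{ }
};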
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Pengutronix, Uwe Kleine-König <[email protected]>
 */

#include <linux/module.h>
#include <linux/siox.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>

struct gpio_siox_ddata {
	struct gpio_chip gchip;
	struct mutex lock;
	u8 setdata[1];
	u8 getdata[3];

	raw_spinlock_t irqlock;
	u32 irq_enable;
	u32 irq_status;
	u32 irq_type[20];
};

/*
 * Note that this callback only sets the value that is clocked out in the next
 * cycle.
 */
static int gpio_siox_set_data(struct siox_device *sdevice, u8 status, u8 buf[])
{
	struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);

	mutex_lock(&ddata->lock);
	buf[0] = ddata->setdata[0];
	mutex_unlock(&ddata->lock);

	return 0;
}

static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
{
	struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);
	size_t offset;
	u32 trigger;

	mutex_lock(&ddata->lock);

	raw_spin_lock_irq(&ddata->irqlock);

	for (offset = 0; offset < 12; ++offset) {
		unsigned int bitpos = 11 - offset;
		unsigned int gpiolevel = buf[bitpos / 8] & (1 << bitpos % 8);
		unsigned int prev_level =
			ddata->getdata[bitpos / 8] & (1 << (bitpos % 8));
		u32 irq_type = ddata->irq_type[offset];

		if (gpiolevel) {
			if ((irq_type & IRQ_TYPE_LEVEL_HIGH) ||
			    ((irq_type & IRQ_TYPE_EDGE_RISING) && !prev_level))
				ddata->irq_status |= 1 << offset;
		} else {
			if ((irq_type & IRQ_TYPE_LEVEL_LOW) ||
			    ((irq_type & IRQ_TYPE_EDGE_FALLING) && prev_level))
				ddata->irq_status |= 1 << offset;
		}
	}

	trigger = ddata->irq_status & ddata->irq_enable;

	raw_spin_unlock_irq(&ddata->irqlock);

	ddata->getdata[0] = buf[0];
	ddata->getdata[1] = buf[1];
	ddata->getdata[2] = buf[2];

	mutex_unlock(&ddata->lock);

	for (offset = 0; offset < 12; ++offset) {
		if (trigger & (1 << offset)) {
			struct irq_domain *irqdomain = ddata->gchip.irq.domain;
			unsigned int irq = irq_find_mapping(irqdomain, offset);

			/*
			 * Conceptually handle_nested_irq should call the flow
			 * handler of the irq chip. But it doesn't, so we have
			 * to clean the irq_status here.
			 */
			raw_spin_lock_irq(&ddata->irqlock);
			ddata->irq_status &= ~(1 << offset);
			raw_spin_unlock_irq(&ddata->irqlock);

			handle_nested_irq(irq);
		}
	}

	return 0;
}

static void gpio_siox_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);

	raw_spin_lock(&ddata->irqlock);
	ddata->irq_status &= ~(1 << d->hwirq);
	raw_spin_unlock(&ddata->irqlock);
}

static void gpio_siox_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);

	raw_spin_lock(&ddata->irqlock);
	ddata->irq_enable &= ~(1 << d->hwirq);
	raw_spin_unlock(&ddata->irqlock);
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void gpio_siox_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	raw_spin_lock(&ddata->irqlock);
	ddata->irq_enable |= 1 << d->hwirq;
	raw_spin_unlock(&ddata->irqlock);
}

static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_siox_ddata *ddata = gpiochip_get_data(gc);

	raw_spin_lock(&ddata->irqlock);
	ddata->irq_type[d->hwirq] = type;
	raw_spin_unlock(&ddata->irqlock);

	return 0;
}

static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
	int ret;

	mutex_lock(&ddata->lock);

	if (offset >= 12) {
		unsigned int bitpos = 19 - offset;

		ret = ddata->setdata[0] & (1 << bitpos);
	} else {
		unsigned int bitpos = 11 - offset;

		ret = ddata->getdata[bitpos / 8] & (1 << (bitpos % 8));
	}

	mutex_unlock(&ddata->lock);

	return ret;
}

static void gpio_siox_set(struct gpio_chip *chip,
			  unsigned int offset, int value)
{
	struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
	u8 mask = 1 << (19 - offset);

	mutex_lock(&ddata->lock);

	if (value)
		ddata->setdata[0] |= mask;
	else
		ddata->setdata[0] &= ~mask;

	mutex_unlock(&ddata->lock);
}

static int gpio_siox_direction_input(struct gpio_chip *chip,
				     unsigned int offset)
{
	if (offset >= 12)
		return -EINVAL;

	return 0;
}

static int gpio_siox_direction_output(struct gpio_chip *chip,
				      unsigned int offset, int value)
{
	if (offset < 12)
		return -EINVAL;

	gpio_siox_set(chip, offset, value);
	return 0;
}

static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	if (offset < 12)
		return GPIO_LINE_DIRECTION_IN;
	else
		return GPIO_LINE_DIRECTION_OUT;
}

static const struct irq_chip gpio_siox_irq_chip = {
	.name = "siox-gpio",
	.irq_ack = gpio_siox_irq_ack,
	.irq_mask = gpio_siox_irq_mask,
	.irq_unmask = gpio_siox_irq_unmask,
	.irq_set_type = gpio_siox_irq_set_type,
	.flags = IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static int gpio_siox_probe(struct siox_device *sdevice)
{
	struct gpio_siox_ddata *ddata;
	struct gpio_irq_chip *girq;
	struct device *dev = &sdevice->dev;
	struct gpio_chip *gc;
	int ret;

	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	dev_set_drvdata(dev, ddata);

	mutex_init(&ddata->lock);
	raw_spin_lock_init(&ddata->irqlock);

	gc = &ddata->gchip;
	gc->base = -1;
	gc->can_sleep = 1;
	gc->parent = dev;
	gc->owner = THIS_MODULE;
	gc->get = gpio_siox_get;
	gc->set = gpio_siox_set;
	gc->direction_input = gpio_siox_direction_input;
	gc->direction_output = gpio_siox_direction_output;
	gc->get_direction = gpio_siox_get_direction;
	gc->ngpio = 20;

	girq = &gc->irq;
	gpio_irq_chip_set_chip(girq, &gpio_siox_irq_chip);
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;
	girq->threaded = true;

	ret = devm_gpiochip_add_data(dev, gc, ddata);
	if (ret)
		dev_err(dev, "Failed to register gpio chip (%d)\n", ret);

	return ret;
}

static struct siox_driver gpio_siox_driver = {
	.probe = gpio_siox_probe,
	.set_data = gpio_siox_set_data,
	.get_data = gpio_siox_get_data,
	.driver = {
		.name = "gpio-siox",
	},
};
module_siox_driver(gpio_siox_driver);

MODULE_AUTHOR("Uwe Kleine-Koenig <[email protected]>");
MODULE_DESCRIPTION("SIOX gpio driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-siox.c
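A short illustrative restatement (not driver code) of the pin numbering the callbacks above imply: offsets 0..11 are inputs stored MSB-first across getdata[], offsets 12..19 are outputs packed MSB-first into setdata[0].

/* Input line 0..11 -> bit (bitpos % 8) of getdata[bitpos / 8] */
static unsigned int siox_in_bitpos(unsigned int offset)
{
	return 11 - offset;
}

/* Output line 12..19 -> bit within setdata[0] */
static unsigned int siox_out_bitpos(unsigned int offset)
{
	return 19 - offset;
}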
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI Palmas series PMIC GPIO driver.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Laxman Dewangan <[email protected]>
 */

#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct palmas_gpio {
	struct gpio_chip gpio_chip;
	struct palmas *palmas;
};

struct palmas_device_data {
	int ngpio;
};

static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct palmas_gpio *pg = gpiochip_get_data(gc);
	struct palmas *palmas = pg->palmas;
	unsigned int val;
	int ret;
	unsigned int reg;
	int gpio16 = (offset/8);

	offset %= 8;
	reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;

	ret = palmas_read(palmas, PALMAS_GPIO_BASE, reg, &val);
	if (ret < 0) {
		dev_err(gc->parent, "Reg 0x%02x read failed, %d\n", reg, ret);
		return ret;
	}

	if (val & BIT(offset))
		reg = (gpio16) ? PALMAS_GPIO_DATA_OUT2 : PALMAS_GPIO_DATA_OUT;
	else
		reg = (gpio16) ? PALMAS_GPIO_DATA_IN2 : PALMAS_GPIO_DATA_IN;

	ret = palmas_read(palmas, PALMAS_GPIO_BASE, reg, &val);
	if (ret < 0) {
		dev_err(gc->parent, "Reg 0x%02x read failed, %d\n", reg, ret);
		return ret;
	}
	return !!(val & BIT(offset));
}

static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
			    int value)
{
	struct palmas_gpio *pg = gpiochip_get_data(gc);
	struct palmas *palmas = pg->palmas;
	int ret;
	unsigned int reg;
	int gpio16 = (offset/8);

	offset %= 8;
	if (gpio16)
		reg = (value) ?
			PALMAS_GPIO_SET_DATA_OUT2 : PALMAS_GPIO_CLEAR_DATA_OUT2;
	else
		reg = (value) ?
			PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT;

	ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
	if (ret < 0)
		dev_err(gc->parent, "Reg 0x%02x write failed, %d\n", reg, ret);
}

static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
			      int value)
{
	struct palmas_gpio *pg = gpiochip_get_data(gc);
	struct palmas *palmas = pg->palmas;
	int ret;
	unsigned int reg;
	int gpio16 = (offset/8);

	offset %= 8;
	reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;

	/* Set the initial value */
	palmas_gpio_set(gc, offset, value);

	ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg,
				 BIT(offset), BIT(offset));
	if (ret < 0)
		dev_err(gc->parent, "Reg 0x%02x update failed, %d\n", reg,
			ret);
	return ret;
}

static int palmas_gpio_input(struct gpio_chip *gc, unsigned offset)
{
	struct palmas_gpio *pg = gpiochip_get_data(gc);
	struct palmas *palmas = pg->palmas;
	int ret;
	unsigned int reg;
	int gpio16 = (offset/8);

	offset %= 8;
	reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;

	ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg, BIT(offset), 0);
	if (ret < 0)
		dev_err(gc->parent, "Reg 0x%02x update failed, %d\n", reg,
			ret);
	return ret;
}

static int palmas_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct palmas_gpio *pg = gpiochip_get_data(gc);
	struct palmas *palmas = pg->palmas;

	return palmas_irq_get_virq(palmas, PALMAS_GPIO_0_IRQ + offset);
}

static const struct palmas_device_data palmas_dev_data = {
	.ngpio = 8,
};

static const struct palmas_device_data tps80036_dev_data = {
	.ngpio = 16,
};

static const struct of_device_id of_palmas_gpio_match[] = {
	{ .compatible = "ti,palmas-gpio", .data = &palmas_dev_data,},
	{ .compatible = "ti,tps65913-gpio", .data = &palmas_dev_data,},
	{ .compatible = "ti,tps65914-gpio", .data = &palmas_dev_data,},
	{ .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
	{ },
};

static int palmas_gpio_probe(struct platform_device *pdev)
{
	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
	struct palmas_platform_data *palmas_pdata;
	struct palmas_gpio *palmas_gpio;
	int ret;
	const struct palmas_device_data *dev_data;

	dev_data = of_device_get_match_data(&pdev->dev);
	if (!dev_data)
		dev_data = &palmas_dev_data;

	palmas_gpio = devm_kzalloc(&pdev->dev,
				   sizeof(*palmas_gpio), GFP_KERNEL);
	if (!palmas_gpio)
		return -ENOMEM;

	palmas_gpio->palmas = palmas;
	palmas_gpio->gpio_chip.owner = THIS_MODULE;
	palmas_gpio->gpio_chip.label = dev_name(&pdev->dev);
	palmas_gpio->gpio_chip.ngpio = dev_data->ngpio;
	palmas_gpio->gpio_chip.can_sleep = true;
	palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
	palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
	palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
	palmas_gpio->gpio_chip.set = palmas_gpio_set;
	palmas_gpio->gpio_chip.get = palmas_gpio_get;
	palmas_gpio->gpio_chip.parent = &pdev->dev;

	palmas_pdata = dev_get_platdata(palmas->dev);
	if (palmas_pdata && palmas_pdata->gpio_base)
		palmas_gpio->gpio_chip.base = palmas_pdata->gpio_base;
	else
		palmas_gpio->gpio_chip.base = -1;

	ret = devm_gpiochip_add_data(&pdev->dev, &palmas_gpio->gpio_chip,
				     palmas_gpio);
	if (ret < 0) {
		dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
		return ret;
	}

	return ret;
}

static struct platform_driver palmas_gpio_driver = {
	.driver.name	= "palmas-gpio",
	.driver.of_match_table = of_palmas_gpio_match,
	.probe		= palmas_gpio_probe,
};

static int __init palmas_gpio_init(void)
{
	return platform_driver_register(&palmas_gpio_driver);
}
subsys_initcall(palmas_gpio_init);
linux-master
drivers/gpio/gpio-palmas.c
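On the 16-line TPS80036 variant, every callback above splits the offset into a bank selector and a bit index. A worked restatement of that math, for illustration only (the helper itself is not in the driver):

/*
 * offset 3  -> bank 0: PALMAS_GPIO_DATA_DIR,  bit 3
 * offset 11 -> bank 1: PALMAS_GPIO_DATA_DIR2, bit 3
 */
static void palmas_split_offset(unsigned int offset,
				int *bank, unsigned int *bit)
{
	*bank = offset / 8;	/* 0 = first register set, 1 = the *2 set */
	*bit = offset % 8;
}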
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GPIO driver for the TS-4800 board
 *
 * Copyright (c) 2016 - Savoir-faire Linux
 */

#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#define DEFAULT_PIN_NUMBER	16
#define INPUT_REG_OFFSET	0x00
#define OUTPUT_REG_OFFSET	0x02
#define DIRECTION_REG_OFFSET	0x04

static int ts4800_gpio_probe(struct platform_device *pdev)
{
	struct device_node *node;
	struct gpio_chip *chip;
	void __iomem *base_addr;
	int retval;
	u32 ngpios;

	chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base_addr))
		return PTR_ERR(base_addr);

	node = pdev->dev.of_node;
	if (!node)
		return -EINVAL;

	retval = of_property_read_u32(node, "ngpios", &ngpios);
	if (retval == -EINVAL)
		ngpios = DEFAULT_PIN_NUMBER;
	else if (retval)
		return retval;

	retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
			    base_addr + OUTPUT_REG_OFFSET, NULL,
			    base_addr + DIRECTION_REG_OFFSET, NULL, 0);
	if (retval) {
		dev_err(&pdev->dev, "bgpio_init failed\n");
		return retval;
	}

	chip->ngpio = ngpios;

	platform_set_drvdata(pdev, chip);

	return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
}

static const struct of_device_id ts4800_gpio_of_match[] = {
	{ .compatible = "technologic,ts4800-gpio", },
	{},
};
MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);

static struct platform_driver ts4800_gpio_driver = {
	.driver = {
		.name = "ts4800-gpio",
		.of_match_table = ts4800_gpio_of_match,
	},
	.probe = ts4800_gpio_probe,
};
module_platform_driver_probe(ts4800_gpio_driver, ts4800_gpio_probe);

MODULE_AUTHOR("Julien Grossholtz <[email protected]>");
MODULE_DESCRIPTION("TS4800 FPGA GPIO driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-ts4800.c
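The driver above delegates all accessors to the generic MMIO helper. As a reading aid, the 16-bit register layout that the bgpio_init() call wires up; the offsets come straight from the code, and the "1 = output" reading follows from passing the register as bgpio's dirout argument (an interpretation, not a datasheet quote):

/*
 * base + 0x00: input state  (bgpio "dat")
 * base + 0x02: output latch (bgpio "set")
 * base + 0x04: direction, 1 = output (bgpio "dirout")
 */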
// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO library for the ACCES IDIO-16 family
 * Copyright (C) 2022 William Breathitt Gray
 */
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gpio/regmap.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/types.h>

#include "gpio-idio-16.h"

#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16

#define IDIO_16_DAT_BASE 0x0
#define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
#define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1)
#define IDIO_16_CLEAR_INTERRUPT 0x1
#define IDIO_16_ENABLE_IRQ 0x2
#define IDIO_16_DEACTIVATE_INPUT_FILTERS 0x3
#define IDIO_16_DISABLE_IRQ IDIO_16_ENABLE_IRQ
#define IDIO_16_INTERRUPT_STATUS 0x6

#define IDIO_16_NGPIO 32
#define IDIO_16_NGPIO_PER_REG 8
#define IDIO_16_REG_STRIDE 4

struct idio_16_data {
	struct regmap *map;
	unsigned int irq_mask;
};

static int idio_16_handle_mask_sync(const int index,
				    const unsigned int mask_buf_def,
				    const unsigned int mask_buf,
				    void *const irq_drv_data)
{
	struct idio_16_data *const data = irq_drv_data;
	const unsigned int prev_mask = data->irq_mask;
	int err;
	unsigned int val;

	/* exit early if no change since the previous mask */
	if (mask_buf == prev_mask)
		return 0;

	/* remember the current mask for the next mask sync */
	data->irq_mask = mask_buf;

	/* if all previously masked, enable interrupts when unmasking */
	if (prev_mask == mask_buf_def) {
		err = regmap_write(data->map, IDIO_16_CLEAR_INTERRUPT, 0x00);
		if (err)
			return err;
		return regmap_read(data->map, IDIO_16_ENABLE_IRQ, &val);
	}

	/* if all are currently masked, disable interrupts */
	if (mask_buf == mask_buf_def)
		return regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);

	return 0;
}

static int idio_16_reg_mask_xlate(struct gpio_regmap *const gpio,
				  const unsigned int base,
				  const unsigned int offset,
				  unsigned int *const reg,
				  unsigned int *const mask)
{
	unsigned int stride;

	/* Input lines start at GPIO 16 */
	if (offset < 16) {
		stride = offset / IDIO_16_NGPIO_PER_REG;
		*reg = IDIO_16_OUT_BASE + stride * IDIO_16_REG_STRIDE;
	} else {
		stride = (offset - 16) / IDIO_16_NGPIO_PER_REG;
		*reg = IDIO_16_IN_BASE + stride * IDIO_16_REG_STRIDE;
	}

	*mask = BIT(offset % IDIO_16_NGPIO_PER_REG);

	return 0;
}

static const char *idio_16_names[IDIO_16_NGPIO] = {
	"OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
	"OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
	"IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
	"IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
};

/**
 * devm_idio_16_regmap_register - Register an IDIO-16 GPIO device
 * @dev: device that is registering this IDIO-16 GPIO device
 * @config: configuration for idio_16_regmap_config
 *
 * Registers an IDIO-16 GPIO device. Returns 0 on success and negative error
 * number on failure.
 */
int devm_idio_16_regmap_register(struct device *const dev,
				 const struct idio_16_regmap_config *const config)
{
	struct gpio_regmap_config gpio_config = {};
	int err;
	struct idio_16_data *data;
	struct regmap_irq_chip *chip;
	struct regmap_irq_chip_data *chip_data;

	if (!config->parent)
		return -EINVAL;

	if (!config->map)
		return -EINVAL;

	if (!config->regmap_irqs)
		return -EINVAL;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->map = config->map;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->name = dev_name(dev);
	chip->status_base = IDIO_16_INTERRUPT_STATUS;
	chip->mask_base = IDIO_16_ENABLE_IRQ;
	chip->ack_base = IDIO_16_CLEAR_INTERRUPT;
	chip->no_status = config->no_status;
	chip->num_regs = 1;
	chip->irqs = config->regmap_irqs;
	chip->num_irqs = config->num_regmap_irqs;
	chip->handle_mask_sync = idio_16_handle_mask_sync;
	chip->irq_drv_data = data;

	/* Disable IRQ to prevent spurious interrupts before we're ready */
	err = regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);
	if (err)
		return err;

	err = devm_regmap_add_irq_chip(dev, data->map, config->irq, 0, 0, chip,
				       &chip_data);
	if (err)
		return dev_err_probe(dev, err, "IRQ registration failed\n");

	if (config->filters) {
		/* Deactivate input filters */
		err = regmap_write(data->map,
				   IDIO_16_DEACTIVATE_INPUT_FILTERS, 0x00);
		if (err)
			return err;
	}

	gpio_config.parent = config->parent;
	gpio_config.regmap = data->map;
	gpio_config.ngpio = IDIO_16_NGPIO;
	gpio_config.names = idio_16_names;
	gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(IDIO_16_DAT_BASE);
	gpio_config.reg_set_base = GPIO_REGMAP_ADDR(IDIO_16_DAT_BASE);
	gpio_config.ngpio_per_reg = IDIO_16_NGPIO_PER_REG;
	gpio_config.reg_stride = IDIO_16_REG_STRIDE;
	gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
	gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);

MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("ACCES IDIO-16 GPIO Library");
MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-idio-16.c
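A worked example of the register translation performed by idio_16_reg_mask_xlate() above; each line follows directly from the constants in the code (outputs at offsets 0..15, inputs at 16..31, 8 lines per register, stride 4):

/*
 * offset  3 -> reg = IDIO_16_OUT_BASE + 0 * IDIO_16_REG_STRIDE, mask = BIT(3)
 * offset 10 -> reg = IDIO_16_OUT_BASE + 1 * IDIO_16_REG_STRIDE, mask = BIT(2)
 * offset 20 -> reg = IDIO_16_IN_BASE  + 0 * IDIO_16_REG_STRIDE, mask = BIT(4)
 * offset 29 -> reg = IDIO_16_IN_BASE  + 1 * IDIO_16_REG_STRIDE, mask = BIT(5)
 */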
// SPDX-License-Identifier: GPL-2.0
/*
 * Digital I/O driver for Technologic Systems I2C FPGA Core
 *
 * Copyright (C) 2015, 2018 Technologic Systems
 * Copyright (C) 2016 Savoir-Faire Linux
 */

#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/regmap.h>

#define DEFAULT_PIN_NUMBER	32
/*
 * Register bits used by the GPIO device
 * Some boards, such as TS-7970 do not have a separate input bit
 */
#define TS4900_GPIO_OE		0x01
#define TS4900_GPIO_OUT		0x02
#define TS4900_GPIO_IN		0x04
#define TS7970_GPIO_IN		0x02

struct ts4900_gpio_priv {
	struct regmap *regmap;
	struct gpio_chip gpio_chip;
	unsigned int input_bit;
};

static int ts4900_gpio_get_direction(struct gpio_chip *chip,
				     unsigned int offset)
{
	struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
	unsigned int reg;

	regmap_read(priv->regmap, offset, &reg);

	if (reg & TS4900_GPIO_OE)
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}

static int ts4900_gpio_direction_input(struct gpio_chip *chip,
				       unsigned int offset)
{
	struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);

	/*
	 * Only clear the OE bit here, requires a RMW. Prevents a potential
	 * issue with OE and DAT getting to the physical pin at different
	 * times.
	 */
	return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0);
}

static int ts4900_gpio_direction_output(struct gpio_chip *chip,
					unsigned int offset, int value)
{
	struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
	unsigned int reg;
	int ret;

	/*
	 * If changing from an input to an output, we need to first set the
	 * GPIO's DAT bit to what is requested and then set the OE bit. This
	 * prevents a glitch that can occur on the IO line.
	 */
	regmap_read(priv->regmap, offset, &reg);
	if (!(reg & TS4900_GPIO_OE)) {
		if (value)
			reg = TS4900_GPIO_OUT;
		else
			reg &= ~TS4900_GPIO_OUT;

		regmap_write(priv->regmap, offset, reg);
	}

	if (value)
		ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
							 TS4900_GPIO_OUT);
	else
		ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE);

	return ret;
}

static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
	unsigned int reg;

	regmap_read(priv->regmap, offset, &reg);

	return !!(reg & priv->input_bit);
}

static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);

	if (value)
		regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
				   TS4900_GPIO_OUT);
	else
		regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
}

static const struct regmap_config ts4900_regmap_config = {
	.reg_bits = 16,
	.val_bits = 8,
};

static const struct gpio_chip template_chip = {
	.label			= "ts4900-gpio",
	.owner			= THIS_MODULE,
	.get_direction		= ts4900_gpio_get_direction,
	.direction_input	= ts4900_gpio_direction_input,
	.direction_output	= ts4900_gpio_direction_output,
	.get			= ts4900_gpio_get,
	.set			= ts4900_gpio_set,
	.base			= -1,
	.can_sleep		= true,
};

static const struct of_device_id ts4900_gpio_of_match_table[] = {
	{
		.compatible = "technologic,ts4900-gpio",
		.data = (void *)TS4900_GPIO_IN,
	}, {
		.compatible = "technologic,ts7970-gpio",
		.data = (void *)TS7970_GPIO_IN,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ts4900_gpio_of_match_table);

static int ts4900_gpio_probe(struct i2c_client *client)
{
	struct ts4900_gpio_priv *priv;
	u32 ngpio;
	int ret;

	if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
		ngpio = DEFAULT_PIN_NUMBER;

	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->gpio_chip = template_chip;
	priv->gpio_chip.label = "ts4900-gpio";
	priv->gpio_chip.ngpio = ngpio;
	priv->gpio_chip.parent = &client->dev;
	priv->input_bit = (uintptr_t)of_device_get_match_data(&client->dev);

	priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
	if (IS_ERR(priv->regmap)) {
		ret = PTR_ERR(priv->regmap);
		dev_err(&client->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	ret = devm_gpiochip_add_data(&client->dev, &priv->gpio_chip, priv);
	if (ret < 0) {
		dev_err(&client->dev, "Unable to register gpiochip\n");
		return ret;
	}

	i2c_set_clientdata(client, priv);

	return 0;
}

static const struct i2c_device_id ts4900_gpio_id_table[] = {
	{ "ts4900-gpio", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ts4900_gpio_id_table);

static struct i2c_driver ts4900_gpio_driver = {
	.driver = {
		.name = "ts4900-gpio",
		.of_match_table = ts4900_gpio_of_match_table,
	},
	.probe = ts4900_gpio_probe,
	.id_table = ts4900_gpio_id_table,
};
module_i2c_driver(ts4900_gpio_driver);

MODULE_AUTHOR("Technologic Systems");
MODULE_DESCRIPTION("GPIO interface for Technologic Systems I2C-FPGA core");
MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-ts4900.c
// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO driver for TPS68470 PMIC
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Authors:
 *	Antti Laakso <[email protected]>
 *	Tianshu Qiu <[email protected]>
 *	Jian Xu Zheng <[email protected]>
 *	Yuning Pu <[email protected]>
 */

#include <linux/gpio/driver.h>
#include <linux/mfd/tps68470.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define TPS68470_N_LOGIC_OUTPUT	3
#define TPS68470_N_REGULAR_GPIO	7
#define TPS68470_N_GPIO	(TPS68470_N_LOGIC_OUTPUT + TPS68470_N_REGULAR_GPIO)

struct tps68470_gpio_data {
	struct regmap *tps68470_regmap;
	struct gpio_chip gc;
};

static int tps68470_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
	struct regmap *regmap = tps68470_gpio->tps68470_regmap;
	unsigned int reg = TPS68470_REG_GPDO;
	int val, ret;

	if (offset >= TPS68470_N_REGULAR_GPIO) {
		offset -= TPS68470_N_REGULAR_GPIO;
		reg = TPS68470_REG_SGPO;
	}

	ret = regmap_read(regmap, reg, &val);
	if (ret) {
		/* report the register that actually failed, GPDO or SGPO */
		dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
			reg);
		return ret;
	}
	return !!(val & BIT(offset));
}

static int tps68470_gpio_get_direction(struct gpio_chip *gc,
				       unsigned int offset)
{
	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
	struct regmap *regmap = tps68470_gpio->tps68470_regmap;
	int val, ret;

	/* rest are always outputs */
	if (offset >= TPS68470_N_REGULAR_GPIO)
		return GPIO_LINE_DIRECTION_OUT;

	ret = regmap_read(regmap, TPS68470_GPIO_CTL_REG_A(offset), &val);
	if (ret) {
		dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
			TPS68470_GPIO_CTL_REG_A(offset));
		return ret;
	}

	val &= TPS68470_GPIO_MODE_MASK;
	return val >= TPS68470_GPIO_MODE_OUT_CMOS ? GPIO_LINE_DIRECTION_OUT :
						    GPIO_LINE_DIRECTION_IN;
}

static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
			      int value)
{
	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
	struct regmap *regmap = tps68470_gpio->tps68470_regmap;
	unsigned int reg = TPS68470_REG_GPDO;

	if (offset >= TPS68470_N_REGULAR_GPIO) {
		reg = TPS68470_REG_SGPO;
		offset -= TPS68470_N_REGULAR_GPIO;
	}

	regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
}

static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
				int value)
{
	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
	struct regmap *regmap = tps68470_gpio->tps68470_regmap;

	/* Set the initial value */
	tps68470_gpio_set(gc, offset, value);

	/* rest are always outputs */
	if (offset >= TPS68470_N_REGULAR_GPIO)
		return 0;

	return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
				  TPS68470_GPIO_MODE_MASK,
				  TPS68470_GPIO_MODE_OUT_CMOS);
}

static int tps68470_gpio_input(struct gpio_chip *gc, unsigned int offset)
{
	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
	struct regmap *regmap = tps68470_gpio->tps68470_regmap;

	/* rest are always outputs */
	if (offset >= TPS68470_N_REGULAR_GPIO)
		return -EINVAL;

	return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
				  TPS68470_GPIO_MODE_MASK, 0x00);
}

static const char *tps68470_names[TPS68470_N_GPIO] = {
	"gpio.0", "gpio.1", "gpio.2", "gpio.3",
	"gpio.4", "gpio.5", "gpio.6",
	"s_enable", "s_idle", "s_resetn",
};

static int tps68470_gpio_probe(struct platform_device *pdev)
{
	struct tps68470_gpio_data *tps68470_gpio;

	tps68470_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps68470_gpio),
				     GFP_KERNEL);
	if (!tps68470_gpio)
		return -ENOMEM;

	tps68470_gpio->tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
	tps68470_gpio->gc.label = "tps68470-gpio";
	tps68470_gpio->gc.owner = THIS_MODULE;
	tps68470_gpio->gc.direction_input = tps68470_gpio_input;
	tps68470_gpio->gc.direction_output = tps68470_gpio_output;
	tps68470_gpio->gc.get = tps68470_gpio_get;
	tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
	tps68470_gpio->gc.set = tps68470_gpio_set;
	tps68470_gpio->gc.can_sleep = true;
	tps68470_gpio->gc.names = tps68470_names;
	tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
	tps68470_gpio->gc.base = -1;
	tps68470_gpio->gc.parent = &pdev->dev;

	return devm_gpiochip_add_data(&pdev->dev, &tps68470_gpio->gc,
				      tps68470_gpio);
}

static struct platform_driver tps68470_gpio_driver = {
	.driver = {
		.name = "tps68470-gpio",
	},
	.probe = tps68470_gpio_probe,
};
module_platform_driver(tps68470_gpio_driver);

MODULE_ALIAS("platform:tps68470-gpio");
MODULE_DESCRIPTION("GPIO driver for TPS68470 PMIC");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-tps68470.c
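An illustrative helper (an assumption, not present in the driver) that makes explicit the offset-to-register mapping shared by the get/set paths above: the first seven offsets are the regular GPIOs in GPDO, the last three are the logic outputs in SGPO.

/* Hypothetical helper, for illustration only */
static void example_tps68470_offset_to_reg(unsigned int offset,
					   unsigned int *reg,
					   unsigned int *bit)
{
	if (offset >= TPS68470_N_REGULAR_GPIO) {
		/* s_enable / s_idle / s_resetn live in the SGPO register */
		*reg = TPS68470_REG_SGPO;
		*bit = offset - TPS68470_N_REGULAR_GPIO;
	} else {
		/* gpio.0 .. gpio.6 live in GPDO */
		*reg = TPS68470_REG_GPDO;
		*bit = offset;
	}
}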
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016-2022 NVIDIA Corporation * * Author: Thierry Reding <[email protected]> * Dipen Patel <[email protected]> */ #include <linux/gpio/driver.h> #include <linux/hte.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <dt-bindings/gpio/tegra186-gpio.h> #include <dt-bindings/gpio/tegra194-gpio.h> #include <dt-bindings/gpio/tegra234-gpio.h> #include <dt-bindings/gpio/tegra241-gpio.h> /* security registers */ #define TEGRA186_GPIO_CTL_SCR 0x0c #define TEGRA186_GPIO_CTL_SCR_SEC_WEN BIT(28) #define TEGRA186_GPIO_CTL_SCR_SEC_REN BIT(27) #define TEGRA186_GPIO_INT_ROUTE_MAPPING(p, x) (0x14 + (p) * 0x20 + (x) * 4) #define TEGRA186_GPIO_VM 0x00 #define TEGRA186_GPIO_VM_RW_MASK 0x03 #define TEGRA186_GPIO_SCR 0x04 #define TEGRA186_GPIO_SCR_PIN_SIZE 0x08 #define TEGRA186_GPIO_SCR_PORT_SIZE 0x40 #define TEGRA186_GPIO_SCR_SEC_WEN BIT(28) #define TEGRA186_GPIO_SCR_SEC_REN BIT(27) #define TEGRA186_GPIO_SCR_SEC_G1W BIT(9) #define TEGRA186_GPIO_SCR_SEC_G1R BIT(1) #define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \ TEGRA186_GPIO_SCR_SEC_REN | \ TEGRA186_GPIO_SCR_SEC_G1R | \ TEGRA186_GPIO_SCR_SEC_G1W) #define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \ TEGRA186_GPIO_SCR_SEC_REN) /* control registers */ #define TEGRA186_GPIO_ENABLE_CONFIG 0x00 #define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0) #define TEGRA186_GPIO_ENABLE_CONFIG_OUT BIT(1) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_NONE (0x0 << 2) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL (0x1 << 2) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE (0x2 << 2) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE (0x3 << 2) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK (0x3 << 2) #define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4) #define TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5) #define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6) #define TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC BIT(7) #define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04 #define TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff) #define TEGRA186_GPIO_INPUT 0x08 #define TEGRA186_GPIO_INPUT_HIGH BIT(0) #define TEGRA186_GPIO_OUTPUT_CONTROL 0x0c #define TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED BIT(0) #define TEGRA186_GPIO_OUTPUT_VALUE 0x10 #define TEGRA186_GPIO_OUTPUT_VALUE_HIGH BIT(0) #define TEGRA186_GPIO_INTERRUPT_CLEAR 0x14 #define TEGRA186_GPIO_INTERRUPT_STATUS(x) (0x100 + (x) * 4) struct tegra_gpio_port { const char *name; unsigned int bank; unsigned int port; unsigned int pins; }; struct tegra186_pin_range { unsigned int offset; const char *group; }; struct tegra_gpio_soc { const struct tegra_gpio_port *ports; unsigned int num_ports; const char *name; unsigned int instance; unsigned int num_irqs_per_bank; const struct tegra186_pin_range *pin_ranges; unsigned int num_pin_ranges; const char *pinmux; bool has_gte; bool has_vm_support; }; struct tegra_gpio { struct gpio_chip gpio; unsigned int num_irq; unsigned int *irq; const struct tegra_gpio_soc *soc; unsigned int num_irqs_per_bank; unsigned int num_banks; void __iomem *secure; void __iomem *base; }; static const struct tegra_gpio_port * tegra186_gpio_get_port(struct tegra_gpio *gpio, unsigned int *pin) { unsigned int start = 0, i; for (i = 0; i < gpio->soc->num_ports; i++) { const struct tegra_gpio_port *port = &gpio->soc->ports[i]; if (*pin >= start && *pin < start + port->pins) { *pin -= 
start; return port; } start += port->pins; } return NULL; } static void __iomem *tegra186_gpio_get_base(struct tegra_gpio *gpio, unsigned int pin) { const struct tegra_gpio_port *port; unsigned int offset; port = tegra186_gpio_get_port(gpio, &pin); if (!port) return NULL; offset = port->bank * 0x1000 + port->port * 0x200; return gpio->base + offset + pin * 0x20; } static void __iomem *tegra186_gpio_get_secure_base(struct tegra_gpio *gpio, unsigned int pin) { const struct tegra_gpio_port *port; unsigned int offset; port = tegra186_gpio_get_port(gpio, &pin); if (!port) return NULL; offset = port->bank * 0x1000 + port->port * TEGRA186_GPIO_SCR_PORT_SIZE; return gpio->secure + offset + pin * TEGRA186_GPIO_SCR_PIN_SIZE; } static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned int pin) { void __iomem *secure; u32 value; secure = tegra186_gpio_get_secure_base(gpio, pin); if (gpio->soc->has_vm_support) { value = readl(secure + TEGRA186_GPIO_VM); if ((value & TEGRA186_GPIO_VM_RW_MASK) != TEGRA186_GPIO_VM_RW_MASK) return false; } value = __raw_readl(secure + TEGRA186_GPIO_SCR); if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0) return true; if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS) return true; return false; } static int tegra186_init_valid_mask(struct gpio_chip *chip, unsigned long *valid_mask, unsigned int ngpios) { struct tegra_gpio *gpio = gpiochip_get_data(chip); unsigned int j; for (j = 0; j < ngpios; j++) { if (!tegra186_gpio_is_accessible(gpio, j)) clear_bit(j, valid_mask); } return 0; } static int tegra186_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -ENODEV; value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int tegra186_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -ENODEV; value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL); value |= TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED; writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL); value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE; value &= ~TEGRA186_GPIO_ENABLE_CONFIG_OUT; writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); return 0; } static int tegra186_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int level) { struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; /* configure output level first */ chip->set(chip, offset, level); base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -EINVAL; /* set the direction */ value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL); value &= ~TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED; writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL); value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE; value |= TEGRA186_GPIO_ENABLE_CONFIG_OUT; writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); return 0; } #define HTE_BOTH_EDGES (HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS) static int tegra186_gpio_en_hw_ts(struct gpio_chip *gc, u32 offset, unsigned long flags) { struct tegra_gpio *gpio; void __iomem *base; int value; if (!gc) return -EINVAL; gpio = 
gpiochip_get_data(gc); if (!gpio) return -ENODEV; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -EINVAL; value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value |= TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC; if (flags == HTE_BOTH_EDGES) { value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE; } else if (flags == HTE_RISING_EDGE_TS) { value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL; } else if (flags == HTE_FALLING_EDGE_TS) { value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; } writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); return 0; } static int tegra186_gpio_dis_hw_ts(struct gpio_chip *gc, u32 offset, unsigned long flags) { struct tegra_gpio *gpio; void __iomem *base; int value; if (!gc) return -EINVAL; gpio = gpiochip_get_data(gc); if (!gpio) return -ENODEV; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -EINVAL; value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC; if (flags == HTE_BOTH_EDGES) { value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE; } else if (flags == HTE_RISING_EDGE_TS) { value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL; } else if (flags == HTE_FALLING_EDGE_TS) { value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; } writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); return 0; } static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return -ENODEV; value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT) value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE); else value = readl(base + TEGRA186_GPIO_INPUT); return value & BIT(0); } static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset, int level) { struct tegra_gpio *gpio = gpiochip_get_data(chip); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, offset); if (WARN_ON(base == NULL)) return; value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE); if (level == 0) value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH; else value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH; writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE); } static int tegra186_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { struct tegra_gpio *gpio = gpiochip_get_data(chip); u32 debounce, value; void __iomem *base; base = tegra186_gpio_get_base(gpio, offset); if (base == NULL) return -ENXIO; if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) return -ENOTSUPP; debounce = pinconf_to_config_argument(config); /* * The Tegra186 GPIO controller supports a maximum of 255 ms debounce * time. 
*/ if (debounce > 255000) return -EINVAL; debounce = DIV_ROUND_UP(debounce, USEC_PER_MSEC); value = TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(debounce); writel(value, base + TEGRA186_GPIO_DEBOUNCE_CONTROL); value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value |= TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE; writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); return 0; } static int tegra186_gpio_add_pin_ranges(struct gpio_chip *chip) { struct tegra_gpio *gpio = gpiochip_get_data(chip); struct pinctrl_dev *pctldev; struct device_node *np; unsigned int i, j; int err; if (!gpio->soc->pinmux || gpio->soc->num_pin_ranges == 0) return 0; np = of_find_compatible_node(NULL, NULL, gpio->soc->pinmux); if (!np) return -ENODEV; pctldev = of_pinctrl_get(np); of_node_put(np); if (!pctldev) return -EPROBE_DEFER; for (i = 0; i < gpio->soc->num_pin_ranges; i++) { unsigned int pin = gpio->soc->pin_ranges[i].offset, port; const char *group = gpio->soc->pin_ranges[i].group; port = pin / 8; pin = pin % 8; if (port >= gpio->soc->num_ports) { dev_warn(chip->parent, "invalid port %u for %s\n", port, group); continue; } for (j = 0; j < port; j++) pin += gpio->soc->ports[j].pins; err = gpiochip_add_pingroup_range(chip, pctldev, pin, group); if (err < 0) return err; } return 0; } static int tegra186_gpio_of_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) { struct tegra_gpio *gpio = gpiochip_get_data(chip); unsigned int port, pin, i, offset = 0; if (WARN_ON(chip->of_gpio_n_cells < 2)) return -EINVAL; if (WARN_ON(spec->args_count < chip->of_gpio_n_cells)) return -EINVAL; port = spec->args[0] / 8; pin = spec->args[0] % 8; if (port >= gpio->soc->num_ports) { dev_err(chip->parent, "invalid port number: %u\n", port); return -EINVAL; } for (i = 0; i < port; i++) offset += gpio->soc->ports[i].pins; if (flags) *flags = spec->args[1]; return offset + pin; } #define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) static void tegra186_irq_ack(struct irq_data *data) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); if (WARN_ON(base == NULL)) return; writel(1, base + TEGRA186_GPIO_INTERRUPT_CLEAR); } static void tegra186_irq_mask(struct irq_data *data) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, data->hwirq); if (WARN_ON(base == NULL)) return; value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value &= ~TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT; writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); gpiochip_disable_irq(&gpio->gpio, data->hwirq); } static void tegra186_irq_unmask(struct irq_data *data) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, data->hwirq); if (WARN_ON(base == NULL)) return; gpiochip_enable_irq(&gpio->gpio, data->hwirq); value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG); value |= TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT; writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); } static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; base = tegra186_gpio_get_base(gpio, data->hwirq); if (WARN_ON(base == NULL)) return -ENODEV; value = readl(base + 
TEGRA186_GPIO_ENABLE_CONFIG); value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK; value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_NONE: break; case IRQ_TYPE_EDGE_RISING: value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL; break; case IRQ_TYPE_EDGE_FALLING: value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE; break; case IRQ_TYPE_EDGE_BOTH: value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE; break; case IRQ_TYPE_LEVEL_HIGH: value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL; value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL; break; case IRQ_TYPE_LEVEL_LOW: value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL; break; default: return -EINVAL; } writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG); if ((type & IRQ_TYPE_EDGE_BOTH) == 0) irq_set_handler_locked(data, handle_level_irq); else irq_set_handler_locked(data, handle_edge_irq); if (data->parent_data) return irq_chip_set_type_parent(data, type); return 0; } static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on) { if (data->parent_data) return irq_chip_set_wake_parent(data, on); return 0; } static void tegra186_irq_print_chip(struct irq_data *data, struct seq_file *p) { struct gpio_chip *gc = irq_data_get_irq_chip_data(data); seq_printf(p, dev_name(gc->parent)); } static const struct irq_chip tegra186_gpio_irq_chip = { .irq_ack = tegra186_irq_ack, .irq_mask = tegra186_irq_mask, .irq_unmask = tegra186_irq_unmask, .irq_set_type = tegra186_irq_set_type, .irq_set_wake = tegra186_irq_set_wake, .irq_print_chip = tegra186_irq_print_chip, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void tegra186_gpio_irq(struct irq_desc *desc) { struct tegra_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_domain *domain = gpio->gpio.irq.domain; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int parent = irq_desc_get_irq(desc); unsigned int i, j, offset = 0; chained_irq_enter(chip, desc); for (i = 0; i < gpio->soc->num_ports; i++) { const struct tegra_gpio_port *port = &gpio->soc->ports[i]; unsigned int pin; unsigned long value; void __iomem *base; base = gpio->base + port->bank * 0x1000 + port->port * 0x200; /* skip ports that are not associated with this bank */ for (j = 0; j < gpio->num_irqs_per_bank; j++) { if (parent == gpio->irq[port->bank * gpio->num_irqs_per_bank + j]) break; } if (j == gpio->num_irqs_per_bank) goto skip; value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1)); for_each_set_bit(pin, &value, port->pins) { int ret = generic_handle_domain_irq(domain, offset + pin); WARN_RATELIMIT(ret, "hwirq = %d", offset + pin); } skip: offset += port->pins; } chained_irq_exit(chip, desc); } static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { struct tegra_gpio *gpio = gpiochip_get_data(domain->host_data); unsigned int port, pin, i, offset = 0; if (WARN_ON(gpio->gpio.of_gpio_n_cells < 2)) return -EINVAL; if (WARN_ON(fwspec->param_count < gpio->gpio.of_gpio_n_cells)) return -EINVAL; port = fwspec->param[0] / 8; pin = fwspec->param[0] % 8; if (port >= gpio->soc->num_ports) return -EINVAL; for (i = 0; i < port; i++) offset += gpio->soc->ports[i].pins; *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; *hwirq = offset + pin; return 0; } static int tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, 
unsigned int parent_type) { struct tegra_gpio *gpio = gpiochip_get_data(chip); struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; fwspec->param[0] = gpio->soc->instance; fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; return 0; } static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip, unsigned int hwirq, unsigned int type, unsigned int *parent_hwirq, unsigned int *parent_type) { *parent_hwirq = chip->irq.child_offset_to_irq(chip, hwirq); *parent_type = type; return 0; } static unsigned int tegra186_gpio_child_offset_to_irq(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio *gpio = gpiochip_get_data(chip); unsigned int i; for (i = 0; i < gpio->soc->num_ports; i++) { if (offset < gpio->soc->ports[i].pins) break; offset -= gpio->soc->ports[i].pins; } return offset + i * 8; } static const struct of_device_id tegra186_pmc_of_match[] = { { .compatible = "nvidia,tegra186-pmc" }, { .compatible = "nvidia,tegra194-pmc" }, { .compatible = "nvidia,tegra234-pmc" }, { /* sentinel */ } }; static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio) { struct device *dev = gpio->gpio.parent; unsigned int i; u32 value; for (i = 0; i < gpio->soc->num_ports; i++) { const struct tegra_gpio_port *port = &gpio->soc->ports[i]; unsigned int offset, p = port->port; void __iomem *base; base = gpio->secure + port->bank * 0x1000 + 0x800; value = readl(base + TEGRA186_GPIO_CTL_SCR); /* * For controllers that haven't been locked down yet, make * sure to program the default interrupt route mapping. */ if ((value & TEGRA186_GPIO_CTL_SCR_SEC_REN) == 0 && (value & TEGRA186_GPIO_CTL_SCR_SEC_WEN) == 0) { /* * On Tegra194 and later, each pin can be routed to one or more * interrupts. */ dev_dbg(dev, "programming default interrupt routing for port %s\n", port->name); offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, 0); /* * By default we only want to route GPIO pins to IRQ 0. This works * only under the assumption that we're running as the host kernel * and hence all GPIO pins are owned by Linux. * * For cases where Linux is the guest OS, the hypervisor will have * to configure the interrupt routing and pass only the valid * interrupts via device tree. 
*/ value = readl(base + offset); value = BIT(port->pins) - 1; writel(value, base + offset); } } } static unsigned int tegra186_gpio_irqs_per_bank(struct tegra_gpio *gpio) { struct device *dev = gpio->gpio.parent; if (gpio->num_irq > gpio->num_banks) { if (gpio->num_irq % gpio->num_banks != 0) goto error; } if (gpio->num_irq < gpio->num_banks) goto error; gpio->num_irqs_per_bank = gpio->num_irq / gpio->num_banks; if (gpio->num_irqs_per_bank > gpio->soc->num_irqs_per_bank) goto error; return 0; error: dev_err(dev, "invalid number of interrupts (%u) for %u banks\n", gpio->num_irq, gpio->num_banks); return -EINVAL; } static int tegra186_gpio_probe(struct platform_device *pdev) { unsigned int i, j, offset; struct gpio_irq_chip *irq; struct tegra_gpio *gpio; struct device_node *np; char **names; int err; gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; gpio->soc = device_get_match_data(&pdev->dev); gpio->gpio.label = gpio->soc->name; gpio->gpio.parent = &pdev->dev; /* count the number of banks in the controller */ for (i = 0; i < gpio->soc->num_ports; i++) if (gpio->soc->ports[i].bank > gpio->num_banks) gpio->num_banks = gpio->soc->ports[i].bank; gpio->num_banks++; /* get register apertures */ gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security"); if (IS_ERR(gpio->secure)) { gpio->secure = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio->secure)) return PTR_ERR(gpio->secure); } gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); if (IS_ERR(gpio->base)) { gpio->base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); } err = platform_irq_count(pdev); if (err < 0) return err; gpio->num_irq = err; err = tegra186_gpio_irqs_per_bank(gpio); if (err < 0) return err; gpio->irq = devm_kcalloc(&pdev->dev, gpio->num_irq, sizeof(*gpio->irq), GFP_KERNEL); if (!gpio->irq) return -ENOMEM; for (i = 0; i < gpio->num_irq; i++) { err = platform_get_irq(pdev, i); if (err < 0) return err; gpio->irq[i] = err; } gpio->gpio.request = gpiochip_generic_request; gpio->gpio.free = gpiochip_generic_free; gpio->gpio.get_direction = tegra186_gpio_get_direction; gpio->gpio.direction_input = tegra186_gpio_direction_input; gpio->gpio.direction_output = tegra186_gpio_direction_output; gpio->gpio.get = tegra186_gpio_get; gpio->gpio.set = tegra186_gpio_set; gpio->gpio.set_config = tegra186_gpio_set_config; gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges; gpio->gpio.init_valid_mask = tegra186_init_valid_mask; if (gpio->soc->has_gte) { gpio->gpio.en_hw_timestamp = tegra186_gpio_en_hw_ts; gpio->gpio.dis_hw_timestamp = tegra186_gpio_dis_hw_ts; } gpio->gpio.base = -1; for (i = 0; i < gpio->soc->num_ports; i++) gpio->gpio.ngpio += gpio->soc->ports[i].pins; names = devm_kcalloc(gpio->gpio.parent, gpio->gpio.ngpio, sizeof(*names), GFP_KERNEL); if (!names) return -ENOMEM; for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) { const struct tegra_gpio_port *port = &gpio->soc->ports[i]; char *name; for (j = 0; j < port->pins; j++) { name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL, "P%s.%02x", port->name, j); if (!name) return -ENOMEM; names[offset + j] = name; } offset += port->pins; } gpio->gpio.names = (const char * const *)names; #if defined(CONFIG_OF_GPIO) gpio->gpio.of_gpio_n_cells = 2; gpio->gpio.of_xlate = tegra186_gpio_of_xlate; #endif /* CONFIG_OF_GPIO */ irq = &gpio->gpio.irq; gpio_irq_chip_set_chip(irq, &tegra186_gpio_irq_chip); irq->fwnode = of_node_to_fwnode(pdev->dev.of_node); 
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq; irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec; irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq; irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate; irq->handler = handle_simple_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = tegra186_gpio_irq; irq->parent_handler_data = gpio; irq->num_parents = gpio->num_irq; /* * To simplify things, use a single interrupt per bank for now. Some * chips support up to 8 interrupts per bank, which can be useful to * distribute the load and decrease the processing latency for GPIOs * but it also requires a more complicated interrupt routing than we * currently program. */ if (gpio->num_irqs_per_bank > 1) { irq->parents = devm_kcalloc(&pdev->dev, gpio->num_banks, sizeof(*irq->parents), GFP_KERNEL); if (!irq->parents) return -ENOMEM; for (i = 0; i < gpio->num_banks; i++) irq->parents[i] = gpio->irq[i * gpio->num_irqs_per_bank]; irq->num_parents = gpio->num_banks; } else { irq->num_parents = gpio->num_irq; irq->parents = gpio->irq; } if (gpio->soc->num_irqs_per_bank > 1) tegra186_gpio_init_route_mapping(gpio); np = of_find_matching_node(NULL, tegra186_pmc_of_match); if (np) { if (of_device_is_available(np)) { irq->parent_domain = irq_find_host(np); of_node_put(np); if (!irq->parent_domain) return -EPROBE_DEFER; } else { of_node_put(np); } } irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio, sizeof(*irq->map), GFP_KERNEL); if (!irq->map) return -ENOMEM; for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) { const struct tegra_gpio_port *port = &gpio->soc->ports[i]; for (j = 0; j < port->pins; j++) irq->map[offset + j] = irq->parents[port->bank]; offset += port->pins; } return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio); } #define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA186_MAIN_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra186_main_ports[] = { TEGRA186_MAIN_GPIO_PORT( A, 2, 0, 7), TEGRA186_MAIN_GPIO_PORT( B, 3, 0, 7), TEGRA186_MAIN_GPIO_PORT( C, 3, 1, 7), TEGRA186_MAIN_GPIO_PORT( D, 3, 2, 6), TEGRA186_MAIN_GPIO_PORT( E, 2, 1, 8), TEGRA186_MAIN_GPIO_PORT( F, 2, 2, 6), TEGRA186_MAIN_GPIO_PORT( G, 4, 1, 6), TEGRA186_MAIN_GPIO_PORT( H, 1, 0, 7), TEGRA186_MAIN_GPIO_PORT( I, 0, 4, 8), TEGRA186_MAIN_GPIO_PORT( J, 5, 0, 8), TEGRA186_MAIN_GPIO_PORT( K, 5, 1, 1), TEGRA186_MAIN_GPIO_PORT( L, 1, 1, 8), TEGRA186_MAIN_GPIO_PORT( M, 5, 3, 6), TEGRA186_MAIN_GPIO_PORT( N, 0, 0, 7), TEGRA186_MAIN_GPIO_PORT( O, 0, 1, 4), TEGRA186_MAIN_GPIO_PORT( P, 4, 0, 7), TEGRA186_MAIN_GPIO_PORT( Q, 0, 2, 6), TEGRA186_MAIN_GPIO_PORT( R, 0, 5, 6), TEGRA186_MAIN_GPIO_PORT( T, 0, 3, 4), TEGRA186_MAIN_GPIO_PORT( X, 1, 2, 8), TEGRA186_MAIN_GPIO_PORT( Y, 1, 3, 7), TEGRA186_MAIN_GPIO_PORT(BB, 2, 3, 2), TEGRA186_MAIN_GPIO_PORT(CC, 5, 2, 4), }; static const struct tegra_gpio_soc tegra186_main_soc = { .num_ports = ARRAY_SIZE(tegra186_main_ports), .ports = tegra186_main_ports, .name = "tegra186-gpio", .instance = 0, .num_irqs_per_bank = 1, .has_vm_support = false, }; #define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA186_AON_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra186_aon_ports[] = { TEGRA186_AON_GPIO_PORT( S, 0, 1, 5), TEGRA186_AON_GPIO_PORT( U, 0, 2, 6), TEGRA186_AON_GPIO_PORT( V, 0, 4, 8), TEGRA186_AON_GPIO_PORT( W, 0, 5, 8), 
TEGRA186_AON_GPIO_PORT( Z, 0, 7, 4), TEGRA186_AON_GPIO_PORT(AA, 0, 6, 8), TEGRA186_AON_GPIO_PORT(EE, 0, 3, 3), TEGRA186_AON_GPIO_PORT(FF, 0, 0, 5), }; static const struct tegra_gpio_soc tegra186_aon_soc = { .num_ports = ARRAY_SIZE(tegra186_aon_ports), .ports = tegra186_aon_ports, .name = "tegra186-gpio-aon", .instance = 1, .num_irqs_per_bank = 1, .has_vm_support = false, }; #define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA194_MAIN_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra194_main_ports[] = { TEGRA194_MAIN_GPIO_PORT( A, 1, 2, 8), TEGRA194_MAIN_GPIO_PORT( B, 4, 7, 2), TEGRA194_MAIN_GPIO_PORT( C, 4, 3, 8), TEGRA194_MAIN_GPIO_PORT( D, 4, 4, 4), TEGRA194_MAIN_GPIO_PORT( E, 4, 5, 8), TEGRA194_MAIN_GPIO_PORT( F, 4, 6, 6), TEGRA194_MAIN_GPIO_PORT( G, 4, 0, 8), TEGRA194_MAIN_GPIO_PORT( H, 4, 1, 8), TEGRA194_MAIN_GPIO_PORT( I, 4, 2, 5), TEGRA194_MAIN_GPIO_PORT( J, 5, 1, 6), TEGRA194_MAIN_GPIO_PORT( K, 3, 0, 8), TEGRA194_MAIN_GPIO_PORT( L, 3, 1, 4), TEGRA194_MAIN_GPIO_PORT( M, 2, 3, 8), TEGRA194_MAIN_GPIO_PORT( N, 2, 4, 3), TEGRA194_MAIN_GPIO_PORT( O, 5, 0, 6), TEGRA194_MAIN_GPIO_PORT( P, 2, 5, 8), TEGRA194_MAIN_GPIO_PORT( Q, 2, 6, 8), TEGRA194_MAIN_GPIO_PORT( R, 2, 7, 6), TEGRA194_MAIN_GPIO_PORT( S, 3, 3, 8), TEGRA194_MAIN_GPIO_PORT( T, 3, 4, 8), TEGRA194_MAIN_GPIO_PORT( U, 3, 5, 1), TEGRA194_MAIN_GPIO_PORT( V, 1, 0, 8), TEGRA194_MAIN_GPIO_PORT( W, 1, 1, 2), TEGRA194_MAIN_GPIO_PORT( X, 2, 0, 8), TEGRA194_MAIN_GPIO_PORT( Y, 2, 1, 8), TEGRA194_MAIN_GPIO_PORT( Z, 2, 2, 8), TEGRA194_MAIN_GPIO_PORT(FF, 3, 2, 2), TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2) }; static const struct tegra186_pin_range tegra194_main_pin_ranges[] = { { TEGRA194_MAIN_GPIO(GG, 0), "pex_l5_clkreq_n_pgg0" }, { TEGRA194_MAIN_GPIO(GG, 1), "pex_l5_rst_n_pgg1" }, }; static const struct tegra_gpio_soc tegra194_main_soc = { .num_ports = ARRAY_SIZE(tegra194_main_ports), .ports = tegra194_main_ports, .name = "tegra194-gpio", .instance = 0, .num_irqs_per_bank = 8, .num_pin_ranges = ARRAY_SIZE(tegra194_main_pin_ranges), .pin_ranges = tegra194_main_pin_ranges, .pinmux = "nvidia,tegra194-pinmux", .has_vm_support = true, }; #define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA194_AON_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra194_aon_ports[] = { TEGRA194_AON_GPIO_PORT(AA, 0, 3, 8), TEGRA194_AON_GPIO_PORT(BB, 0, 4, 4), TEGRA194_AON_GPIO_PORT(CC, 0, 1, 8), TEGRA194_AON_GPIO_PORT(DD, 0, 2, 3), TEGRA194_AON_GPIO_PORT(EE, 0, 0, 7) }; static const struct tegra_gpio_soc tegra194_aon_soc = { .num_ports = ARRAY_SIZE(tegra194_aon_ports), .ports = tegra194_aon_ports, .name = "tegra194-gpio-aon", .instance = 1, .num_irqs_per_bank = 8, .has_gte = true, .has_vm_support = false, }; #define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA234_MAIN_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra234_main_ports[] = { TEGRA234_MAIN_GPIO_PORT( A, 0, 0, 8), TEGRA234_MAIN_GPIO_PORT( B, 0, 3, 1), TEGRA234_MAIN_GPIO_PORT( C, 5, 1, 8), TEGRA234_MAIN_GPIO_PORT( D, 5, 2, 4), TEGRA234_MAIN_GPIO_PORT( E, 5, 3, 8), TEGRA234_MAIN_GPIO_PORT( F, 5, 4, 6), TEGRA234_MAIN_GPIO_PORT( G, 4, 0, 8), TEGRA234_MAIN_GPIO_PORT( H, 4, 1, 8), TEGRA234_MAIN_GPIO_PORT( I, 4, 2, 7), TEGRA234_MAIN_GPIO_PORT( J, 5, 0, 6), TEGRA234_MAIN_GPIO_PORT( K, 3, 0, 8), TEGRA234_MAIN_GPIO_PORT( 
L, 3, 1, 4), TEGRA234_MAIN_GPIO_PORT( M, 2, 0, 8), TEGRA234_MAIN_GPIO_PORT( N, 2, 1, 8), TEGRA234_MAIN_GPIO_PORT( P, 2, 2, 8), TEGRA234_MAIN_GPIO_PORT( Q, 2, 3, 8), TEGRA234_MAIN_GPIO_PORT( R, 2, 4, 6), TEGRA234_MAIN_GPIO_PORT( X, 1, 0, 8), TEGRA234_MAIN_GPIO_PORT( Y, 1, 1, 8), TEGRA234_MAIN_GPIO_PORT( Z, 1, 2, 8), TEGRA234_MAIN_GPIO_PORT(AC, 0, 1, 8), TEGRA234_MAIN_GPIO_PORT(AD, 0, 2, 4), TEGRA234_MAIN_GPIO_PORT(AE, 3, 3, 2), TEGRA234_MAIN_GPIO_PORT(AF, 3, 4, 4), TEGRA234_MAIN_GPIO_PORT(AG, 3, 2, 8), }; static const struct tegra_gpio_soc tegra234_main_soc = { .num_ports = ARRAY_SIZE(tegra234_main_ports), .ports = tegra234_main_ports, .name = "tegra234-gpio", .instance = 0, .num_irqs_per_bank = 8, .has_vm_support = true, }; #define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA234_AON_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra234_aon_ports[] = { TEGRA234_AON_GPIO_PORT(AA, 0, 4, 8), TEGRA234_AON_GPIO_PORT(BB, 0, 5, 4), TEGRA234_AON_GPIO_PORT(CC, 0, 2, 8), TEGRA234_AON_GPIO_PORT(DD, 0, 3, 3), TEGRA234_AON_GPIO_PORT(EE, 0, 0, 8), TEGRA234_AON_GPIO_PORT(GG, 0, 1, 1), }; static const struct tegra_gpio_soc tegra234_aon_soc = { .num_ports = ARRAY_SIZE(tegra234_aon_ports), .ports = tegra234_aon_ports, .name = "tegra234-gpio-aon", .instance = 1, .num_irqs_per_bank = 8, .has_gte = true, .has_vm_support = false, }; #define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA241_MAIN_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra241_main_ports[] = { TEGRA241_MAIN_GPIO_PORT(A, 0, 0, 8), TEGRA241_MAIN_GPIO_PORT(B, 0, 1, 8), TEGRA241_MAIN_GPIO_PORT(C, 0, 2, 2), TEGRA241_MAIN_GPIO_PORT(D, 0, 3, 6), TEGRA241_MAIN_GPIO_PORT(E, 0, 4, 8), TEGRA241_MAIN_GPIO_PORT(F, 1, 0, 8), TEGRA241_MAIN_GPIO_PORT(G, 1, 1, 8), TEGRA241_MAIN_GPIO_PORT(H, 1, 2, 8), TEGRA241_MAIN_GPIO_PORT(J, 1, 3, 8), TEGRA241_MAIN_GPIO_PORT(K, 1, 4, 4), TEGRA241_MAIN_GPIO_PORT(L, 1, 5, 6), }; static const struct tegra_gpio_soc tegra241_main_soc = { .num_ports = ARRAY_SIZE(tegra241_main_ports), .ports = tegra241_main_ports, .name = "tegra241-gpio", .instance = 0, .num_irqs_per_bank = 8, .has_vm_support = false, }; #define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \ [TEGRA241_AON_GPIO_PORT_##_name] = { \ .name = #_name, \ .bank = _bank, \ .port = _port, \ .pins = _pins, \ } static const struct tegra_gpio_port tegra241_aon_ports[] = { TEGRA241_AON_GPIO_PORT(AA, 0, 0, 8), TEGRA241_AON_GPIO_PORT(BB, 0, 0, 4), }; static const struct tegra_gpio_soc tegra241_aon_soc = { .num_ports = ARRAY_SIZE(tegra241_aon_ports), .ports = tegra241_aon_ports, .name = "tegra241-gpio-aon", .instance = 1, .num_irqs_per_bank = 8, .has_vm_support = false, }; static const struct of_device_id tegra186_gpio_of_match[] = { { .compatible = "nvidia,tegra186-gpio", .data = &tegra186_main_soc }, { .compatible = "nvidia,tegra186-gpio-aon", .data = &tegra186_aon_soc }, { .compatible = "nvidia,tegra194-gpio", .data = &tegra194_main_soc }, { .compatible = "nvidia,tegra194-gpio-aon", .data = &tegra194_aon_soc }, { .compatible = "nvidia,tegra234-gpio", .data = &tegra234_main_soc }, { .compatible = "nvidia,tegra234-gpio-aon", .data = &tegra234_aon_soc }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, tegra186_gpio_of_match); static const struct acpi_device_id tegra186_gpio_acpi_match[] = { { .id = "NVDA0108", .driver_data = (kernel_ulong_t)&tegra186_main_soc }, { .id = 
"NVDA0208", .driver_data = (kernel_ulong_t)&tegra186_aon_soc }, { .id = "NVDA0308", .driver_data = (kernel_ulong_t)&tegra194_main_soc }, { .id = "NVDA0408", .driver_data = (kernel_ulong_t)&tegra194_aon_soc }, { .id = "NVDA0508", .driver_data = (kernel_ulong_t)&tegra241_main_soc }, { .id = "NVDA0608", .driver_data = (kernel_ulong_t)&tegra241_aon_soc }, {} }; MODULE_DEVICE_TABLE(acpi, tegra186_gpio_acpi_match); static struct platform_driver tegra186_gpio_driver = { .driver = { .name = "tegra186-gpio", .of_match_table = tegra186_gpio_of_match, .acpi_match_table = tegra186_gpio_acpi_match, }, .probe = tegra186_gpio_probe, }; module_platform_driver(tegra186_gpio_driver); MODULE_DESCRIPTION("NVIDIA Tegra186 GPIO controller driver"); MODULE_AUTHOR("Thierry Reding <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-tegra186.c
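The port tables above have irregular pin counts, while the parent interrupt controller expects a fixed stride of eight interrupts per port; tegra186_gpio_child_offset_to_irq() bridges the two. Below is a minimal user-space sketch of that mapping, using a made-up four-port table rather than the driver's real SoC data:

#include <stdio.h>

/* Hypothetical port table; the real ones live in the tegra_gpio_soc data. */
struct port {
	const char *name;
	unsigned int pins;
};

static const struct port ports[] = {
	{ "A", 7 }, { "B", 7 }, { "C", 7 }, { "D", 6 },
};

/* Mirrors tegra186_gpio_child_offset_to_irq(): subtract each port's pin
 * count until the offset falls inside a port, then apply the fixed
 * 8-interrupt stride per port. */
static unsigned int child_offset_to_irq(unsigned int offset)
{
	unsigned int i;

	for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
		if (offset < ports[i].pins)
			break;
		offset -= ports[i].pins;
	}

	return offset + i * 8;
}

int main(void)
{
	/* Offset 9 is pin 2 of port B, so parent hwirq 1 * 8 + 2 = 10. */
	printf("offset 9 -> hwirq %u\n", child_offset_to_irq(9));
	return 0;
}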
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for GE FPGA based GPIO
 *
 * Author: Martyn Welch <[email protected]>
 *
 * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
 */

/*
 * TODO:
 *
 * Configuration of output modes (totem-pole/open-drain).
 * Interrupt configuration - interrupts are always generated, the FPGA relies
 * on the I/O interrupt controllers mask to stop them from being propagated.
 */

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>

#define GEF_GPIO_DIRECT		0x00
#define GEF_GPIO_IN		0x04
#define GEF_GPIO_OUT		0x08
#define GEF_GPIO_TRIG		0x0C
#define GEF_GPIO_POLAR_A	0x10
#define GEF_GPIO_POLAR_B	0x14
#define GEF_GPIO_INT_STAT	0x18
#define GEF_GPIO_OVERRUN	0x1C
#define GEF_GPIO_MODE		0x20

static const struct of_device_id gef_gpio_ids[] = {
	{ .compatible = "gef,sbc610-gpio", .data = (void *)19, },
	{ .compatible = "gef,sbc310-gpio", .data = (void *)6, },
	{ .compatible = "ge,imp3a-gpio", .data = (void *)16, },
	{ }
};
MODULE_DEVICE_TABLE(of, gef_gpio_ids);

static int __init gef_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gpio_chip *gc;
	void __iomem *regs;
	int ret;

	gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
			 NULL, NULL, regs + GEF_GPIO_DIRECT,
			 BGPIOF_BIG_ENDIAN_BYTE_ORDER);
	if (ret)
		return dev_err_probe(dev, ret, "bgpio_init failed\n");

	/* Setup pointers to chip functions */
	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
	if (!gc->label)
		return -ENOMEM;

	gc->base = -1;
	gc->ngpio = (uintptr_t)device_get_match_data(dev);

	/* This function adds a memory mapped GPIO chip */
	ret = devm_gpiochip_add_data(dev, gc, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "GPIO chip registration failed\n");

	return 0;
}

static struct platform_driver gef_gpio_driver = {
	.driver = {
		.name = "gef-gpio",
		.of_match_table = gef_gpio_ids,
	},
};
module_platform_driver_probe(gef_gpio_driver, gef_gpio_probe);

MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
MODULE_AUTHOR("Martyn Welch <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-ge.c
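gpio-ge.c stores the line count directly in the of_device_id .data pointer and recovers it in the probe with gc->ngpio = (uintptr_t)device_get_match_data(dev). Here is a small user-space sketch of that integer-in-a-pointer round trip; struct match_entry and match_data() are hypothetical stand-ins, not kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct of_device_id: the .data member is a void pointer,
 * so a small integer can be smuggled through it with casts. */
struct match_entry {
	const char *compatible;
	const void *data;
};

static const struct match_entry matches[] = {
	{ "gef,sbc610-gpio", (void *)19 },
	{ "gef,sbc310-gpio", (void *)6 },
	{ "ge,imp3a-gpio", (void *)16 },
};

/* Stand-in for device_get_match_data(): look up by compatible string. */
static const void *match_data(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
		if (!strcmp(matches[i].compatible, compatible))
			return matches[i].data;

	return NULL;
}

int main(void)
{
	/* Round-trip through uintptr_t, as the probe does for gc->ngpio. */
	unsigned int ngpio = (unsigned int)(uintptr_t)match_data("gef,sbc310-gpio");

	printf("ngpio = %u\n", ngpio);
	return 0;
}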
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2019 American Megatrends International LLC. * * Author: Karthikeyan Mani <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/gpio/driver.h> #include <linux/hashtable.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/string.h> #define ASPEED_SGPIO_CTRL 0x54 #define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16) #define ASPEED_SGPIO_ENABLE BIT(0) #define ASPEED_SGPIO_PINS_SHIFT 6 struct aspeed_sgpio_pdata { const u32 pin_mask; }; struct aspeed_sgpio { struct gpio_chip chip; struct device *dev; struct clk *pclk; raw_spinlock_t lock; void __iomem *base; int irq; }; struct aspeed_sgpio_bank { u16 val_regs; u16 rdata_reg; u16 irq_regs; u16 tolerance_regs; const char names[4][3]; }; /* * Note: The "value" register returns the input value when the GPIO is * configured as an input. * * The "rdata" register returns the output value when the GPIO is * configured as an output. */ static const struct aspeed_sgpio_bank aspeed_sgpio_banks[] = { { .val_regs = 0x0000, .rdata_reg = 0x0070, .irq_regs = 0x0004, .tolerance_regs = 0x0018, .names = { "A", "B", "C", "D" }, }, { .val_regs = 0x001C, .rdata_reg = 0x0074, .irq_regs = 0x0020, .tolerance_regs = 0x0034, .names = { "E", "F", "G", "H" }, }, { .val_regs = 0x0038, .rdata_reg = 0x0078, .irq_regs = 0x003C, .tolerance_regs = 0x0050, .names = { "I", "J", "K", "L" }, }, { .val_regs = 0x0090, .rdata_reg = 0x007C, .irq_regs = 0x0094, .tolerance_regs = 0x00A8, .names = { "M", "N", "O", "P" }, }, }; enum aspeed_sgpio_reg { reg_val, reg_rdata, reg_irq_enable, reg_irq_type0, reg_irq_type1, reg_irq_type2, reg_irq_status, reg_tolerance, }; #define GPIO_VAL_VALUE 0x00 #define GPIO_IRQ_ENABLE 0x00 #define GPIO_IRQ_TYPE0 0x04 #define GPIO_IRQ_TYPE1 0x08 #define GPIO_IRQ_TYPE2 0x0C #define GPIO_IRQ_STATUS 0x10 static void __iomem *bank_reg(struct aspeed_sgpio *gpio, const struct aspeed_sgpio_bank *bank, const enum aspeed_sgpio_reg reg) { switch (reg) { case reg_val: return gpio->base + bank->val_regs + GPIO_VAL_VALUE; case reg_rdata: return gpio->base + bank->rdata_reg; case reg_irq_enable: return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE; case reg_irq_type0: return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0; case reg_irq_type1: return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1; case reg_irq_type2: return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2; case reg_irq_status: return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; case reg_tolerance: return gpio->base + bank->tolerance_regs; default: /* acturally if code runs to here, it's an error case */ BUG(); } } #define GPIO_BANK(x) ((x) >> 6) #define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) #define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) static const struct aspeed_sgpio_bank *to_bank(unsigned int offset) { unsigned int bank; bank = GPIO_BANK(offset); WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks)); return &aspeed_sgpio_banks[bank]; } static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios) { bitmap_set(valid_mask, 0, ngpios); return 0; } static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios) { unsigned int i; /* input GPIOs are even bits */ for (i = 0; i < ngpios; i++) { if (i % 2) clear_bit(i, valid_mask); } } static bool aspeed_sgpio_is_input(unsigned int offset) { 
return !(offset % 2); } static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); unsigned long flags; enum aspeed_sgpio_reg reg; int rc = 0; raw_spin_lock_irqsave(&gpio->lock, flags); reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata; rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); void __iomem *addr_r, *addr_w; u32 reg = 0; if (aspeed_sgpio_is_input(offset)) return -EINVAL; /* Since this is an output, read the cached value from rdata, then * update val. */ addr_r = bank_reg(gpio, bank, reg_rdata); addr_w = bank_reg(gpio, bank, reg_val); reg = ioread32(addr_r); if (val) reg |= GPIO_BIT(offset); else reg &= ~GPIO_BIT(offset); iowrite32(reg, addr_w); return 0; } static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); unsigned long flags; raw_spin_lock_irqsave(&gpio->lock, flags); sgpio_set_value(gc, offset, val); raw_spin_unlock_irqrestore(&gpio->lock, flags); } static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) { return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL; } static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); unsigned long flags; int rc; /* No special action is required for setting the direction; we'll * error-out in sgpio_set_value if this isn't an output GPIO */ raw_spin_lock_irqsave(&gpio->lock, flags); rc = sgpio_set_value(gc, offset, val); raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) { return !!aspeed_sgpio_is_input(offset); } static void irqd_to_aspeed_sgpio_data(struct irq_data *d, struct aspeed_sgpio **gpio, const struct aspeed_sgpio_bank **bank, u32 *bit, int *offset) { struct aspeed_sgpio *internal; *offset = irqd_to_hwirq(d); internal = irq_data_get_irq_chip_data(d); WARN_ON(!internal); *gpio = internal; *bank = to_bank(*offset); *bit = GPIO_BIT(*offset); } static void aspeed_sgpio_irq_ack(struct irq_data *d) { const struct aspeed_sgpio_bank *bank; struct aspeed_sgpio *gpio; unsigned long flags; void __iomem *status_addr; int offset; u32 bit; irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset); status_addr = bank_reg(gpio, bank, reg_irq_status); raw_spin_lock_irqsave(&gpio->lock, flags); iowrite32(bit, status_addr); raw_spin_unlock_irqrestore(&gpio->lock, flags); } static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set) { const struct aspeed_sgpio_bank *bank; struct aspeed_sgpio *gpio; unsigned long flags; u32 reg, bit; void __iomem *addr; int offset; irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset); addr = bank_reg(gpio, bank, reg_irq_enable); /* Unmasking the IRQ */ if (set) gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d)); raw_spin_lock_irqsave(&gpio->lock, flags); reg = ioread32(addr); if (set) reg |= bit; else reg &= ~bit; iowrite32(reg, addr); raw_spin_unlock_irqrestore(&gpio->lock, flags); /* Masking the IRQ */ if (!set) gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d)); } static void aspeed_sgpio_irq_mask(struct irq_data *d) { aspeed_sgpio_irq_set_mask(d, false); } 
static void aspeed_sgpio_irq_unmask(struct irq_data *d) { aspeed_sgpio_irq_set_mask(d, true); } static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) { u32 type0 = 0; u32 type1 = 0; u32 type2 = 0; u32 bit, reg; const struct aspeed_sgpio_bank *bank; irq_flow_handler_t handler; struct aspeed_sgpio *gpio; unsigned long flags; void __iomem *addr; int offset; irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: type2 |= bit; fallthrough; case IRQ_TYPE_EDGE_RISING: type0 |= bit; fallthrough; case IRQ_TYPE_EDGE_FALLING: handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: type0 |= bit; fallthrough; case IRQ_TYPE_LEVEL_LOW: type1 |= bit; handler = handle_level_irq; break; default: return -EINVAL; } raw_spin_lock_irqsave(&gpio->lock, flags); addr = bank_reg(gpio, bank, reg_irq_type0); reg = ioread32(addr); reg = (reg & ~bit) | type0; iowrite32(reg, addr); addr = bank_reg(gpio, bank, reg_irq_type1); reg = ioread32(addr); reg = (reg & ~bit) | type1; iowrite32(reg, addr); addr = bank_reg(gpio, bank, reg_irq_type2); reg = ioread32(addr); reg = (reg & ~bit) | type2; iowrite32(reg, addr); raw_spin_unlock_irqrestore(&gpio->lock, flags); irq_set_handler_locked(d, handler); return 0; } static void aspeed_sgpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct irq_chip *ic = irq_desc_get_chip(desc); struct aspeed_sgpio *data = gpiochip_get_data(gc); unsigned int i, p; unsigned long reg; chained_irq_enter(ic, desc); for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { const struct aspeed_sgpio_bank *bank = &aspeed_sgpio_banks[i]; reg = ioread32(bank_reg(data, bank, reg_irq_status)); for_each_set_bit(p, &reg, 32) generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2); } chained_irq_exit(ic, desc); } static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p) { const struct aspeed_sgpio_bank *bank; struct aspeed_sgpio *gpio; u32 bit; int offset; irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset); seq_puts(p, dev_name(gpio->dev)); } static const struct irq_chip aspeed_sgpio_irq_chip = { .irq_ack = aspeed_sgpio_irq_ack, .irq_mask = aspeed_sgpio_irq_mask, .irq_unmask = aspeed_sgpio_irq_unmask, .irq_set_type = aspeed_sgpio_set_type, .irq_print_chip = aspeed_sgpio_irq_print_chip, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, struct platform_device *pdev) { int rc, i; const struct aspeed_sgpio_bank *bank; struct gpio_irq_chip *irq; rc = platform_get_irq(pdev, 0); if (rc < 0) return rc; gpio->irq = rc; /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins.
*/ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { bank = &aspeed_sgpio_banks[i]; /* disable irq enable bits */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable)); /* clear status bits */ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status)); } irq = &gpio->chip.irq; gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip); irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask; irq->handler = handle_bad_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = aspeed_sgpio_irq_handler; irq->parent_handler_data = gpio; irq->parents = &gpio->irq; irq->num_parents = 1; /* Apply default IRQ settings */ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { bank = &aspeed_sgpio_banks[i]; /* set falling or level-low irq */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); /* trigger type is edge */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1)); /* single edge trigger */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); } return 0; } static const struct aspeed_sgpio_pdata ast2400_sgpio_pdata = { .pin_mask = GENMASK(9, 6), }; static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip, unsigned int offset, bool enable) { struct aspeed_sgpio *gpio = gpiochip_get_data(chip); unsigned long flags; void __iomem *reg; u32 val; reg = bank_reg(gpio, to_bank(offset), reg_tolerance); raw_spin_lock_irqsave(&gpio->lock, flags); val = readl(reg); if (enable) val |= GPIO_BIT(offset); else val &= ~GPIO_BIT(offset); writel(val, reg); raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } static int aspeed_sgpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { unsigned long param = pinconf_to_config_param(config); u32 arg = pinconf_to_config_argument(config); if (param == PIN_CONFIG_PERSIST_STATE) return aspeed_sgpio_reset_tolerance(chip, offset, arg); return -ENOTSUPP; } static const struct aspeed_sgpio_pdata ast2600_sgpiom_pdata = { .pin_mask = GENMASK(10, 6), }; static const struct of_device_id aspeed_sgpio_of_table[] = { { .compatible = "aspeed,ast2400-sgpio", .data = &ast2400_sgpio_pdata, }, { .compatible = "aspeed,ast2500-sgpio", .data = &ast2400_sgpio_pdata, }, { .compatible = "aspeed,ast2600-sgpiom", .data = &ast2600_sgpiom_pdata, }, {} }; MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table); static int __init aspeed_sgpio_probe(struct platform_device *pdev) { u32 nr_gpios, sgpio_freq, sgpio_clk_div, gpio_cnt_regval, pin_mask; const struct aspeed_sgpio_pdata *pdata; struct aspeed_sgpio *gpio; unsigned long apb_freq; int rc; gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; gpio->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); gpio->dev = &pdev->dev; pdata = device_get_match_data(&pdev->dev); if (!pdata) return -EINVAL; pin_mask = pdata->pin_mask; rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios); if (rc < 0) { dev_err(&pdev->dev, "Could not read ngpios property\n"); return -EINVAL; } else if (nr_gpios % 8) { dev_err(&pdev->dev, "Number of GPIOs not multiple of 8: %d\n", nr_gpios); return -EINVAL; } rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq); if (rc < 0) { dev_err(&pdev->dev, "Could not read bus-frequency property\n"); return -EINVAL; } gpio->pclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(gpio->pclk)) { dev_err(&pdev->dev, "devm_clk_get failed\n"); return PTR_ERR(gpio->pclk); } apb_freq = clk_get_rate(gpio->pclk); /* * From the datasheet, * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1) 
* period = 2 * (GPIO254[31:16] + 1) / PCLK * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK) * frequency = PCLK / (2 * (GPIO254[31:16] + 1)) * frequency * 2 * (GPIO254[31:16] + 1) = PCLK * GPIO254[31:16] = PCLK / (frequency * 2) - 1 */ if (sgpio_freq == 0) return -EINVAL; sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1; if (sgpio_clk_div > (1 << 16) - 1) return -EINVAL; gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask; iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval | ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL); raw_spin_lock_init(&gpio->lock); gpio->chip.parent = &pdev->dev; gpio->chip.ngpio = nr_gpios * 2; gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask; gpio->chip.direction_input = aspeed_sgpio_dir_in; gpio->chip.direction_output = aspeed_sgpio_dir_out; gpio->chip.get_direction = aspeed_sgpio_get_direction; gpio->chip.request = NULL; gpio->chip.free = NULL; gpio->chip.get = aspeed_sgpio_get; gpio->chip.set = aspeed_sgpio_set; gpio->chip.set_config = aspeed_sgpio_set_config; gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; aspeed_sgpio_setup_irqs(gpio, pdev); rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); if (rc < 0) return rc; return 0; } static struct platform_driver aspeed_sgpio_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = aspeed_sgpio_of_table, }, }; module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe); MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
linux-master
drivers/gpio/gpio-aspeed-sgpio.c
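The probe's datasheet comment derives the SGPIO clock divider as GPIO254[31:16] = PCLK / (frequency * 2) - 1, bounded by the width of the 16-bit register field. Below is a standalone sketch of that arithmetic with illustrative clock rates; the real values come from the "bus-frequency" property and the clock framework:

#include <stdio.h>

/* Mirrors the divider computation in aspeed_sgpio_probe(); returns -1
 * where the driver would return -EINVAL. */
static int sgpio_clk_div(unsigned long pclk_hz, unsigned long bus_hz)
{
	unsigned long div;

	if (bus_hz == 0)
		return -1;

	/* GPIO254[31:16] = PCLK / (frequency * 2) - 1 */
	div = pclk_hz / (bus_hz * 2) - 1;

	/* The divider must fit in the 16-bit CLK_DIV field. */
	if (div > (1UL << 16) - 1)
		return -1;

	return (int)div;
}

int main(void)
{
	/* e.g. a 100 MHz APB clock driving a 1 MHz SGPIO bus: divider 49 */
	printf("divider = %d\n", sgpio_clk_div(100000000UL, 1000000UL));
	return 0;
}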
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GPIO driver for the Diamond Systems GPIO-MM
 * Copyright (C) 2016 William Breathitt Gray
 *
 * This driver supports the following Diamond Systems devices: GPIO-MM and
 * GPIO-MM-12.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/regmap.h>
#include <linux/types.h>

#include "gpio-i8255.h"

MODULE_IMPORT_NS(I8255);

#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)

static unsigned int base[MAX_NUM_GPIOMM];
static unsigned int num_gpiomm;
module_param_hw_array(base, uint, ioport, &num_gpiomm, 0);
MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");

#define GPIOMM_NUM_PPI 2

static const struct regmap_range gpiomm_volatile_ranges[] = {
	i8255_volatile_regmap_range(0x0),
	i8255_volatile_regmap_range(0x4),
};
static const struct regmap_access_table gpiomm_volatile_table = {
	.yes_ranges = gpiomm_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(gpiomm_volatile_ranges),
};
static const struct regmap_config gpiomm_regmap_config = {
	.reg_bits = 8,
	.reg_stride = 1,
	.val_bits = 8,
	.io_port = true,
	.max_register = 0x7,
	.volatile_table = &gpiomm_volatile_table,
	.cache_type = REGCACHE_FLAT,
};

#define GPIOMM_NGPIO 48
static const char *gpiomm_names[GPIOMM_NGPIO] = {
	"Port 1A0", "Port 1A1", "Port 1A2", "Port 1A3",
	"Port 1A4", "Port 1A5", "Port 1A6", "Port 1A7",
	"Port 1B0", "Port 1B1", "Port 1B2", "Port 1B3",
	"Port 1B4", "Port 1B5", "Port 1B6", "Port 1B7",
	"Port 1C0", "Port 1C1", "Port 1C2", "Port 1C3",
	"Port 1C4", "Port 1C5", "Port 1C6", "Port 1C7",
	"Port 2A0", "Port 2A1", "Port 2A2", "Port 2A3",
	"Port 2A4", "Port 2A5", "Port 2A6", "Port 2A7",
	"Port 2B0", "Port 2B1", "Port 2B2", "Port 2B3",
	"Port 2B4", "Port 2B5", "Port 2B6", "Port 2B7",
	"Port 2C0", "Port 2C1", "Port 2C2", "Port 2C3",
	"Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};

static int gpiomm_probe(struct device *dev, unsigned int id)
{
	const char *const name = dev_name(dev);
	struct i8255_regmap_config config = {};
	void __iomem *regs;

	if (!devm_request_region(dev, base[id], GPIOMM_EXTENT, name)) {
		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
			base[id], base[id] + GPIOMM_EXTENT);
		return -EBUSY;
	}

	regs = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
	if (!regs)
		return -ENOMEM;

	config.map = devm_regmap_init_mmio(dev, regs, &gpiomm_regmap_config);
	if (IS_ERR(config.map))
		return dev_err_probe(dev, PTR_ERR(config.map),
				     "Unable to initialize register map\n");

	config.parent = dev;
	config.num_ppi = GPIOMM_NUM_PPI;
	config.names = gpiomm_names;

	return devm_i8255_regmap_register(dev, &config);
}

static struct isa_driver gpiomm_driver = {
	.probe = gpiomm_probe,
	.driver = {
		.name = "gpio-mm"
	},
};

module_isa_driver(gpiomm_driver, num_gpiomm);

MODULE_AUTHOR("William Breathitt Gray <[email protected]>");
MODULE_DESCRIPTION("Diamond Systems GPIO-MM GPIO driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-gpio-mm.c
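The 48-entry gpiomm_names[] table follows a strict pattern: two i8255 PPIs, each contributing ports A, B and C of eight lines. The sketch below derives a name from a flat GPIO offset instead of spelling out the table; gpiomm_name() is a hypothetical user-space helper, not part of the driver:

#include <stdio.h>

/* Rebuild a "Port <ppi><port><line>" label from a flat offset, matching
 * the layout of gpiomm_names[] (offsets 0-47). */
static void gpiomm_name(unsigned int offset, char *buf, size_t len)
{
	unsigned int ppi = offset / 24;		/* 24 lines per i8255 */
	unsigned int port = (offset % 24) / 8;	/* 0 = A, 1 = B, 2 = C */
	unsigned int line = offset % 8;

	snprintf(buf, len, "Port %u%c%u", ppi + 1, 'A' + port, line);
}

int main(void)
{
	char buf[16];

	gpiomm_name(26, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "Port 2A2", as in the table */
	return 0;
}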
// SPDX-License-Identifier: GPL-2.0 #include <linux/acpi.h> #include <linux/bitmap.h> #include <linux/compat.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/gpio.h> #include <linux/gpio/driver.h> #include <linux/gpio/machine.h> #include <uapi/linux/gpio.h> #include "gpiolib-acpi.h" #include "gpiolib-cdev.h" #include "gpiolib-of.h" #include "gpiolib-swnode.h" #include "gpiolib-sysfs.h" #include "gpiolib.h" #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> /* Implementation infrastructure for GPIO interfaces. * * The GPIO programming interface allows for inlining speed-critical * get/set operations for common cases, so that access to SOC-integrated * GPIOs can sometimes cost only an instruction or two per bit. */ /* When debugging, extend minimal trust to callers and platform code. * Also emit diagnostic messages that may help initial bringup, when * board setup or driver bugs are most common. * * Otherwise, minimize overhead in what may be bitbanging codepaths. */ #ifdef DEBUG #define extra_checks 1 #else #define extra_checks 0 #endif /* Device and char device-related information */ static DEFINE_IDA(gpio_ida); static dev_t gpio_devt; #define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */ static int gpio_bus_match(struct device *dev, struct device_driver *drv) { struct fwnode_handle *fwnode = dev_fwnode(dev); /* * Only match if the fwnode doesn't already have a proper struct device * created for it. */ if (fwnode && fwnode->dev != dev) return 0; return 1; } static struct bus_type gpio_bus_type = { .name = "gpio", .match = gpio_bus_match, }; /* * Number of GPIOs to use for the fast path in set array */ #define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT /* gpio_lock prevents conflicts during gpio_desc[] table updates. * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. */ DEFINE_SPINLOCK(gpio_lock); static DEFINE_MUTEX(gpio_lookup_lock); static LIST_HEAD(gpio_lookup_list); LIST_HEAD(gpio_devices); static DEFINE_MUTEX(gpio_machine_hogs_mutex); static LIST_HEAD(gpio_machine_hogs); static void gpiochip_free_hogs(struct gpio_chip *gc); static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key); static void gpiochip_irqchip_remove(struct gpio_chip *gc); static int gpiochip_irqchip_init_hw(struct gpio_chip *gc); static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc); static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc); static bool gpiolib_initialized; static inline void desc_set_label(struct gpio_desc *d, const char *label) { d->label = label; } /** * gpio_to_desc - Convert a GPIO number to its descriptor * @gpio: global GPIO number * * Returns: * The GPIO descriptor associated with the given GPIO, or %NULL if no GPIO * with the given number exists in the system. 
*/ struct gpio_desc *gpio_to_desc(unsigned gpio) { struct gpio_device *gdev; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) { if (gdev->base <= gpio && gdev->base + gdev->ngpio > gpio) { spin_unlock_irqrestore(&gpio_lock, flags); return &gdev->descs[gpio - gdev->base]; } } spin_unlock_irqrestore(&gpio_lock, flags); if (!gpio_is_valid(gpio)) pr_warn("invalid GPIO %d\n", gpio); return NULL; } EXPORT_SYMBOL_GPL(gpio_to_desc); /** * gpiochip_get_desc - get the GPIO descriptor corresponding to the given * hardware number for this chip * @gc: GPIO chip * @hwnum: hardware number of the GPIO for this chip * * Returns: * A pointer to the GPIO descriptor or ``ERR_PTR(-EINVAL)`` if no GPIO exists * in the given chip for the specified hardware number. */ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum) { struct gpio_device *gdev = gc->gpiodev; if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL); return &gdev->descs[hwnum]; } EXPORT_SYMBOL_GPL(gpiochip_get_desc); /** * desc_to_gpio - convert a GPIO descriptor to the integer namespace * @desc: GPIO descriptor * * This should disappear in the future but is needed since we still * use GPIO numbers for error messages and sysfs nodes. * * Returns: * The global GPIO number for the GPIO specified by its descriptor. */ int desc_to_gpio(const struct gpio_desc *desc) { return desc->gdev->base + (desc - &desc->gdev->descs[0]); } EXPORT_SYMBOL_GPL(desc_to_gpio); /** * gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs * @desc: descriptor to return the chip of */ struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) { if (!desc || !desc->gdev) return NULL; return desc->gdev->chip; } EXPORT_SYMBOL_GPL(gpiod_to_chip); /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ static int gpiochip_find_base(int ngpio) { struct gpio_device *gdev; int base = GPIO_DYNAMIC_BASE; list_for_each_entry(gdev, &gpio_devices, list) { /* found a free space? */ if (gdev->base >= base + ngpio) break; /* nope, check the space right after the chip */ base = gdev->base + gdev->ngpio; if (base < GPIO_DYNAMIC_BASE) base = GPIO_DYNAMIC_BASE; } if (gpio_is_valid(base)) { pr_debug("%s: found new base at %d\n", __func__, base); return base; } else { pr_err("%s: cannot find free range\n", __func__); return -ENOSPC; } } /** * gpiod_get_direction - return the current direction of a GPIO * @desc: GPIO to get the direction of * * Returns 0 for output, 1 for input, or an error code in case of error. * * This function may sleep if gpiod_cansleep() is true. */ int gpiod_get_direction(struct gpio_desc *desc) { struct gpio_chip *gc; unsigned int offset; int ret; gc = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); /* * Open drain emulation using input mode may incorrectly report * input here, fix that up. */ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && test_bit(FLAG_IS_OUT, &desc->flags)) return 0; if (!gc->get_direction) return -ENOTSUPP; ret = gc->get_direction(gc, offset); if (ret < 0) return ret; /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */ if (ret > 0) ret = 1; assign_bit(FLAG_IS_OUT, &desc->flags, !ret); return ret; } EXPORT_SYMBOL_GPL(gpiod_get_direction); /* * Add a new chip to the global chips list, keeping the list of chips sorted * by range(means [base, base + ngpio - 1]) order. * * Return -EBUSY if the new chip overlaps with some other chip's integer * space. 
*/ static int gpiodev_add_to_list(struct gpio_device *gdev) { struct gpio_device *prev, *next; if (list_empty(&gpio_devices)) { /* initial entry in list */ list_add_tail(&gdev->list, &gpio_devices); return 0; } next = list_first_entry(&gpio_devices, struct gpio_device, list); if (gdev->base + gdev->ngpio <= next->base) { /* add before first entry */ list_add(&gdev->list, &gpio_devices); return 0; } prev = list_last_entry(&gpio_devices, struct gpio_device, list); if (prev->base + prev->ngpio <= gdev->base) { /* add behind last entry */ list_add_tail(&gdev->list, &gpio_devices); return 0; } list_for_each_entry_safe(prev, next, &gpio_devices, list) { /* at the end of the list */ if (&next->list == &gpio_devices) break; /* add between prev and next */ if (prev->base + prev->ngpio <= gdev->base && gdev->base + gdev->ngpio <= next->base) { list_add(&gdev->list, &prev->list); return 0; } } return -EBUSY; } /* * Convert a GPIO name to its descriptor * Note that there is no guarantee that GPIO names are globally unique! * Hence this function will return, if it exists, a reference to the first GPIO * line found that matches the given name. */ static struct gpio_desc *gpio_name_to_desc(const char * const name) { struct gpio_device *gdev; unsigned long flags; if (!name) return NULL; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) { struct gpio_desc *desc; for_each_gpio_desc(gdev->chip, desc) { if (desc->name && !strcmp(desc->name, name)) { spin_unlock_irqrestore(&gpio_lock, flags); return desc; } } } spin_unlock_irqrestore(&gpio_lock, flags); return NULL; } /* * Take the names from gc->names and assign them to their GPIO descriptors. * Warn if a name is already used for a GPIO line on a different GPIO chip. * * Note that: * 1. Non-unique names are still accepted, * 2. Name collisions within the same GPIO chip are not reported. */ static int gpiochip_set_desc_names(struct gpio_chip *gc) { struct gpio_device *gdev = gc->gpiodev; int i; /* First check all names if they are unique */ for (i = 0; i != gc->ngpio; ++i) { struct gpio_desc *gpio; gpio = gpio_name_to_desc(gc->names[i]); if (gpio) dev_warn(&gdev->dev, "Detected name collision for GPIO name '%s'\n", gc->names[i]); } /* Then add all names to the GPIO descriptors */ for (i = 0; i != gc->ngpio; ++i) gdev->descs[i].name = gc->names[i]; return 0; } /* * gpiochip_set_names - Set GPIO line names using device properties * @chip: GPIO chip whose lines should be named, if possible * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned * names belong to the underlying firmware node and should not be released * by the caller. */ static int gpiochip_set_names(struct gpio_chip *chip) { struct gpio_device *gdev = chip->gpiodev; struct device *dev = &gdev->dev; const char **names; int ret, i; int count; count = device_property_string_array_count(dev, "gpio-line-names"); if (count < 0) return 0; /* * When offset is set in the driver side we assume the driver internally * is using more than one gpiochip per the same device. We have to stop * setting friendly names if the specified ones with 'gpio-line-names' * are less than the offset in the device itself. This means all the * lines are not present for every single pin within all the internal * gpiochips. 
*/ if (count <= chip->offset) { dev_warn(dev, "gpio-line-names too short (length %d), cannot map names for the gpiochip at offset %u\n", count, chip->offset); return 0; } names = kcalloc(count, sizeof(*names), GFP_KERNEL); if (!names) return -ENOMEM; ret = device_property_read_string_array(dev, "gpio-line-names", names, count); if (ret < 0) { dev_warn(dev, "failed to read GPIO line names\n"); kfree(names); return ret; } /* * When more that one gpiochip per device is used, 'count' can * contain at most number gpiochips x chip->ngpio. We have to * correctly distribute all defined lines taking into account * chip->offset as starting point from where we will assign * the names to pins from the 'names' array. Since property * 'gpio-line-names' cannot contains gaps, we have to be sure * we only assign those pins that really exists since chip->ngpio * can be different of the chip->offset. */ count = (count > chip->offset) ? count - chip->offset : count; if (count > chip->ngpio) count = chip->ngpio; for (i = 0; i < count; i++) { /* * Allow overriding "fixed" names provided by the GPIO * provider. The "fixed" names are more often than not * generic and less informative than the names given in * device properties. */ if (names[chip->offset + i] && names[chip->offset + i][0]) gdev->descs[i].name = names[chip->offset + i]; } kfree(names); return 0; } static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc) { unsigned long *p; p = bitmap_alloc(gc->ngpio, GFP_KERNEL); if (!p) return NULL; /* Assume by default all GPIOs are valid */ bitmap_fill(p, gc->ngpio); return p; } static void gpiochip_free_mask(unsigned long **p) { bitmap_free(*p); *p = NULL; } static unsigned int gpiochip_count_reserved_ranges(struct gpio_chip *gc) { struct device *dev = &gc->gpiodev->dev; int size; /* Format is "start, count, ..." */ size = device_property_count_u32(dev, "gpio-reserved-ranges"); if (size > 0 && size % 2 == 0) return size; return 0; } static int gpiochip_apply_reserved_ranges(struct gpio_chip *gc) { struct device *dev = &gc->gpiodev->dev; unsigned int size; u32 *ranges; int ret; size = gpiochip_count_reserved_ranges(gc); if (size == 0) return 0; ranges = kmalloc_array(size, sizeof(*ranges), GFP_KERNEL); if (!ranges) return -ENOMEM; ret = device_property_read_u32_array(dev, "gpio-reserved-ranges", ranges, size); if (ret) { kfree(ranges); return ret; } while (size) { u32 count = ranges[--size]; u32 start = ranges[--size]; if (start >= gc->ngpio || start + count > gc->ngpio) continue; bitmap_clear(gc->valid_mask, start, count); } kfree(ranges); return 0; } static int gpiochip_init_valid_mask(struct gpio_chip *gc) { int ret; if (!(gpiochip_count_reserved_ranges(gc) || gc->init_valid_mask)) return 0; gc->valid_mask = gpiochip_allocate_mask(gc); if (!gc->valid_mask) return -ENOMEM; ret = gpiochip_apply_reserved_ranges(gc); if (ret) return ret; if (gc->init_valid_mask) return gc->init_valid_mask(gc, gc->valid_mask, gc->ngpio); return 0; } static void gpiochip_free_valid_mask(struct gpio_chip *gc) { gpiochip_free_mask(&gc->valid_mask); } static int gpiochip_add_pin_ranges(struct gpio_chip *gc) { /* * Device Tree platforms are supposed to use "gpio-ranges" * property. This check ensures that the ->add_pin_ranges() * won't be called for them. 
*/ if (device_property_present(&gc->gpiodev->dev, "gpio-ranges")) return 0; if (gc->add_pin_ranges) return gc->add_pin_ranges(gc); return 0; } bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { /* No mask means all valid */ if (likely(!gc->valid_mask)) return true; return test_bit(offset, gc->valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_line_is_valid); static void gpiodev_release(struct device *dev) { struct gpio_device *gdev = to_gpio_device(dev); unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); spin_unlock_irqrestore(&gpio_lock, flags); ida_free(&gpio_ida, gdev->id); kfree_const(gdev->label); kfree(gdev->descs); kfree(gdev); } #ifdef CONFIG_GPIO_CDEV #define gcdev_register(gdev, devt) gpiolib_cdev_register((gdev), (devt)) #define gcdev_unregister(gdev) gpiolib_cdev_unregister((gdev)) #else /* * gpiolib_cdev_register() indirectly calls device_add(), which is still * required even when cdev is not selected. */ #define gcdev_register(gdev, devt) device_add(&(gdev)->dev) #define gcdev_unregister(gdev) device_del(&(gdev)->dev) #endif static int gpiochip_setup_dev(struct gpio_device *gdev) { struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev); int ret; /* * If fwnode doesn't belong to another device, it's safe to clear its * initialized flag. */ if (fwnode && !fwnode->dev) fwnode_dev_initialized(fwnode, false); ret = gcdev_register(gdev, gpio_devt); if (ret) return ret; /* From this point, the .release() function cleans up gpio_device */ gdev->dev.release = gpiodev_release; ret = gpiochip_sysfs_register(gdev); if (ret) goto err_remove_device; dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base, gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic"); return 0; err_remove_device: gcdev_unregister(gdev); return ret; } static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog) { struct gpio_desc *desc; int rv; desc = gpiochip_get_desc(gc, hog->chip_hwnum); if (IS_ERR(desc)) { chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__, PTR_ERR(desc)); return; } if (test_bit(FLAG_IS_HOGGED, &desc->flags)) return; rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags); if (rv) gpiod_err(desc, "%s: unable to hog GPIO line (%s:%u): %d\n", __func__, gc->label, hog->chip_hwnum, rv); } static void machine_gpiochip_add(struct gpio_chip *gc) { struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); list_for_each_entry(hog, &gpio_machine_hogs, list) { if (!strcmp(gc->label, hog->chip_label)) gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); } static void gpiochip_setup_devs(void) { struct gpio_device *gdev; int ret; list_for_each_entry(gdev, &gpio_devices, list) { ret = gpiochip_setup_dev(gdev); if (ret) dev_err(&gdev->dev, "Failed to initialize gpio device (%d)\n", ret); } } static void gpiochip_set_data(struct gpio_chip *gc, void *data) { gc->gpiodev->data = data; } /** * gpiochip_get_data() - get per-subdriver data for the chip * @gc: GPIO chip * * Returns: * The per-subdriver data for the chip. */ void *gpiochip_get_data(struct gpio_chip *gc) { return gc->gpiodev->data; } EXPORT_SYMBOL_GPL(gpiochip_get_data); int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev) { u32 ngpios = gc->ngpio; int ret; if (ngpios == 0) { ret = device_property_read_u32(dev, "ngpios", &ngpios); if (ret == -ENODATA) /* * -ENODATA means that there is no property found and * we want to issue the error message to the user. 
* Besides that, we want to return different error code * to state that supplied value is not valid. */ ngpios = 0; else if (ret) return ret; gc->ngpio = ngpios; } if (gc->ngpio == 0) { chip_err(gc, "tried to insert a GPIO chip with zero lines\n"); return -EINVAL; } if (gc->ngpio > FASTPATH_NGPIO) chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n", gc->ngpio, FASTPATH_NGPIO); return 0; } EXPORT_SYMBOL_GPL(gpiochip_get_ngpios); int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key) { struct gpio_device *gdev; unsigned long flags; unsigned int i; int base = 0; int ret = 0; /* * First: allocate and populate the internal stat container, and * set up the struct device. */ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL); if (!gdev) return -ENOMEM; gdev->dev.bus = &gpio_bus_type; gdev->dev.parent = gc->parent; gdev->chip = gc; gc->gpiodev = gdev; gpiochip_set_data(gc, data); /* * If the calling driver did not initialize firmware node, * do it here using the parent device, if any. */ if (gc->fwnode) device_set_node(&gdev->dev, gc->fwnode); else if (gc->parent) device_set_node(&gdev->dev, dev_fwnode(gc->parent)); gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL); if (gdev->id < 0) { ret = gdev->id; goto err_free_gdev; } ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); if (ret) goto err_free_ida; device_initialize(&gdev->dev); if (gc->parent && gc->parent->driver) gdev->owner = gc->parent->driver->owner; else if (gc->owner) /* TODO: remove chip->owner */ gdev->owner = gc->owner; else gdev->owner = THIS_MODULE; ret = gpiochip_get_ngpios(gc, &gdev->dev); if (ret) goto err_free_dev_name; gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL); if (!gdev->descs) { ret = -ENOMEM; goto err_free_dev_name; } gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); if (!gdev->label) { ret = -ENOMEM; goto err_free_descs; } gdev->ngpio = gc->ngpio; spin_lock_irqsave(&gpio_lock, flags); /* * TODO: this allocates a Linux GPIO number base in the global * GPIO numberspace for this chip. In the long run we want to * get *rid* of this numberspace and use only descriptors, but * it may be a pipe dream. It will not happen before we get rid * of the sysfs interface anyways. */ base = gc->base; if (base < 0) { base = gpiochip_find_base(gc->ngpio); if (base < 0) { spin_unlock_irqrestore(&gpio_lock, flags); ret = base; base = 0; goto err_free_label; } /* * TODO: it should not be necessary to reflect the assigned * base outside of the GPIO subsystem. Go over drivers and * see if anyone makes use of this, else drop this and assign * a poison instead. 
*/ gc->base = base; } else { dev_warn(&gdev->dev, "Static allocation of GPIO base is deprecated, use dynamic allocation.\n"); } gdev->base = base; ret = gpiodev_add_to_list(gdev); if (ret) { spin_unlock_irqrestore(&gpio_lock, flags); chip_err(gc, "GPIO integer space overlap, cannot add chip\n"); goto err_free_label; } for (i = 0; i < gc->ngpio; i++) gdev->descs[i].gdev = gdev; spin_unlock_irqrestore(&gpio_lock, flags); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier); init_rwsem(&gdev->sem); #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); #endif if (gc->names) { ret = gpiochip_set_desc_names(gc); if (ret) goto err_remove_from_list; } ret = gpiochip_set_names(gc); if (ret) goto err_remove_from_list; ret = gpiochip_init_valid_mask(gc); if (ret) goto err_remove_from_list; ret = of_gpiochip_add(gc); if (ret) goto err_free_gpiochip_mask; for (i = 0; i < gc->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; if (gc->get_direction && gpiochip_line_is_valid(gc, i)) { assign_bit(FLAG_IS_OUT, &desc->flags, !gc->get_direction(gc, i)); } else { assign_bit(FLAG_IS_OUT, &desc->flags, !gc->direction_input); } } ret = gpiochip_add_pin_ranges(gc); if (ret) goto err_remove_of_chip; acpi_gpiochip_add(gc); machine_gpiochip_add(gc); ret = gpiochip_irqchip_init_valid_mask(gc); if (ret) goto err_remove_acpi_chip; ret = gpiochip_irqchip_init_hw(gc); if (ret) goto err_remove_acpi_chip; ret = gpiochip_add_irqchip(gc, lock_key, request_key); if (ret) goto err_remove_irqchip_mask; /* * By first adding the chardev, and then adding the device, * we get a device node entry in sysfs under * /sys/bus/gpio/devices/gpiochipN/dev that can be used for * coldplug of device nodes and other udev business. * We can do this only if gpiolib has been initialized. * Otherwise, defer until later. */ if (gpiolib_initialized) { ret = gpiochip_setup_dev(gdev); if (ret) goto err_remove_irqchip; } return 0; err_remove_irqchip: gpiochip_irqchip_remove(gc); err_remove_irqchip_mask: gpiochip_irqchip_free_valid_mask(gc); err_remove_acpi_chip: acpi_gpiochip_remove(gc); err_remove_of_chip: gpiochip_free_hogs(gc); of_gpiochip_remove(gc); err_free_gpiochip_mask: gpiochip_remove_pin_ranges(gc); gpiochip_free_valid_mask(gc); if (gdev->dev.release) { /* release() has been registered by gpiochip_setup_dev() */ gpio_device_put(gdev); goto err_print_message; } err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); spin_unlock_irqrestore(&gpio_lock, flags); err_free_label: kfree_const(gdev->label); err_free_descs: kfree(gdev->descs); err_free_dev_name: kfree(dev_name(&gdev->dev)); err_free_ida: ida_free(&gpio_ida, gdev->id); err_free_gdev: kfree(gdev); err_print_message: /* failures here can mean systems won't boot... */ if (ret != -EPROBE_DEFER) { pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, base, base + (int)gc->ngpio - 1, gc->label ? : "generic", ret); } return ret; } EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key); /** * gpiochip_remove() - unregister a gpio_chip * @gc: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. */ void gpiochip_remove(struct gpio_chip *gc) { struct gpio_device *gdev = gc->gpiodev; unsigned long flags; unsigned int i; down_write(&gdev->sem); /* FIXME: should the legacy sysfs handling be moved to gpio_device? 
*/ gpiochip_sysfs_unregister(gdev); gpiochip_free_hogs(gc); /* Numb the device, cancelling all outstanding operations */ gdev->chip = NULL; gpiochip_irqchip_remove(gc); acpi_gpiochip_remove(gc); of_gpiochip_remove(gc); gpiochip_remove_pin_ranges(gc); gpiochip_free_valid_mask(gc); /* * We accept no more calls into the driver from this point, so * NULL the driver data pointer. */ gpiochip_set_data(gc, NULL); spin_lock_irqsave(&gpio_lock, flags); for (i = 0; i < gdev->ngpio; i++) { if (gpiochip_is_requested(gc, i)) break; } spin_unlock_irqrestore(&gpio_lock, flags); if (i != gdev->ngpio) dev_crit(&gdev->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); /* * The gpiochip side puts its use of the device to rest here: * if there are no userspace clients, the chardev and device will * be removed, else it will be dangling until the last user is * gone. */ gcdev_unregister(gdev); up_write(&gdev->sem); gpio_device_put(gdev); } EXPORT_SYMBOL_GPL(gpiochip_remove); /** * gpiochip_find() - iterator for locating a specific gpio_chip * @data: data to pass to match function * @match: Callback function to check gpio_chip * * Similar to bus_find_device. It returns a reference to a gpio_chip as * determined by a user supplied @match callback. The callback should return * 0 if the device doesn't match and non-zero if it does. If the callback is * non-zero, this function will return to the caller and not iterate over any * more gpio_chips. */ struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *gc, void *data)) { struct gpio_device *gdev; struct gpio_chip *gc = NULL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) if (gdev->chip && match(gdev->chip, data)) { gc = gdev->chip; break; } spin_unlock_irqrestore(&gpio_lock, flags); return gc; } EXPORT_SYMBOL_GPL(gpiochip_find); static int gpiochip_match_name(struct gpio_chip *gc, void *data) { const char *name = data; return !strcmp(gc->label, name); } static struct gpio_chip *find_chip_by_name(const char *name) { return gpiochip_find((void *)name, gpiochip_match_name); } #ifdef CONFIG_GPIOLIB_IRQCHIP /* * The following is irqchip helper code for gpiochips. 
*/ static int gpiochip_irqchip_init_hw(struct gpio_chip *gc) { struct gpio_irq_chip *girq = &gc->irq; if (!girq->init_hw) return 0; return girq->init_hw(gc); } static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) { struct gpio_irq_chip *girq = &gc->irq; if (!girq->init_valid_mask) return 0; girq->valid_mask = gpiochip_allocate_mask(gc); if (!girq->valid_mask) return -ENOMEM; girq->init_valid_mask(gc, girq->valid_mask, gc->ngpio); return 0; } static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { gpiochip_free_mask(&gc->irq.valid_mask); } bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset) { if (!gpiochip_line_is_valid(gc, offset)) return false; /* No mask means all valid */ if (likely(!gc->irq.valid_mask)) return true; return test_bit(offset, gc->irq.valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /** * gpiochip_set_hierarchical_irqchip() - connects a hierarchical irqchip * to a gpiochip * @gc: the gpiochip to set the irqchip hierarchical handler to * @irqchip: the irqchip to handle this level of the hierarchy, the interrupt * will then percolate up to the parent */ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip) { /* DT will deal with mapping each IRQ as we go along */ if (is_of_node(gc->irq.fwnode)) return; /* * This is for legacy and boardfile "irqchip" fwnodes: allocate * irqs upfront instead of dynamically since we don't have the * dynamic type of allocation that hardware description languages * provide. Once all GPIO drivers using board files are gone from * the kernel we can delete this code, but for a transitional period * it is necessary to keep this around. */ if (is_fwnode_irqchip(gc->irq.fwnode)) { int i; int ret; for (i = 0; i < gc->ngpio; i++) { struct irq_fwspec fwspec; unsigned int parent_hwirq; unsigned int parent_type; struct gpio_irq_chip *girq = &gc->irq; /* * We call the child to parent translation function * only to check if the child IRQ is valid or not. * Just pick the rising edge type here as that is what * we likely need to support. 
 */
			ret = girq->child_to_parent_hwirq(gc, i,
							  IRQ_TYPE_EDGE_RISING,
							  &parent_hwirq,
							  &parent_type);
			if (ret) {
				chip_err(gc, "skip set-up on hwirq %d\n", i);
				continue;
			}

			fwspec.fwnode = gc->irq.fwnode;
			/* This is the hwirq for the GPIO line side of things */
			fwspec.param[0] = girq->child_offset_to_irq(gc, i);
			/* Just pick something */
			fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
			fwspec.param_count = 2;
			ret = irq_domain_alloc_irqs(gc->irq.domain, 1,
						    NUMA_NO_NODE, &fwspec);
			if (ret < 0) {
				chip_err(gc,
					 "cannot allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
					 i, parent_hwirq, ret);
			}
		}
		/* Known fwnode type, do not fall through to the error print */
		return;
	}

	chip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
}

static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
						   struct irq_fwspec *fwspec,
						   unsigned long *hwirq,
						   unsigned int *type)
{
	/* We support standard DT translation */
	if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2)
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	/* This is for board files and others not using DT */
	if (is_fwnode_irqchip(fwspec->fwnode)) {
		int ret;

		ret = irq_domain_translate_twocell(d, fwspec, hwirq, type);
		if (ret)
			return ret;
		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}
	return -EINVAL;
}

static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
					       unsigned int irq,
					       unsigned int nr_irqs,
					       void *data)
{
	struct gpio_chip *gc = d->host_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = data;
	union gpio_irq_fwspec gpio_parent_fwspec = {};
	unsigned int parent_hwirq;
	unsigned int parent_type;
	struct gpio_irq_chip *girq = &gc->irq;
	int ret;

	/*
	 * The nr_irqs parameter is always one except for PCI multi-MSI
	 * so this should not happen.
	 */
	WARN_ON(nr_irqs != 1);

	ret = gc->irq.child_irq_domain_ops.translate(d, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);

	ret = girq->child_to_parent_hwirq(gc, hwirq, type,
					  &parent_hwirq, &parent_type);
	if (ret) {
		chip_err(gc, "can't look up hwirq %lu\n", hwirq);
		return ret;
	}
	chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);

	/*
	 * We set handle_bad_irq because the .set_type() should
	 * always be invoked and set the right type of handler.
	 */
	irq_domain_set_info(d, irq, hwirq, gc->irq.chip, gc,
			    girq->handler, NULL, NULL);
	irq_set_probe(irq);

	/* This parent only handles asserted level IRQs */
	ret = girq->populate_parent_alloc_arg(gc, &gpio_parent_fwspec,
					      parent_hwirq, parent_type);
	if (ret)
		return ret;

	chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
		 irq, parent_hwirq);
	irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
	ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec);
	/*
	 * If the parent irqdomain is msi, the interrupts have already
	 * been allocated, so the EEXIST is good.
*/ if (irq_domain_is_msi(d->parent) && (ret == -EEXIST)) ret = 0; if (ret) chip_err(gc, "failed to allocate parent hwirq %d for hwirq %lu\n", parent_hwirq, hwirq); return ret; } static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc, unsigned int offset) { return offset; } static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops) { ops->activate = gpiochip_irq_domain_activate; ops->deactivate = gpiochip_irq_domain_deactivate; ops->alloc = gpiochip_hierarchy_irq_domain_alloc; /* * We only allow overriding the translate() and free() functions for * hierarchical chips, and this should only be done if the user * really need something other than 1:1 translation for translate() * callback and free if user wants to free up any resources which * were allocated during callbacks, for example populate_parent_alloc_arg. */ if (!ops->translate) ops->translate = gpiochip_hierarchy_irq_domain_translate; if (!ops->free) ops->free = irq_domain_free_irqs_common; } static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc) { struct irq_domain *domain; if (!gc->irq.child_to_parent_hwirq || !gc->irq.fwnode) { chip_err(gc, "missing irqdomain vital data\n"); return ERR_PTR(-EINVAL); } if (!gc->irq.child_offset_to_irq) gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop; if (!gc->irq.populate_parent_alloc_arg) gc->irq.populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell; gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops); domain = irq_domain_create_hierarchy( gc->irq.parent_domain, 0, gc->ngpio, gc->irq.fwnode, &gc->irq.child_irq_domain_ops, gc); if (!domain) return ERR_PTR(-ENOMEM); gpiochip_set_hierarchical_irqchip(gc, gc->irq.chip); return domain; } static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) { return !!gc->irq.parent_domain; } int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, unsigned int parent_type) { struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 2; fwspec->param[0] = parent_hwirq; fwspec->param[1] = parent_type; return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell); int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, unsigned int parent_type) { struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 4; fwspec->param[0] = 0; fwspec->param[1] = parent_hwirq; fwspec->param[2] = 0; fwspec->param[3] = parent_type; return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell); #else static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc) { return ERR_PTR(-EINVAL); } static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) { return false; } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ /** * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip * @d: the irqdomain used by this irqchip * @irq: the global irq number used by this GPIO irqchip irq * @hwirq: the local IRQ/GPIO line offset on this gpiochip * * This function will set up the mapping for a certain IRQ line on a * gpiochip by assigning the gpiochip as chip data, and using the irqchip * stored inside the gpiochip. 
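 *
 * Illustrative sketch (not taken from this file): drivers that build
 * their own simple irqdomain can reuse this as the .map callback,
 * together with its .unmap counterpart, exactly as gpiochip_domain_ops
 * below does:
 *
 *	static const struct irq_domain_ops foo_irq_domain_ops = {
 *		.map	= gpiochip_irq_map,
 *		.unmap	= gpiochip_irq_unmap,
 *		.xlate	= irq_domain_xlate_twocell,
 *	};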
 */
int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
		     irq_hw_number_t hwirq)
{
	struct gpio_chip *gc = d->host_data;
	int ret = 0;

	if (!gpiochip_irqchip_irq_valid(gc, hwirq))
		return -ENXIO;

	irq_set_chip_data(irq, gc);
	/*
	 * This lock class tells lockdep that GPIO irqs are in a different
	 * category than their parents, so it won't report false recursion.
	 */
	irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
	irq_set_chip_and_handler(irq, gc->irq.chip, gc->irq.handler);
	/* Chips that use nested thread handlers have them marked */
	if (gc->irq.threaded)
		irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	if (gc->irq.num_parents == 1)
		ret = irq_set_parent(irq, gc->irq.parents[0]);
	else if (gc->irq.map)
		ret = irq_set_parent(irq, gc->irq.map[hwirq]);

	if (ret < 0)
		return ret;

	/*
	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
	 * is passed as default type.
	 */
	if (gc->irq.default_type != IRQ_TYPE_NONE)
		irq_set_irq_type(irq, gc->irq.default_type);

	return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_irq_map);

void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	struct gpio_chip *gc = d->host_data;

	if (gc->irq.threaded)
		irq_set_nested_thread(irq, 0);
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_unmap);

static const struct irq_domain_ops gpiochip_domain_ops = {
	.map	= gpiochip_irq_map,
	.unmap	= gpiochip_irq_unmap,
	/* Virtually all GPIO irqchips use two-cell translation */
	.xlate	= irq_domain_xlate_twocell,
};

static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
{
	struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev);
	struct irq_domain *domain;

	domain = irq_domain_create_simple(fwnode, gc->ngpio, gc->irq.first,
					  &gpiochip_domain_ops, gc);
	if (!domain)
		return ERR_PTR(-EINVAL);

	return domain;
}

/*
 * TODO: move these activate/deactivate callbacks under the hierarchical
 * irqchip implementation as static functions once SPMI and SSBI (the only
 * external users) are phased over.
 */
/**
 * gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ
 * @domain: The IRQ domain used by this IRQ chip
 * @data: Outermost irq_data associated with the IRQ
 * @reserve: If set, only reserve an interrupt vector instead of assigning one
 *
 * This function is a wrapper that calls gpiochip_lock_as_irq() and is to be
 * used as the activate function for the &struct irq_domain_ops. The host_data
 * for the IRQ domain must be the &struct gpio_chip.
 */
int gpiochip_irq_domain_activate(struct irq_domain *domain,
				 struct irq_data *data, bool reserve)
{
	struct gpio_chip *gc = domain->host_data;
	unsigned int hwirq = irqd_to_hwirq(data);

	return gpiochip_lock_as_irq(gc, hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);

/**
 * gpiochip_irq_domain_deactivate() - Unlock a GPIO used as an IRQ
 * @domain: The IRQ domain used by this IRQ chip
 * @data: Outermost irq_data associated with the IRQ
 *
 * This function is a wrapper that will call gpiochip_unlock_as_irq() and is to
 * be used as the deactivate function for the &struct irq_domain_ops. The
 * host_data for the IRQ domain must be the &struct gpio_chip.
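 *
 * A minimal sketch, assuming a driver that supplies its own hierarchical
 * domain ops: the two wrappers are normally installed as a pair, so that
 * every mapped line is locked for IRQ use on activate and released again
 * on deactivate:
 *
 *	ops->activate = gpiochip_irq_domain_activate;
 *	ops->deactivate = gpiochip_irq_domain_deactivate;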
*/ void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data) { struct gpio_chip *gc = domain->host_data; unsigned int hwirq = irqd_to_hwirq(data); return gpiochip_unlock_as_irq(gc, hwirq); } EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate); static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset) { struct irq_domain *domain = gc->irq.domain; #ifdef CONFIG_GPIOLIB_IRQCHIP /* * Avoid race condition with other code, which tries to lookup * an IRQ before the irqchip has been properly registered, * i.e. while gpiochip is still being brought up. */ if (!gc->irq.initialized) return -EPROBE_DEFER; #endif if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (irq_domain_is_hierarchy(domain)) { struct irq_fwspec spec; spec.fwnode = domain->fwnode; spec.param_count = 2; spec.param[0] = gc->irq.child_offset_to_irq(gc, offset); spec.param[1] = IRQ_TYPE_NONE; return irq_create_fwspec_mapping(&spec); } #endif return irq_create_mapping(domain, offset); } int gpiochip_irq_reqres(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); return gpiochip_reqres_irq(gc, hwirq); } EXPORT_SYMBOL(gpiochip_irq_reqres); void gpiochip_irq_relres(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); gpiochip_relres_irq(gc, hwirq); } EXPORT_SYMBOL(gpiochip_irq_relres); static void gpiochip_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); if (gc->irq.irq_mask) gc->irq.irq_mask(d); gpiochip_disable_irq(gc, hwirq); } static void gpiochip_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); gpiochip_enable_irq(gc, hwirq); if (gc->irq.irq_unmask) gc->irq.irq_unmask(d); } static void gpiochip_irq_enable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); gpiochip_enable_irq(gc, hwirq); gc->irq.irq_enable(d); } static void gpiochip_irq_disable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); unsigned int hwirq = irqd_to_hwirq(d); gc->irq.irq_disable(d); gpiochip_disable_irq(gc, hwirq); } static void gpiochip_set_irq_hooks(struct gpio_chip *gc) { struct irq_chip *irqchip = gc->irq.chip; if (irqchip->flags & IRQCHIP_IMMUTABLE) return; chip_warn(gc, "not an immutable chip, please consider fixing it!\n"); if (!irqchip->irq_request_resources && !irqchip->irq_release_resources) { irqchip->irq_request_resources = gpiochip_irq_reqres; irqchip->irq_release_resources = gpiochip_irq_relres; } if (WARN_ON(gc->irq.irq_enable)) return; /* Check if the irqchip already has this hook... */ if (irqchip->irq_enable == gpiochip_irq_enable || irqchip->irq_mask == gpiochip_irq_mask) { /* * ...and if so, give a gentle warning that this is bad * practice. 
*/ chip_info(gc, "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n"); return; } if (irqchip->irq_disable) { gc->irq.irq_disable = irqchip->irq_disable; irqchip->irq_disable = gpiochip_irq_disable; } else { gc->irq.irq_mask = irqchip->irq_mask; irqchip->irq_mask = gpiochip_irq_mask; } if (irqchip->irq_enable) { gc->irq.irq_enable = irqchip->irq_enable; irqchip->irq_enable = gpiochip_irq_enable; } else { gc->irq.irq_unmask = irqchip->irq_unmask; irqchip->irq_unmask = gpiochip_irq_unmask; } } static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc, struct irq_domain *domain, bool allocated_externally) { if (!domain) return -EINVAL; if (gc->to_irq) chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__); gc->to_irq = gpiochip_to_irq; gc->irq.domain = domain; gc->irq.domain_is_allocated_externally = allocated_externally; /* * Using barrier() here to prevent compiler from reordering * gc->irq.initialized before adding irqdomain. */ barrier(); gc->irq.initialized = true; return 0; } /** * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip * @gc: the GPIO chip to add the IRQ chip to * @lock_key: lockdep class for IRQ lock * @request_key: lockdep class for IRQ request */ static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev); struct irq_chip *irqchip = gc->irq.chip; struct irq_domain *domain; unsigned int type; unsigned int i; int ret; if (!irqchip) return 0; if (gc->irq.parent_handler && gc->can_sleep) { chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n"); return -EINVAL; } type = gc->irq.default_type; /* * Specifying a default trigger is a terrible idea if DT or ACPI is * used to configure the interrupts, as you may end up with * conflicting triggers. Tell the user, and reset to NONE. */ if (WARN(fwnode && type != IRQ_TYPE_NONE, "%pfw: Ignoring %u default trigger\n", fwnode, type)) type = IRQ_TYPE_NONE; gc->irq.default_type = type; gc->irq.lock_key = lock_key; gc->irq.request_key = request_key; /* If a parent irqdomain is provided, let's build a hierarchy */ if (gpiochip_hierarchy_is_hierarchical(gc)) { domain = gpiochip_hierarchy_create_domain(gc); } else { domain = gpiochip_simple_create_domain(gc); } if (IS_ERR(domain)) return PTR_ERR(domain); if (gc->irq.parent_handler) { for (i = 0; i < gc->irq.num_parents; i++) { void *data; if (gc->irq.per_parent_data) data = gc->irq.parent_handler_data_array[i]; else data = gc->irq.parent_handler_data ?: gc; /* * The parent IRQ chip is already using the chip_data * for this IRQ chip, so our callbacks simply use the * handler_data. 
*/ irq_set_chained_handler_and_data(gc->irq.parents[i], gc->irq.parent_handler, data); } } gpiochip_set_irq_hooks(gc); ret = gpiochip_irqchip_add_allocated_domain(gc, domain, false); if (ret) return ret; acpi_gpiochip_request_interrupts(gc); return 0; } /** * gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip * @gc: the gpiochip to remove the irqchip from * * This is called only from gpiochip_remove() */ static void gpiochip_irqchip_remove(struct gpio_chip *gc) { struct irq_chip *irqchip = gc->irq.chip; unsigned int offset; acpi_gpiochip_free_interrupts(gc); if (irqchip && gc->irq.parent_handler) { struct gpio_irq_chip *irq = &gc->irq; unsigned int i; for (i = 0; i < irq->num_parents; i++) irq_set_chained_handler_and_data(irq->parents[i], NULL, NULL); } /* Remove all IRQ mappings and delete the domain */ if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) { unsigned int irq; for (offset = 0; offset < gc->ngpio; offset++) { if (!gpiochip_irqchip_irq_valid(gc, offset)) continue; irq = irq_find_mapping(gc->irq.domain, offset); irq_dispose_mapping(irq); } irq_domain_remove(gc->irq.domain); } if (irqchip && !(irqchip->flags & IRQCHIP_IMMUTABLE)) { if (irqchip->irq_request_resources == gpiochip_irq_reqres) { irqchip->irq_request_resources = NULL; irqchip->irq_release_resources = NULL; } if (irqchip->irq_enable == gpiochip_irq_enable) { irqchip->irq_enable = gc->irq.irq_enable; irqchip->irq_disable = gc->irq.irq_disable; } } gc->irq.irq_enable = NULL; gc->irq.irq_disable = NULL; gc->irq.chip = NULL; gpiochip_irqchip_free_valid_mask(gc); } /** * gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip * @gc: the gpiochip to add the irqchip to * @domain: the irqdomain to add to the gpiochip * * This function adds an IRQ domain to the gpiochip. 
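 *
 * Hypothetical usage sketch (foo_irq_domain_ops is illustrative): when the
 * domain has been allocated outside of gpiolib, e.g. with
 * irq_domain_create_simple(), hand it over so that gc->to_irq is wired up
 * and the domain is marked as externally managed:
 *
 *	domain = irq_domain_create_simple(fwnode, gc->ngpio, 0,
 *					  &foo_irq_domain_ops, gc);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = gpiochip_irqchip_add_domain(gc, domain);
 *	if (ret)
 *		return ret;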
*/ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain) { return gpiochip_irqchip_add_allocated_domain(gc, domain, true); } EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain); #else /* CONFIG_GPIOLIB_IRQCHIP */ static inline int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { return 0; } static void gpiochip_irqchip_remove(struct gpio_chip *gc) {} static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gc) { return 0; } static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) { return 0; } static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { } #endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * gpiochip_generic_request() - request the gpio function for a pin * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to request for GPIO function */ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset) { #ifdef CONFIG_PINCTRL if (list_empty(&gc->gpiodev->pin_ranges)) return 0; #endif return pinctrl_gpio_request(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_request); /** * gpiochip_generic_free() - free the gpio function from a pin * @gc: the gpiochip to request the gpio function for * @offset: the offset of the GPIO to free from GPIO function */ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset) { #ifdef CONFIG_PINCTRL if (list_empty(&gc->gpiodev->pin_ranges)) return; #endif pinctrl_gpio_free(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_free); /** * gpiochip_generic_config() - apply configuration for a pin * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to apply the configuration * @config: the configuration to be applied */ int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) { return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config); } EXPORT_SYMBOL_GPL(gpiochip_generic_config); #ifdef CONFIG_PINCTRL /** * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping * @gc: the gpiochip to add the range for * @pctldev: the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_group: name of the pin group inside the pin controller * * Calling this function directly from a DeviceTree-supported * pinctrl driver is DEPRECATED. Please see Section 2.1 of * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. 
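 *
 * For legacy, non-DT platforms a call might look as follows (the
 * controller and group names are purely illustrative):
 *
 *	ret = gpiochip_add_pingroup_range(gc, foo_pctldev, 0, "gpio_grp");
 *	if (ret)
 *		return ret;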
*/ int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { struct gpio_pin_range *pin_range; struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; pin_range->range.gc = gc; pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->pctldev = pctldev; ret = pinctrl_get_group_pins(pctldev, pin_group, &pin_range->range.pins, &pin_range->range.npins); if (ret < 0) { kfree(pin_range); return ret; } pinctrl_add_gpio_range(pctldev, &pin_range->range); chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n", gpio_offset, gpio_offset + pin_range->range.npins - 1, pinctrl_dev_get_devname(pctldev), pin_group); list_add_tail(&pin_range->node, &gdev->pin_ranges); return 0; } EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); /** * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping * @gc: the gpiochip to add the range for * @pinctl_name: the dev_name() of the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_offset: the start offset in the pin controller number space * @npins: the number of pins from the offset of each pin space (GPIO and * pin controller) to accumulate in this range * * Returns: * 0 on success, or a negative error-code on failure. * * Calling this function directly from a DeviceTree-supported * pinctrl driver is DEPRECATED. Please see Section 2.1 of * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. */ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { struct gpio_pin_range *pin_range; struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; pin_range->range.gc = gc; pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->range.pin_base = pin_offset; pin_range->range.npins = npins; pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name, &pin_range->range); if (IS_ERR(pin_range->pctldev)) { ret = PTR_ERR(pin_range->pctldev); chip_err(gc, "could not create pin range\n"); kfree(pin_range); return ret; } chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n", gpio_offset, gpio_offset + npins - 1, pinctl_name, pin_offset, pin_offset + npins - 1); list_add_tail(&pin_range->node, &gdev->pin_ranges); return 0; } EXPORT_SYMBOL_GPL(gpiochip_add_pin_range); /** * gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings * @gc: the chip to remove all the mappings for */ void gpiochip_remove_pin_ranges(struct gpio_chip *gc) { struct gpio_pin_range *pin_range, *tmp; struct gpio_device *gdev = gc->gpiodev; list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) { list_del(&pin_range->node); pinctrl_remove_gpio_range(pin_range->pctldev, &pin_range->range); kfree(pin_range); } } EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges); #endif /* CONFIG_PINCTRL */ /* These "optional" allocation calls help prevent drivers from stomping * on each other, and help provide better diagnostics in debugfs. 
* They're called even less than the "set direction" calls. */ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) { struct gpio_chip *gc = desc->gdev->chip; int ret; unsigned long flags; unsigned offset; if (label) { label = kstrdup_const(label, GFP_KERNEL); if (!label) return -ENOMEM; } spin_lock_irqsave(&gpio_lock, flags); /* NOTE: gpio_request() can be called in early boot, * before IRQs are enabled, for non-sleeping (SOC) GPIOs. */ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { desc_set_label(desc, label ? : "?"); } else { ret = -EBUSY; goto out_free_unlock; } if (gc->request) { /* gc->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); if (gpiochip_line_is_valid(gc, offset)) ret = gc->request(gc, offset); else ret = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (ret) { desc_set_label(desc, NULL); clear_bit(FLAG_REQUESTED, &desc->flags); goto out_free_unlock; } } if (gc->get_direction) { /* gc->get_direction may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); gpiod_get_direction(desc); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return 0; out_free_unlock: spin_unlock_irqrestore(&gpio_lock, flags); kfree_const(label); return ret; } /* * This descriptor validation needs to be inserted verbatim into each * function taking a descriptor, so we need to use a preprocessor * macro to avoid endless duplication. If the desc is NULL it is an * optional GPIO and calls should just bail out. */ static int validate_desc(const struct gpio_desc *desc, const char *func) { if (!desc) return 0; if (IS_ERR(desc)) { pr_warn("%s: invalid GPIO (errorpointer)\n", func); return PTR_ERR(desc); } if (!desc->gdev) { pr_warn("%s: invalid GPIO (no device)\n", func); return -EINVAL; } if (!desc->gdev->chip) { dev_warn(&desc->gdev->dev, "%s: backing chip is gone\n", func); return 0; } return 1; } #define VALIDATE_DESC(desc) do { \ int __valid = validate_desc(desc, __func__); \ if (__valid <= 0) \ return __valid; \ } while (0) #define VALIDATE_DESC_VOID(desc) do { \ int __valid = validate_desc(desc, __func__); \ if (__valid <= 0) \ return; \ } while (0) int gpiod_request(struct gpio_desc *desc, const char *label) { int ret = -EPROBE_DEFER; VALIDATE_DESC(desc); if (try_module_get(desc->gdev->owner)) { ret = gpiod_request_commit(desc, label); if (ret) module_put(desc->gdev->owner); else gpio_device_get(desc->gdev); } if (ret) gpiod_dbg(desc, "%s: status %d\n", __func__, ret); return ret; } static bool gpiod_free_commit(struct gpio_desc *desc) { bool ret = false; unsigned long flags; struct gpio_chip *gc; might_sleep(); spin_lock_irqsave(&gpio_lock, flags); gc = desc->gdev->chip; if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) { if (gc->free) { spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(gc->can_sleep); gc->free(gc, gpio_chip_hwgpio(desc)); spin_lock_irqsave(&gpio_lock, flags); } kfree_const(desc->label); desc_set_label(desc, NULL); clear_bit(FLAG_ACTIVE_LOW, &desc->flags); clear_bit(FLAG_REQUESTED, &desc->flags); clear_bit(FLAG_OPEN_DRAIN, &desc->flags); clear_bit(FLAG_OPEN_SOURCE, &desc->flags); clear_bit(FLAG_PULL_UP, &desc->flags); clear_bit(FLAG_PULL_DOWN, &desc->flags); clear_bit(FLAG_BIAS_DISABLE, &desc->flags); clear_bit(FLAG_EDGE_RISING, &desc->flags); clear_bit(FLAG_EDGE_FALLING, &desc->flags); clear_bit(FLAG_IS_HOGGED, &desc->flags); #ifdef CONFIG_OF_DYNAMIC desc->hog = NULL; #endif #ifdef CONFIG_GPIO_CDEV WRITE_ONCE(desc->debounce_period_us, 0); #endif ret = true; } 
spin_unlock_irqrestore(&gpio_lock, flags); gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED); return ret; } void gpiod_free(struct gpio_desc *desc) { /* * We must not use VALIDATE_DESC_VOID() as the underlying gdev->chip * may already be NULL but we still want to put the references. */ if (!desc) return; if (!gpiod_free_commit(desc)) WARN_ON(extra_checks); module_put(desc->gdev->owner); gpio_device_put(desc->gdev); } /** * gpiochip_is_requested - return string iff signal was requested * @gc: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. * The string returned is the label passed to gpio_request(); if none has been * passed it is a meaningless, non-NULL constant. * * This function is for use by GPIO controller drivers. The label can * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. */ const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return NULL; if (test_bit(FLAG_REQUESTED, &desc->flags) == 0) return NULL; return desc->label; } EXPORT_SYMBOL_GPL(gpiochip_is_requested); /** * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor * @gc: GPIO chip * @hwnum: hardware number of the GPIO for which to request the descriptor * @label: label for the GPIO * @lflags: lookup flags for this GPIO or 0 if default, this can be used to * specify things like line inversion semantics with the machine flags * such as GPIO_OUT_LOW * @dflags: descriptor request flags for this GPIO or 0 if default, this * can be used to specify consumer semantics such as open drain * * Function allows GPIO chip drivers to request and use their own GPIO * descriptors via gpiolib API. Difference to gpiod_request() is that this * function will not increase reference count of the GPIO chip module. This * allows the GPIO chip module to be unloaded as needed (we assume that the * GPIO chip driver handles freeing the GPIOs it has requested). * * Returns: * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error * code on failure. */ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, unsigned int hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags) { struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum); int ret; if (IS_ERR(desc)) { chip_err(gc, "failed to get GPIO descriptor\n"); return desc; } ret = gpiod_request_commit(desc, label); if (ret < 0) return ERR_PTR(ret); ret = gpiod_configure_flags(desc, label, lflags, dflags); if (ret) { chip_err(gc, "setup of own GPIO %s failed\n", label); gpiod_free_commit(desc); return ERR_PTR(ret); } return desc; } EXPORT_SYMBOL_GPL(gpiochip_request_own_desc); /** * gpiochip_free_own_desc - Free GPIO requested by the chip driver * @desc: GPIO descriptor to free * * Function frees the given GPIO requested previously with * gpiochip_request_own_desc(). */ void gpiochip_free_own_desc(struct gpio_desc *desc) { if (desc) gpiod_free_commit(desc); } EXPORT_SYMBOL_GPL(gpiochip_free_own_desc); /* * Drivers MUST set GPIO direction before making get/set calls. In * some cases this is done in early boot, before IRQs are enabled. * * As a rule these aren't called more than once (except for drivers * using the open-drain emulation idiom) so these are natural places * to accumulate extra debugging checks. 
Note that we can't (yet)
 * rely on gpio_request() having been called beforehand.
 */

static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
			      unsigned long config)
{
	if (!gc->set_config)
		return -ENOTSUPP;

	return gc->set_config(gc, offset, config);
}

static int gpio_set_config_with_argument(struct gpio_desc *desc,
					 enum pin_config_param mode,
					 u32 argument)
{
	struct gpio_chip *gc = desc->gdev->chip;
	unsigned long config;

	config = pinconf_to_config_packed(mode, argument);
	return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
}

static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
						  enum pin_config_param mode,
						  u32 argument)
{
	struct device *dev = &desc->gdev->dev;
	int gpio = gpio_chip_hwgpio(desc);
	int ret;

	ret = gpio_set_config_with_argument(desc, mode, argument);
	if (ret != -ENOTSUPP)
		return ret;

	switch (mode) {
	case PIN_CONFIG_PERSIST_STATE:
		dev_dbg(dev, "Persistence not supported for GPIO %d\n", gpio);
		break;
	default:
		break;
	}

	return 0;
}

static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
{
	return gpio_set_config_with_argument(desc, mode, 0);
}

static int gpio_set_bias(struct gpio_desc *desc)
{
	enum pin_config_param bias;
	unsigned int arg;

	if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
		bias = PIN_CONFIG_BIAS_DISABLE;
	else if (test_bit(FLAG_PULL_UP, &desc->flags))
		bias = PIN_CONFIG_BIAS_PULL_UP;
	else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
		bias = PIN_CONFIG_BIAS_PULL_DOWN;
	else
		return 0;

	switch (bias) {
	case PIN_CONFIG_BIAS_PULL_DOWN:
	case PIN_CONFIG_BIAS_PULL_UP:
		arg = 1;
		break;
	default:
		arg = 0;
		break;
	}

	return gpio_set_config_with_argument_optional(desc, bias, arg);
}

/**
 * gpio_set_debounce_timeout() - Set debounce timeout
 * @desc: GPIO descriptor to set the debounce timeout
 * @debounce: Debounce timeout in microseconds
 *
 * The function calls into the GPIO driver to set the debounce timeout
 * in the hardware.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
	return gpio_set_config_with_argument_optional(desc,
						      PIN_CONFIG_INPUT_DEBOUNCE,
						      debounce);
}

/**
 * gpiod_direction_input - set the GPIO direction to input
 * @desc: GPIO to set to input
 *
 * Set the direction of the passed GPIO to input, so that gpiod_get_value()
 * can be called safely on it.
 *
 * Return 0 in case of success, else an error code.
 */
int gpiod_direction_input(struct gpio_desc *desc)
{
	struct gpio_chip *gc;
	int ret = 0;

	VALIDATE_DESC(desc);
	gc = desc->gdev->chip;

	/*
	 * It is legal to have no .get() and .direction_input() specified if
	 * the chip is output-only, but you can't specify .direction_input()
	 * and not support the .get() operation, that doesn't make sense.
	 */
	if (!gc->get && gc->direction_input) {
		gpiod_warn(desc,
			   "%s: missing get() but have direction_input()\n",
			   __func__);
		return -EIO;
	}

	/*
	 * If we have a .direction_input() callback, things are simple:
	 * just call it. Otherwise we are presumably an input-only chip,
	 * so try to check the direction (if .get_direction() is supported);
	 * failing that, silently assume we are in input mode after this.
 */
	if (gc->direction_input) {
		ret = gc->direction_input(gc, gpio_chip_hwgpio(desc));
	} else if (gc->get_direction &&
		  (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) {
		gpiod_warn(desc,
			   "%s: missing direction_input() operation and line is output\n",
			   __func__);
		return -EIO;
	}
	if (ret == 0) {
		clear_bit(FLAG_IS_OUT, &desc->flags);
		ret = gpio_set_bias(desc);
	}

	trace_gpio_direction(desc_to_gpio(desc), 1, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);

static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
	struct gpio_chip *gc = desc->gdev->chip;
	int val = !!value;
	int ret = 0;

	/*
	 * It's OK not to specify .direction_output() if the gpiochip is
	 * output-only, but if there is then not even a .set() operation it
	 * is pretty tricky to drive the output line.
	 */
	if (!gc->set && !gc->direction_output) {
		gpiod_warn(desc,
			   "%s: missing set() and direction_output() operations\n",
			   __func__);
		return -EIO;
	}

	if (gc->direction_output) {
		ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
	} else {
		/* Check that we are in output mode if we can */
		if (gc->get_direction &&
		    gc->get_direction(gc, gpio_chip_hwgpio(desc))) {
			gpiod_warn(desc,
				   "%s: missing direction_output() operation\n",
				   __func__);
			return -EIO;
		}
		/*
		 * If we can't actively set the direction, we are some
		 * output-only chip, so just drive the output as desired.
		 */
		gc->set(gc, gpio_chip_hwgpio(desc), val);
	}

	if (!ret)
		set_bit(FLAG_IS_OUT, &desc->flags);
	trace_gpio_value(desc_to_gpio(desc), 0, val);
	trace_gpio_direction(desc_to_gpio(desc), 0, ret);
	return ret;
}

/**
 * gpiod_direction_output_raw - set the GPIO direction to output
 * @desc: GPIO to set to output
 * @value: initial output value of the GPIO
 *
 * Set the direction of the passed GPIO to output, so that gpiod_set_value()
 * can be called safely on it. The initial value of the output must be
 * specified as the raw value on the physical line without regard for the
 * ACTIVE_LOW status.
 *
 * Return 0 in case of success, else an error code.
 */
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
	VALIDATE_DESC(desc);
	return gpiod_direction_output_raw_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);

/**
 * gpiod_direction_output - set the GPIO direction to output
 * @desc: GPIO to set to output
 * @value: initial output value of the GPIO
 *
 * Set the direction of the passed GPIO to output, so that gpiod_set_value()
 * can be called safely on it. The initial value of the output must be
 * specified as the logical value of the GPIO, i.e. taking its ACTIVE_LOW
 * status into account.
 *
 * Return 0 in case of success, else an error code.
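 *
 * Consumer-side sketch (the "reset" consumer name is hypothetical):
 * because this call takes ACTIVE_LOW into account, the same code works
 * for both line polarities:
 *
 *	reset = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
 *	if (IS_ERR(reset))
 *		return PTR_ERR(reset);
 *
 *	ret = gpiod_direction_output(reset, 1);
 *	if (ret)
 *		return ret;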
*/ int gpiod_direction_output(struct gpio_desc *desc, int value) { int ret; VALIDATE_DESC(desc); if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; else value = !!value; /* GPIOs used for enabled IRQs shall not be set as output */ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags) && test_bit(FLAG_IRQ_IS_ENABLED, &desc->flags)) { gpiod_err(desc, "%s: tried to set a GPIO tied to an IRQ as output\n", __func__); return -EIO; } if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { /* First see if we can enable open drain in hardware */ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN); if (!ret) goto set_output_value; /* Emulate open drain by not actively driving the line high */ if (value) { ret = gpiod_direction_input(desc); goto set_output_flag; } } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); if (!ret) goto set_output_value; /* Emulate open source by not actively driving the line low */ if (!value) { ret = gpiod_direction_input(desc); goto set_output_flag; } } else { gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL); } set_output_value: ret = gpio_set_bias(desc); if (ret) return ret; return gpiod_direction_output_raw_commit(desc, value); set_output_flag: /* * When emulating open-source or open-drain functionalities by not * actively driving the line (setting mode to input) we still need to * set the IS_OUT flag or otherwise we won't be able to set the line * value anymore. */ if (ret == 0) set_bit(FLAG_IS_OUT, &desc->flags); return ret; } EXPORT_SYMBOL_GPL(gpiod_direction_output); /** * gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds. * * @desc: GPIO to enable. * @flags: Flags related to GPIO edge. * * Return 0 in case of success, else negative error code. */ int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags) { int ret = 0; struct gpio_chip *gc; VALIDATE_DESC(desc); gc = desc->gdev->chip; if (!gc->en_hw_timestamp) { gpiod_warn(desc, "%s: hw ts not supported\n", __func__); return -ENOTSUPP; } ret = gc->en_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags); if (ret) gpiod_warn(desc, "%s: hw ts request failed\n", __func__); return ret; } EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns); /** * gpiod_disable_hw_timestamp_ns - Disable hardware timestamp. * * @desc: GPIO to disable. * @flags: Flags related to GPIO edge, same value as used during enable call. * * Return 0 in case of success, else negative error code. */ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags) { int ret = 0; struct gpio_chip *gc; VALIDATE_DESC(desc); gc = desc->gdev->chip; if (!gc->dis_hw_timestamp) { gpiod_warn(desc, "%s: hw ts not supported\n", __func__); return -ENOTSUPP; } ret = gc->dis_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags); if (ret) gpiod_warn(desc, "%s: hw ts release failed\n", __func__); return ret; } EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns); /** * gpiod_set_config - sets @config for a GPIO * @desc: descriptor of the GPIO for which to set the configuration * @config: Same packed config format as generic pinconf * * Returns: * 0 on success, %-ENOTSUPP if the controller doesn't support setting the * configuration. 
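 *
 * Sketch of a caller (the debounce value is illustrative): the config
 * word uses the generic pinconf packing, exactly as gpiod_set_debounce()
 * below does:
 *
 *	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);
 *	ret = gpiod_set_config(desc, config);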
*/ int gpiod_set_config(struct gpio_desc *desc, unsigned long config) { struct gpio_chip *gc; VALIDATE_DESC(desc); gc = desc->gdev->chip; return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config); } EXPORT_SYMBOL_GPL(gpiod_set_config); /** * gpiod_set_debounce - sets @debounce time for a GPIO * @desc: descriptor of the GPIO for which to set debounce time * @debounce: debounce time in microseconds * * Returns: * 0 on success, %-ENOTSUPP if the controller doesn't support setting the * debounce time. */ int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce) { unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); return gpiod_set_config(desc, config); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); /** * gpiod_set_transitory - Lose or retain GPIO state on suspend or reset * @desc: descriptor of the GPIO for which to configure persistence * @transitory: True to lose state on suspend or reset, false for persistence * * Returns: * 0 on success, otherwise a negative error code. */ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { VALIDATE_DESC(desc); /* * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for * persistence state. */ assign_bit(FLAG_TRANSITORY, &desc->flags, transitory); /* If the driver supports it, set the persistence state now */ return gpio_set_config_with_argument_optional(desc, PIN_CONFIG_PERSIST_STATE, !transitory); } EXPORT_SYMBOL_GPL(gpiod_set_transitory); /** * gpiod_is_active_low - test whether a GPIO is active-low or not * @desc: the gpio descriptor to test * * Returns 1 if the GPIO is active-low, 0 otherwise. */ int gpiod_is_active_low(const struct gpio_desc *desc) { VALIDATE_DESC(desc); return test_bit(FLAG_ACTIVE_LOW, &desc->flags); } EXPORT_SYMBOL_GPL(gpiod_is_active_low); /** * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not * @desc: the gpio descriptor to change */ void gpiod_toggle_active_low(struct gpio_desc *desc) { VALIDATE_DESC_VOID(desc); change_bit(FLAG_ACTIVE_LOW, &desc->flags); } EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc) { return gc->get ? gc->get(gc, gpio_chip_hwgpio(desc)) : -EIO; } /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * * "Get" operations are often inlinable as reading a pin value register, * and masking the relevant bit in that register. * * When "set" operations are inlinable, they involve writing that mask to * one register to set a low value, or a different register to set it high. * Otherwise locking is needed, so there may be little value to inlining. * *------------------------------------------------------------------------ * * IMPORTANT!!! The hot paths -- get/set value -- assume that callers * have requested the GPIO. That can include implicit requesting by * a direction setting call. Marking a gpio as requested locks its chip * in memory, guaranteeing that these table lookups need no more locking * and that gpiochip_remove() will fail. * * REVISIT when debugging, consider adding some instrumentation to ensure * that the GPIO was actually requested. */ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) { struct gpio_chip *gc; int value; gc = desc->gdev->chip; value = gpio_chip_get_value(gc, desc); value = value < 0 ? 
value : !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { if (gc->get_multiple) return gc->get_multiple(gc, mask, bits); if (gc->get) { int i, value; for_each_set_bit(i, mask, gc->ngpio) { value = gc->get(gc, i); if (value < 0) return value; __assign_bit(i, bits, value); } return 0; } return -EIO; } int gpiod_get_array_value_complex(bool raw, bool can_sleep, unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { int ret, i = 0; /* * Validate array_info against desc_array and its size. * It should immediately follow desc_array if both * have been obtained from the same gpiod_get_array() call. */ if (array_info && array_info->desc == desc_array && array_size <= array_info->size && (void *)array_info == desc_array + array_info->size) { if (!can_sleep) WARN_ON(array_info->chip->can_sleep); ret = gpio_chip_get_multiple(array_info->chip, array_info->get_mask, value_bitmap); if (ret) return ret; if (!raw && !bitmap_empty(array_info->invert_mask, array_size)) bitmap_xor(value_bitmap, value_bitmap, array_info->invert_mask, array_size); i = find_first_zero_bit(array_info->get_mask, array_size); if (i == array_size) return 0; } else { array_info = NULL; } while (i < array_size) { struct gpio_chip *gc = desc_array[i]->gdev->chip; DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO); DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO); unsigned long *mask, *bits; int first, j; if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath_mask; bits = fastpath_bits; } else { gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC; mask = bitmap_alloc(gc->ngpio, flags); if (!mask) return -ENOMEM; bits = bitmap_alloc(gc->ngpio, flags); if (!bits) { bitmap_free(mask); return -ENOMEM; } } bitmap_zero(mask, gc->ngpio); if (!can_sleep) WARN_ON(gc->can_sleep); /* collect all inputs belonging to the same chip */ first = i; do { const struct gpio_desc *desc = desc_array[i]; int hwgpio = gpio_chip_hwgpio(desc); __set_bit(hwgpio, mask); i++; if (array_info) i = find_next_zero_bit(array_info->get_mask, array_size, i); } while ((i < array_size) && (desc_array[i]->gdev->chip == gc)); ret = gpio_chip_get_multiple(gc, mask, bits); if (ret) { if (mask != fastpath_mask) bitmap_free(mask); if (bits != fastpath_bits) bitmap_free(bits); return ret; } for (j = first; j < i; ) { const struct gpio_desc *desc = desc_array[j]; int hwgpio = gpio_chip_hwgpio(desc); int value = test_bit(hwgpio, bits); if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; __assign_bit(j, value_bitmap, value); trace_gpio_value(desc_to_gpio(desc), 1, value); j++; if (array_info) j = find_next_zero_bit(array_info->get_mask, i, j); } if (mask != fastpath_mask) bitmap_free(mask); if (bits != fastpath_bits) bitmap_free(bits); } return 0; } /** * gpiod_get_raw_value() - return a gpio's raw value * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding * its ACTIVE_LOW status, or negative errno on failure. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. 
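 *
 * Hypothetical sketch (foo_priv and the work item are illustrative): this
 * variant is suitable for atomic contexts such as a hard IRQ handler,
 * where the _cansleep flavour must not be used:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (gpiod_get_raw_value(priv->line) > 0)
 *			schedule_work(&priv->work);
 *		return IRQ_HANDLED;
 *	}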
*/ int gpiod_get_raw_value(const struct gpio_desc *desc) { VALIDATE_DESC(desc); /* Should be using gpiod_get_raw_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); return gpiod_get_raw_value_commit(desc); } EXPORT_SYMBOL_GPL(gpiod_get_raw_value); /** * gpiod_get_value() - return a gpio's value * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into * account, or negative errno on failure. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ int gpiod_get_value(const struct gpio_desc *desc) { int value; VALIDATE_DESC(desc); /* Should be using gpiod_get_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); value = gpiod_get_raw_value_commit(desc); if (value < 0) return value; if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; return value; } EXPORT_SYMBOL_GPL(gpiod_get_value); /** * gpiod_get_raw_array_value() - read raw values from an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be read * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap to store the read values * * Read the raw values of the GPIOs, i.e. the values of the physical lines * without regard for their ACTIVE_LOW status. Return 0 in case of success, * else an error code. * * This function can be called from contexts where we cannot sleep, * and it will complain if the GPIO chip functions potentially sleep. */ int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { if (!desc_array) return -EINVAL; return gpiod_get_array_value_complex(true, false, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value); /** * gpiod_get_array_value() - read values from an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be read * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap to store the read values * * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status * into account. Return 0 in case of success, else an error code. * * This function can be called from contexts where we cannot sleep, * and it will complain if the GPIO chip functions potentially sleep. */ int gpiod_get_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { if (!desc_array) return -EINVAL; return gpiod_get_array_value_complex(false, false, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_get_array_value); /* * gpio_set_open_drain_value_commit() - Set the open drain gpio's value. * @desc: gpio descriptor whose state need to be set. * @value: Non-zero for setting it HIGH otherwise it will set to LOW. 
 */
static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
	int ret = 0;
	struct gpio_chip *gc = desc->gdev->chip;
	int offset = gpio_chip_hwgpio(desc);

	if (value) {
		ret = gc->direction_input(gc, offset);
	} else {
		ret = gc->direction_output(gc, offset, 0);
		if (!ret)
			set_bit(FLAG_IS_OUT, &desc->flags);
	}
	trace_gpio_direction(desc_to_gpio(desc), value, ret);
	if (ret < 0)
		gpiod_err(desc,
			  "%s: Error in set_value for open drain err %d\n",
			  __func__, ret);
}

/*
 * gpio_set_open_source_value_commit() - Set the open source gpio's value.
 * @desc: gpio descriptor whose state needs to be set.
 * @value: Non-zero to set it HIGH, otherwise it will be set LOW.
 */
static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
	int ret = 0;
	struct gpio_chip *gc = desc->gdev->chip;
	int offset = gpio_chip_hwgpio(desc);

	if (value) {
		ret = gc->direction_output(gc, offset, 1);
		if (!ret)
			set_bit(FLAG_IS_OUT, &desc->flags);
	} else {
		ret = gc->direction_input(gc, offset);
	}
	trace_gpio_direction(desc_to_gpio(desc), !value, ret);
	if (ret < 0)
		gpiod_err(desc,
			  "%s: Error in set_value for open source err %d\n",
			  __func__, ret);
}

static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
	struct gpio_chip *gc;

	gc = desc->gdev->chip;
	trace_gpio_value(desc_to_gpio(desc), 0, value);
	gc->set(gc, gpio_chip_hwgpio(desc), value);
}

/*
 * set multiple outputs on the same chip;
 * use the chip's set_multiple function if available;
 * otherwise set the outputs sequentially;
 * @chip: the GPIO chip we operate on
 * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
 *        defines which outputs are to be changed
 * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
 *        defines the values the outputs specified by mask are to be set to
 */
static void gpio_chip_set_multiple(struct gpio_chip *gc,
				   unsigned long *mask, unsigned long *bits)
{
	if (gc->set_multiple) {
		gc->set_multiple(gc, mask, bits);
	} else {
		unsigned int i;

		/* set outputs if the corresponding mask bit is set */
		for_each_set_bit(i, mask, gc->ngpio)
			gc->set(gc, i, test_bit(i, bits));
	}
}

int gpiod_set_array_value_complex(bool raw, bool can_sleep,
				  unsigned int array_size,
				  struct gpio_desc **desc_array,
				  struct gpio_array *array_info,
				  unsigned long *value_bitmap)
{
	int i = 0;

	/*
	 * Validate array_info against desc_array and its size.
	 * It should immediately follow desc_array if both
	 * have been obtained from the same gpiod_get_array() call.
	 */
	if (array_info && array_info->desc == desc_array &&
	    array_size <= array_info->size &&
	    (void *)array_info == desc_array + array_info->size) {
		if (!can_sleep)
			WARN_ON(array_info->chip->can_sleep);

		if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
			bitmap_xor(value_bitmap, value_bitmap,
				   array_info->invert_mask, array_size);

		gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
				       value_bitmap);

		i = find_first_zero_bit(array_info->set_mask, array_size);
		if (i == array_size)
			return 0;
	} else {
		array_info = NULL;
	}

	while (i < array_size) {
		struct gpio_chip *gc = desc_array[i]->gdev->chip;
		DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO);
		DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO);
		unsigned long *mask, *bits;
		int count = 0;

		if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
			mask = fastpath_mask;
			bits = fastpath_bits;
		} else {
			gfp_t flags = can_sleep ?
GFP_KERNEL : GFP_ATOMIC; mask = bitmap_alloc(gc->ngpio, flags); if (!mask) return -ENOMEM; bits = bitmap_alloc(gc->ngpio, flags); if (!bits) { bitmap_free(mask); return -ENOMEM; } } bitmap_zero(mask, gc->ngpio); if (!can_sleep) WARN_ON(gc->can_sleep); do { struct gpio_desc *desc = desc_array[i]; int hwgpio = gpio_chip_hwgpio(desc); int value = test_bit(i, value_bitmap); /* * Pins applicable for fast input but not for * fast output processing may have been already * inverted inside the fast path, skip them. */ if (!raw && !(array_info && test_bit(i, array_info->invert_mask)) && test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; trace_gpio_value(desc_to_gpio(desc), 0, value); /* * collect all normal outputs belonging to the same chip * open drain and open source outputs are set individually */ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) { gpio_set_open_drain_value_commit(desc, value); } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) { gpio_set_open_source_value_commit(desc, value); } else { __set_bit(hwgpio, mask); __assign_bit(hwgpio, bits, value); count++; } i++; if (array_info) i = find_next_zero_bit(array_info->set_mask, array_size, i); } while ((i < array_size) && (desc_array[i]->gdev->chip == gc)); /* push collected bits to outputs */ if (count != 0) gpio_chip_set_multiple(gc, mask, bits); if (mask != fastpath_mask) bitmap_free(mask); if (bits != fastpath_bits) bitmap_free(bits); } return 0; } /** * gpiod_set_raw_value() - assign a gpio's raw value * @desc: gpio whose value will be assigned * @value: value to assign * * Set the raw value of the GPIO, i.e. the value of its physical line without * regard for its ACTIVE_LOW status. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ void gpiod_set_raw_value(struct gpio_desc *desc, int value) { VALIDATE_DESC_VOID(desc); /* Should be using gpiod_set_raw_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); gpiod_set_raw_value_commit(desc, value); } EXPORT_SYMBOL_GPL(gpiod_set_raw_value); /** * gpiod_set_value_nocheck() - set a GPIO line value without checking * @desc: the descriptor to set the value on * @value: value to set * * This sets the value of a GPIO line backing a descriptor, applying * different semantic quirks like active low and open drain/source * handling. */ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value) { if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) gpio_set_open_drain_value_commit(desc, value); else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) gpio_set_open_source_value_commit(desc, value); else gpiod_set_raw_value_commit(desc, value); } /** * gpiod_set_value() - assign a gpio's value * @desc: gpio whose value will be assigned * @value: value to assign * * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW, * OPEN_DRAIN and OPEN_SOURCE flags into account. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. 
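 *
 * Illustrative sketch (the "led" descriptor is hypothetical): since
 * ACTIVE_LOW is applied here, the same call turns the LED on regardless
 * of how the line is wired electrically:
 *
 *	gpiod_set_value(led, 1);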
*/ void gpiod_set_value(struct gpio_desc *desc, int value) { VALIDATE_DESC_VOID(desc); /* Should be using gpiod_set_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); gpiod_set_value_nocheck(desc, value); } EXPORT_SYMBOL_GPL(gpiod_set_value); /** * gpiod_set_raw_array_value() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be assigned * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap of values to assign * * Set the raw values of the GPIOs, i.e. the values of the physical lines * without regard for their ACTIVE_LOW status. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { if (!desc_array) return -EINVAL; return gpiod_set_array_value_complex(true, false, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value); /** * gpiod_set_array_value() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be assigned * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap of values to assign * * Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status * into account. * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { if (!desc_array) return -EINVAL; return gpiod_set_array_value_complex(false, false, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_set_array_value); /** * gpiod_cansleep() - report whether gpio value access may sleep * @desc: gpio to check * */ int gpiod_cansleep(const struct gpio_desc *desc) { VALIDATE_DESC(desc); return desc->gdev->chip->can_sleep; } EXPORT_SYMBOL_GPL(gpiod_cansleep); /** * gpiod_set_consumer_name() - set the consumer name for the descriptor * @desc: gpio to set the consumer name on * @name: the new consumer name */ int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { VALIDATE_DESC(desc); if (name) { name = kstrdup_const(name, GFP_KERNEL); if (!name) return -ENOMEM; } kfree_const(desc->label); desc_set_label(desc, name); return 0; } EXPORT_SYMBOL_GPL(gpiod_set_consumer_name); /** * gpiod_to_irq() - return the IRQ corresponding to a GPIO * @desc: gpio whose IRQ will be returned (already requested) * * Return the IRQ corresponding to the passed GPIO, or an error code in case of * error. */ int gpiod_to_irq(const struct gpio_desc *desc) { struct gpio_chip *gc; int offset; /* * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics * requires this function to not return zero on an invalid descriptor * but rather a negative error number. 
*/ if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) return -EINVAL; gc = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); if (gc->to_irq) { int retirq = gc->to_irq(gc, offset); /* Zero means NO_IRQ */ if (!retirq) return -ENXIO; return retirq; } #ifdef CONFIG_GPIOLIB_IRQCHIP if (gc->irq.chip) { /* * Avoid race condition with other code, which tries to lookup * an IRQ before the irqchip has been properly registered, * i.e. while gpiochip is still being brought up. */ return -EPROBE_DEFER; } #endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); /** * gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to lock down * a certain GPIO line to be used for IRQs. */ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return PTR_ERR(desc); /* * If it's fast: flush the direction setting if something changed * behind our back */ if (!gc->can_sleep && gc->get_direction) { int dir = gpiod_get_direction(desc); if (dir < 0) { chip_err(gc, "%s: cannot get GPIO direction\n", __func__); return dir; } } /* To be valid for IRQ the line needs to be input or open drain */ if (test_bit(FLAG_IS_OUT, &desc->flags) && !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { chip_err(gc, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; } set_bit(FLAG_USED_AS_IRQ, &desc->flags); set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); /* * If the consumer has not set up a label (such as when the * IRQ is referenced from .to_irq()) we set up a label here * so it is clear this is used as an interrupt. */ if (!desc->label) desc_set_label(desc, "interrupt"); return 0; } EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); /** * gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to indicate * that a certain GPIO is no longer used exclusively for IRQ. */ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return; clear_bit(FLAG_USED_AS_IRQ, &desc->flags); clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); /* If we only had this marking, erase it */ if (desc->label && !strcmp(desc->label, "interrupt")) desc_set_label(desc, NULL); } EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); } EXPORT_SYMBOL_GPL(gpiochip_disable_irq); void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { /* * We must not be output when using IRQ UNLESS we are * open drain. 
*/ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); } } EXPORT_SYMBOL_GPL(gpiochip_enable_irq); bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset) { if (offset >= gc->ngpio) return false; return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_irq); int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset) { int ret; if (!try_module_get(gc->gpiodev->owner)) return -ENODEV; ret = gpiochip_lock_as_irq(gc, offset); if (ret) { chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset); module_put(gc->gpiodev->owner); return ret; } return 0; } EXPORT_SYMBOL_GPL(gpiochip_reqres_irq); void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset) { gpiochip_unlock_as_irq(gc, offset); module_put(gc->gpiodev->owner); } EXPORT_SYMBOL_GPL(gpiochip_relres_irq); bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset) { if (offset >= gc->ngpio) return false; return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain); bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset) { if (offset >= gc->ngpio) return false; return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source); bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset) { if (offset >= gc->ngpio) return false; return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent); /** * gpiod_get_raw_value_cansleep() - return a gpio's raw value * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding * its ACTIVE_LOW status, or negative errno on failure. * * This function is to be called from contexts that can sleep. */ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { might_sleep_if(extra_checks); VALIDATE_DESC(desc); return gpiod_get_raw_value_commit(desc); } EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep); /** * gpiod_get_value_cansleep() - return a gpio's value * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into * account, or negative errno on failure. * * This function is to be called from contexts that can sleep. */ int gpiod_get_value_cansleep(const struct gpio_desc *desc) { int value; might_sleep_if(extra_checks); VALIDATE_DESC(desc); value = gpiod_get_raw_value_commit(desc); if (value < 0) return value; if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; return value; } EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep); /** * gpiod_get_raw_array_value_cansleep() - read raw values from an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be read * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap to store the read values * * Read the raw values of the GPIOs, i.e. the values of the physical lines * without regard for their ACTIVE_LOW status. Return 0 in case of success, * else an error code. * * This function is to be called from contexts that can sleep. 
*/ int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { might_sleep_if(extra_checks); if (!desc_array) return -EINVAL; return gpiod_get_array_value_complex(true, true, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep); /** * gpiod_get_array_value_cansleep() - read values from an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be read * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap to store the read values * * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status * into account. Return 0 in case of success, else an error code. * * This function is to be called from contexts that can sleep. */ int gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { might_sleep_if(extra_checks); if (!desc_array) return -EINVAL; return gpiod_get_array_value_complex(false, true, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep); /** * gpiod_set_raw_value_cansleep() - assign a gpio's raw value * @desc: gpio whose value will be assigned * @value: value to assign * * Set the raw value of the GPIO, i.e. the value of its physical line without * regard for its ACTIVE_LOW status. * * This function is to be called from contexts that can sleep. */ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { might_sleep_if(extra_checks); VALIDATE_DESC_VOID(desc); gpiod_set_raw_value_commit(desc, value); } EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep); /** * gpiod_set_value_cansleep() - assign a gpio's value * @desc: gpio whose value will be assigned * @value: value to assign * * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into * account * * This function is to be called from contexts that can sleep. */ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { might_sleep_if(extra_checks); VALIDATE_DESC_VOID(desc); gpiod_set_value_nocheck(desc, value); } EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); /** * gpiod_set_raw_array_value_cansleep() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be assigned * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap of values to assign * * Set the raw values of the GPIOs, i.e. the values of the physical lines * without regard for their ACTIVE_LOW status. * * This function is to be called from contexts that can sleep. 
*/ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { might_sleep_if(extra_checks); if (!desc_array) return -EINVAL; return gpiod_set_array_value_complex(true, true, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep); /** * gpiod_add_lookup_tables() - register GPIO device consumers * @tables: list of tables of consumers to register * @n: number of tables in the list */ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) { unsigned int i; mutex_lock(&gpio_lookup_lock); for (i = 0; i < n; i++) list_add_tail(&tables[i]->list, &gpio_lookup_list); mutex_unlock(&gpio_lookup_lock); } /** * gpiod_set_array_value_cansleep() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor array / value bitmap * @desc_array: array of GPIO descriptors whose values will be assigned * @array_info: information on applicability of fast bitmap processing path * @value_bitmap: bitmap of values to assign * * Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status * into account. * * This function is to be called from contexts that can sleep. */ int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { might_sleep_if(extra_checks); if (!desc_array) return -EINVAL; return gpiod_set_array_value_complex(false, true, array_size, desc_array, array_info, value_bitmap); } EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep); void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action) { blocking_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc); } /** * gpiod_add_lookup_table() - register GPIO device consumers * @table: table of consumers to register */ void gpiod_add_lookup_table(struct gpiod_lookup_table *table) { gpiod_add_lookup_tables(&table, 1); } EXPORT_SYMBOL_GPL(gpiod_add_lookup_table); /** * gpiod_remove_lookup_table() - unregister GPIO device consumers * @table: table of consumers to unregister */ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) { /* Nothing to remove */ if (!table) return; mutex_lock(&gpio_lookup_lock); list_del(&table->list); mutex_unlock(&gpio_lookup_lock); } EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table); /** * gpiod_add_hogs() - register a set of GPIO hogs from machine code * @hogs: table of gpio hog entries with a zeroed sentinel at the end */ void gpiod_add_hogs(struct gpiod_hog *hogs) { struct gpio_chip *gc; struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); for (hog = &hogs[0]; hog->chip_label; hog++) { list_add_tail(&hog->list, &gpio_machine_hogs); /* * The chip may have been registered earlier, so check if it * exists and, if so, try to hog the line now. */ gc = find_chip_by_name(hog->chip_label); if (gc) gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); } EXPORT_SYMBOL_GPL(gpiod_add_hogs); void gpiod_remove_hogs(struct gpiod_hog *hogs) { struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); for (hog = &hogs[0]; hog->chip_label; hog++) list_del(&hog->list); mutex_unlock(&gpio_machine_hogs_mutex); } EXPORT_SYMBOL_GPL(gpiod_remove_hogs); static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev) { const char *dev_id = dev ? 
dev_name(dev) : NULL; struct gpiod_lookup_table *table; mutex_lock(&gpio_lookup_lock); list_for_each_entry(table, &gpio_lookup_list, list) { if (table->dev_id && dev_id) { /* * Valid strings on both ends, must be identical to have * a match */ if (!strcmp(table->dev_id, dev_id)) goto found; } else { /* * One of the pointers is NULL, so both must be to have * a match */ if (dev_id == table->dev_id) goto found; } } table = NULL; found: mutex_unlock(&gpio_lookup_lock); return table; } static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, unsigned int idx, unsigned long *flags) { struct gpio_desc *desc = ERR_PTR(-ENOENT); struct gpiod_lookup_table *table; struct gpiod_lookup *p; table = gpiod_find_lookup_table(dev); if (!table) return desc; for (p = &table->table[0]; p->key; p++) { struct gpio_chip *gc; /* idx must always match exactly */ if (p->idx != idx) continue; /* If the lookup entry has a con_id, require exact match */ if (p->con_id && (!con_id || strcmp(p->con_id, con_id))) continue; if (p->chip_hwnum == U16_MAX) { desc = gpio_name_to_desc(p->key); if (desc) { *flags = p->flags; return desc; } dev_warn(dev, "cannot find GPIO line %s, deferring\n", p->key); return ERR_PTR(-EPROBE_DEFER); } gc = find_chip_by_name(p->key); if (!gc) { /* * As the lookup table indicates a chip with * p->key should exist, assume it may * still appear later and let the interested * consumer be probed again or let the Deferred * Probe infrastructure handle the error. */ dev_warn(dev, "cannot find GPIO chip %s, deferring\n", p->key); return ERR_PTR(-EPROBE_DEFER); } if (gc->ngpio <= p->chip_hwnum) { dev_err(dev, "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", idx, p->chip_hwnum, gc->ngpio - 1, gc->label); return ERR_PTR(-EINVAL); } desc = gpiochip_get_desc(gc, p->chip_hwnum); *flags = p->flags; return desc; } return desc; } static int platform_gpio_count(struct device *dev, const char *con_id) { struct gpiod_lookup_table *table; struct gpiod_lookup *p; unsigned int count = 0; table = gpiod_find_lookup_table(dev); if (!table) return -ENOENT; for (p = &table->table[0]; p->key; p++) { if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) || (!con_id && !p->con_id)) count++; } if (!count) return -ENOENT; return count; } static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode, struct device *consumer, const char *con_id, unsigned int idx, enum gpiod_flags *flags, unsigned long *lookupflags) { struct gpio_desc *desc = ERR_PTR(-ENOENT); if (is_of_node(fwnode)) { dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, con_id); desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags); } else if (is_acpi_node(fwnode)) { dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, con_id); desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags); } else if (is_software_node(fwnode)) { dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, con_id); desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags); } return desc; } static struct gpio_desc *gpiod_find_and_request(struct device *consumer, struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, enum gpiod_flags flags, const char *label, bool platform_lookup_allowed) { unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT; struct gpio_desc *desc; int ret; desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, &flags, &lookupflags); if (gpiod_not_found(desc) && platform_lookup_allowed) { /* * Either we are not using DT or ACPI, or their lookup 
did not * return a result. In that case, use platform lookup as a * fallback. */ dev_dbg(consumer, "using lookup tables for GPIO lookup\n"); desc = gpiod_find(consumer, con_id, idx, &lookupflags); } if (IS_ERR(desc)) { dev_dbg(consumer, "No GPIO consumer %s found\n", con_id); return desc; } /* * If a connection label was passed use that, else attempt to use * the device name as label */ ret = gpiod_request(desc, label); if (ret) { if (!(ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE)) return ERR_PTR(ret); /* * This happens when there are several consumers for * the same GPIO line: we just return here without * further initialization. It is a bit of a hack. * This is necessary to support fixed regulators. * * FIXME: Make this more sane and safe. */ dev_info(consumer, "nonexclusive access to GPIO for %s\n", con_id); return desc; } ret = gpiod_configure_flags(desc, con_id, lookupflags, flags); if (ret < 0) { dev_dbg(consumer, "setup of GPIO %s failed\n", con_id); gpiod_put(desc); return ERR_PTR(ret); } gpiod_line_state_notify(desc, GPIOLINE_CHANGED_REQUESTED); return desc; } /** * fwnode_gpiod_get_index - obtain a GPIO from firmware node * @fwnode: handle of the firmware node * @con_id: function within the GPIO consumer * @index: index of the GPIO to obtain for the consumer * @flags: GPIO initialization flags * @label: label to attach to the requested GPIO * * This function can be used for drivers that get their configuration * from opaque firmware. * * The function properly finds the corresponding GPIO using whatever is the * underlying firmware interface and then makes sure that the GPIO * descriptor is requested before it is returned to the caller. * * Returns: * On successful request the GPIO pin is configured in accordance with * provided @flags. * * In case of error an ERR_PTR() is returned. */ struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode, const char *con_id, int index, enum gpiod_flags flags, const char *label) { return gpiod_find_and_request(NULL, fwnode, con_id, index, flags, label, false); } EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index); /** * gpiod_count - return the number of GPIOs associated with a device / function * or -ENOENT if no GPIO has been assigned to the requested function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer */ int gpiod_count(struct device *dev, const char *con_id) { const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; int count = -ENOENT; if (is_of_node(fwnode)) count = of_gpio_get_count(dev, con_id); else if (is_acpi_node(fwnode)) count = acpi_gpio_count(dev, con_id); else if (is_software_node(fwnode)) count = swnode_gpio_count(fwnode, con_id); if (count < 0) count = platform_gpio_count(dev, con_id); return count; } EXPORT_SYMBOL_GPL(gpiod_count); /** * gpiod_get - obtain a GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @flags: optional GPIO initialization flags * * Return the GPIO descriptor corresponding to the function con_id of device * dev, -ENOENT if no GPIO has been assigned to the requested function, or * another IS_ERR() code if an error occurred while trying to acquire the GPIO. 
*/ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) { return gpiod_get_index(dev, con_id, 0, flags); } EXPORT_SYMBOL_GPL(gpiod_get); /** * gpiod_get_optional - obtain an optional GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @flags: optional GPIO initialization flags * * This is equivalent to gpiod_get(), except that when no GPIO was assigned to * the requested function it will return NULL. This is convenient for drivers * that need to handle optional GPIOs. */ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return gpiod_get_index_optional(dev, con_id, 0, flags); } EXPORT_SYMBOL_GPL(gpiod_get_optional); /** * gpiod_configure_flags - helper function to configure a given GPIO * @desc: gpio descriptor to configure * @con_id: function within the GPIO consumer * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from * of_find_gpio() or of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags * * Return 0 on success, -ENOENT if no GPIO has been assigned to the * requested function and/or index, or another negative error code if an error * occurred while trying to acquire the GPIO. */ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, unsigned long lflags, enum gpiod_flags dflags) { int ret; if (lflags & GPIO_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); if (lflags & GPIO_OPEN_DRAIN) set_bit(FLAG_OPEN_DRAIN, &desc->flags); else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) { /* * This enforces open drain mode from the consumer side. * This is necessary for some busses like I2C, but the lookup * should *REALLY* have specified them as open drain in the * first place, so print a little warning here. */ set_bit(FLAG_OPEN_DRAIN, &desc->flags); gpiod_warn(desc, "enforced open drain, please flag it properly in DT/ACPI DSDT/board file\n"); } if (lflags & GPIO_OPEN_SOURCE) set_bit(FLAG_OPEN_SOURCE, &desc->flags); if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) || ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) || ((lflags & GPIO_PULL_DOWN) && (lflags & GPIO_PULL_DISABLE))) { gpiod_err(desc, "multiple pull-up, pull-down or pull-disable enabled, invalid configuration\n"); return -EINVAL; } if (lflags & GPIO_PULL_UP) set_bit(FLAG_PULL_UP, &desc->flags); else if (lflags & GPIO_PULL_DOWN) set_bit(FLAG_PULL_DOWN, &desc->flags); else if (lflags & GPIO_PULL_DISABLE) set_bit(FLAG_BIAS_DISABLE, &desc->flags); ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY)); if (ret < 0) return ret; /* No particular flag request, return here... */ if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) { gpiod_dbg(desc, "no flags found for %s\n", con_id); return 0; } /* Process flags */ if (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ret = gpiod_direction_output(desc, !!(dflags & GPIOD_FLAGS_BIT_DIR_VAL)); else ret = gpiod_direction_input(desc); return ret; } /** * gpiod_get_index - obtain a GPIO from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @flags: optional GPIO initialization flags * * This variant of gpiod_get() allows accessing GPIOs other than the first * defined one for functions that define several GPIOs.
* * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the * requested function and/or index, or another IS_ERR() code if an error * occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags) { struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; const char *devname = dev ? dev_name(dev) : "?"; const char *label = con_id ?: devname; return gpiod_find_and_request(dev, fwnode, con_id, idx, flags, label, true); } EXPORT_SYMBOL_GPL(gpiod_get_index); /** * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO * function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @index: index of the GPIO to obtain in the consumer * @flags: optional GPIO initialization flags * * This is equivalent to gpiod_get_index(), except that when no GPIO with the * specified index was assigned to the requested function it will return NULL. * This is convenient for drivers that need to handle optional GPIOs. */ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) { struct gpio_desc *desc; desc = gpiod_get_index(dev, con_id, index, flags); if (gpiod_not_found(desc)) return NULL; return desc; } EXPORT_SYMBOL_GPL(gpiod_get_index_optional); /** * gpiod_hog - Hog the specified GPIO desc given the provided flags * @desc: gpio descriptor to hog * @name: gpio line name * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from * of_find_gpio() or of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags */ int gpiod_hog(struct gpio_desc *desc, const char *name, unsigned long lflags, enum gpiod_flags dflags) { struct gpio_chip *gc; struct gpio_desc *local_desc; int hwnum; int ret; gc = gpiod_to_chip(desc); hwnum = gpio_chip_hwgpio(desc); local_desc = gpiochip_request_own_desc(gc, hwnum, name, lflags, dflags); if (IS_ERR(local_desc)) { ret = PTR_ERR(local_desc); pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n", name, gc->label, hwnum, ret); return ret; } /* Mark GPIO as hogged so it can be identified and removed later */ set_bit(FLAG_IS_HOGGED, &desc->flags); gpiod_dbg(desc, "hogged as %s%s\n", (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input", (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : ""); return 0; } /** * gpiochip_free_hogs - Scan the gpio-controller chip and release its GPIO hogs * @gc: gpio chip to act on */ static void gpiochip_free_hogs(struct gpio_chip *gc) { struct gpio_desc *desc; for_each_gpio_desc_with_flag(gc, desc, FLAG_IS_HOGGED) gpiochip_free_own_desc(desc); } /** * gpiod_get_array - obtain multiple GPIOs from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @flags: optional GPIO initialization flags * * This function acquires all the GPIOs defined under a given function. * * Return a struct gpio_descs containing an array of descriptors, -ENOENT if * no GPIO has been assigned to the requested function, or another IS_ERR() * code if an error occurred while trying to acquire the GPIOs.
*/ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, const char *con_id, enum gpiod_flags flags) { struct gpio_desc *desc; struct gpio_descs *descs; struct gpio_array *array_info = NULL; struct gpio_chip *gc; int count, bitmap_size; size_t descs_size; count = gpiod_count(dev, con_id); if (count < 0) return ERR_PTR(count); descs_size = struct_size(descs, desc, count); descs = kzalloc(descs_size, GFP_KERNEL); if (!descs) return ERR_PTR(-ENOMEM); for (descs->ndescs = 0; descs->ndescs < count; descs->ndescs++) { desc = gpiod_get_index(dev, con_id, descs->ndescs, flags); if (IS_ERR(desc)) { gpiod_put_array(descs); return ERR_CAST(desc); } descs->desc[descs->ndescs] = desc; gc = gpiod_to_chip(desc); /* * If the pin hardware number of array member 0 is also 0, select * its chip as a candidate for the fast bitmap processing path. */ if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) { struct gpio_descs *array; bitmap_size = BITS_TO_LONGS(gc->ngpio > count ? gc->ngpio : count); array = krealloc(descs, descs_size + struct_size(array_info, invert_mask, 3 * bitmap_size), GFP_KERNEL | __GFP_ZERO); if (!array) { gpiod_put_array(descs); return ERR_PTR(-ENOMEM); } descs = array; array_info = (void *)descs + descs_size; array_info->get_mask = array_info->invert_mask + bitmap_size; array_info->set_mask = array_info->get_mask + bitmap_size; array_info->desc = descs->desc; array_info->size = count; array_info->chip = gc; bitmap_set(array_info->get_mask, descs->ndescs, count - descs->ndescs); bitmap_set(array_info->set_mask, descs->ndescs, count - descs->ndescs); descs->info = array_info; } /* If there is no cache for fast bitmap processing path, continue */ if (!array_info) continue; /* Unmark array members which don't belong to the 'fast' chip */ if (array_info->chip != gc) { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } /* * Detect array members which belong to the 'fast' chip * but whose pins are not in hardware order. */ else if (gpio_chip_hwgpio(desc) != descs->ndescs) { /* * Don't use fast path if all array members processed so * far belong to the same chip as this one but its pin * hardware number is different from its array index. */ if (bitmap_full(array_info->get_mask, descs->ndescs)) { array_info = NULL; } else { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } } else { /* Exclude open drain or open source from fast output */ if (gpiochip_line_is_open_drain(gc, descs->ndescs) || gpiochip_line_is_open_source(gc, descs->ndescs)) __clear_bit(descs->ndescs, array_info->set_mask); /* Identify 'fast' pins which require inversion */ if (gpiod_is_active_low(desc)) __set_bit(descs->ndescs, array_info->invert_mask); } } if (array_info) dev_dbg(dev, "GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n", array_info->chip->label, array_info->size, *array_info->get_mask, *array_info->set_mask, *array_info->invert_mask); return descs; } EXPORT_SYMBOL_GPL(gpiod_get_array); /** * gpiod_get_array_optional - obtain multiple GPIOs from a multi-index GPIO * function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @flags: optional GPIO initialization flags * * This is equivalent to gpiod_get_array(), except that when no GPIO was * assigned to the requested function it will return NULL.
*/ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { struct gpio_descs *descs; descs = gpiod_get_array(dev, con_id, flags); if (gpiod_not_found(descs)) return NULL; return descs; } EXPORT_SYMBOL_GPL(gpiod_get_array_optional); /** * gpiod_put - dispose of a GPIO descriptor * @desc: GPIO descriptor to dispose of * * No descriptor can be used after gpiod_put() has been called on it. */ void gpiod_put(struct gpio_desc *desc) { if (desc) gpiod_free(desc); } EXPORT_SYMBOL_GPL(gpiod_put); /** * gpiod_put_array - dispose of multiple GPIO descriptors * @descs: struct gpio_descs containing an array of descriptors */ void gpiod_put_array(struct gpio_descs *descs) { unsigned int i; for (i = 0; i < descs->ndescs; i++) gpiod_put(descs->desc[i]); kfree(descs); } EXPORT_SYMBOL_GPL(gpiod_put_array); static int gpio_stub_drv_probe(struct device *dev) { /* * The DT nodes of some GPIO chips have a "compatible" property, but * never have a struct device added and probed by a driver to register * the GPIO chip with gpiolib. In such cases, fw_devlink=on will cause * the consumers of the GPIO chip to get probe deferred forever because * they will be waiting for a device associated with the GPIO chip * firmware node to get added and bound to a driver. * * To allow these consumers to probe, we associate the struct * gpio_device of the GPIO chip with the firmware node and then simply * bind it to this stub driver. */ return 0; } static struct device_driver gpio_stub_drv = { .name = "gpio_stub_drv", .bus = &gpio_bus_type, .probe = gpio_stub_drv_probe, }; static int __init gpiolib_dev_init(void) { int ret; /* Register GPIO sysfs bus */ ret = bus_register(&gpio_bus_type); if (ret < 0) { pr_err("gpiolib: could not register GPIO bus type\n"); return ret; } ret = driver_register(&gpio_stub_drv); if (ret < 0) { pr_err("gpiolib: could not register GPIO stub driver\n"); bus_unregister(&gpio_bus_type); return ret; } ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME); if (ret < 0) { pr_err("gpiolib: failed to allocate char dev region\n"); driver_unregister(&gpio_stub_drv); bus_unregister(&gpio_bus_type); return ret; } gpiolib_initialized = true; gpiochip_setup_devs(); #if IS_ENABLED(CONFIG_OF_DYNAMIC) && IS_ENABLED(CONFIG_OF_GPIO) WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier)); #endif /* CONFIG_OF_DYNAMIC && CONFIG_OF_GPIO */ return ret; } core_initcall(gpiolib_dev_init); #ifdef CONFIG_DEBUG_FS static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) { struct gpio_chip *gc = gdev->chip; struct gpio_desc *desc; unsigned gpio = gdev->base; int value; bool is_out; bool is_irq; bool active_low; for_each_gpio_desc(gc, desc) { if (test_bit(FLAG_REQUESTED, &desc->flags)) { gpiod_get_direction(desc); is_out = test_bit(FLAG_IS_OUT, &desc->flags); value = gpio_chip_get_value(gc, desc); is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags); active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags); seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s\n", gpio, desc->name ?: "", desc->label, is_out ? "out" : "in ", value >= 0 ? (value ? "hi" : "lo") : "? ", is_irq ? "IRQ " : "", active_low ?
"ACTIVE LOW" : ""); } else if (desc->name) { seq_printf(s, " gpio-%-3d (%-20.20s)\n", gpio, desc->name); } gpio++; } } static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos) { unsigned long flags; struct gpio_device *gdev = NULL; loff_t index = *pos; s->private = ""; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) if (index-- == 0) { spin_unlock_irqrestore(&gpio_lock, flags); return gdev; } spin_unlock_irqrestore(&gpio_lock, flags); return NULL; } static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos) { unsigned long flags; struct gpio_device *gdev = v; void *ret = NULL; spin_lock_irqsave(&gpio_lock, flags); if (list_is_last(&gdev->list, &gpio_devices)) ret = NULL; else ret = list_first_entry(&gdev->list, struct gpio_device, list); spin_unlock_irqrestore(&gpio_lock, flags); s->private = "\n"; ++*pos; return ret; } static void gpiolib_seq_stop(struct seq_file *s, void *v) { } static int gpiolib_seq_show(struct seq_file *s, void *v) { struct gpio_device *gdev = v; struct gpio_chip *gc = gdev->chip; struct device *parent; if (!gc) { seq_printf(s, "%s%s: (dangling chip)", (char *)s->private, dev_name(&gdev->dev)); return 0; } seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private, dev_name(&gdev->dev), gdev->base, gdev->base + gdev->ngpio - 1); parent = gc->parent; if (parent) seq_printf(s, ", parent: %s/%s", parent->bus ? parent->bus->name : "no-bus", dev_name(parent)); if (gc->label) seq_printf(s, ", %s", gc->label); if (gc->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); if (gc->dbg_show) gc->dbg_show(s, gc); else gpiolib_dbg_show(s, gdev); return 0; } static const struct seq_operations gpiolib_sops = { .start = gpiolib_seq_start, .next = gpiolib_seq_next, .stop = gpiolib_seq_stop, .show = gpiolib_seq_show, }; DEFINE_SEQ_ATTRIBUTE(gpiolib); static int __init gpiolib_debugfs_init(void) { /* /sys/kernel/debug/gpio */ debugfs_create_file("gpio", 0444, NULL, NULL, &gpiolib_fops); return 0; } subsys_initcall(gpiolib_debugfs_init); #endif /* DEBUG_FS */
linux-master
drivers/gpio/gpiolib.c
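The descriptor API implemented above is meant to be driven from consumer drivers; a minimal sketch of that side follows, assuming a hypothetical platform driver with a "reset" line described in DT or ACPI. The driver name, con_id, and timing values are illustrative only, not part of gpiolib.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: request an optional "reset" GPIO and pulse it. */
static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/*
	 * Logical polarity: gpiod_set_value_cansleep() takes the ACTIVE_LOW
	 * flag from the firmware description into account, so '1' always
	 * means "asserted" regardless of the line's electrical polarity.
	 */
	reset = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	usleep_range(1000, 2000);		/* keep reset asserted briefly */
	gpiod_set_value_cansleep(reset, 0);	/* deassert */

	return 0;
}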
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2021~2022 NXP * * The driver exports a standard gpiochip interface * to control the PIN resources on SCU domain. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> #include <linux/firmware/imx/svc/rm.h> #include <dt-bindings/firmware/imx/rsrc.h> struct scu_gpio_priv { struct gpio_chip chip; struct mutex lock; struct device *dev; struct imx_sc_ipc *handle; }; static unsigned int scu_rsrc_arr[] = { IMX_SC_R_BOARD_R0, IMX_SC_R_BOARD_R1, IMX_SC_R_BOARD_R2, IMX_SC_R_BOARD_R3, IMX_SC_R_BOARD_R4, IMX_SC_R_BOARD_R5, IMX_SC_R_BOARD_R6, IMX_SC_R_BOARD_R7, }; static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct scu_gpio_priv *priv = gpiochip_get_data(chip); int level; int err; if (offset >= chip->ngpio) return -EINVAL; mutex_lock(&priv->lock); /* to read PIN state via scu api */ err = imx_sc_misc_get_control(priv->handle, scu_rsrc_arr[offset], 0, &level); mutex_unlock(&priv->lock); if (err) { dev_err(priv->dev, "SCU get failed: %d\n", err); return err; } return level; } static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct scu_gpio_priv *priv = gpiochip_get_data(chip); int err; if (offset >= chip->ngpio) return; mutex_lock(&priv->lock); /* to set PIN output level via scu api */ err = imx_sc_misc_set_control(priv->handle, scu_rsrc_arr[offset], 0, value); mutex_unlock(&priv->lock); if (err) dev_err(priv->dev, "SCU set (%d) failed: %d\n", scu_rsrc_arr[offset], err); } static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { if (offset >= chip->ngpio) return -EINVAL; return GPIO_LINE_DIRECTION_OUT; } static int imx_scu_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct scu_gpio_priv *priv; struct gpio_chip *gc; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; ret = imx_scu_get_handle(&priv->handle); if (ret) return ret; priv->dev = dev; mutex_init(&priv->lock); gc = &priv->chip; gc->base = -1; gc->parent = dev; gc->ngpio = ARRAY_SIZE(scu_rsrc_arr); gc->label = dev_name(dev); gc->get = imx_scu_gpio_get; gc->set = imx_scu_gpio_set; gc->get_direction = imx_scu_gpio_get_direction; platform_set_drvdata(pdev, priv); return devm_gpiochip_add_data(dev, gc, priv); } static const struct of_device_id imx_scu_gpio_dt_ids[] = { { .compatible = "fsl,imx8qxp-sc-gpio" }, { /* sentinel */ } }; static struct platform_driver imx_scu_gpio_driver = { .driver = { .name = "gpio-imx-scu", .of_match_table = imx_scu_gpio_dt_ids, }, .probe = imx_scu_gpio_probe, }; static int __init _imx_scu_gpio_init(void) { return platform_driver_register(&imx_scu_gpio_driver); } subsys_initcall_sync(_imx_scu_gpio_init); MODULE_AUTHOR("Shenwei Wang <[email protected]>"); MODULE_DESCRIPTION("NXP GPIO over IMX SCU API");
linux-master
drivers/gpio/gpio-imx-scu.c
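Because the chip above sets gc->label to dev_name(dev), board code can hand its lines to consumers through the lookup-table API from gpiolib.c. A hedged sketch follows; the chip label "gpio-imx-scu" and the consumer name "leds-gpio" are placeholders that must match the real device names, and the line/con_id choice is invented for illustration.

#include <linux/gpio/machine.h>
#include <linux/init.h>

/*
 * Hypothetical board file: route SCU board line 3 to a "status" consumer.
 * The first GPIO_LOOKUP() argument must match gc->label, i.e. the
 * dev_name() of the SCU GPIO device.
 */
static struct gpiod_lookup_table scu_gpios_table = {
	.dev_id = "leds-gpio",		/* consumer device name (illustrative) */
	.table = {
		GPIO_LOOKUP("gpio-imx-scu", 3, "status", GPIO_ACTIVE_HIGH),
		{ }			/* zeroed sentinel terminates the table */
	},
};

static int __init board_scu_gpios_init(void)
{
	gpiod_add_lookup_table(&scu_gpios_table);
	return 0;
}
device_initcall(board_scu_gpios_init);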
// SPDX-License-Identifier: GPL-2.0-only /* * 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver * * Copyright (C) 2010 Gabor Juhos <[email protected]> * Copyright (C) 2010 Miguel Gaio <[email protected]> */ #include <linux/bitops.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/spi/spi.h> #define GEN_74X164_NUMBER_GPIOS 8 struct gen_74x164_chip { struct gpio_chip gpio_chip; struct mutex lock; struct gpio_desc *gpiod_oe; u32 registers; /* * Since the registers are chained, every byte sent will make * the previous byte shift to the next register in the * chain. Thus, the first byte sent will end up in the last * register at the end of the transfer. So, to have a logical * numbering, store the bytes in reverse order. */ u8 buffer[]; }; static int __gen_74x164_write_config(struct gen_74x164_chip *chip) { return spi_write(to_spi_device(chip->gpio_chip.parent), chip->buffer, chip->registers); } static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset) { struct gen_74x164_chip *chip = gpiochip_get_data(gc); u8 bank = chip->registers - 1 - offset / 8; u8 pin = offset % 8; int ret; mutex_lock(&chip->lock); ret = (chip->buffer[bank] >> pin) & 0x1; mutex_unlock(&chip->lock); return ret; } static void gen_74x164_set_value(struct gpio_chip *gc, unsigned offset, int val) { struct gen_74x164_chip *chip = gpiochip_get_data(gc); u8 bank = chip->registers - 1 - offset / 8; u8 pin = offset % 8; mutex_lock(&chip->lock); if (val) chip->buffer[bank] |= (1 << pin); else chip->buffer[bank] &= ~(1 << pin); __gen_74x164_write_config(chip); mutex_unlock(&chip->lock); } static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct gen_74x164_chip *chip = gpiochip_get_data(gc); unsigned long offset; unsigned long bankmask; size_t bank; unsigned long bitmask; mutex_lock(&chip->lock); for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) { bank = chip->registers - 1 - offset / 8; bitmask = bitmap_get_value8(bits, offset) & bankmask; chip->buffer[bank] &= ~bankmask; chip->buffer[bank] |= bitmask; } __gen_74x164_write_config(chip); mutex_unlock(&chip->lock); } static int gen_74x164_direction_output(struct gpio_chip *gc, unsigned offset, int val) { gen_74x164_set_value(gc, offset, val); return 0; } static int gen_74x164_probe(struct spi_device *spi) { struct gen_74x164_chip *chip; u32 nregs; int ret; /* * bits_per_word cannot be configured in platform data */ spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) return ret; ret = device_property_read_u32(&spi->dev, "registers-number", &nregs); if (ret) { dev_err(&spi->dev, "Missing 'registers-number' property.\n"); return -EINVAL; } chip = devm_kzalloc(&spi->dev, sizeof(*chip) + nregs, GFP_KERNEL); if (!chip) return -ENOMEM; chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(chip->gpiod_oe)) return PTR_ERR(chip->gpiod_oe); gpiod_set_value_cansleep(chip->gpiod_oe, 1); spi_set_drvdata(spi, chip); chip->gpio_chip.label = spi->modalias; chip->gpio_chip.direction_output = gen_74x164_direction_output; chip->gpio_chip.get = gen_74x164_get_value; chip->gpio_chip.set = gen_74x164_set_value; chip->gpio_chip.set_multiple = gen_74x164_set_multiple; chip->gpio_chip.base = -1; chip->registers = nregs; chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers; chip->gpio_chip.can_sleep = true; 
chip->gpio_chip.parent = &spi->dev; chip->gpio_chip.owner = THIS_MODULE; mutex_init(&chip->lock); ret = __gen_74x164_write_config(chip); if (ret) { dev_err(&spi->dev, "Failed writing: %d\n", ret); goto exit_destroy; } ret = gpiochip_add_data(&chip->gpio_chip, chip); if (!ret) return 0; exit_destroy: mutex_destroy(&chip->lock); return ret; } static void gen_74x164_remove(struct spi_device *spi) { struct gen_74x164_chip *chip = spi_get_drvdata(spi); gpiod_set_value_cansleep(chip->gpiod_oe, 0); gpiochip_remove(&chip->gpio_chip); mutex_destroy(&chip->lock); } static const struct spi_device_id gen_74x164_spi_ids[] = { { .name = "74hc595" }, { .name = "74lvc594" }, {}, }; MODULE_DEVICE_TABLE(spi, gen_74x164_spi_ids); static const struct of_device_id gen_74x164_dt_ids[] = { { .compatible = "fairchild,74hc595" }, { .compatible = "nxp,74lvc594" }, {}, }; MODULE_DEVICE_TABLE(of, gen_74x164_dt_ids); static struct spi_driver gen_74x164_driver = { .driver = { .name = "74x164", .of_match_table = gen_74x164_dt_ids, }, .probe = gen_74x164_probe, .remove = gen_74x164_remove, .id_table = gen_74x164_spi_ids, }; module_spi_driver(gen_74x164_driver); MODULE_AUTHOR("Gabor Juhos <[email protected]>"); MODULE_AUTHOR("Miguel Gaio <[email protected]>"); MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bits shift register"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-74x164.c
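The set_multiple() hook above is what the gpiod_set_array_value_cansleep() path in gpiolib.c ultimately calls, so all eight outputs can change in a single SPI transfer instead of one write per bit. A hedged consumer sketch of that path, assuming a hypothetical "data" GPIO array bound to such an expander:

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/types.h>

/*
 * Hypothetical helper: latch an 8-bit pattern onto a "data" GPIO array.
 * With an expander like the one above this funnels into
 * gen_74x164_set_multiple() and costs a single SPI transfer.
 */
static int write_pattern(struct device *dev, u8 pattern)
{
	DECLARE_BITMAP(values, 8);
	struct gpio_descs *data;
	int ret;

	data = gpiod_get_array(dev, "data", GPIOD_OUT_LOW);
	if (IS_ERR(data))
		return PTR_ERR(data);

	bitmap_zero(values, 8);
	bitmap_set_value8(values, pattern, 0);	/* bits 0..7 <- pattern */

	/* The expander sits on SPI and can sleep, so use _cansleep. */
	ret = gpiod_set_array_value_cansleep(data->ndescs, data->desc,
					     data->info, values);
	gpiod_put_array(data);
	return ret;
}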
// SPDX-License-Identifier: GPL-2.0-or-later /* * GPIO driver for Fintek and Nuvoton Super-I/O chips * * Copyright (C) 2010-2013 LaCie * * Author: Simon Guinot <[email protected]> */ #define DRVNAME "gpio-f7188x" #define pr_fmt(fmt) DRVNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio/driver.h> #include <linux/bitops.h> /* * Super-I/O registers */ #define SIO_LDSEL 0x07 /* Logical device select */ #define SIO_DEVID 0x20 /* Device ID (2 bytes) */ #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ #define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ /* * Fintek devices. */ #define SIO_FINTEK_DEVREV 0x22 /* Fintek Device revision */ #define SIO_FINTEK_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */ #define SIO_F71869_ID 0x0814 /* F71869 chipset ID */ #define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */ #define SIO_F71882_ID 0x0541 /* F71882 chipset ID */ #define SIO_F71889_ID 0x0909 /* F71889 chipset ID */ #define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */ #define SIO_F81866_ID 0x1010 /* F81866 chipset ID */ #define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for F81966 */ #define SIO_F81865_ID 0x0704 /* F81865 chipset ID */ #define SIO_LD_GPIO_FINTEK 0x06 /* GPIO logical device */ /* * Nuvoton devices. */ #define SIO_NCT6126D_ID 0xD283 /* NCT6126D chipset ID */ #define SIO_LD_GPIO_NUVOTON 0x07 /* GPIO logical device */ enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866, f81804, f81865, nct6126d, }; static const char * const f7188x_names[] = { "f71869", "f71869a", "f71882fg", "f71889a", "f71889f", "f81866", "f81804", "f81865", "nct6126d", }; struct f7188x_sio { int addr; int device; enum chips type; }; struct f7188x_gpio_bank { struct gpio_chip chip; unsigned int regbase; struct f7188x_gpio_data *data; }; struct f7188x_gpio_data { struct f7188x_sio *sio; int nr_bank; struct f7188x_gpio_bank *bank; }; /* * Super-I/O functions. */ static inline int superio_inb(int base, int reg) { outb(reg, base); return inb(base + 1); } static int superio_inw(int base, int reg) { int val; outb(reg++, base); val = inb(base + 1) << 8; outb(reg, base); val |= inb(base + 1); return val; } static inline void superio_outb(int base, int reg, int val) { outb(reg, base); outb(val, base + 1); } static inline int superio_enter(int base) { /* Don't step on other drivers' I/O space by accident. */ if (!request_muxed_region(base, 2, DRVNAME)) { pr_err("I/O address 0x%04x already in use\n", base); return -EBUSY; } /* According to the datasheet the key must be sent twice. */ outb(SIO_UNLOCK_KEY, base); outb(SIO_UNLOCK_KEY, base); return 0; } static inline void superio_select(int base, int ld) { outb(SIO_LDSEL, base); outb(ld, base + 1); } static inline void superio_exit(int base) { outb(SIO_LOCK_KEY, base); release_region(base, 2); } /* * GPIO chip.
*/ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value); static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value); static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset, unsigned long config); #define F7188X_GPIO_BANK(_base, _ngpio, _regbase, _label) \ { \ .chip = { \ .label = _label, \ .owner = THIS_MODULE, \ .get_direction = f7188x_gpio_get_direction, \ .direction_input = f7188x_gpio_direction_in, \ .get = f7188x_gpio_get, \ .direction_output = f7188x_gpio_direction_out, \ .set = f7188x_gpio_set, \ .set_config = f7188x_gpio_set_config, \ .base = _base, \ .ngpio = _ngpio, \ .can_sleep = true, \ }, \ .regbase = _regbase, \ } #define f7188x_gpio_dir(base) ((base) + 0) #define f7188x_gpio_data_out(base) ((base) + 1) #define f7188x_gpio_data_in(base) ((base) + 2) /* Output mode register (0:open drain 1:push-pull). */ #define f7188x_gpio_out_mode(base) ((base) + 3) #define f7188x_gpio_dir_invert(type) ((type) == nct6126d) #define f7188x_gpio_data_single(type) ((type) == nct6126d) static struct f7188x_gpio_bank f71869_gpio_bank[] = { F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 6, 0x90, DRVNAME "-6"), }; static struct f7188x_gpio_bank f71869a_gpio_bank[] = { F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"), F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"), }; static struct f7188x_gpio_bank f71882_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 4, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 4, 0xB0, DRVNAME "-4"), }; static struct f7188x_gpio_bank f71889a_gpio_bank[] = { F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"), F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"), }; static struct f7188x_gpio_bank f71889_gpio_bank[] = { F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"), F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"), }; static struct f7188x_gpio_bank f81866_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0x90, 
DRVNAME "-6"), F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"), F7188X_GPIO_BANK(80, 8, 0x88, DRVNAME "-8"), }; static struct f7188x_gpio_bank f81804_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-3"), F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-4"), F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-5"), F7188X_GPIO_BANK(90, 8, 0x98, DRVNAME "-6"), }; static struct f7188x_gpio_bank f81865_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"), F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"), }; static struct f7188x_gpio_bank nct6126d_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"), F7188X_GPIO_BANK(30, 8, 0xEC, DRVNAME "-3"), F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"), F7188X_GPIO_BANK(70, 8, 0xFC, DRVNAME "-7"), }; static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { int err; struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 dir; err = superio_enter(sio->addr); if (err) return err; superio_select(sio->addr, sio->device); dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase)); superio_exit(sio->addr); if (f7188x_gpio_dir_invert(sio->type)) dir = ~dir; if (dir & BIT(offset)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { int err; struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 dir; err = superio_enter(sio->addr); if (err) return err; superio_select(sio->addr, sio->device); dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase)); if (f7188x_gpio_dir_invert(sio->type)) dir |= BIT(offset); else dir &= ~BIT(offset); superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir); superio_exit(sio->addr); return 0; } static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset) { int err; struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 dir, data; err = superio_enter(sio->addr); if (err) return err; superio_select(sio->addr, sio->device); dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase)); dir = !!(dir & BIT(offset)); if (f7188x_gpio_data_single(sio->type) || dir) data = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase)); else data = superio_inb(sio->addr, f7188x_gpio_data_in(bank->regbase)); superio_exit(sio->addr); return !!(data & BIT(offset)); } static int f7188x_gpio_direction_out(struct gpio_chip *chip, unsigned offset, int value) { int err; struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 dir, data_out; err = superio_enter(sio->addr); if (err) return err; superio_select(sio->addr, sio->device); data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase)); if (value) data_out |= BIT(offset); else data_out &= ~BIT(offset); superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out); dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase)); if 
(f7188x_gpio_dir_invert(sio->type)) dir &= ~BIT(offset); else dir |= BIT(offset); superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir); superio_exit(sio->addr); return 0; } static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { int err; struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 data_out; err = superio_enter(sio->addr); if (err) return; superio_select(sio->addr, sio->device); data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase)); if (value) data_out |= BIT(offset); else data_out &= ~BIT(offset); superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out); superio_exit(sio->addr); } static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset, unsigned long config) { int err; enum pin_config_param param = pinconf_to_config_param(config); struct f7188x_gpio_bank *bank = gpiochip_get_data(chip); struct f7188x_sio *sio = bank->data->sio; u8 data; if (param != PIN_CONFIG_DRIVE_OPEN_DRAIN && param != PIN_CONFIG_DRIVE_PUSH_PULL) return -ENOTSUPP; err = superio_enter(sio->addr); if (err) return err; superio_select(sio->addr, sio->device); data = superio_inb(sio->addr, f7188x_gpio_out_mode(bank->regbase)); if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN) data &= ~BIT(offset); else data |= BIT(offset); superio_outb(sio->addr, f7188x_gpio_out_mode(bank->regbase), data); superio_exit(sio->addr); return 0; } /* * Platform device and driver. */ static int f7188x_gpio_probe(struct platform_device *pdev) { int err; int i; struct f7188x_sio *sio = dev_get_platdata(&pdev->dev); struct f7188x_gpio_data *data; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; switch (sio->type) { case f71869: data->nr_bank = ARRAY_SIZE(f71869_gpio_bank); data->bank = f71869_gpio_bank; break; case f71869a: data->nr_bank = ARRAY_SIZE(f71869a_gpio_bank); data->bank = f71869a_gpio_bank; break; case f71882fg: data->nr_bank = ARRAY_SIZE(f71882_gpio_bank); data->bank = f71882_gpio_bank; break; case f71889a: data->nr_bank = ARRAY_SIZE(f71889a_gpio_bank); data->bank = f71889a_gpio_bank; break; case f71889f: data->nr_bank = ARRAY_SIZE(f71889_gpio_bank); data->bank = f71889_gpio_bank; break; case f81866: data->nr_bank = ARRAY_SIZE(f81866_gpio_bank); data->bank = f81866_gpio_bank; break; case f81804: data->nr_bank = ARRAY_SIZE(f81804_gpio_bank); data->bank = f81804_gpio_bank; break; case f81865: data->nr_bank = ARRAY_SIZE(f81865_gpio_bank); data->bank = f81865_gpio_bank; break; case nct6126d: data->nr_bank = ARRAY_SIZE(nct6126d_gpio_bank); data->bank = nct6126d_gpio_bank; break; default: return -ENODEV; } data->sio = sio; platform_set_drvdata(pdev, data); /* For each GPIO bank, register a GPIO chip. 
*/ for (i = 0; i < data->nr_bank; i++) { struct f7188x_gpio_bank *bank = &data->bank[i]; bank->chip.parent = &pdev->dev; bank->data = data; err = devm_gpiochip_add_data(&pdev->dev, &bank->chip, bank); if (err) { dev_err(&pdev->dev, "Failed to register gpiochip %d: %d\n", i, err); return err; } } return 0; } static int __init f7188x_find(int addr, struct f7188x_sio *sio) { int err; u16 devid; u16 manid; err = superio_enter(addr); if (err) return err; err = -ENODEV; sio->device = SIO_LD_GPIO_FINTEK; devid = superio_inw(addr, SIO_DEVID); switch (devid) { case SIO_F71869_ID: sio->type = f71869; break; case SIO_F71869A_ID: sio->type = f71869a; break; case SIO_F71882_ID: sio->type = f71882fg; break; case SIO_F71889A_ID: sio->type = f71889a; break; case SIO_F71889_ID: sio->type = f71889f; break; case SIO_F81866_ID: sio->type = f81866; break; case SIO_F81804_ID: sio->type = f81804; break; case SIO_F81865_ID: sio->type = f81865; break; case SIO_NCT6126D_ID: sio->device = SIO_LD_GPIO_NUVOTON; sio->type = nct6126d; break; default: pr_info("Unsupported Fintek device 0x%04x\n", devid); goto err; } /* double check manufacturer where possible */ if (sio->type != nct6126d) { manid = superio_inw(addr, SIO_FINTEK_MANID); if (manid != SIO_FINTEK_ID) { pr_debug("Not a Fintek device at 0x%08x\n", addr); goto err; } } sio->addr = addr; err = 0; pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr); if (sio->type != nct6126d) pr_info(" revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV)); err: superio_exit(addr); return err; } static struct platform_device *f7188x_gpio_pdev; static int __init f7188x_gpio_device_add(const struct f7188x_sio *sio) { int err; f7188x_gpio_pdev = platform_device_alloc(DRVNAME, -1); if (!f7188x_gpio_pdev) return -ENOMEM; err = platform_device_add_data(f7188x_gpio_pdev, sio, sizeof(*sio)); if (err) { pr_err("Platform data allocation failed\n"); goto err; } err = platform_device_add(f7188x_gpio_pdev); if (err) { pr_err("Device addition failed\n"); goto err; } return 0; err: platform_device_put(f7188x_gpio_pdev); return err; } /* * Try to match a supported Fintek device by reading the (hard-wired) * configuration I/O ports. If available, then register both the platform * device and driver to support the GPIOs. */ static struct platform_driver f7188x_gpio_driver = { .driver = { .name = DRVNAME, }, .probe = f7188x_gpio_probe, }; static int __init f7188x_gpio_init(void) { int err; struct f7188x_sio sio; if (f7188x_find(0x2e, &sio) && f7188x_find(0x4e, &sio)) return -ENODEV; err = platform_driver_register(&f7188x_gpio_driver); if (!err) { err = f7188x_gpio_device_add(&sio); if (err) platform_driver_unregister(&f7188x_gpio_driver); } return err; } subsys_initcall(f7188x_gpio_init); static void __exit f7188x_gpio_exit(void) { platform_device_unregister(f7188x_gpio_pdev); platform_driver_unregister(&f7188x_gpio_driver); } module_exit(f7188x_gpio_exit); MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG, F71889A, F71889F and F81866"); MODULE_AUTHOR("Simon Guinot <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-f7188x.c
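The f7188x driver above funnels every register access through the same Super-I/O sequence: enter the configuration space, select the GPIO logical device, read or write one register, exit. Below is a minimal sketch of the direction read-modify-write at the heart of f7188x_gpio_direction_out(); the port_out()/port_in() stubs and bank_dir_out() helper are hypothetical stand-ins (the driver uses outb()/inb() and its superio_* helpers), and on some chips the direction bits are inverted, which is what f7188x_gpio_dir_invert() reports:

#include <stdint.h>

#define SIO_REG 0x2e /* address port; 0x4e is the usual alternative */
#define SIO_VAL 0x2f /* data port */

/* Hypothetical stubs standing in for outb()/inb(). */
extern void port_out(uint8_t val, uint16_t port);
extern uint8_t port_in(uint16_t port);

static uint8_t sio_read(uint8_t reg)
{
	port_out(reg, SIO_REG);
	return port_in(SIO_VAL);
}

static void sio_write(uint8_t reg, uint8_t val)
{
	port_out(reg, SIO_REG);
	port_out(val, SIO_VAL);
}

/*
 * Switch one line of a bank to output: read the bank's direction
 * register, flip the line's bit (cleared rather than set on chips
 * with inverted direction semantics), and write it back.
 */
static void bank_dir_out(uint8_t dir_reg, unsigned int offset, int inverted)
{
	uint8_t dir = sio_read(dir_reg);

	if (inverted)
		dir &= ~(1u << offset);
	else
		dir |= 1u << offset;

	sio_write(dir_reg, dir);
}

The real driver additionally brackets every such sequence with superio_enter()/superio_exit(), so concurrent users of the shared 0x2e/0x2f port pair stay serialized.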
// SPDX-License-Identifier: GPL-2.0 #include <linux/bitops.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> #include <linux/kstrtox.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include "gpiolib.h" #include "gpiolib-sysfs.h" struct kernfs_node; #define GPIO_IRQF_TRIGGER_NONE 0 #define GPIO_IRQF_TRIGGER_FALLING BIT(0) #define GPIO_IRQF_TRIGGER_RISING BIT(1) #define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \ GPIO_IRQF_TRIGGER_RISING) struct gpiod_data { struct gpio_desc *desc; struct mutex mutex; struct kernfs_node *value_kn; int irq; unsigned char irq_flags; bool direction_can_change; }; /* * Lock to serialise gpiod export and unexport, and prevent re-export of * gpiod whose chip is being unregistered. */ static DEFINE_MUTEX(sysfs_lock); /* * /sys/class/gpio/gpioN... only for GPIOs that are exported * /direction * * MAY BE OMITTED if kernel won't allow direction changes * * is read/write as "in" or "out" * * may also be written as "high" or "low", initializing * output value as specified ("out" implies "low") * /value * * always readable, subject to hardware behavior * * may be writable, as zero/nonzero * /edge * * configures behavior of poll(2) on /value * * available only if pin can generate IRQs on input * * is read/write as "none", "falling", "rising", or "both" * /active_low * * configures polarity of /value * * is read/write as zero/nonzero * * also affects existing and subsequent "falling" and "rising" * /edge configuration */ static ssize_t direction_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; int value; mutex_lock(&data->mutex); gpiod_get_direction(desc); value = !!test_bit(FLAG_IS_OUT, &desc->flags); mutex_unlock(&data->mutex); return sysfs_emit(buf, "%s\n", value ? "out" : "in"); } static ssize_t direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; ssize_t status; mutex_lock(&data->mutex); if (sysfs_streq(buf, "high")) status = gpiod_direction_output_raw(desc, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpiod_direction_output_raw(desc, 0); else if (sysfs_streq(buf, "in")) status = gpiod_direction_input(desc); else status = -EINVAL; mutex_unlock(&data->mutex); return status ? 
: size; } static DEVICE_ATTR_RW(direction); static ssize_t value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; ssize_t status; mutex_lock(&data->mutex); status = gpiod_get_value_cansleep(desc); mutex_unlock(&data->mutex); if (status < 0) return status; return sysfs_emit(buf, "%zd\n", status); } static ssize_t value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; ssize_t status; long value; status = kstrtol(buf, 0, &value); mutex_lock(&data->mutex); if (!test_bit(FLAG_IS_OUT, &desc->flags)) { status = -EPERM; } else if (status == 0) { gpiod_set_value_cansleep(desc, value); status = size; } mutex_unlock(&data->mutex); return status; } static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { struct gpiod_data *data = priv; sysfs_notify_dirent(data->value_kn); return IRQ_HANDLED; } /* Caller holds gpiod-data mutex. */ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; unsigned long irq_flags; int ret; data->irq = gpiod_to_irq(desc); if (data->irq < 0) return -EIO; data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value"); if (!data->value_kn) return -ENODEV; irq_flags = IRQF_SHARED; if (flags & GPIO_IRQF_TRIGGER_FALLING) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; if (flags & GPIO_IRQF_TRIGGER_RISING) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; /* * FIXME: This should be done in the irq_request_resources callback * when the irq is requested, but a few drivers currently fail * to do so. * * Remove this redundant call (along with the corresponding * unlock) when those drivers have been fixed. */ ret = gpiochip_lock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc)); if (ret < 0) goto err_put_kn; ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags, "gpiolib", data); if (ret < 0) goto err_unlock; data->irq_flags = flags; return 0; err_unlock: gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc)); err_put_kn: sysfs_put(data->value_kn); return ret; } /* * Caller holds gpiod-data mutex (unless called after class-device * deregistration). 
*/ static void gpio_sysfs_free_irq(struct device *dev) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; data->irq_flags = 0; free_irq(data->irq, data); gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc)); sysfs_put(data->value_kn); } static const char * const trigger_names[] = { [GPIO_IRQF_TRIGGER_NONE] = "none", [GPIO_IRQF_TRIGGER_FALLING] = "falling", [GPIO_IRQF_TRIGGER_RISING] = "rising", [GPIO_IRQF_TRIGGER_BOTH] = "both", }; static ssize_t edge_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpiod_data *data = dev_get_drvdata(dev); int flags; mutex_lock(&data->mutex); flags = data->irq_flags; mutex_unlock(&data->mutex); if (flags >= ARRAY_SIZE(trigger_names)) return 0; return sysfs_emit(buf, "%s\n", trigger_names[flags]); } static ssize_t edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpiod_data *data = dev_get_drvdata(dev); ssize_t status = size; int flags; flags = sysfs_match_string(trigger_names, buf); if (flags < 0) return flags; mutex_lock(&data->mutex); if (flags == data->irq_flags) { status = size; goto out_unlock; } if (data->irq_flags) gpio_sysfs_free_irq(dev); if (flags) { status = gpio_sysfs_request_irq(dev, flags); if (!status) status = size; } out_unlock: mutex_unlock(&data->mutex); return status; } static DEVICE_ATTR_RW(edge); /* Caller holds gpiod-data mutex. */ static int gpio_sysfs_set_active_low(struct device *dev, int value) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; int status = 0; unsigned int flags = data->irq_flags; if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value); /* reconfigure poll(2) support if enabled on one edge only */ if (flags == GPIO_IRQF_TRIGGER_FALLING || flags == GPIO_IRQF_TRIGGER_RISING) { gpio_sysfs_free_irq(dev); status = gpio_sysfs_request_irq(dev, flags); } return status; } static ssize_t active_low_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpiod_data *data = dev_get_drvdata(dev); struct gpio_desc *desc = data->desc; int value; mutex_lock(&data->mutex); value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags); mutex_unlock(&data->mutex); return sysfs_emit(buf, "%d\n", value); } static ssize_t active_low_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpiod_data *data = dev_get_drvdata(dev); ssize_t status; long value; status = kstrtol(buf, 0, &value); if (status) return status; mutex_lock(&data->mutex); status = gpio_sysfs_set_active_low(dev, value); mutex_unlock(&data->mutex); return status ? 
: size;
}
static DEVICE_ATTR_RW(active_low);

static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
			       int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gpiod_data *data = dev_get_drvdata(dev);
	struct gpio_desc *desc = data->desc;
	umode_t mode = attr->mode;
	bool show_direction = data->direction_can_change;

	if (attr == &dev_attr_direction.attr) {
		if (!show_direction)
			mode = 0;
	} else if (attr == &dev_attr_edge.attr) {
		if (gpiod_to_irq(desc) < 0)
			mode = 0;
		if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
			mode = 0;
	}

	return mode;
}

static struct attribute *gpio_attrs[] = {
	&dev_attr_direction.attr,
	&dev_attr_edge.attr,
	&dev_attr_value.attr,
	&dev_attr_active_low.attr,
	NULL,
};

static const struct attribute_group gpio_group = {
	.attrs = gpio_attrs,
	.is_visible = gpio_is_visible,
};

static const struct attribute_group *gpio_groups[] = {
	&gpio_group,
	NULL
};

/*
 * /sys/class/gpio/gpiochipN/
 *   /base ... matching gpio_chip.base (N)
 *   /label ... matching gpio_chip.label
 *   /ngpio ... matching gpio_chip.ngpio
 */

static ssize_t base_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", chip->base);
}
static DEVICE_ATTR_RO(base);

static ssize_t label_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", chip->label ?: "");
}
static DEVICE_ATTR_RO(label);

static ssize_t ngpio_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	const struct gpio_chip *chip = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", chip->ngpio);
}
static DEVICE_ATTR_RO(ngpio);

static struct attribute *gpiochip_attrs[] = {
	&dev_attr_base.attr,
	&dev_attr_label.attr,
	&dev_attr_ngpio.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gpiochip);

/*
 * /sys/class/gpio/export ... write-only
 *	integer N ... number of GPIO to export (full access)
 * /sys/class/gpio/unexport ... write-only
 *	integer N ... number of GPIO to unexport
 */
static ssize_t export_store(const struct class *class,
			    const struct class_attribute *attr,
			    const char *buf, size_t len)
{
	long gpio;
	struct gpio_desc *desc;
	int status;
	struct gpio_chip *gc;
	int offset;

	status = kstrtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	desc = gpio_to_desc(gpio);
	/* reject invalid GPIOs */
	if (!desc) {
		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
		return -EINVAL;
	}
	gc = desc->gdev->chip;
	offset = gpio_chip_hwgpio(desc);
	if (!gpiochip_line_is_valid(gc, offset)) {
		pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
		return -EINVAL;
	}

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */

	status = gpiod_request_user(desc, "sysfs");
	if (status)
		goto done;

	status = gpiod_set_transitory(desc, false);
	if (!status) {
		status = gpiod_export(desc, true);
		if (status < 0)
			gpiod_free(desc);
		else
			set_bit(FLAG_SYSFS, &desc->flags);
	}

done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}
static CLASS_ATTR_WO(export);

static ssize_t unexport_store(const struct class *class,
			      const struct class_attribute *attr,
			      const char *buf, size_t len)
{
	long gpio;
	struct gpio_desc *desc;
	int status;

	status = kstrtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	desc = gpio_to_desc(gpio);
	/* reject bogus commands (gpiod_unexport() ignores them) */
	if (!desc) {
		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
		return -EINVAL;
	}

	status = -EINVAL;

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */
	if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
		gpiod_unexport(desc);
		gpiod_free(desc);
		status = 0;
	}
done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}
static CLASS_ATTR_WO(unexport);

static struct attribute *gpio_class_attrs[] = {
	&class_attr_export.attr,
	&class_attr_unexport.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gpio_class);

static struct class gpio_class = {
	.name = "gpio",
	.class_groups = gpio_class_groups,
};

/**
 * gpiod_export - export a GPIO through sysfs
 * @desc: GPIO to make available, already requested
 * @direction_may_change: true if userspace may change GPIO direction
 * Context: arch_initcall or later
 *
 * When drivers want to make a GPIO accessible to userspace after they
 * have requested it -- perhaps while debugging, or as part of their
 * public interface -- they may use this routine. If the GPIO can
 * change direction (some can't) and the caller allows it, userspace
 * will see a "direction" sysfs attribute which may be used to change
 * the gpio's direction. A "value" attribute will always be provided.
 *
 * Returns zero on success, else an error.
 */
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
	struct gpio_chip *chip;
	struct gpio_device *gdev;
	struct gpiod_data *data;
	unsigned long flags;
	int status;
	const char *ioname = NULL;
	struct device *dev;
	int offset;

	/* can't export until sysfs is available ... */
	if (!class_is_registered(&gpio_class)) {
		pr_debug("%s: called too early!\n", __func__);
		return -ENOENT;
	}

	if (!desc) {
		pr_debug("%s: invalid gpio descriptor\n", __func__);
		return -EINVAL;
	}

	gdev = desc->gdev;
	chip = gdev->chip;

	mutex_lock(&sysfs_lock);

	/* check if chip is being removed */
	if (!chip || !gdev->mockdev) {
		status = -ENODEV;
		goto err_unlock;
	}

	spin_lock_irqsave(&gpio_lock, flags);
	if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
	     test_bit(FLAG_EXPORT, &desc->flags)) {
		spin_unlock_irqrestore(&gpio_lock, flags);
		gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
			  __func__,
			  test_bit(FLAG_REQUESTED, &desc->flags),
			  test_bit(FLAG_EXPORT, &desc->flags));
		status = -EPERM;
		goto err_unlock;
	}
	spin_unlock_irqrestore(&gpio_lock, flags);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		status = -ENOMEM;
		goto err_unlock;
	}

	data->desc = desc;
	mutex_init(&data->mutex);
	if (chip->direction_input && chip->direction_output)
		data->direction_can_change = direction_may_change;
	else
		data->direction_can_change = false;

	offset = gpio_chip_hwgpio(desc);
	if (chip->names && chip->names[offset])
		ioname = chip->names[offset];

	dev = device_create_with_groups(&gpio_class, &gdev->dev,
					MKDEV(0, 0), data, gpio_groups,
					ioname ?
ioname : "gpio%u", desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); goto err_free_data; } set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; err_free_data: kfree(data); err_unlock: mutex_unlock(&sysfs_lock); gpiod_dbg(desc, "%s: status %d\n", __func__, status); return status; } EXPORT_SYMBOL_GPL(gpiod_export); static int match_export(struct device *dev, const void *desc) { struct gpiod_data *data = dev_get_drvdata(dev); return data->desc == desc; } /** * gpiod_export_link - create a sysfs link to an exported GPIO node * @dev: device under which to create symlink * @name: name of the symlink * @desc: GPIO to create symlink to, already exported * * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * * Returns zero on success, else an error. */ int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) { struct device *cdev; int ret; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return -EINVAL; } cdev = class_find_device(&gpio_class, NULL, desc, match_export); if (!cdev) return -ENODEV; ret = sysfs_create_link(&dev->kobj, &cdev->kobj, name); put_device(cdev); return ret; } EXPORT_SYMBOL_GPL(gpiod_export_link); /** * gpiod_unexport - reverse effect of gpiod_export() * @desc: GPIO to make unavailable * * This is implicit on gpiod_free(). */ void gpiod_unexport(struct gpio_desc *desc) { struct gpiod_data *data; struct device *dev; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return; } mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) goto err_unlock; dev = class_find_device(&gpio_class, NULL, desc, match_export); if (!dev) goto err_unlock; data = dev_get_drvdata(dev); clear_bit(FLAG_EXPORT, &desc->flags); device_unregister(dev); /* * Release irq after deregistration to prevent race with edge_store. */ if (data->irq_flags) gpio_sysfs_free_irq(dev); mutex_unlock(&sysfs_lock); put_device(dev); kfree(data); return; err_unlock: mutex_unlock(&sysfs_lock); } EXPORT_SYMBOL_GPL(gpiod_unexport); int gpiochip_sysfs_register(struct gpio_device *gdev) { struct device *dev; struct device *parent; struct gpio_chip *chip = gdev->chip; /* * Many systems add gpio chips for SOC support very early, * before driver model support is available. In those cases we * register later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. */ if (!class_is_registered(&gpio_class)) return 0; /* * For sysfs backward compatibility we need to preserve this * preferred parenting to the gpio_chip parent field, if set. 
*/ if (chip->parent) parent = chip->parent; else parent = &gdev->dev; /* use chip->base for the ID; it's already known to be unique */ dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip, gpiochip_groups, GPIOCHIP_NAME "%d", chip->base); if (IS_ERR(dev)) return PTR_ERR(dev); mutex_lock(&sysfs_lock); gdev->mockdev = dev; mutex_unlock(&sysfs_lock); return 0; } void gpiochip_sysfs_unregister(struct gpio_device *gdev) { struct gpio_desc *desc; struct gpio_chip *chip = gdev->chip; if (!gdev->mockdev) return; device_unregister(gdev->mockdev); /* prevent further gpiod exports */ mutex_lock(&sysfs_lock); gdev->mockdev = NULL; mutex_unlock(&sysfs_lock); /* unregister gpiod class devices owned by sysfs */ for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) { gpiod_unexport(desc); gpiod_free(desc); } } static int __init gpiolib_sysfs_init(void) { int status; unsigned long flags; struct gpio_device *gdev; status = class_register(&gpio_class); if (status < 0) return status; /* Scan and register the gpio_chips which registered very * early (e.g. before the class_register above was called). * * We run before arch_initcall() so chip->dev nodes can have * registered, and so arch_initcall() can always gpiod_export(). */ spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) { if (gdev->mockdev) continue; /* * TODO we yield gpio_lock here because * gpiochip_sysfs_register() acquires a mutex. This is unsafe * and needs to be fixed. * * Also it would be nice to use gpiochip_find() here so we * can keep gpio_chips local to gpiolib.c, but the yield of * gpio_lock prevents us from doing this. */ spin_unlock_irqrestore(&gpio_lock, flags); status = gpiochip_sysfs_register(gdev); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return status; } postcore_initcall(gpiolib_sysfs_init);
linux-master
drivers/gpio/gpiolib-sysfs.c
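The attribute layout documented at the top of gpiolib-sysfs.c is consumed from userspace with ordinary file I/O. A sketch of a consumer that exports a line, arms edge detection, and waits for an edge with poll(2); GPIO number 24 is an arbitrary example, error handling is omitted, and note that this whole sysfs ABI is deprecated in favour of the GPIO character device:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int fd;

	/* Make /sys/class/gpio/gpio24/ appear. */
	fd = open("/sys/class/gpio/export", O_WRONLY);
	write(fd, "24", 2);
	close(fd);

	/* Input line, notify on both edges. */
	fd = open("/sys/class/gpio/gpio24/direction", O_WRONLY);
	write(fd, "in", 2);
	close(fd);
	fd = open("/sys/class/gpio/gpio24/edge", O_WRONLY);
	write(fd, "both", 4);
	close(fd);

	/* Dummy read, then block until the kernel's sysfs_notify_dirent()
	 * (triggered from gpio_sysfs_irq() above) wakes us up. */
	fd = open("/sys/class/gpio/gpio24/value", O_RDONLY);
	read(fd, buf, sizeof(buf));
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
	poll(&pfd, 1, -1);
	lseek(fd, 0, SEEK_SET);
	read(fd, buf, sizeof(buf));
	printf("value: %c\n", buf[0]);
	close(fd);
	return 0;
}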
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GPIO interface for IT87xx Super I/O chips
 *
 * Author: Diego Elio Pettenò <[email protected]>
 * Copyright (c) 2017 Google, Inc.
 *
 * Based on it87_wdt.c by Oliver Schuster
 * gpio-it8761e.c by Denis Turischev
 * gpio-stmpe.c by Rabin Vincent
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/gpio/driver.h>

/* Chip Id numbers */
#define NO_DEV_ID 0xffff
#define IT8613_ID 0x8613
#define IT8620_ID 0x8620
#define IT8628_ID 0x8628
#define IT8718_ID 0x8718
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
#define IT8772_ID 0x8772
#define IT8786_ID 0x8786

/* IO Ports */
#define REG 0x2e
#define VAL 0x2f

/* Logical device Numbers LDN */
#define GPIO 0x07

/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
#define CHIPREV 0x22

/**
 * struct it87_gpio - it87-specific GPIO chip
 * @chip: the underlying gpio_chip structure
 * @lock: a lock to avoid races between operations
 * @io_base: base address for gpio ports
 * @io_size: size of the port range starting from io_base.
 * @output_base: Super I/O register address for Output Enable register
 * @simple_base: Super I/O 'Simple I/O' Enable register
 * @simple_size: Super I/O 'Simple I/O' Enable register size; this is
 *	required because IT87xx chips might only provide Simple I/O
 *	switches on a subset of lines, whereas the others keep the
 *	same status all the time.
 */
struct it87_gpio {
	struct gpio_chip chip;
	spinlock_t lock;
	u16 io_base;
	u16 io_size;
	u8 output_base;
	u8 simple_base;
	u8 simple_size;
};

static struct it87_gpio it87_gpio_chip = {
	.lock = __SPIN_LOCK_UNLOCKED(it87_gpio_chip.lock),
};

/* Superio chip access functions; copied from wdt_it87 */
static inline int superio_enter(void)
{
	/*
	 * Try to reserve REG and REG + 1 for exclusive access.
	 */
	if (!request_muxed_region(REG, 2, KBUILD_MODNAME))
		return -EBUSY;

	outb(0x87, REG);
	outb(0x01, REG);
	outb(0x55, REG);
	outb(0x55, REG);
	return 0;
}

static inline void superio_exit(void)
{
	outb(0x02, REG);
	outb(0x02, VAL);
	release_region(REG, 2);
}

static inline void superio_select(int ldn)
{
	outb(LDNREG, REG);
	outb(ldn, VAL);
}

static inline int superio_inb(int reg)
{
	outb(reg, REG);
	return inb(VAL);
}

static inline void superio_outb(int val, int reg)
{
	outb(reg, REG);
	outb(val, VAL);
}

static inline int superio_inw(int reg)
{
	int val;

	outb(reg++, REG);
	val = inb(VAL) << 8;
	outb(reg, REG);
	val |= inb(VAL);
	return val;
}

static inline void superio_set_mask(int mask, int reg)
{
	u8 curr_val = superio_inb(reg);
	u8 new_val = curr_val | mask;

	if (curr_val != new_val)
		superio_outb(new_val, reg);
}

static inline void superio_clear_mask(int mask, int reg)
{
	u8 curr_val = superio_inb(reg);
	u8 new_val = curr_val & ~mask;

	if (curr_val != new_val)
		superio_outb(new_val, reg);
}

static int it87_gpio_request(struct gpio_chip *chip, unsigned gpio_num)
{
	u8 mask, group;
	int rc = 0;
	struct it87_gpio *it87_gpio = gpiochip_get_data(chip);

	mask = 1 << (gpio_num % 8);
	group = (gpio_num / 8);

	spin_lock(&it87_gpio->lock);

	rc = superio_enter();
	if (rc)
		goto exit;

	/* not all the IT87xx chips support Simple I/O and not all of
	 * them allow all the lines to be set/unset to Simple I/O.
*/ if (group < it87_gpio->simple_size) superio_set_mask(mask, group + it87_gpio->simple_base); /* clear output enable, setting the pin to input, as all the * newly-exported GPIO interfaces are set to input. */ superio_clear_mask(mask, group + it87_gpio->output_base); superio_exit(); exit: spin_unlock(&it87_gpio->lock); return rc; } static int it87_gpio_get(struct gpio_chip *chip, unsigned gpio_num) { u16 reg; u8 mask; struct it87_gpio *it87_gpio = gpiochip_get_data(chip); mask = 1 << (gpio_num % 8); reg = (gpio_num / 8) + it87_gpio->io_base; return !!(inb(reg) & mask); } static int it87_gpio_direction_in(struct gpio_chip *chip, unsigned gpio_num) { u8 mask, group; int rc = 0; struct it87_gpio *it87_gpio = gpiochip_get_data(chip); mask = 1 << (gpio_num % 8); group = (gpio_num / 8); spin_lock(&it87_gpio->lock); rc = superio_enter(); if (rc) goto exit; /* clear the output enable bit */ superio_clear_mask(mask, group + it87_gpio->output_base); superio_exit(); exit: spin_unlock(&it87_gpio->lock); return rc; } static void it87_gpio_set(struct gpio_chip *chip, unsigned gpio_num, int val) { u8 mask, curr_vals; u16 reg; struct it87_gpio *it87_gpio = gpiochip_get_data(chip); mask = 1 << (gpio_num % 8); reg = (gpio_num / 8) + it87_gpio->io_base; curr_vals = inb(reg); if (val) outb(curr_vals | mask, reg); else outb(curr_vals & ~mask, reg); } static int it87_gpio_direction_out(struct gpio_chip *chip, unsigned gpio_num, int val) { u8 mask, group; int rc = 0; struct it87_gpio *it87_gpio = gpiochip_get_data(chip); mask = 1 << (gpio_num % 8); group = (gpio_num / 8); spin_lock(&it87_gpio->lock); rc = superio_enter(); if (rc) goto exit; /* set the output enable bit */ superio_set_mask(mask, group + it87_gpio->output_base); it87_gpio_set(chip, gpio_num, val); superio_exit(); exit: spin_unlock(&it87_gpio->lock); return rc; } static const struct gpio_chip it87_template_chip = { .label = KBUILD_MODNAME, .owner = THIS_MODULE, .request = it87_gpio_request, .get = it87_gpio_get, .direction_input = it87_gpio_direction_in, .set = it87_gpio_set, .direction_output = it87_gpio_direction_out, .base = -1 }; static int __init it87_gpio_init(void) { int rc = 0, i; u16 chip_type; u8 chip_rev, gpio_ba_reg; char *labels, **labels_table; struct it87_gpio *it87_gpio = &it87_gpio_chip; rc = superio_enter(); if (rc) return rc; chip_type = superio_inw(CHIPID); chip_rev = superio_inb(CHIPREV) & 0x0f; superio_exit(); it87_gpio->chip = it87_template_chip; switch (chip_type) { case IT8613_ID: gpio_ba_reg = 0x62; it87_gpio->io_size = 8; /* it8613 only needs 6, use 8 for alignment */ it87_gpio->output_base = 0xc8; it87_gpio->simple_base = 0xc0; it87_gpio->simple_size = 6; it87_gpio->chip.ngpio = 64; /* has 48, use 64 for convenient calc */ break; case IT8620_ID: case IT8628_ID: gpio_ba_reg = 0x62; it87_gpio->io_size = 11; it87_gpio->output_base = 0xc8; it87_gpio->simple_size = 0; it87_gpio->chip.ngpio = 64; break; case IT8718_ID: case IT8728_ID: case IT8732_ID: case IT8772_ID: case IT8786_ID: gpio_ba_reg = 0x62; it87_gpio->io_size = 8; it87_gpio->output_base = 0xc8; it87_gpio->simple_base = 0xc0; it87_gpio->simple_size = 5; it87_gpio->chip.ngpio = 64; break; case IT8761_ID: gpio_ba_reg = 0x60; it87_gpio->io_size = 4; it87_gpio->output_base = 0xf0; it87_gpio->simple_size = 0; it87_gpio->chip.ngpio = 16; break; case NO_DEV_ID: pr_err("no device\n"); return -ENODEV; default: pr_err("Unknown Chip found, Chip %04x Revision %x\n", chip_type, chip_rev); return -ENODEV; } rc = superio_enter(); if (rc) return rc; superio_select(GPIO); /* fetch 
GPIO base address */ it87_gpio->io_base = superio_inw(gpio_ba_reg); superio_exit(); pr_info("Found Chip IT%04x rev %x. %u GPIO lines starting at %04xh\n", chip_type, chip_rev, it87_gpio->chip.ngpio, it87_gpio->io_base); if (!request_region(it87_gpio->io_base, it87_gpio->io_size, KBUILD_MODNAME)) return -EBUSY; /* Set up aliases for the GPIO connection. * * ITE documentation for recent chips such as the IT8728F * refers to the GPIO lines as GPxy, with a coordinates system * where x is the GPIO group (starting from 1) and y is the * bit within the group. * * By creating these aliases, we make it easier to understand * to which GPIO pin we're referring to. */ labels = kcalloc(it87_gpio->chip.ngpio, sizeof("it87_gpXY"), GFP_KERNEL); labels_table = kcalloc(it87_gpio->chip.ngpio, sizeof(const char *), GFP_KERNEL); if (!labels || !labels_table) { rc = -ENOMEM; goto labels_free; } for (i = 0; i < it87_gpio->chip.ngpio; i++) { char *label = &labels[i * sizeof("it87_gpXY")]; sprintf(label, "it87_gp%u%u", 1+(i/8), i%8); labels_table[i] = label; } it87_gpio->chip.names = (const char *const*)labels_table; rc = gpiochip_add_data(&it87_gpio->chip, it87_gpio); if (rc) goto labels_free; return 0; labels_free: kfree(labels_table); kfree(labels); release_region(it87_gpio->io_base, it87_gpio->io_size); return rc; } static void __exit it87_gpio_exit(void) { struct it87_gpio *it87_gpio = &it87_gpio_chip; gpiochip_remove(&it87_gpio->chip); release_region(it87_gpio->io_base, it87_gpio->io_size); kfree(it87_gpio->chip.names[0]); kfree(it87_gpio->chip.names); } module_init(it87_gpio_init); module_exit(it87_gpio_exit); MODULE_AUTHOR("Diego Elio Pettenò <[email protected]>"); MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-it87.c
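Every it87 register access above reduces the flat GPIO offset to a (group, bit) pair, and the generated labels map the same pair into ITE's GPxy notation with groups counted from 1. A small self-contained illustration of that arithmetic (the offsets are chosen arbitrarily; this is not driver code):

#include <stdio.h>

int main(void)
{
	static const unsigned int offsets[] = { 0, 7, 8, 13, 42 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int gpio = offsets[i];
		unsigned int group = gpio / 8;        /* register index */
		unsigned int mask = 1u << (gpio % 8); /* bit inside it */

		/* Same formula as the driver's label generation. */
		printf("offset %2u -> group %u, mask 0x%02x, name it87_gp%u%u\n",
		       gpio, group, mask, 1 + gpio / 8, gpio % 8);
	}
	return 0;
}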
// SPDX-License-Identifier: GPL-2.0+
/*
 * OF helpers for the GPIO API
 *
 * Copyright (c) 2007-2008 MontaVista Software, Inc.
 *
 * Author: Anton Vorontsov <[email protected]>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>

#include "gpiolib.h"
#include "gpiolib-of.h"

/*
 * These are Linux-specific flags. By default the controllers' and Linux'
 * mappings match, but GPIO controllers are free to translate their own flags
 * to the Linux-specific ones in their .xlate callback, though a 1:1 mapping
 * is recommended.
 */
enum of_gpio_flags {
	OF_GPIO_ACTIVE_LOW = 0x1,
	OF_GPIO_SINGLE_ENDED = 0x2,
	OF_GPIO_OPEN_DRAIN = 0x4,
	OF_GPIO_TRANSITORY = 0x8,
	OF_GPIO_PULL_UP = 0x10,
	OF_GPIO_PULL_DOWN = 0x20,
	OF_GPIO_PULL_DISABLE = 0x40,
};

/**
 * of_gpio_named_count() - Count GPIOs for a device
 * @np: device node to count GPIOs for
 * @propname: property name containing gpio specifier(s)
 *
 * The function returns the count of GPIOs specified for a node.
 * Note that the empty GPIO specifiers count too. Returns either
 *   Number of gpios defined in the property,
 *   -EINVAL for an incorrectly formed gpios property, or
 *   -ENOENT for a missing gpios property
 *
 * Example:
 *   gpios = <0
 *            &gpio1 1 2
 *            0
 *            &gpio2 3 4>;
 *
 * The above example defines four GPIOs, two of which are not specified.
 * This function will return '4'.
 */
static int of_gpio_named_count(const struct device_node *np,
			       const char *propname)
{
	return of_count_phandle_with_args(np, propname, "#gpio-cells");
}

/**
 * of_gpio_spi_cs_get_count() - special GPIO counting for SPI
 * @dev: Consuming device
 * @con_id: Function within the GPIO consumer
 *
 * Some older GPIO controllers need special quirks. Currently we handle
 * the Freescale and PPC GPIO controllers with bindings that don't use the
 * established "cs-gpios" property for chip selects but instead rely on
 * "gpios" for the chip select lines. If we detect this, we redirect
 * the counting of "cs-gpios" to count "gpios" transparently to the
 * driver.
 */
static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
{
	struct device_node *np = dev->of_node;

	if (!IS_ENABLED(CONFIG_SPI_MASTER))
		return 0;
	if (!con_id || strcmp(con_id, "cs"))
		return 0;
	if (!of_device_is_compatible(np, "fsl,spi") &&
	    !of_device_is_compatible(np, "aeroflexgaisler,spictrl") &&
	    !of_device_is_compatible(np, "ibm,ppc4xx-spi"))
		return 0;
	return of_gpio_named_count(np, "gpios");
}

int of_gpio_get_count(struct device *dev, const char *con_id)
{
	int ret;
	char propname[32];
	unsigned int i;

	ret = of_gpio_spi_cs_get_count(dev, con_id);
	if (ret > 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
		if (con_id)
			snprintf(propname, sizeof(propname), "%s-%s",
				 con_id, gpio_suffixes[i]);
		else
			snprintf(propname, sizeof(propname), "%s",
				 gpio_suffixes[i]);

		ret = of_gpio_named_count(dev->of_node, propname);
		if (ret > 0)
			break;
	}
	return ret ?
ret : -ENOENT; } static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) { struct of_phandle_args *gpiospec = data; return device_match_of_node(&chip->gpiodev->dev, gpiospec->np) && chip->of_xlate && chip->of_xlate(chip, gpiospec, NULL) >= 0; } static struct gpio_chip *of_find_gpiochip_by_xlate( struct of_phandle_args *gpiospec) { return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate); } static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, struct of_phandle_args *gpiospec, enum of_gpio_flags *flags) { int ret; if (chip->of_gpio_n_cells != gpiospec->args_count) return ERR_PTR(-EINVAL); ret = chip->of_xlate(chip, gpiospec, flags); if (ret < 0) return ERR_PTR(ret); return gpiochip_get_desc(chip, ret); } /* * Overrides stated polarity of a gpio line and warns when there is a * discrepancy. */ static void of_gpio_quirk_polarity(const struct device_node *np, bool active_high, enum of_gpio_flags *flags) { if (active_high) { if (*flags & OF_GPIO_ACTIVE_LOW) { pr_warn("%s GPIO handle specifies active low - ignored\n", of_node_full_name(np)); *flags &= ~OF_GPIO_ACTIVE_LOW; } } else { if (!(*flags & OF_GPIO_ACTIVE_LOW)) pr_info("%s enforce active low on GPIO handle\n", of_node_full_name(np)); *flags |= OF_GPIO_ACTIVE_LOW; } } /* * This quirk does static polarity overrides in cases where existing * DTS specified incorrect polarity. */ static void of_gpio_try_fixup_polarity(const struct device_node *np, const char *propname, enum of_gpio_flags *flags) { static const struct { const char *compatible; const char *propname; bool active_high; } gpios[] = { #if !IS_ENABLED(CONFIG_LCD_HX8357) /* * Himax LCD controllers used incorrectly named * "gpios-reset" property and also specified wrong * polarity. */ { "himax,hx8357", "gpios-reset", false }, { "himax,hx8369", "gpios-reset", false }, #endif }; unsigned int i; for (i = 0; i < ARRAY_SIZE(gpios); i++) { if (of_device_is_compatible(np, gpios[i].compatible) && !strcmp(propname, gpios[i].propname)) { of_gpio_quirk_polarity(np, gpios[i].active_high, flags); break; } } } static void of_gpio_set_polarity_by_property(const struct device_node *np, const char *propname, enum of_gpio_flags *flags) { const struct device_node *np_compat = np; const struct device_node *np_propname = np; static const struct { const char *compatible; const char *gpio_propname; const char *polarity_propname; } gpios[] = { #if IS_ENABLED(CONFIG_FEC) /* Freescale Fast Ethernet Controller */ { "fsl,imx25-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx27-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx28-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx6q-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,mvf600-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx6sx-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx6ul-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx8mq-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" }, { "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" }, #endif #if IS_ENABLED(CONFIG_PCI_IMX6) { "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx6qp-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx7d-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx8mq-pcie", "reset-gpio", "reset-gpio-active-high" }, { "fsl,imx8mm-pcie", "reset-gpio", 
"reset-gpio-active-high" }, { "fsl,imx8mp-pcie", "reset-gpio", "reset-gpio-active-high" }, #endif /* * The regulator GPIO handles are specified such that the * presence or absence of "enable-active-high" solely controls * the polarity of the GPIO line. Any phandle flags must * be actively ignored. */ #if IS_ENABLED(CONFIG_REGULATOR_FIXED_VOLTAGE) { "regulator-fixed", "gpios", "enable-active-high" }, { "regulator-fixed", "gpio", "enable-active-high" }, { "reg-fixed-voltage", "gpios", "enable-active-high" }, { "reg-fixed-voltage", "gpio", "enable-active-high" }, #endif #if IS_ENABLED(CONFIG_REGULATOR_GPIO) { "regulator-gpio", "enable-gpio", "enable-active-high" }, { "regulator-gpio", "enable-gpios", "enable-active-high" }, #endif #if IS_ENABLED(CONFIG_MMC_ATMELMCI) { "atmel,hsmci", "cd-gpios", "cd-inverted" }, #endif }; unsigned int i; bool active_high; #if IS_ENABLED(CONFIG_MMC_ATMELMCI) /* * The Atmel HSMCI has compatible property in the parent node and * gpio property in a child node */ if (of_device_is_compatible(np->parent, "atmel,hsmci")) { np_compat = np->parent; np_propname = np; } #endif for (i = 0; i < ARRAY_SIZE(gpios); i++) { if (of_device_is_compatible(np_compat, gpios[i].compatible) && !strcmp(propname, gpios[i].gpio_propname)) { active_high = of_property_read_bool(np_propname, gpios[i].polarity_propname); of_gpio_quirk_polarity(np, active_high, flags); break; } } } static void of_gpio_flags_quirks(const struct device_node *np, const char *propname, enum of_gpio_flags *flags, int index) { of_gpio_try_fixup_polarity(np, propname, flags); of_gpio_set_polarity_by_property(np, propname, flags); /* * Legacy open drain handling for fixed voltage regulators. */ if (IS_ENABLED(CONFIG_REGULATOR) && of_device_is_compatible(np, "reg-fixed-voltage") && of_property_read_bool(np, "gpio-open-drain")) { *flags |= (OF_GPIO_SINGLE_ENDED | OF_GPIO_OPEN_DRAIN); pr_info("%s uses legacy open drain flag - update the DTS if you can\n", of_node_full_name(np)); } /* * Legacy handling of SPI active high chip select. If we have a * property named "cs-gpios" we need to inspect the child node * to determine if the flags should have inverted semantics. */ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") && of_property_read_bool(np, "cs-gpios")) { struct device_node *child; u32 cs; int ret; for_each_child_of_node(np, child) { ret = of_property_read_u32(child, "reg", &cs); if (ret) continue; if (cs == index) { /* * SPI children have active low chip selects * by default. This can be specified negatively * by just omitting "spi-cs-high" in the * device node, or actively by tagging on * GPIO_ACTIVE_LOW as flag in the device * tree. If the line is simultaneously * tagged as active low in the device tree * and has the "spi-cs-high" set, we get a * conflict and the "spi-cs-high" flag will * take precedence. 
*/ bool active_high = of_property_read_bool(child, "spi-cs-high"); of_gpio_quirk_polarity(child, active_high, flags); of_node_put(child); break; } } } /* Legacy handling of stmmac's active-low PHY reset line */ if (IS_ENABLED(CONFIG_STMMAC_ETH) && !strcmp(propname, "snps,reset-gpio") && of_property_read_bool(np, "snps,reset-active-low")) *flags |= OF_GPIO_ACTIVE_LOW; } /** * of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API * @np: device node to get GPIO from * @propname: property name containing gpio specifier(s) * @index: index of the GPIO * @flags: a flags pointer to fill in * * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno * value on the error condition. If @flags is not NULL the function also fills * in flags for the GPIO. */ static struct gpio_desc *of_get_named_gpiod_flags(const struct device_node *np, const char *propname, int index, enum of_gpio_flags *flags) { struct of_phandle_args gpiospec; struct gpio_chip *chip; struct gpio_desc *desc; int ret; ret = of_parse_phandle_with_args_map(np, propname, "gpio", index, &gpiospec); if (ret) { pr_debug("%s: can't parse '%s' property of node '%pOF[%d]'\n", __func__, propname, np, index); return ERR_PTR(ret); } chip = of_find_gpiochip_by_xlate(&gpiospec); if (!chip) { desc = ERR_PTR(-EPROBE_DEFER); goto out; } desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, flags); if (IS_ERR(desc)) goto out; if (flags) of_gpio_flags_quirks(np, propname, flags, index); pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n", __func__, propname, np, index, PTR_ERR_OR_ZERO(desc)); out: of_node_put(gpiospec.np); return desc; } /** * of_get_named_gpio() - Get a GPIO number to use with GPIO API * @np: device node to get GPIO from * @propname: Name of property containing gpio specifier(s) * @index: index of the GPIO * * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. */ int of_get_named_gpio(const struct device_node *np, const char *propname, int index) { struct gpio_desc *desc; desc = of_get_named_gpiod_flags(np, propname, index, NULL); if (IS_ERR(desc)) return PTR_ERR(desc); else return desc_to_gpio(desc); } EXPORT_SYMBOL_GPL(of_get_named_gpio); /* Converts gpio_lookup_flags into bitmask of GPIO_* values */ static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags) { unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT; if (flags & OF_GPIO_ACTIVE_LOW) lflags |= GPIO_ACTIVE_LOW; if (flags & OF_GPIO_SINGLE_ENDED) { if (flags & OF_GPIO_OPEN_DRAIN) lflags |= GPIO_OPEN_DRAIN; else lflags |= GPIO_OPEN_SOURCE; } if (flags & OF_GPIO_TRANSITORY) lflags |= GPIO_TRANSITORY; if (flags & OF_GPIO_PULL_UP) lflags |= GPIO_PULL_UP; if (flags & OF_GPIO_PULL_DOWN) lflags |= GPIO_PULL_DOWN; if (flags & OF_GPIO_PULL_DISABLE) lflags |= GPIO_PULL_DISABLE; return lflags; } static struct gpio_desc *of_find_gpio_rename(struct device_node *np, const char *con_id, unsigned int idx, enum of_gpio_flags *of_flags) { static const struct of_rename_gpio { const char *con_id; const char *legacy_id; /* NULL - same as con_id */ /* * Compatible string can be set to NULL in case where * matching to a particular compatible is not practical, * but it should only be done for gpio names that have * vendor prefix to reduce risk of false positives. * Addition of such entries is strongly discouraged. 
*/ const char *compatible; } gpios[] = { #if !IS_ENABLED(CONFIG_LCD_HX8357) /* Himax LCD controllers used "gpios-reset" */ { "reset", "gpios-reset", "himax,hx8357" }, { "reset", "gpios-reset", "himax,hx8369" }, #endif #if IS_ENABLED(CONFIG_MFD_ARIZONA) { "wlf,reset", NULL, NULL }, #endif #if IS_ENABLED(CONFIG_RTC_DRV_MOXART) { "rtc-data", "gpio-rtc-data", "moxa,moxart-rtc" }, { "rtc-sclk", "gpio-rtc-sclk", "moxa,moxart-rtc" }, { "rtc-reset", "gpio-rtc-reset", "moxa,moxart-rtc" }, #endif #if IS_ENABLED(CONFIG_NFC_MRVL_I2C) { "reset", "reset-n-io", "marvell,nfc-i2c" }, #endif #if IS_ENABLED(CONFIG_NFC_MRVL_SPI) { "reset", "reset-n-io", "marvell,nfc-spi" }, #endif #if IS_ENABLED(CONFIG_NFC_MRVL_UART) { "reset", "reset-n-io", "marvell,nfc-uart" }, { "reset", "reset-n-io", "mrvl,nfc-uart" }, #endif #if !IS_ENABLED(CONFIG_PCI_LANTIQ) /* MIPS Lantiq PCI */ { "reset", "gpios-reset", "lantiq,pci-xway" }, #endif /* * Some regulator bindings happened before we managed to * establish that GPIO properties should be named * "foo-gpios" so we have this special kludge for them. */ #if IS_ENABLED(CONFIG_REGULATOR_ARIZONA_LDO1) { "wlf,ldoena", NULL, NULL }, /* Arizona */ #endif #if IS_ENABLED(CONFIG_REGULATOR_WM8994) { "wlf,ldo1ena", NULL, NULL }, /* WM8994 */ { "wlf,ldo2ena", NULL, NULL }, /* WM8994 */ #endif #if IS_ENABLED(CONFIG_SND_SOC_CS42L56) { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" }, #endif #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X) { "reset", "gpio-reset", "ti,tlv320aic3x" }, { "reset", "gpio-reset", "ti,tlv320aic33" }, { "reset", "gpio-reset", "ti,tlv320aic3007" }, { "reset", "gpio-reset", "ti,tlv320aic3104" }, { "reset", "gpio-reset", "ti,tlv320aic3106" }, #endif #if IS_ENABLED(CONFIG_SPI_GPIO) /* * The SPI GPIO bindings happened before we managed to * establish that GPIO properties should be named * "foo-gpios" so we have this special kludge for them. */ { "miso", "gpio-miso", "spi-gpio" }, { "mosi", "gpio-mosi", "spi-gpio" }, { "sck", "gpio-sck", "spi-gpio" }, #endif /* * The old Freescale bindings use simply "gpios" as name * for the chip select lines rather than "cs-gpios" like * all other SPI hardware. Allow this specifically for * Freescale and PPC devices. */ #if IS_ENABLED(CONFIG_SPI_FSL_SPI) { "cs", "gpios", "fsl,spi" }, { "cs", "gpios", "aeroflexgaisler,spictrl" }, #endif #if IS_ENABLED(CONFIG_SPI_PPC4xx) { "cs", "gpios", "ibm,ppc4xx-spi" }, #endif #if IS_ENABLED(CONFIG_TYPEC_FUSB302) /* * Fairchild FUSB302 host is using undocumented "fcs,int_n" * property without the compulsory "-gpios" suffix. 
*/ { "fcs,int_n", NULL, "fcs,fusb302" }, #endif }; struct gpio_desc *desc; const char *legacy_id; unsigned int i; if (!con_id) return ERR_PTR(-ENOENT); for (i = 0; i < ARRAY_SIZE(gpios); i++) { if (strcmp(con_id, gpios[i].con_id)) continue; if (gpios[i].compatible && !of_device_is_compatible(np, gpios[i].compatible)) continue; legacy_id = gpios[i].legacy_id ?: gpios[i].con_id; desc = of_get_named_gpiod_flags(np, legacy_id, idx, of_flags); if (!gpiod_not_found(desc)) { pr_info("%s uses legacy gpio name '%s' instead of '%s-gpios'\n", of_node_full_name(np), legacy_id, con_id); return desc; } } return ERR_PTR(-ENOENT); } static struct gpio_desc *of_find_mt2701_gpio(struct device_node *np, const char *con_id, unsigned int idx, enum of_gpio_flags *of_flags) { struct gpio_desc *desc; const char *legacy_id; if (!IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)) return ERR_PTR(-ENOENT); if (!of_device_is_compatible(np, "mediatek,mt2701-cs42448-machine")) return ERR_PTR(-ENOENT); if (!con_id || strcmp(con_id, "i2s1-in-sel")) return ERR_PTR(-ENOENT); if (idx == 0) legacy_id = "i2s1-in-sel-gpio1"; else if (idx == 1) legacy_id = "i2s1-in-sel-gpio2"; else return ERR_PTR(-ENOENT); desc = of_get_named_gpiod_flags(np, legacy_id, 0, of_flags); if (!gpiod_not_found(desc)) pr_info("%s is using legacy gpio name '%s' instead of '%s-gpios'\n", of_node_full_name(np), legacy_id, con_id); return desc; } typedef struct gpio_desc *(*of_find_gpio_quirk)(struct device_node *np, const char *con_id, unsigned int idx, enum of_gpio_flags *of_flags); static const of_find_gpio_quirk of_find_gpio_quirks[] = { of_find_gpio_rename, of_find_mt2701_gpio, NULL }; struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *flags) { char prop_name[32]; /* 32 is max size of property name */ enum of_gpio_flags of_flags; const of_find_gpio_quirk *q; struct gpio_desc *desc; unsigned int i; /* Try GPIO property "foo-gpios" and "foo-gpio" */ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { if (con_id) snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id, gpio_suffixes[i]); else snprintf(prop_name, sizeof(prop_name), "%s", gpio_suffixes[i]); desc = of_get_named_gpiod_flags(np, prop_name, idx, &of_flags); if (!gpiod_not_found(desc)) break; } /* Properly named GPIO was not found, try workarounds */ for (q = of_find_gpio_quirks; gpiod_not_found(desc) && *q; q++) desc = (*q)(np, con_id, idx, &of_flags); if (IS_ERR(desc)) return desc; *flags = of_convert_gpio_flags(of_flags); return desc; } /** * of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API * @np: device node to get GPIO from * @chip: GPIO chip whose hog is parsed * @idx: Index of the GPIO to parse * @name: GPIO line name * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from * of_find_gpio() or of_parse_own_gpio() * @dflags: gpiod_flags - optional GPIO initialization flags * * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno * value on the error condition. 
*/ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, struct gpio_chip *chip, unsigned int idx, const char **name, unsigned long *lflags, enum gpiod_flags *dflags) { struct device_node *chip_np; enum of_gpio_flags xlate_flags; struct of_phandle_args gpiospec; struct gpio_desc *desc; unsigned int i; u32 tmp; int ret; chip_np = dev_of_node(&chip->gpiodev->dev); if (!chip_np) return ERR_PTR(-EINVAL); xlate_flags = 0; *lflags = GPIO_LOOKUP_FLAGS_DEFAULT; *dflags = GPIOD_ASIS; ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp); if (ret) return ERR_PTR(ret); gpiospec.np = chip_np; gpiospec.args_count = tmp; for (i = 0; i < tmp; i++) { ret = of_property_read_u32_index(np, "gpios", idx * tmp + i, &gpiospec.args[i]); if (ret) return ERR_PTR(ret); } desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, &xlate_flags); if (IS_ERR(desc)) return desc; *lflags = of_convert_gpio_flags(xlate_flags); if (of_property_read_bool(np, "input")) *dflags |= GPIOD_IN; else if (of_property_read_bool(np, "output-low")) *dflags |= GPIOD_OUT_LOW; else if (of_property_read_bool(np, "output-high")) *dflags |= GPIOD_OUT_HIGH; else { pr_warn("GPIO line %d (%pOFn): no hogging state specified, bailing out\n", desc_to_gpio(desc), np); return ERR_PTR(-EINVAL); } if (name && of_property_read_string(np, "line-name", name)) *name = np->name; return desc; } /** * of_gpiochip_add_hog - Add all hogs in a hog device node * @chip: gpio chip to act on * @hog: device node describing the hogs * * Returns error if it fails otherwise 0 on success. */ static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog) { enum gpiod_flags dflags; struct gpio_desc *desc; unsigned long lflags; const char *name; unsigned int i; int ret; for (i = 0;; i++) { desc = of_parse_own_gpio(hog, chip, i, &name, &lflags, &dflags); if (IS_ERR(desc)) break; ret = gpiod_hog(desc, name, lflags, dflags); if (ret < 0) return ret; #ifdef CONFIG_OF_DYNAMIC desc->hog = hog; #endif } return 0; } /** * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions * @chip: gpio chip to act on * * This is only used by of_gpiochip_add to request/set GPIO initial * configuration. * It returns error if it fails otherwise 0 on success. */ static int of_gpiochip_scan_gpios(struct gpio_chip *chip) { struct device_node *np; int ret; for_each_available_child_of_node(dev_of_node(&chip->gpiodev->dev), np) { if (!of_property_read_bool(np, "gpio-hog")) continue; ret = of_gpiochip_add_hog(chip, np); if (ret < 0) { of_node_put(np); return ret; } of_node_set_flag(np, OF_POPULATED); } return 0; } #ifdef CONFIG_OF_DYNAMIC /** * of_gpiochip_remove_hog - Remove all hogs in a hog device node * @chip: gpio chip to act on * @hog: device node describing the hogs */ static void of_gpiochip_remove_hog(struct gpio_chip *chip, struct device_node *hog) { struct gpio_desc *desc; for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED) if (desc->hog == hog) gpiochip_free_own_desc(desc); } static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) { return device_match_of_node(&chip->gpiodev->dev, data); } static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) { return gpiochip_find(np, of_gpiochip_match_node); } static int of_gpio_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; struct gpio_chip *chip; int ret; /* * This only supports adding and removing complete gpio-hog nodes. 
	 * Modifying an existing gpio-hog node is not supported (except for
	 * changing its "status" property, which is treated the same as
	 * addition/removal).
	 */
	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		if (!of_property_read_bool(rd->dn, "gpio-hog"))
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		chip = of_find_gpiochip_by_node(rd->dn->parent);
		if (chip == NULL)
			return NOTIFY_OK;	/* not for us */

		ret = of_gpiochip_add_hog(chip, rd->dn);
		if (ret < 0) {
			pr_err("%s: failed to add hogs for %pOF\n", __func__,
			       rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(ret);
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;	/* already depopulated */

		chip = of_find_gpiochip_by_node(rd->dn->parent);
		if (chip == NULL)
			return NOTIFY_OK;	/* not for us */

		of_gpiochip_remove_hog(chip, rd->dn);
		of_node_clear_flag(rd->dn, OF_POPULATED);
		break;
	}

	return NOTIFY_OK;
}

struct notifier_block gpio_of_notifier = {
	.notifier_call = of_gpio_notify,
};
#endif /* CONFIG_OF_DYNAMIC */

/**
 * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
 * @gc: pointer to the gpio_chip structure
 * @gpiospec: GPIO specifier as found in the device tree
 * @flags: a flags pointer to fill in
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * GPIO chips. This function performs only one sanity check: whether the
 * GPIO number is less than ngpio (that is specified in the gpio_chip).
 */
static int of_gpio_simple_xlate(struct gpio_chip *gc,
				const struct of_phandle_args *gpiospec,
				u32 *flags)
{
	/*
	 * We're discouraging gpio_cells < 2, since that way you'll have to
	 * write your own xlate function (that will have to retrieve the GPIO
	 * number and the flags from a single gpio cell -- this is possible,
	 * but not recommended).
	 */
	if (gc->of_gpio_n_cells < 2) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	if (gpiospec->args[0] >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}

#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
#include <linux/gpio/legacy-of-mm-gpiochip.h>
/**
 * of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
 * @np: device node of the GPIO chip
 * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
 * @data: driver data to store in the struct gpio_chip
 *
 * To use this function you should allocate and fill mm_gc with:
 *
 * 1) In the gpio_chip structure:
 *    - all the callbacks
 *    - of_gpio_n_cells
 *    - of_xlate callback (optional)
 *
 * 2) In the of_mm_gpio_chip structure:
 *    - save_regs callback (optional)
 *
 * If it succeeds, this function will map the bank's memory and will
 * do all necessary work for you. Then you'll be able to use .regs
 * to manage GPIOs from the callbacks.
*/ int of_mm_gpiochip_add_data(struct device_node *np, struct of_mm_gpio_chip *mm_gc, void *data) { int ret = -ENOMEM; struct gpio_chip *gc = &mm_gc->gc; gc->label = kasprintf(GFP_KERNEL, "%pOF", np); if (!gc->label) goto err0; mm_gc->regs = of_iomap(np, 0); if (!mm_gc->regs) goto err1; gc->base = -1; if (mm_gc->save_regs) mm_gc->save_regs(mm_gc); fwnode_handle_put(mm_gc->gc.fwnode); mm_gc->gc.fwnode = fwnode_handle_get(of_fwnode_handle(np)); ret = gpiochip_add_data(gc, data); if (ret) goto err2; return 0; err2: of_node_put(np); iounmap(mm_gc->regs); err1: kfree(gc->label); err0: pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret); return ret; } EXPORT_SYMBOL_GPL(of_mm_gpiochip_add_data); /** * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank) * @mm_gc: pointer to the of_mm_gpio_chip allocated structure */ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc) { struct gpio_chip *gc = &mm_gc->gc; gpiochip_remove(gc); iounmap(mm_gc->regs); kfree(gc->label); } EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove); #endif #ifdef CONFIG_PINCTRL static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { struct of_phandle_args pinspec; struct pinctrl_dev *pctldev; struct device_node *np; int index = 0, ret; const char *name; static const char group_names_propname[] = "gpio-ranges-group-names"; struct property *group_names; np = dev_of_node(&chip->gpiodev->dev); if (!np) return 0; group_names = of_find_property(np, group_names_propname, NULL); for (;; index++) { ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, index, &pinspec); if (ret) break; pctldev = of_pinctrl_get(pinspec.np); of_node_put(pinspec.np); if (!pctldev) return -EPROBE_DEFER; if (pinspec.args[2]) { if (group_names) { of_property_read_string_index(np, group_names_propname, index, &name); if (strlen(name)) { pr_err("%pOF: Group name of numeric GPIO ranges must be the empty string.\n", np); break; } } /* npins != 0: linear range */ ret = gpiochip_add_pin_range(chip, pinctrl_dev_get_devname(pctldev), pinspec.args[0], pinspec.args[1], pinspec.args[2]); if (ret) return ret; } else { /* npins == 0: special range */ if (pinspec.args[1]) { pr_err("%pOF: Illegal gpio-range format.\n", np); break; } if (!group_names) { pr_err("%pOF: GPIO group range requested but no %s property.\n", np, group_names_propname); break; } ret = of_property_read_string_index(np, group_names_propname, index, &name); if (ret) break; if (!strlen(name)) { pr_err("%pOF: Group name of GPIO group range cannot be the empty string.\n", np); break; } ret = gpiochip_add_pingroup_range(chip, pctldev, pinspec.args[0], name); if (ret) return ret; } } return 0; } #else static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; } #endif int of_gpiochip_add(struct gpio_chip *chip) { struct device_node *np; int ret; np = dev_of_node(&chip->gpiodev->dev); if (!np) return 0; if (!chip->of_xlate) { chip->of_gpio_n_cells = 2; chip->of_xlate = of_gpio_simple_xlate; } if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS) return -EINVAL; ret = of_gpiochip_add_pin_range(chip); if (ret) return ret; of_node_get(np); ret = of_gpiochip_scan_gpios(chip); if (ret) of_node_put(np); return ret; } void of_gpiochip_remove(struct gpio_chip *chip) { of_node_put(dev_of_node(&chip->gpiodev->dev)); }
linux-master
drivers/gpio/gpiolib-of.c
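The two-cell xlate above covers the common <line flags> binding; a controller with a different specifier layout supplies its own of_xlate. A minimal sketch for a hypothetical three-cell <bank line flags> binding follows; demo_gpio_xlate and the eight-lines-per-bank layout are illustrative assumptions, not part of gpiolib-of.c.

#include <linux/gpio/driver.h>
#include <linux/of.h>

/* Hypothetical xlate for a chip registered with of_gpio_n_cells = 3,
 * whose DT specifier is <bank line flags> with 8 lines per bank. */
static int demo_gpio_xlate(struct gpio_chip *gc,
			   const struct of_phandle_args *gpiospec,
			   u32 *flags)
{
	unsigned int pin;

	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	/* Fold the two-dimensional <bank line> address into a flat number */
	pin = gpiospec->args[0] * 8 + gpiospec->args[1];
	if (pin >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[2];

	return pin;
}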
// SPDX-License-Identifier: GPL-2.0+ // // Gateworks I2C PLD GPIO expander // // Copyright (C) 2019 Linus Walleij <[email protected]> // // Based on code and know-how from the OpenWrt driver: // Copyright (C) 2009 Gateworks Corporation // Authors: Chris Lang, Imre Kaloz #include <linux/bits.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/module.h> /** * struct gw_pld - State container for Gateworks PLD * @chip: GPIO chip instance * @client: I2C client * @out: shadow register for the output byte */ struct gw_pld { struct gpio_chip chip; struct i2c_client *client; u8 out; }; /* * The Gateworks I2C PLD chip only exposes one read and one write register. * Writing a "one" bit (to match the reset state) lets that pin be used as an * input. It is an open-drain model. */ static int gw_pld_input8(struct gpio_chip *gc, unsigned offset) { struct gw_pld *gw = gpiochip_get_data(gc); gw->out |= BIT(offset); return i2c_smbus_write_byte(gw->client, gw->out); } static int gw_pld_get8(struct gpio_chip *gc, unsigned offset) { struct gw_pld *gw = gpiochip_get_data(gc); s32 val; val = i2c_smbus_read_byte(gw->client); return (val < 0) ? 0 : !!(val & BIT(offset)); } static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value) { struct gw_pld *gw = gpiochip_get_data(gc); if (value) gw->out |= BIT(offset); else gw->out &= ~BIT(offset); return i2c_smbus_write_byte(gw->client, gw->out); } static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value) { gw_pld_output8(gc, offset, value); } static int gw_pld_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct gw_pld *gw; int ret; gw = devm_kzalloc(dev, sizeof(*gw), GFP_KERNEL); if (!gw) return -ENOMEM; gw->chip.base = -1; gw->chip.can_sleep = true; gw->chip.parent = dev; gw->chip.owner = THIS_MODULE; gw->chip.label = dev_name(dev); gw->chip.ngpio = 8; gw->chip.direction_input = gw_pld_input8; gw->chip.get = gw_pld_get8; gw->chip.direction_output = gw_pld_output8; gw->chip.set = gw_pld_set8; gw->client = client; /* * The Gateworks I2C PLD chip does not properly send the acknowledge * bit at all times, but we can still use the standard i2c_smbus * functions by simply ignoring this bit. */ client->flags |= I2C_M_IGNORE_NAK; gw->out = 0xFF; i2c_set_clientdata(client, gw); ret = devm_gpiochip_add_data(dev, &gw->chip, gw); if (ret) return ret; dev_info(dev, "registered Gateworks PLD GPIO device\n"); return 0; } static const struct i2c_device_id gw_pld_id[] = { { "gw-pld", }, { } }; MODULE_DEVICE_TABLE(i2c, gw_pld_id); static const struct of_device_id gw_pld_dt_ids[] = { { .compatible = "gateworks,pld-gpio", }, { }, }; MODULE_DEVICE_TABLE(of, gw_pld_dt_ids); static struct i2c_driver gw_pld_driver = { .driver = { .name = "gw_pld", .of_match_table = gw_pld_dt_ids, }, .probe = gw_pld_probe, .id_table = gw_pld_id, }; module_i2c_driver(gw_pld_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linus Walleij <[email protected]>");
linux-master
drivers/gpio/gpio-gw-pld.c
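Because the expander registers with can_sleep = true (every access is an I2C transfer), consumers must use the _cansleep accessors. A minimal consumer sketch, assuming a hypothetical board mapping named "user-led"; demo_consumer_probe is illustrative, not part of the driver.

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical consumer driving one PLD line mapped as "user-led" */
static int demo_consumer_probe(struct platform_device *pdev)
{
	struct gpio_desc *led;

	led = devm_gpiod_get(&pdev->dev, "user-led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	/* Sleeping chip: the non-_cansleep variant would WARN here */
	gpiod_set_value_cansleep(led, 1);

	return 0;
}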
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for the ACCES 104-DIO-48E series * Copyright (C) 2016 William Breathitt Gray * * This driver supports the following ACCES devices: 104-DIO-48E and * 104-DIO-24E. */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i8254.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/isa.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/regmap.h> #include <linux/spinlock.h> #include <linux/types.h> #include "gpio-i8255.h" MODULE_IMPORT_NS(I8255); #define DIO48E_EXTENT 16 #define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT) static unsigned int base[MAX_NUM_DIO48E]; static unsigned int num_dio48e; module_param_hw_array(base, uint, ioport, &num_dio48e, 0); MODULE_PARM_DESC(base, "ACCES 104-DIO-48E base addresses"); static unsigned int irq[MAX_NUM_DIO48E]; static unsigned int num_irq; module_param_hw_array(irq, uint, irq, &num_irq, 0); MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers"); #define DIO48E_ENABLE_INTERRUPT 0xB #define DIO48E_DISABLE_INTERRUPT DIO48E_ENABLE_INTERRUPT #define DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING 0xD #define DIO48E_DISABLE_COUNTER_TIMER_ADDRESSING DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING #define DIO48E_CLEAR_INTERRUPT 0xF #define DIO48E_NUM_PPI 2 static const struct regmap_range dio48e_wr_ranges[] = { regmap_reg_range(0x0, 0x9), regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF), }; static const struct regmap_range dio48e_rd_ranges[] = { regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x6), regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF), }; static const struct regmap_range dio48e_volatile_ranges[] = { i8255_volatile_regmap_range(0x0), i8255_volatile_regmap_range(0x4), regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF), }; static const struct regmap_range dio48e_precious_ranges[] = { regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF), }; static const struct regmap_access_table dio48e_wr_table = { .yes_ranges = dio48e_wr_ranges, .n_yes_ranges = ARRAY_SIZE(dio48e_wr_ranges), }; static const struct regmap_access_table dio48e_rd_table = { .yes_ranges = dio48e_rd_ranges, .n_yes_ranges = ARRAY_SIZE(dio48e_rd_ranges), }; static const struct regmap_access_table dio48e_volatile_table = { .yes_ranges = dio48e_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(dio48e_volatile_ranges), }; static const struct regmap_access_table dio48e_precious_table = { .yes_ranges = dio48e_precious_ranges, .n_yes_ranges = ARRAY_SIZE(dio48e_precious_ranges), }; static const struct regmap_range pit_wr_ranges[] = { regmap_reg_range(0x0, 0x3), }; static const struct regmap_range pit_rd_ranges[] = { regmap_reg_range(0x0, 0x2), }; static const struct regmap_access_table pit_wr_table = { .yes_ranges = pit_wr_ranges, .n_yes_ranges = ARRAY_SIZE(pit_wr_ranges), }; static const struct regmap_access_table pit_rd_table = { .yes_ranges = pit_rd_ranges, .n_yes_ranges = ARRAY_SIZE(pit_rd_ranges), }; /* only bit 3 on each respective Port C supports interrupts */ #define DIO48E_REGMAP_IRQ(_ppi) \ [19 + (_ppi) * 24] = { \ .mask = BIT(_ppi), \ .type = { .types_supported = IRQ_TYPE_EDGE_RISING }, \ } static const struct regmap_irq dio48e_regmap_irqs[] = { DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1), }; /** * struct dio48e_gpio - GPIO device private data structure * @lock: synchronization lock to prevent I/O race conditions * 
@map: Regmap for the device * @regs: virtual mapping for device registers * @flags: IRQ flags saved during locking * @irq_mask: Current IRQ mask state on the device */ struct dio48e_gpio { raw_spinlock_t lock; struct regmap *map; void __iomem *regs; unsigned long flags; unsigned int irq_mask; }; static void dio48e_regmap_lock(void *lock_arg) __acquires(&dio48egpio->lock) { struct dio48e_gpio *const dio48egpio = lock_arg; unsigned long flags; raw_spin_lock_irqsave(&dio48egpio->lock, flags); dio48egpio->flags = flags; } static void dio48e_regmap_unlock(void *lock_arg) __releases(&dio48egpio->lock) { struct dio48e_gpio *const dio48egpio = lock_arg; raw_spin_unlock_irqrestore(&dio48egpio->lock, dio48egpio->flags); } static void pit_regmap_lock(void *lock_arg) __acquires(&dio48egpio->lock) { struct dio48e_gpio *const dio48egpio = lock_arg; unsigned long flags; raw_spin_lock_irqsave(&dio48egpio->lock, flags); dio48egpio->flags = flags; iowrite8(0x00, dio48egpio->regs + DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING); } static void pit_regmap_unlock(void *lock_arg) __releases(&dio48egpio->lock) { struct dio48e_gpio *const dio48egpio = lock_arg; ioread8(dio48egpio->regs + DIO48E_DISABLE_COUNTER_TIMER_ADDRESSING); raw_spin_unlock_irqrestore(&dio48egpio->lock, dio48egpio->flags); } static int dio48e_handle_mask_sync(const int index, const unsigned int mask_buf_def, const unsigned int mask_buf, void *const irq_drv_data) { struct dio48e_gpio *const dio48egpio = irq_drv_data; const unsigned int prev_mask = dio48egpio->irq_mask; int err; unsigned int val; /* exit early if no change since the previous mask */ if (mask_buf == prev_mask) return 0; /* remember the current mask for the next mask sync */ dio48egpio->irq_mask = mask_buf; /* if all previously masked, enable interrupts when unmasking */ if (prev_mask == mask_buf_def) { err = regmap_write(dio48egpio->map, DIO48E_CLEAR_INTERRUPT, 0x00); if (err) return err; return regmap_write(dio48egpio->map, DIO48E_ENABLE_INTERRUPT, 0x00); } /* if all are currently masked, disable interrupts */ if (mask_buf == mask_buf_def) return regmap_read(dio48egpio->map, DIO48E_DISABLE_INTERRUPT, &val); return 0; } #define DIO48E_NGPIO 48 static const char *dio48e_names[DIO48E_NGPIO] = { "PPI Group 0 Port A 0", "PPI Group 0 Port A 1", "PPI Group 0 Port A 2", "PPI Group 0 Port A 3", "PPI Group 0 Port A 4", "PPI Group 0 Port A 5", "PPI Group 0 Port A 6", "PPI Group 0 Port A 7", "PPI Group 0 Port B 0", "PPI Group 0 Port B 1", "PPI Group 0 Port B 2", "PPI Group 0 Port B 3", "PPI Group 0 Port B 4", "PPI Group 0 Port B 5", "PPI Group 0 Port B 6", "PPI Group 0 Port B 7", "PPI Group 0 Port C 0", "PPI Group 0 Port C 1", "PPI Group 0 Port C 2", "PPI Group 0 Port C 3", "PPI Group 0 Port C 4", "PPI Group 0 Port C 5", "PPI Group 0 Port C 6", "PPI Group 0 Port C 7", "PPI Group 1 Port A 0", "PPI Group 1 Port A 1", "PPI Group 1 Port A 2", "PPI Group 1 Port A 3", "PPI Group 1 Port A 4", "PPI Group 1 Port A 5", "PPI Group 1 Port A 6", "PPI Group 1 Port A 7", "PPI Group 1 Port B 0", "PPI Group 1 Port B 1", "PPI Group 1 Port B 2", "PPI Group 1 Port B 3", "PPI Group 1 Port B 4", "PPI Group 1 Port B 5", "PPI Group 1 Port B 6", "PPI Group 1 Port B 7", "PPI Group 1 Port C 0", "PPI Group 1 Port C 1", "PPI Group 1 Port C 2", "PPI Group 1 Port C 3", "PPI Group 1 Port C 4", "PPI Group 1 Port C 5", "PPI Group 1 Port C 6", "PPI Group 1 Port C 7" }; static int dio48e_irq_init_hw(struct regmap *const map) { unsigned int val; /* Disable IRQ by default */ return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val); } 
static int dio48e_probe(struct device *dev, unsigned int id) { const char *const name = dev_name(dev); struct i8255_regmap_config config = {}; void __iomem *regs; struct regmap *map; struct regmap_config dio48e_regmap_config; struct regmap_config pit_regmap_config; struct i8254_regmap_config pit_config; int err; struct regmap_irq_chip *chip; struct dio48e_gpio *dio48egpio; struct regmap_irq_chip_data *chip_data; if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) { dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", base[id], base[id] + DIO48E_EXTENT); return -EBUSY; } dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL); if (!dio48egpio) return -ENOMEM; regs = devm_ioport_map(dev, base[id], DIO48E_EXTENT); if (!regs) return -ENOMEM; dio48egpio->regs = regs; raw_spin_lock_init(&dio48egpio->lock); dio48e_regmap_config = (struct regmap_config) { .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .lock = dio48e_regmap_lock, .unlock = dio48e_regmap_unlock, .lock_arg = dio48egpio, .io_port = true, .wr_table = &dio48e_wr_table, .rd_table = &dio48e_rd_table, .volatile_table = &dio48e_volatile_table, .precious_table = &dio48e_precious_table, .cache_type = REGCACHE_FLAT, }; map = devm_regmap_init_mmio(dev, regs, &dio48e_regmap_config); if (IS_ERR(map)) return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n"); dio48egpio->map = map; pit_regmap_config = (struct regmap_config) { .name = "i8254", .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .lock = pit_regmap_lock, .unlock = pit_regmap_unlock, .lock_arg = dio48egpio, .io_port = true, .wr_table = &pit_wr_table, .rd_table = &pit_rd_table, }; pit_config.map = devm_regmap_init_mmio(dev, regs, &pit_regmap_config); if (IS_ERR(pit_config.map)) return dev_err_probe(dev, PTR_ERR(pit_config.map), "Unable to initialize i8254 register map\n"); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->name = name; chip->mask_base = DIO48E_ENABLE_INTERRUPT; chip->ack_base = DIO48E_CLEAR_INTERRUPT; chip->no_status = true; chip->num_regs = 1; chip->irqs = dio48e_regmap_irqs; chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs); chip->handle_mask_sync = dio48e_handle_mask_sync; chip->irq_drv_data = dio48egpio; /* Initialize to prevent spurious interrupts before we're ready */ err = dio48e_irq_init_hw(map); if (err) return err; err = devm_regmap_add_irq_chip(dev, map, irq[id], 0, 0, chip, &chip_data); if (err) return dev_err_probe(dev, err, "IRQ registration failed\n"); pit_config.parent = dev; err = devm_i8254_regmap_register(dev, &pit_config); if (err) return err; config.parent = dev; config.map = map; config.num_ppi = DIO48E_NUM_PPI; config.names = dio48e_names; config.domain = regmap_irq_get_domain(chip_data); return devm_i8255_regmap_register(dev, &config); } static struct isa_driver dio48e_driver = { .probe = dio48e_probe, .driver = { .name = "104-dio-48e" }, }; module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq); MODULE_AUTHOR("William Breathitt Gray <[email protected]>"); MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(I8254);
linux-master
drivers/gpio/gpio-104-dio-48e.c
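The driver above hands regmap a custom lock/unlock pair so register accesses and the IRQ handler share one raw spinlock, stashing the saved IRQ flags in the driver data between the two callbacks. A minimal sketch of that pattern under hypothetical names (demo_dev, demo_regmap_lock); the real config additionally sets .lock_arg to the driver-data pointer, as dio48e_regmap_config does.

#include <linux/spinlock.h>

/* Hypothetical driver data carrying the lock and the saved flags */
struct demo_dev {
	raw_spinlock_t lock;
	unsigned long flags;
};

/* Used as regmap_config.lock: take the lock, remember the IRQ flags */
static void demo_regmap_lock(void *lock_arg)
{
	struct demo_dev *d = lock_arg;
	unsigned long flags;

	raw_spin_lock_irqsave(&d->lock, flags);
	d->flags = flags;
}

/* Used as regmap_config.unlock: restore the flags saved at lock time */
static void demo_regmap_unlock(void *lock_arg)
{
	struct demo_dev *d = lock_arg;

	raw_spin_unlock_irqrestore(&d->lock, d->flags);
}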
// SPDX-License-Identifier: GPL-2.0-only /* * TI/National Semiconductor LP3943 GPIO driver * * Copyright 2013 Texas Instruments * * Author: Milo Kim <[email protected]> */ #include <linux/bitops.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/mfd/lp3943.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> enum lp3943_gpios { LP3943_GPIO1, LP3943_GPIO2, LP3943_GPIO3, LP3943_GPIO4, LP3943_GPIO5, LP3943_GPIO6, LP3943_GPIO7, LP3943_GPIO8, LP3943_GPIO9, LP3943_GPIO10, LP3943_GPIO11, LP3943_GPIO12, LP3943_GPIO13, LP3943_GPIO14, LP3943_GPIO15, LP3943_GPIO16, LP3943_MAX_GPIO, }; struct lp3943_gpio { struct gpio_chip chip; struct lp3943 *lp3943; u16 input_mask; /* 1 = GPIO is input direction, 0 = output */ }; static int lp3943_gpio_request(struct gpio_chip *chip, unsigned int offset) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); struct lp3943 *lp3943 = lp3943_gpio->lp3943; /* Return an error if the pin is already assigned */ if (test_and_set_bit(offset, &lp3943->pin_used)) return -EBUSY; return 0; } static void lp3943_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); struct lp3943 *lp3943 = lp3943_gpio->lp3943; clear_bit(offset, &lp3943->pin_used); } static int lp3943_gpio_set_mode(struct lp3943_gpio *lp3943_gpio, u8 offset, u8 val) { struct lp3943 *lp3943 = lp3943_gpio->lp3943; const struct lp3943_reg_cfg *mux = lp3943->mux_cfg; return lp3943_update_bits(lp3943, mux[offset].reg, mux[offset].mask, val << mux[offset].shift); } static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); lp3943_gpio->input_mask |= BIT(offset); return lp3943_gpio_set_mode(lp3943_gpio, offset, LP3943_GPIO_IN); } static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio, struct gpio_chip *chip, unsigned int offset) { u8 addr, read; int err; switch (offset) { case LP3943_GPIO1 ... LP3943_GPIO8: addr = LP3943_REG_GPIO_A; break; case LP3943_GPIO9 ... LP3943_GPIO16: addr = LP3943_REG_GPIO_B; offset = offset - 8; break; default: return -EINVAL; } err = lp3943_read_byte(lp3943_gpio->lp3943, addr, &read); if (err) return err; return !!(read & BIT(offset)); } static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio, struct gpio_chip *chip, unsigned int offset) { struct lp3943 *lp3943 = lp3943_gpio->lp3943; const struct lp3943_reg_cfg *mux = lp3943->mux_cfg; u8 read; int err; err = lp3943_read_byte(lp3943, mux[offset].reg, &read); if (err) return err; read = (read & mux[offset].mask) >> mux[offset].shift; if (read == LP3943_GPIO_OUT_HIGH) return 1; else if (read == LP3943_GPIO_OUT_LOW) return 0; else return -EINVAL; } static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); /* * Limitation: * LP3943 doesn't have the GPIO direction register. It provides * only input and output status registers. * So, direction info is required to handle the 'get' operation. * This variable is updated whenever the direction is changed and * it is used here. 
*/ if (lp3943_gpio->input_mask & BIT(offset)) return lp3943_get_gpio_in_status(lp3943_gpio, chip, offset); else return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset); } static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); u8 data; if (value) data = LP3943_GPIO_OUT_HIGH; else data = LP3943_GPIO_OUT_LOW; lp3943_gpio_set_mode(lp3943_gpio, offset, data); } static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip); lp3943_gpio_set(chip, offset, value); lp3943_gpio->input_mask &= ~BIT(offset); return 0; } static const struct gpio_chip lp3943_gpio_chip = { .label = "lp3943", .owner = THIS_MODULE, .request = lp3943_gpio_request, .free = lp3943_gpio_free, .direction_input = lp3943_gpio_direction_input, .get = lp3943_gpio_get, .direction_output = lp3943_gpio_direction_output, .set = lp3943_gpio_set, .base = -1, .ngpio = LP3943_MAX_GPIO, .can_sleep = 1, }; static int lp3943_gpio_probe(struct platform_device *pdev) { struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent); struct lp3943_gpio *lp3943_gpio; lp3943_gpio = devm_kzalloc(&pdev->dev, sizeof(*lp3943_gpio), GFP_KERNEL); if (!lp3943_gpio) return -ENOMEM; lp3943_gpio->lp3943 = lp3943; lp3943_gpio->chip = lp3943_gpio_chip; lp3943_gpio->chip.parent = &pdev->dev; return devm_gpiochip_add_data(&pdev->dev, &lp3943_gpio->chip, lp3943_gpio); } static const struct of_device_id lp3943_gpio_of_match[] = { { .compatible = "ti,lp3943-gpio", }, { } }; MODULE_DEVICE_TABLE(of, lp3943_gpio_of_match); static struct platform_driver lp3943_gpio_driver = { .probe = lp3943_gpio_probe, .driver = { .name = "lp3943-gpio", .of_match_table = lp3943_gpio_of_match, }, }; module_platform_driver(lp3943_gpio_driver); MODULE_DESCRIPTION("LP3943 GPIO driver"); MODULE_ALIAS("platform:lp3943-gpio"); MODULE_AUTHOR("Milo Kim"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-lp3943.c
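The LP3943 has no readable direction register, so the driver mirrors every direction change into input_mask and uses it to pick the right status source on reads. A stripped-down sketch of that shadow-direction pattern; demo_gpio and the two read stubs are illustrative stand-ins, not LP3943 API.

#include <linux/bits.h>
#include <linux/gpio/driver.h>

/* Hypothetical expander state: direction is tracked in software only */
struct demo_gpio {
	struct gpio_chip chip;
	u16 input_mask;		/* 1 = input, 0 = output */
};

/* Stand-ins for the device's separate input/output status reads */
static int demo_read_input(struct demo_gpio *d, unsigned int offset)
{
	return 0;
}

static int demo_read_output(struct demo_gpio *d, unsigned int offset)
{
	return 0;
}

static int demo_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct demo_gpio *d = gpiochip_get_data(chip);

	/* Dispatch on the cached direction, as lp3943_gpio_get() does */
	if (d->input_mask & BIT(offset))
		return demo_read_input(d, offset);

	return demo_read_output(d, offset);
}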
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO controller in LSI ZEVIO SoCs. * * Author: Fabian Vogt <[email protected]> */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/gpio/driver.h> /* * Memory layout: * This chip has four gpio sections, each controls 8 GPIOs. * Bit 0 in section 0 is GPIO 0, bit 2 in section 1 is GPIO 10. * Disclaimer: Reverse engineered! * For more information refer to: * http://hackspire.unsads.com/wiki/index.php/Memory-mapped_I/O_ports#90000000_-_General_Purpose_I.2FO_.28GPIO.29 * * 0x00-0x3F: Section 0 * +0x00: Masked interrupt status (read-only) * +0x04: R: Interrupt status W: Reset interrupt status * +0x08: R: Interrupt mask W: Mask interrupt * +0x0C: W: Unmask interrupt (write-only) * +0x10: Direction: I/O=1/0 * +0x14: Output * +0x18: Input (read-only) * +0x20: R: Level interrupt W: Set as level interrupt * 0x40-0x7F: Section 1 * 0x80-0xBF: Section 2 * 0xC0-0xFF: Section 3 */ #define ZEVIO_GPIO_SECTION_SIZE 0x40 /* Offsets to various registers */ #define ZEVIO_GPIO_INT_MASKED_STATUS 0x00 #define ZEVIO_GPIO_INT_STATUS 0x04 #define ZEVIO_GPIO_INT_UNMASK 0x08 #define ZEVIO_GPIO_INT_MASK 0x0C #define ZEVIO_GPIO_DIRECTION 0x10 #define ZEVIO_GPIO_OUTPUT 0x14 #define ZEVIO_GPIO_INPUT 0x18 #define ZEVIO_GPIO_INT_STICKY 0x20 /* Bit number of GPIO in its section */ #define ZEVIO_GPIO_BIT(gpio) (gpio&7) struct zevio_gpio { struct gpio_chip chip; spinlock_t lock; void __iomem *regs; }; static inline u32 zevio_gpio_port_get(struct zevio_gpio *c, unsigned pin, unsigned port_offset) { unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE; return readl(IOMEM(c->regs + section_offset + port_offset)); } static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin, unsigned port_offset, u32 val) { unsigned section_offset = ((pin >> 3) & 3)*ZEVIO_GPIO_SECTION_SIZE; writel(val, IOMEM(c->regs + section_offset + port_offset)); } /* Functions for struct gpio_chip */ static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin) { struct zevio_gpio *controller = gpiochip_get_data(chip); u32 val, dir; spin_lock(&controller->lock); dir = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION); if (dir & BIT(ZEVIO_GPIO_BIT(pin))) val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT); else val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT); spin_unlock(&controller->lock); return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1; } static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value) { struct zevio_gpio *controller = gpiochip_get_data(chip); u32 val; spin_lock(&controller->lock); val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT); if (value) val |= BIT(ZEVIO_GPIO_BIT(pin)); else val &= ~BIT(ZEVIO_GPIO_BIT(pin)); zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val); spin_unlock(&controller->lock); } static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin) { struct zevio_gpio *controller = gpiochip_get_data(chip); u32 val; spin_lock(&controller->lock); val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION); val |= BIT(ZEVIO_GPIO_BIT(pin)); zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val); spin_unlock(&controller->lock); return 0; } static int zevio_gpio_direction_output(struct gpio_chip *chip, unsigned pin, int value) { struct zevio_gpio *controller = gpiochip_get_data(chip); u32 val; 
spin_lock(&controller->lock); val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT); if (value) val |= BIT(ZEVIO_GPIO_BIT(pin)); else val &= ~BIT(ZEVIO_GPIO_BIT(pin)); zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val); val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION); val &= ~BIT(ZEVIO_GPIO_BIT(pin)); zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_DIRECTION, val); spin_unlock(&controller->lock); return 0; } static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin) { /* * TODO: Implement IRQs. * Not implemented yet due to weird lockups */ return -ENXIO; } static const struct gpio_chip zevio_gpio_chip = { .direction_input = zevio_gpio_direction_input, .direction_output = zevio_gpio_direction_output, .set = zevio_gpio_set, .get = zevio_gpio_get, .to_irq = zevio_gpio_to_irq, .base = 0, .owner = THIS_MODULE, .ngpio = 32, }; /* Initialization */ static int zevio_gpio_probe(struct platform_device *pdev) { struct zevio_gpio *controller; int status, i; controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL); if (!controller) return -ENOMEM; /* Copy our reference */ controller->chip = zevio_gpio_chip; controller->chip.parent = &pdev->dev; controller->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(controller->regs)) return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs), "failed to ioremap memory resource\n"); status = devm_gpiochip_add_data(&pdev->dev, &controller->chip, controller); if (status) { dev_err(&pdev->dev, "failed to add gpiochip: %d\n", status); return status; } spin_lock_init(&controller->lock); /* Disable interrupts, they only cause errors */ for (i = 0; i < controller->chip.ngpio; i += 8) zevio_gpio_port_set(controller, i, ZEVIO_GPIO_INT_MASK, 0xFF); dev_dbg(controller->chip.parent, "ZEVIO GPIO controller set up!\n"); return 0; } static const struct of_device_id zevio_gpio_of_match[] = { { .compatible = "lsi,zevio-gpio", }, { }, }; static struct platform_driver zevio_gpio_driver = { .driver = { .name = "gpio-zevio", .of_match_table = zevio_gpio_of_match, .suppress_bind_attrs = true, }, .probe = zevio_gpio_probe, }; builtin_platform_driver(zevio_gpio_driver);
linux-master
drivers/gpio/gpio-zevio.c
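The zevio block repeats one register layout per eight-GPIO section, so every accessor computes a section base from the pin number before adding the register offset. A minimal sketch of that arithmetic with hypothetical names (demo_port_get, DEMO_SECTION_SIZE); the layout constants mirror the driver's.

#include <linux/io.h>

#define DEMO_SECTION_SIZE 0x40

/* Four sections of eight GPIOs: bits 3-4 of the pin number select the
 * section, bits 0-2 select the bit within the addressed register.
 * Pin 10 therefore lands in section 1 (base 0x40) at bit 2. */
static inline u32 demo_port_get(void __iomem *base, unsigned int pin,
				unsigned int reg)
{
	unsigned int section = ((pin >> 3) & 3) * DEMO_SECTION_SIZE;

	return readl(base + section + reg);
}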
// SPDX-License-Identifier: GPL-2.0-or-later /* * CLPS711X GPIO driver * * Copyright (C) 2012,2013 Alexander Shiyan <[email protected]> */ #include <linux/err.h> #include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/platform_device.h> static int clps711x_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; void __iomem *dat, *dir; struct gpio_chip *gc; int err, id; if (!np) return -ENODEV; id = of_alias_get_id(np, "gpio"); if ((id < 0) || (id > 4)) return -ENODEV; gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL); if (!gc) return -ENOMEM; dat = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dat)) return PTR_ERR(dat); dir = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dir)) return PTR_ERR(dir); switch (id) { case 3: /* PORTD is inverted logic for direction register */ err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL, NULL, dir, 0); break; default: err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL, dir, NULL, 0); break; } if (err) return err; switch (id) { case 4: /* PORTE is 3 lines only */ gc->ngpio = 3; break; default: break; } gc->base = -1; gc->owner = THIS_MODULE; platform_set_drvdata(pdev, gc); return devm_gpiochip_add_data(&pdev->dev, gc, NULL); } static const struct of_device_id clps711x_gpio_ids[] = { { .compatible = "cirrus,ep7209-gpio" }, { } }; MODULE_DEVICE_TABLE(of, clps711x_gpio_ids); static struct platform_driver clps711x_gpio_driver = { .driver = { .name = "clps711x-gpio", .of_match_table = clps711x_gpio_ids, }, .probe = clps711x_gpio_probe, }; module_platform_driver(clps711x_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shiyan <[email protected]>"); MODULE_DESCRIPTION("CLPS711X GPIO driver"); MODULE_ALIAS("platform:clps711x-gpio");
linux-master
drivers/gpio/gpio-clps711x.c
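The PORTD special case above hinges on which slot the direction register is passed to bgpio_init(): a "1 = output" register goes in dirout, an inverted "1 = input" register in dirin, and the generic library derives the rest. A sketch of the two call shapes wrapped in a hypothetical helper (demo_register_bank); gc, dev, dat and dir are assumed to be prepared as in the probe above.

#include <linux/gpio/driver.h>

/* Hypothetical wrapper: pick the bgpio_init() slot that matches the
 * bank's direction-register polarity. */
static int demo_register_bank(struct gpio_chip *gc, struct device *dev,
			      void __iomem *dat, void __iomem *dir,
			      bool dir_is_inverted)
{
	if (dir_is_inverted)	/* PORTD-style: 1 = input, pass as dirin */
		return bgpio_init(gc, dev, 1, dat, NULL, NULL,
				  NULL, dir, 0);

	/* Normal polarity: 1 = output, pass as dirout */
	return bgpio_init(gc, dev, 1, dat, NULL, NULL, dir, NULL, 0);
}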
// SPDX-License-Identifier: GPL-2.0-or-later /* * RDC321x GPIO driver * * Copyright (C) 2008, Volker Weiss <[email protected]> * Copyright (C) 2007-2010 Florian Fainelli <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/gpio/driver.h> #include <linux/mfd/rdc321x.h> #include <linux/slab.h> struct rdc321x_gpio { spinlock_t lock; struct pci_dev *sb_pdev; u32 data_reg[2]; int reg1_ctrl_base; int reg1_data_base; int reg2_ctrl_base; int reg2_data_base; struct gpio_chip chip; }; /* read GPIO pin */ static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { struct rdc321x_gpio *gpch; u32 value = 0; int reg; gpch = gpiochip_get_data(chip); reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base; spin_lock(&gpch->lock); pci_write_config_dword(gpch->sb_pdev, reg, gpch->data_reg[gpio < 32 ? 0 : 1]); pci_read_config_dword(gpch->sb_pdev, reg, &value); spin_unlock(&gpch->lock); return (1 << (gpio & 0x1f)) & value ? 1 : 0; } static void rdc_gpio_set_value_impl(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; int reg = (gpio < 32) ? 0 : 1; gpch = gpiochip_get_data(chip); if (value) gpch->data_reg[reg] |= 1 << (gpio & 0x1f); else gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f)); pci_write_config_dword(gpch->sb_pdev, reg ? gpch->reg2_data_base : gpch->reg1_data_base, gpch->data_reg[reg]); } /* set GPIO pin to value */ static void rdc_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; gpch = gpiochip_get_data(chip); spin_lock(&gpch->lock); rdc_gpio_set_value_impl(chip, gpio, value); spin_unlock(&gpch->lock); } static int rdc_gpio_config(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; int err; u32 reg; gpch = gpiochip_get_data(chip); spin_lock(&gpch->lock); err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ? gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg); if (err) goto unlock; reg |= 1 << (gpio & 0x1f); err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ? 
gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg); if (err) goto unlock; rdc_gpio_set_value_impl(chip, gpio, value); unlock: spin_unlock(&gpch->lock); return err; } /* configure GPIO pin as input */ static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { return rdc_gpio_config(chip, gpio, 1); } /* * Cache the initial value of both GPIO data registers */ static int rdc321x_gpio_probe(struct platform_device *pdev) { int err; struct resource *r; struct rdc321x_gpio *rdc321x_gpio_dev; struct rdc321x_gpio_pdata *pdata; pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; } rdc321x_gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct rdc321x_gpio), GFP_KERNEL); if (!rdc321x_gpio_dev) return -ENOMEM; r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); return -ENODEV; } spin_lock_init(&rdc321x_gpio_dev->lock); rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev; rdc321x_gpio_dev->reg1_ctrl_base = r->start; rdc321x_gpio_dev->reg1_data_base = r->start + 0x4; r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); return -ENODEV; } rdc321x_gpio_dev->reg2_ctrl_base = r->start; rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; rdc321x_gpio_dev->chip.owner = THIS_MODULE; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; rdc321x_gpio_dev->chip.base = 0; rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; platform_set_drvdata(pdev, rdc321x_gpio_dev); /* This might not be what others (BIOS, bootloader, etc.) wrote to these registers before, but it's a good guess. Still better than just using 0xffffffff. */ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg1_data_base, &rdc321x_gpio_dev->data_reg[0]); if (err) return err; err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg2_data_base, &rdc321x_gpio_dev->data_reg[1]); if (err) return err; dev_info(&pdev->dev, "registering %d GPIOs\n", rdc321x_gpio_dev->chip.ngpio); return devm_gpiochip_add_data(&pdev->dev, &rdc321x_gpio_dev->chip, rdc321x_gpio_dev); } static struct platform_driver rdc321x_gpio_driver = { .driver.name = "rdc321x-gpio", .probe = rdc321x_gpio_probe, }; module_platform_driver(rdc321x_gpio_driver); MODULE_AUTHOR("Florian Fainelli <[email protected]>"); MODULE_DESCRIPTION("RDC321x GPIO driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rdc321x-gpio");
linux-master
drivers/gpio/gpio-rdc321x.c
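Here the GPIO data register lives in the southbridge's PCI configuration space, so the driver keeps a full shadow copy and rewrites the whole dword under a spinlock on every change. A condensed sketch of that read-modify-write pattern; demo_sb_gpio and demo_sb_gpio_set are hypothetical names.

#include <linux/bits.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

/* Hypothetical southbridge GPIO bank accessed via config space */
struct demo_sb_gpio {
	spinlock_t lock;
	struct pci_dev *sb_pdev;
	int data_base;		/* config-space offset of the data dword */
	u32 shadow;		/* cached copy of the data register */
};

static void demo_sb_gpio_set(struct demo_sb_gpio *g, unsigned int gpio,
			     int value)
{
	spin_lock(&g->lock);

	/* Update the shadow first, then write the whole dword back */
	if (value)
		g->shadow |= BIT(gpio & 0x1f);
	else
		g->shadow &= ~BIT(gpio & 0x1f);
	pci_write_config_dword(g->sb_pdev, g->data_base, g->shadow);

	spin_unlock(&g->lock);
}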
// SPDX-License-Identifier: GPL-2.0-only /* * Intel Tangier GPIO driver * * Copyright (c) 2016, 2021, 2023 Intel Corporation. * * Authors: Andy Shevchenko <[email protected]> * Pandith N <[email protected]> * Raag Jadav <[email protected]> */ #include <linux/bitops.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/math.h> #include <linux/module.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/spinlock.h> #include <linux/string_helpers.h> #include <linux/types.h> #include <linux/gpio/driver.h> #include "gpio-tangier.h" #define GCCR 0x000 /* Controller configuration */ #define GPLR 0x004 /* Pin level r/o */ #define GPDR 0x01c /* Pin direction */ #define GPSR 0x034 /* Pin set w/o */ #define GPCR 0x04c /* Pin clear w/o */ #define GRER 0x064 /* Rising edge detect */ #define GFER 0x07c /* Falling edge detect */ #define GFBR 0x094 /* Glitch filter bypass */ #define GIMR 0x0ac /* Interrupt mask */ #define GISR 0x0c4 /* Interrupt source */ #define GITR 0x300 /* Input type */ #define GLPR 0x318 /* Level input polarity */ /** * struct tng_gpio_context - Context to be saved during suspend-resume * @level: Pin level * @gpdr: Pin direction * @grer: Rising edge detect enable * @gfer: Falling edge detect enable * @gimr: Interrupt mask * @gwmr: Wake mask */ struct tng_gpio_context { u32 level; u32 gpdr; u32 grer; u32 gfer; u32 gimr; u32 gwmr; }; static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned int offset, unsigned int reg) { struct tng_gpio *priv = gpiochip_get_data(chip); u8 reg_offset = offset / 32; return priv->reg_base + reg + reg_offset * 4; } static void __iomem *gpio_reg_and_bit(struct gpio_chip *chip, unsigned int offset, unsigned int reg, u8 *bit) { struct tng_gpio *priv = gpiochip_get_data(chip); u8 reg_offset = offset / 32; u8 shift = offset % 32; *bit = shift; return priv->reg_base + reg + reg_offset * 4; } static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset) { void __iomem *gplr; u8 shift; gplr = gpio_reg_and_bit(chip, offset, GPLR, &shift); return !!(readl(gplr) & BIT(shift)); } static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct tng_gpio *priv = gpiochip_get_data(chip); unsigned long flags; void __iomem *reg; u8 shift; reg = gpio_reg_and_bit(chip, offset, value ? 
GPSR : GPCR, &shift); raw_spin_lock_irqsave(&priv->lock, flags); writel(BIT(shift), reg); raw_spin_unlock_irqrestore(&priv->lock, flags); } static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct tng_gpio *priv = gpiochip_get_data(chip); unsigned long flags; void __iomem *gpdr; u32 value; u8 shift; gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift); raw_spin_lock_irqsave(&priv->lock, flags); value = readl(gpdr); value &= ~BIT(shift); writel(value, gpdr); raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int tng_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct tng_gpio *priv = gpiochip_get_data(chip); unsigned long flags; void __iomem *gpdr; u8 shift; gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift); tng_gpio_set(chip, offset, value); raw_spin_lock_irqsave(&priv->lock, flags); value = readl(gpdr); value |= BIT(shift); writel(value, gpdr); raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int tng_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { void __iomem *gpdr; u8 shift; gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift); if (readl(gpdr) & BIT(shift)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static int tng_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset, unsigned int debounce) { struct tng_gpio *priv = gpiochip_get_data(chip); unsigned long flags; void __iomem *gfbr; u32 value; u8 shift; gfbr = gpio_reg_and_bit(chip, offset, GFBR, &shift); raw_spin_lock_irqsave(&priv->lock, flags); value = readl(gfbr); if (debounce) value &= ~BIT(shift); else value |= BIT(shift); writel(value, gfbr); raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long config) { u32 debounce; switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_DISABLE: case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: return gpiochip_generic_config(chip, offset, config); case PIN_CONFIG_INPUT_DEBOUNCE: debounce = pinconf_to_config_argument(config); return tng_gpio_set_debounce(chip, offset, debounce); default: return -ENOTSUPP; } } static void tng_irq_ack(struct irq_data *d) { struct tng_gpio *priv = irq_data_get_irq_chip_data(d); irq_hw_number_t gpio = irqd_to_hwirq(d); unsigned long flags; void __iomem *gisr; u8 shift; gisr = gpio_reg_and_bit(&priv->chip, gpio, GISR, &shift); raw_spin_lock_irqsave(&priv->lock, flags); writel(BIT(shift), gisr); raw_spin_unlock_irqrestore(&priv->lock, flags); } static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask) { unsigned long flags; void __iomem *gimr; u32 value; u8 shift; gimr = gpio_reg_and_bit(&priv->chip, gpio, GIMR, &shift); raw_spin_lock_irqsave(&priv->lock, flags); value = readl(gimr); if (unmask) value |= BIT(shift); else value &= ~BIT(shift); writel(value, gimr); raw_spin_unlock_irqrestore(&priv->lock, flags); } static void tng_irq_mask(struct irq_data *d) { struct tng_gpio *priv = irq_data_get_irq_chip_data(d); irq_hw_number_t gpio = irqd_to_hwirq(d); tng_irq_unmask_mask(priv, gpio, false); gpiochip_disable_irq(&priv->chip, gpio); } static void tng_irq_unmask(struct irq_data *d) { struct tng_gpio *priv = irq_data_get_irq_chip_data(d); irq_hw_number_t gpio = irqd_to_hwirq(d); gpiochip_enable_irq(&priv->chip, gpio); tng_irq_unmask_mask(priv, gpio, true); } static int tng_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct 
tng_gpio *priv = gpiochip_get_data(gc); irq_hw_number_t gpio = irqd_to_hwirq(d); void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER); void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER); void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR); void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR); u8 shift = gpio % 32; unsigned long flags; u32 value; raw_spin_lock_irqsave(&priv->lock, flags); value = readl(grer); if (type & IRQ_TYPE_EDGE_RISING) value |= BIT(shift); else value &= ~BIT(shift); writel(value, grer); value = readl(gfer); if (type & IRQ_TYPE_EDGE_FALLING) value |= BIT(shift); else value &= ~BIT(shift); writel(value, gfer); /* * To prevent glitches from triggering an unintended level interrupt, * configure GLPR register first and then configure GITR. */ value = readl(glpr); if (type & IRQ_TYPE_LEVEL_LOW) value |= BIT(shift); else value &= ~BIT(shift); writel(value, glpr); if (type & IRQ_TYPE_LEVEL_MASK) { value = readl(gitr); value |= BIT(shift); writel(value, gitr); irq_set_handler_locked(d, handle_level_irq); } else if (type & IRQ_TYPE_EDGE_BOTH) { value = readl(gitr); value &= ~BIT(shift); writel(value, gitr); irq_set_handler_locked(d, handle_edge_irq); } raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int tng_irq_set_wake(struct irq_data *d, unsigned int on) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct tng_gpio *priv = gpiochip_get_data(gc); irq_hw_number_t gpio = irqd_to_hwirq(d); void __iomem *gwmr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwmr); void __iomem *gwsr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwsr); u8 shift = gpio % 32; unsigned long flags; u32 value; raw_spin_lock_irqsave(&priv->lock, flags); /* Clear the existing wake status */ writel(BIT(shift), gwsr); value = readl(gwmr); if (on) value |= BIT(shift); else value &= ~BIT(shift); writel(value, gwmr); raw_spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(priv->dev, "%s wake for gpio %lu\n", str_enable_disable(on), gpio); return 0; } static const struct irq_chip tng_irqchip = { .name = "gpio-tangier", .irq_ack = tng_irq_ack, .irq_mask = tng_irq_mask, .irq_unmask = tng_irq_unmask, .irq_set_type = tng_irq_set_type, .irq_set_wake = tng_irq_set_wake, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void tng_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct tng_gpio *priv = gpiochip_get_data(gc); struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned long base, gpio; chained_irq_enter(irqchip, desc); /* Check GPIO controller to check which pin triggered the interrupt */ for (base = 0; base < priv->chip.ngpio; base += 32) { void __iomem *gisr = gpio_reg(&priv->chip, base, GISR); void __iomem *gimr = gpio_reg(&priv->chip, base, GIMR); unsigned long pending, enabled; pending = readl(gisr); enabled = readl(gimr); /* Only interrupts that are enabled */ pending &= enabled; for_each_set_bit(gpio, &pending, 32) generic_handle_domain_irq(gc->irq.domain, base + gpio); } chained_irq_exit(irqchip, desc); } static int tng_irq_init_hw(struct gpio_chip *chip) { struct tng_gpio *priv = gpiochip_get_data(chip); void __iomem *reg; unsigned int base; for (base = 0; base < priv->chip.ngpio; base += 32) { /* Clear the rising-edge detect register */ reg = gpio_reg(&priv->chip, base, GRER); writel(0, reg); /* Clear the falling-edge detect register */ reg = gpio_reg(&priv->chip, base, GFER); writel(0, reg); } return 0; } static int tng_gpio_add_pin_ranges(struct gpio_chip *chip) { struct tng_gpio *priv = 
gpiochip_get_data(chip); const struct tng_gpio_pinrange *range; unsigned int i; int ret; for (i = 0; i < priv->pin_info.nranges; i++) { range = &priv->pin_info.pin_ranges[i]; ret = gpiochip_add_pin_range(&priv->chip, priv->pin_info.name, range->gpio_base, range->pin_base, range->npins); if (ret) { dev_err(priv->dev, "failed to add GPIO pin range\n"); return ret; } } return 0; } int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio) { const struct tng_gpio_info *info = &gpio->info; size_t nctx = DIV_ROUND_UP(info->ngpio, 32); struct gpio_irq_chip *girq; int ret; gpio->ctx = devm_kcalloc(dev, nctx, sizeof(*gpio->ctx), GFP_KERNEL); if (!gpio->ctx) return -ENOMEM; gpio->chip.label = dev_name(dev); gpio->chip.parent = dev; gpio->chip.request = gpiochip_generic_request; gpio->chip.free = gpiochip_generic_free; gpio->chip.direction_input = tng_gpio_direction_input; gpio->chip.direction_output = tng_gpio_direction_output; gpio->chip.get = tng_gpio_get; gpio->chip.set = tng_gpio_set; gpio->chip.get_direction = tng_gpio_get_direction; gpio->chip.set_config = tng_gpio_set_config; gpio->chip.base = info->base; gpio->chip.ngpio = info->ngpio; gpio->chip.can_sleep = false; gpio->chip.add_pin_ranges = tng_gpio_add_pin_ranges; raw_spin_lock_init(&gpio->lock); girq = &gpio->chip.irq; gpio_irq_chip_set_chip(girq, &tng_irqchip); girq->init_hw = tng_irq_init_hw; girq->parent_handler = tng_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(dev, girq->num_parents, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; girq->parents[0] = gpio->irq; girq->first = info->first; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; ret = devm_gpiochip_add_data(dev, &gpio->chip, gpio); if (ret) return dev_err_probe(dev, ret, "gpiochip_add error\n"); return 0; } EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER); int tng_gpio_suspend(struct device *dev) { struct tng_gpio *priv = dev_get_drvdata(dev); struct tng_gpio_context *ctx = priv->ctx; unsigned long flags; unsigned int base; raw_spin_lock_irqsave(&priv->lock, flags); for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) { /* GPLR is RO, values read will be restored using GPSR */ ctx->level = readl(gpio_reg(&priv->chip, base, GPLR)); ctx->gpdr = readl(gpio_reg(&priv->chip, base, GPDR)); ctx->grer = readl(gpio_reg(&priv->chip, base, GRER)); ctx->gfer = readl(gpio_reg(&priv->chip, base, GFER)); ctx->gimr = readl(gpio_reg(&priv->chip, base, GIMR)); ctx->gwmr = readl(gpio_reg(&priv->chip, base, priv->wake_regs.gwmr)); } raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } EXPORT_SYMBOL_NS_GPL(tng_gpio_suspend, GPIO_TANGIER); int tng_gpio_resume(struct device *dev) { struct tng_gpio *priv = dev_get_drvdata(dev); struct tng_gpio_context *ctx = priv->ctx; unsigned long flags; unsigned int base; raw_spin_lock_irqsave(&priv->lock, flags); for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) { /* GPLR is RO, values read will be restored using GPSR */ writel(ctx->level, gpio_reg(&priv->chip, base, GPSR)); writel(ctx->gpdr, gpio_reg(&priv->chip, base, GPDR)); writel(ctx->grer, gpio_reg(&priv->chip, base, GRER)); writel(ctx->gfer, gpio_reg(&priv->chip, base, GFER)); writel(ctx->gimr, gpio_reg(&priv->chip, base, GIMR)); writel(ctx->gwmr, gpio_reg(&priv->chip, base, priv->wake_regs.gwmr)); } raw_spin_unlock_irqrestore(&priv->lock, flags); return 0; } EXPORT_SYMBOL_NS_GPL(tng_gpio_resume, GPIO_TANGIER); MODULE_AUTHOR("Andy Shevchenko <[email protected]>"); MODULE_AUTHOR("Pandith N <[email 
protected]>"); MODULE_AUTHOR("Raag Jadav <[email protected]>"); MODULE_DESCRIPTION("Intel Tangier GPIO driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-tangier.c
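tng_gpio_suspend()/tng_gpio_resume() above save one context struct per 32-bit bank, reading the level through the read-only GPLR and replaying it through the write-only GPSR. A single-bank sketch of that save/restore pairing; the names are hypothetical, and the offsets (0x004 level, 0x01c direction, 0x034 set) are taken from the GPLR/GPDR/GPSR defines in the driver.

#include <linux/io.h>

/* Hypothetical per-bank suspend context */
struct demo_bank_ctx {
	u32 level;
	u32 dir;
};

static void demo_bank_save(void __iomem *base, struct demo_bank_ctx *ctx)
{
	ctx->level = readl(base + 0x004);	/* GPLR-style, read-only */
	ctx->dir = readl(base + 0x01c);		/* GPDR-style direction */
}

static void demo_bank_restore(void __iomem *base, struct demo_bank_ctx *ctx)
{
	/* The level register cannot be written; replay it via "set" */
	writel(ctx->level, base + 0x034);	/* GPSR-style, write-only */
	writel(ctx->dir, base + 0x01c);
}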
// SPDX-License-Identifier: GPL-2.0+ /* * Generic driver for memory-mapped GPIO controllers. * * Copyright 2008 MontaVista Software, Inc. * Copyright 2008,2010 Anton Vorontsov <[email protected]> * * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`....... * ...`` ```````.. * ..The simplest form of a GPIO controller that the driver supports is`` * `.just a single "data" register, where GPIO state can be read and/or ` * `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.``````` * ````````` ___ _/~~|___/~| . ```~~~~~~ ___/___\___ ,~.`.`.`.`````.~~...,,,,... __________|~$@~~~ %~ /o*o*o*o*o*o\ .. Implementing such a GPIO . o ` ~~~~\___/~~~~ ` controller in FPGA is ,.` `....trivial..'~`.```.``` * ``````` * .```````~~~~`..`.``.``. * . The driver supports `... ,..```.`~~~```````````````....````.``,, * . big-endian notation, just`. .. A bit more sophisticated controllers , * . register the device with -be`. .with a pair of set/clear-bit registers , * `.. suffix. ```~~`````....`.` . affecting the data register and the .` * ``.`.``...``` ```.. output pins are also supported.` * ^^ `````.`````````.,``~``~``~~`````` * . ^^ * ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`.. * .. The expectation is that in at least some cases . ,-~~~-, * .this will be used with roll-your-own ASIC/FPGA .` \ / * .logic in Verilog or VHDL. ~~~`````````..`````~~` \ / * ..````````......``````````` \o_ * | * ^^ / \ * * ...`````~~`.....``.`..........``````.`.``.```........``. * ` 8, 16, 32 and 64 bits registers are supported, and``. * . the number of GPIOs is determined by the width of ~ * .. the registers. ,............```.`.`..`.`.~~~.`.`.`~ * `.......````.``` */ #include <linux/init.h> #include <linux/err.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/compiler.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/log2.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/gpio/driver.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/of_device.h> #include "gpiolib.h" static void bgpio_write8(void __iomem *reg, unsigned long data) { writeb(data, reg); } static unsigned long bgpio_read8(void __iomem *reg) { return readb(reg); } static void bgpio_write16(void __iomem *reg, unsigned long data) { writew(data, reg); } static unsigned long bgpio_read16(void __iomem *reg) { return readw(reg); } static void bgpio_write32(void __iomem *reg, unsigned long data) { writel(data, reg); } static unsigned long bgpio_read32(void __iomem *reg) { return readl(reg); } #if BITS_PER_LONG >= 64 static void bgpio_write64(void __iomem *reg, unsigned long data) { writeq(data, reg); } static unsigned long bgpio_read64(void __iomem *reg) { return readq(reg); } #endif /* BITS_PER_LONG >= 64 */ static void bgpio_write16be(void __iomem *reg, unsigned long data) { iowrite16be(data, reg); } static unsigned long bgpio_read16be(void __iomem *reg) { return ioread16be(reg); } static void bgpio_write32be(void __iomem *reg, unsigned long data) { iowrite32be(data, reg); } static unsigned long bgpio_read32be(void __iomem *reg) { return ioread32be(reg); } static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line) { if (gc->be_bits) return BIT(gc->bgpio_bits - 1 - line); return BIT(line); } static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio) { unsigned long pinmask = bgpio_line2mask(gc, gpio); bool dir = 
!!(gc->bgpio_dir & pinmask); if (dir) return !!(gc->read_reg(gc->reg_set) & pinmask); else return !!(gc->read_reg(gc->reg_dat) & pinmask); } /* * This assumes that the bits in the GPIO register are in native endianness. * We only assign the function pointer if we have that. */ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { unsigned long get_mask = 0; unsigned long set_mask = 0; /* Make sure we first clear any bits that are zero when we read the register */ *bits &= ~*mask; set_mask = *mask & gc->bgpio_dir; get_mask = *mask & ~gc->bgpio_dir; if (set_mask) *bits |= gc->read_reg(gc->reg_set) & set_mask; if (get_mask) *bits |= gc->read_reg(gc->reg_dat) & get_mask; return 0; } static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) { return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio)); } /* * This only works if the bits in the GPIO register are in native endianness. */ static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { /* Make sure we first clear any bits that are zero when we read the register */ *bits &= ~*mask; *bits |= gc->read_reg(gc->reg_dat) & *mask; return 0; } /* * With big endian mirrored bit order it becomes more tedious. */ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { unsigned long readmask = 0; unsigned long val; int bit; /* Make sure we first clear any bits that are zero when we read the register */ *bits &= ~*mask; /* Create a mirrored mask */ for_each_set_bit(bit, mask, gc->ngpio) readmask |= bgpio_line2mask(gc, bit); /* Read the register */ val = gc->read_reg(gc->reg_dat) & readmask; /* * Mirror the result into the "bits" result, this will give line 0 * in bit 0 ... line 31 in bit 31 for a 32bit register. 
*/ for_each_set_bit(bit, &val, gc->ngpio) *bits |= bgpio_line2mask(gc, bit); return 0; } static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val) { } static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long mask = bgpio_line2mask(gc, gpio); unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); if (val) gc->bgpio_data |= mask; else gc->bgpio_data &= ~mask; gc->write_reg(gc->reg_dat, gc->bgpio_data); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long mask = bgpio_line2mask(gc, gpio); if (val) gc->write_reg(gc->reg_set, mask); else gc->write_reg(gc->reg_clr, mask); } static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long mask = bgpio_line2mask(gc, gpio); unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); if (val) gc->bgpio_data |= mask; else gc->bgpio_data &= ~mask; gc->write_reg(gc->reg_set, gc->bgpio_data); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void bgpio_multiple_get_masks(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits, unsigned long *set_mask, unsigned long *clear_mask) { int i; *set_mask = 0; *clear_mask = 0; for_each_set_bit(i, mask, gc->bgpio_bits) { if (test_bit(i, bits)) *set_mask |= bgpio_line2mask(gc, i); else *clear_mask |= bgpio_line2mask(gc, i); } } static void bgpio_set_multiple_single_reg(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits, void __iomem *reg) { unsigned long flags; unsigned long set_mask, clear_mask; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask); gc->bgpio_data |= set_mask; gc->bgpio_data &= ~clear_mask; gc->write_reg(reg, gc->bgpio_data); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat); } static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set); } static void bgpio_set_multiple_with_clear(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { unsigned long set_mask, clear_mask; bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask); if (set_mask) gc->write_reg(gc->reg_set, set_mask); if (clear_mask) gc->write_reg(gc->reg_clr, clear_mask); } static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio) { return 0; } static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio, int val) { return -EINVAL; } static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { gc->set(gc, gpio, val); return 0; } static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio); if (gc->reg_dir_in) gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir); if (gc->reg_dir_out) gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); return 0; } static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio) { /* Return 0 if output, 1 if input */ if (gc->bgpio_dir_unreadable) { if (gc->bgpio_dir & bgpio_line2mask(gc, gpio)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } if (gc->reg_dir_out) { if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio)) return 
GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } if (gc->reg_dir_in) if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio))) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); gc->bgpio_dir |= bgpio_line2mask(gc, gpio); if (gc->reg_dir_in) gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir); if (gc->reg_dir_out) gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio, int val) { bgpio_dir_out(gc, gpio, val); gc->set(gc, gpio, val); return 0; } static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio, int val) { gc->set(gc, gpio, val); bgpio_dir_out(gc, gpio, val); return 0; } static int bgpio_setup_accessors(struct device *dev, struct gpio_chip *gc, bool byte_be) { switch (gc->bgpio_bits) { case 8: gc->read_reg = bgpio_read8; gc->write_reg = bgpio_write8; break; case 16: if (byte_be) { gc->read_reg = bgpio_read16be; gc->write_reg = bgpio_write16be; } else { gc->read_reg = bgpio_read16; gc->write_reg = bgpio_write16; } break; case 32: if (byte_be) { gc->read_reg = bgpio_read32be; gc->write_reg = bgpio_write32be; } else { gc->read_reg = bgpio_read32; gc->write_reg = bgpio_write32; } break; #if BITS_PER_LONG >= 64 case 64: if (byte_be) { dev_err(dev, "64 bit big endian byte order unsupported\n"); return -EINVAL; } else { gc->read_reg = bgpio_read64; gc->write_reg = bgpio_write64; } break; #endif /* BITS_PER_LONG >= 64 */ default: dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits); return -EINVAL; } return 0; } /* * Create the device and allocate the resources. For setting GPIOs there are * three supported configurations: * * - single input/output register resource (named "dat"). * - set/clear pair (named "set" and "clr"). * - single output register resource and single input resource ("set" and * "dat"). * * For the single output register, this drives a 1 by setting a bit and a zero * by clearing a bit. For the set/clr pair, this drives a 1 by setting a bit * in the set register and clears it by setting a bit in the clear register. * The configuration is detected by which resources are present. * * For setting the GPIO direction, there are three supported configurations: * * - simple bidirectional GPIO that requires no configuration. * - an output direction register (named "dirout") where a 1 bit * indicates the GPIO is an output. * - an input direction register (named "dirin") where a 1 bit indicates * the GPIO is an input.
*/ static int bgpio_setup_io(struct gpio_chip *gc, void __iomem *dat, void __iomem *set, void __iomem *clr, unsigned long flags) { gc->reg_dat = dat; if (!gc->reg_dat) return -EINVAL; if (set && clr) { gc->reg_set = set; gc->reg_clr = clr; gc->set = bgpio_set_with_clear; gc->set_multiple = bgpio_set_multiple_with_clear; } else if (set && !clr) { gc->reg_set = set; gc->set = bgpio_set_set; gc->set_multiple = bgpio_set_multiple_set; } else if (flags & BGPIOF_NO_OUTPUT) { gc->set = bgpio_set_none; gc->set_multiple = NULL; } else { gc->set = bgpio_set; gc->set_multiple = bgpio_set_multiple; } if (!(flags & BGPIOF_UNREADABLE_REG_SET) && (flags & BGPIOF_READ_OUTPUT_REG_SET)) { gc->get = bgpio_get_set; if (!gc->be_bits) gc->get_multiple = bgpio_get_set_multiple; /* * We deliberately avoid assigning the ->get_multiple() call * for big endian mirrored registers which are ALSO reflecting * their value in the set register when used as output. It is * simply too much complexity, let the GPIO core fall back to * reading each line individually in that fringe case. */ } else { gc->get = bgpio_get; if (gc->be_bits) gc->get_multiple = bgpio_get_multiple_be; else gc->get_multiple = bgpio_get_multiple; } return 0; } static int bgpio_setup_direction(struct gpio_chip *gc, void __iomem *dirout, void __iomem *dirin, unsigned long flags) { if (dirout || dirin) { gc->reg_dir_out = dirout; gc->reg_dir_in = dirin; if (flags & BGPIOF_NO_SET_ON_INPUT) gc->direction_output = bgpio_dir_out_dir_first; else gc->direction_output = bgpio_dir_out_val_first; gc->direction_input = bgpio_dir_in; gc->get_direction = bgpio_get_dir; } else { if (flags & BGPIOF_NO_OUTPUT) gc->direction_output = bgpio_dir_out_err; else gc->direction_output = bgpio_simple_dir_out; gc->direction_input = bgpio_simple_dir_in; } return 0; } static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin) { if (gpio_pin < chip->ngpio) return 0; return -EINVAL; } /** * bgpio_init() - Initialize generic GPIO accessor functions * @gc: the GPIO chip to set up * @dev: the parent device of the new GPIO chip (compulsory) * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4 * @dat: MMIO address for the register to READ the value of the GPIO lines, it * is expected that a 1 in the corresponding bit in this register means the * line is asserted * @set: MMIO address for the register to SET the value of the GPIO lines, it is * expected that we write the line with 1 in this register to drive the GPIO line * high. * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, it is * expected that we write the line with 1 in this register to drive the GPIO line * low. It is allowed to leave this address as NULL, in that case the SET register * will be assumed to also clear the GPIO lines, by actively writing the line * with 0. * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed * that setting a line to 1 in this register will turn that line into an * output line. Conversely, setting the line to 0 will turn that line into * an input. * @dirin: MMIO address for the register to set this line as INPUT. It is assumed * that setting a line to 1 in this register will turn that line into an * input line. Conversely, setting the line to 0 will turn that line into * an output. * @flags: Different flags that will affect the behaviour of the device, such as * endianness etc. 
*/ int bgpio_init(struct gpio_chip *gc, struct device *dev, unsigned long sz, void __iomem *dat, void __iomem *set, void __iomem *clr, void __iomem *dirout, void __iomem *dirin, unsigned long flags) { int ret; if (!is_power_of_2(sz)) return -EINVAL; gc->bgpio_bits = sz * 8; if (gc->bgpio_bits > BITS_PER_LONG) return -EINVAL; raw_spin_lock_init(&gc->bgpio_lock); gc->parent = dev; gc->label = dev_name(dev); gc->base = -1; gc->request = bgpio_request; gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN); ret = gpiochip_get_ngpios(gc, dev); if (ret) gc->ngpio = gc->bgpio_bits; else gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8)); ret = bgpio_setup_io(gc, dat, set, clr, flags); if (ret) return ret; ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER); if (ret) return ret; ret = bgpio_setup_direction(gc, dirout, dirin, flags); if (ret) return ret; gc->bgpio_data = gc->read_reg(gc->reg_dat); if (gc->set == bgpio_set_set && !(flags & BGPIOF_UNREADABLE_REG_SET)) gc->bgpio_data = gc->read_reg(gc->reg_set); if (flags & BGPIOF_UNREADABLE_REG_DIR) gc->bgpio_dir_unreadable = true; /* * Inspect hardware to find initial direction setting. */ if ((gc->reg_dir_out || gc->reg_dir_in) && !(flags & BGPIOF_UNREADABLE_REG_DIR)) { if (gc->reg_dir_out) gc->bgpio_dir = gc->read_reg(gc->reg_dir_out); else if (gc->reg_dir_in) gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in); /* * If we have two direction registers, synchronise * input setting to output setting, the library * can not handle a line being input and output at * the same time. */ if (gc->reg_dir_out && gc->reg_dir_in) gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir); } return ret; } EXPORT_SYMBOL_GPL(bgpio_init); #if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM) static void __iomem *bgpio_map(struct platform_device *pdev, const char *name, resource_size_t sane_sz) { struct resource *r; resource_size_t sz; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); if (!r) return NULL; sz = resource_size(r); if (sz != sane_sz) return IOMEM_ERR_PTR(-EINVAL); return devm_ioremap_resource(&pdev->dev, r); } #ifdef CONFIG_OF static const struct of_device_id bgpio_of_match[] = { { .compatible = "brcm,bcm6345-gpio" }, { .compatible = "wd,mbl-gpio" }, { .compatible = "ni,169445-nand-gpio" }, { } }; MODULE_DEVICE_TABLE(of, bgpio_of_match); static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev, unsigned long *flags) { struct bgpio_pdata *pdata; if (!of_match_device(bgpio_of_match, &pdev->dev)) return NULL; pdata = devm_kzalloc(&pdev->dev, sizeof(struct bgpio_pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = -1; if (of_device_is_big_endian(pdev->dev.of_node)) *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER; if (of_property_read_bool(pdev->dev.of_node, "no-output")) *flags |= BGPIOF_NO_OUTPUT; return pdata; } #else static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev, unsigned long *flags) { return NULL; } #endif /* CONFIG_OF */ static int bgpio_pdev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *r; void __iomem *dat; void __iomem *set; void __iomem *clr; void __iomem *dirout; void __iomem *dirin; unsigned long sz; unsigned long flags = 0; int err; struct gpio_chip *gc; struct bgpio_pdata *pdata; pdata = bgpio_parse_dt(pdev, &flags); if (IS_ERR(pdata)) return PTR_ERR(pdata); if (!pdata) { pdata = dev_get_platdata(dev); flags = pdev->id_entry->driver_data; } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); if (!r) return -EINVAL; sz = resource_size(r); 
dat = bgpio_map(pdev, "dat", sz); if (IS_ERR(dat)) return PTR_ERR(dat); set = bgpio_map(pdev, "set", sz); if (IS_ERR(set)) return PTR_ERR(set); clr = bgpio_map(pdev, "clr", sz); if (IS_ERR(clr)) return PTR_ERR(clr); dirout = bgpio_map(pdev, "dirout", sz); if (IS_ERR(dirout)) return PTR_ERR(dirout); dirin = bgpio_map(pdev, "dirin", sz); if (IS_ERR(dirin)) return PTR_ERR(dirin); gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL); if (!gc) return -ENOMEM; err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags); if (err) return err; if (pdata) { if (pdata->label) gc->label = pdata->label; gc->base = pdata->base; if (pdata->ngpio > 0) gc->ngpio = pdata->ngpio; } platform_set_drvdata(pdev, gc); return devm_gpiochip_add_data(&pdev->dev, gc, NULL); } static const struct platform_device_id bgpio_id_table[] = { { .name = "basic-mmio-gpio", .driver_data = 0, }, { .name = "basic-mmio-gpio-be", .driver_data = BGPIOF_BIG_ENDIAN, }, { } }; MODULE_DEVICE_TABLE(platform, bgpio_id_table); static struct platform_driver bgpio_driver = { .driver = { .name = "basic-mmio-gpio", .of_match_table = of_match_ptr(bgpio_of_match), }, .id_table = bgpio_id_table, .probe = bgpio_pdev_probe, }; module_platform_driver(bgpio_driver); #endif /* CONFIG_GPIO_GENERIC_PLATFORM */ MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers"); MODULE_AUTHOR("Anton Vorontsov <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/gpio/gpio-mmio.c
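/*
 * A minimal sketch (not part of gpio-mmio.c) of how a memory-mapped GPIO
 * driver might consume bgpio_init() directly, assuming a hypothetical
 * controller with one 32-bit data register at offset 0x0 and a
 * "dirout"-style direction register at offset 0x4. All names and register
 * offsets below are illustrative, not taken from real hardware.
 */
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>

#define EXAMPLE_REG_DAT		0x0	/* read/write line values */
#define EXAMPLE_REG_DIROUT	0x4	/* 1 = line is an output */

static int example_mmio_gpio_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;
	void __iomem *base;
	int ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	/* 4-byte registers, single "dat" register, "dirout" direction */
	ret = bgpio_init(gc, &pdev->dev, 4,
			 base + EXAMPLE_REG_DAT,	/* dat */
			 NULL, NULL,			/* no set/clr pair */
			 base + EXAMPLE_REG_DIROUT,	/* dirout */
			 NULL,				/* no dirin */
			 0);
	if (ret)
		return ret;

	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}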
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for PCA9570 I2C GPO expander
 *
 * Copyright (C) 2020 Sungbo Eo <[email protected]>
 *
 * Based on gpio-tpic2810.c
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <[email protected]>
 */

#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>

#define SLG7XL45106_GPO_REG	0xDB

/**
 * struct pca9570_chip_data - GPIO platform data
 * @ngpio: number of GPIOs
 * @command: command (register address) to be sent, 0 for plain byte access
 */
struct pca9570_chip_data {
	u16 ngpio;
	u32 command;
};

/**
 * struct pca9570 - GPIO driver data
 * @chip: GPIO controller chip
 * @chip_data: GPIO controller platform data
 * @lock: Protects write sequences
 * @out: Buffer for device register
 */
struct pca9570 {
	struct gpio_chip chip;
	const struct pca9570_chip_data *chip_data;
	struct mutex lock;
	u8 out;
};

static int pca9570_read(struct pca9570 *gpio, u8 *value)
{
	struct i2c_client *client = to_i2c_client(gpio->chip.parent);
	int ret;

	if (gpio->chip_data->command != 0)
		ret = i2c_smbus_read_byte_data(client, gpio->chip_data->command);
	else
		ret = i2c_smbus_read_byte(client);

	if (ret < 0)
		return ret;

	*value = ret;
	return 0;
}

static int pca9570_write(struct pca9570 *gpio, u8 value)
{
	struct i2c_client *client = to_i2c_client(gpio->chip.parent);

	if (gpio->chip_data->command != 0)
		return i2c_smbus_write_byte_data(client, gpio->chip_data->command, value);

	return i2c_smbus_write_byte(client, value);
}

static int pca9570_get_direction(struct gpio_chip *chip,
				 unsigned offset)
{
	/* This device's lines are always outputs */
	return GPIO_LINE_DIRECTION_OUT;
}

static int pca9570_get(struct gpio_chip *chip, unsigned offset)
{
	struct pca9570 *gpio = gpiochip_get_data(chip);
	u8 buffer;
	int ret;

	ret = pca9570_read(gpio, &buffer);
	if (ret)
		return ret;

	return !!(buffer & BIT(offset));
}

static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct pca9570 *gpio = gpiochip_get_data(chip);
	u8 buffer;
	int ret;

	mutex_lock(&gpio->lock);

	buffer = gpio->out;
	if (value)
		buffer |= BIT(offset);
	else
		buffer &= ~BIT(offset);

	ret = pca9570_write(gpio, buffer);
	if (ret)
		goto out;

	gpio->out = buffer;

out:
	mutex_unlock(&gpio->lock);
}

static int pca9570_probe(struct i2c_client *client)
{
	struct pca9570 *gpio;

	gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;

	gpio->chip.label = client->name;
	gpio->chip.parent = &client->dev;
	gpio->chip.owner = THIS_MODULE;
	gpio->chip.get_direction = pca9570_get_direction;
	gpio->chip.get = pca9570_get;
	gpio->chip.set = pca9570_set;
	gpio->chip.base = -1;
	gpio->chip_data = device_get_match_data(&client->dev);
	gpio->chip.ngpio = gpio->chip_data->ngpio;
	gpio->chip.can_sleep = true;

	mutex_init(&gpio->lock);

	/*
	 * Read the current output level (best effort: on failure gpio->out
	 * simply keeps its zeroed default).
	 */
	pca9570_read(gpio, &gpio->out);

	i2c_set_clientdata(client, gpio);

	return devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
}

static const struct pca9570_chip_data pca9570_gpio = {
	.ngpio = 4,
};

static const struct pca9570_chip_data pca9571_gpio = {
	.ngpio = 8,
};

static const struct pca9570_chip_data slg7xl45106_gpio = {
	.ngpio = 8,
	.command = SLG7XL45106_GPO_REG,
};

static const struct i2c_device_id pca9570_id_table[] = {
	{ "pca9570", (kernel_ulong_t)&pca9570_gpio },
	{ "pca9571", (kernel_ulong_t)&pca9571_gpio },
	{ "slg7xl45106", (kernel_ulong_t)&slg7xl45106_gpio },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, pca9570_id_table);

static const struct of_device_id pca9570_of_match_table[] = {
	{ .compatible = "dlg,slg7xl45106", .data = &slg7xl45106_gpio },
	{ .compatible = "nxp,pca9570", .data = &pca9570_gpio },
	{ .compatible = "nxp,pca9571", .data = &pca9571_gpio },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9570_of_match_table);

static struct i2c_driver pca9570_driver = {
	.driver = {
		.name = "pca9570",
		.of_match_table = pca9570_of_match_table,
	},
	.probe = pca9570_probe,
	.id_table = pca9570_id_table,
};
module_i2c_driver(pca9570_driver);

MODULE_AUTHOR("Sungbo Eo <[email protected]>");
MODULE_DESCRIPTION("GPIO expander driver for PCA9570");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpio/gpio-pca9570.c
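/*
 * A hedged consumer-side sketch (not part of gpio-pca9570.c): driving one of
 * the expander's outputs through the standard gpiod API. The function name
 * and the "reset-gpios" firmware mapping are hypothetical. Because the chip
 * sets can_sleep (I2C transfers may sleep), the *_cansleep accessors must be
 * used.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_pulse_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Hypothetical "reset-gpios" property routed to a PCA9570 line */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset, 0);	/* release reset */

	return 0;
}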
// SPDX-License-Identifier: GPL-2.0-only
/* Abilis Systems TB10x GPIO driver
 *
 * Copyright (C) Abilis Systems 2013
 *
 * Authors: Sascha Leuenberger <[email protected]>
 *          Christian Ruppert <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>

#define TB10X_GPIO_DIR_IN	(0x00000000)
#define TB10X_GPIO_DIR_OUT	(0x00000001)
#define OFFSET_TO_REG_DDR	(0x00)
#define OFFSET_TO_REG_DATA	(0x04)
#define OFFSET_TO_REG_INT_EN	(0x08)
#define OFFSET_TO_REG_CHANGE	(0x0C)
#define OFFSET_TO_REG_WRMASK	(0x10)
#define OFFSET_TO_REG_INT_TYPE	(0x14)

/**
 * struct tb10x_gpio - state container for the TB10x GPIO controller
 * @base: register base address
 * @domain: IRQ domain of GPIO generated interrupts managed by this controller
 * @irq: Interrupt line of parent interrupt controller
 * @gc: gpio_chip structure associated to this GPIO controller
 */
struct tb10x_gpio {
	void __iomem *base;
	struct irq_domain *domain;
	int irq;
	struct gpio_chip gc;
};

static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
{
	return ioread32(gpio->base + offs);
}

static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
				   u32 val)
{
	iowrite32(val, gpio->base + offs);
}

static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
				  u32 mask, u32 val)
{
	u32 r;
	unsigned long flags;

	raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);

	r = tb10x_reg_read(gpio, offs);
	r = (r & ~mask) | (val & mask);

	tb10x_reg_write(gpio, offs, r);

	raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
}

static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);

	return irq_create_mapping(tb10x_gpio->domain, offset);
}

static int tb10x_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
	if ((type & IRQF_TRIGGER_MASK) != IRQ_TYPE_EDGE_BOTH) {
		pr_err("Only (both) edge triggered interrupts supported.\n");
		return -EINVAL;
	}

	irqd_set_trigger_type(data, type);

	return IRQ_SET_MASK_OK;
}

static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
{
	struct tb10x_gpio *tb10x_gpio = data;
	u32 r = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_CHANGE);
	u32 m = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_INT_EN);
	const unsigned long bits = r & m;
	int i;

	for_each_set_bit(i, &bits, 32)
		generic_handle_domain_irq(tb10x_gpio->domain, i);

	return IRQ_HANDLED;
}

static int tb10x_gpio_probe(struct platform_device *pdev)
{
	struct tb10x_gpio *tb10x_gpio;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret = -EBUSY;
	u32 ngpio;

	if (!np)
		return -EINVAL;

	if (of_property_read_u32(np, "abilis,ngpio", &ngpio))
		return -EINVAL;

	tb10x_gpio = devm_kzalloc(dev, sizeof(*tb10x_gpio), GFP_KERNEL);
	if (tb10x_gpio == NULL)
		return -ENOMEM;

	tb10x_gpio->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tb10x_gpio->base))
		return PTR_ERR(tb10x_gpio->base);

	tb10x_gpio->gc.label =
		devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
	if (!tb10x_gpio->gc.label)
		return -ENOMEM;

	/*
	 * Initialize generic GPIO with one single register for reading and
	 * setting the lines, no special set or clear registers and a data
	 * direction register where 1 means "output".
	 */
	ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
			 tb10x_gpio->base + OFFSET_TO_REG_DATA,
			 NULL,
			 NULL,
			 tb10x_gpio->base + OFFSET_TO_REG_DDR,
			 NULL,
			 0);
	if (ret) {
		dev_err(dev, "unable to init generic GPIO\n");
		return ret;
	}
	tb10x_gpio->gc.base = -1;
	tb10x_gpio->gc.parent = dev;
	tb10x_gpio->gc.owner = THIS_MODULE;
	/*
	 * ngpio is set by bgpio_init() but we override it; the .request()
	 * callback assigned here also overrides the one set up by generic
	 * GPIO.
	 */
	tb10x_gpio->gc.ngpio = ngpio;
	tb10x_gpio->gc.request = gpiochip_generic_request;
	tb10x_gpio->gc.free = gpiochip_generic_free;

	ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
	if (ret < 0) {
		dev_err(dev, "Could not add gpiochip.\n");
		return ret;
	}

	platform_set_drvdata(pdev, tb10x_gpio);

	if (of_property_read_bool(np, "interrupt-controller")) {
		struct irq_chip_generic *gc;

		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;

		tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
		tb10x_gpio->irq = ret;

		ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
				IRQF_TRIGGER_NONE | IRQF_SHARED,
				dev_name(dev), tb10x_gpio);
		if (ret != 0)
			return ret;

		tb10x_gpio->domain = irq_domain_add_linear(np,
						tb10x_gpio->gc.ngpio,
						&irq_generic_chip_ops, NULL);
		if (!tb10x_gpio->domain)
			return -ENOMEM;

		ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
				tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
				handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
				IRQ_GC_INIT_MASK_CACHE);
		if (ret)
			goto err_remove_domain;

		gc = tb10x_gpio->domain->gc->gc[0];
		gc->reg_base                         = tb10x_gpio->base;
		gc->chip_types[0].type               = IRQ_TYPE_EDGE_BOTH;
		gc->chip_types[0].chip.irq_ack       = irq_gc_ack_set_bit;
		gc->chip_types[0].chip.irq_mask      = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask    = irq_gc_mask_set_bit;
		gc->chip_types[0].chip.irq_set_type  = tb10x_gpio_irq_set_type;
		gc->chip_types[0].regs.ack           = OFFSET_TO_REG_CHANGE;
		gc->chip_types[0].regs.mask          = OFFSET_TO_REG_INT_EN;
	}

	return 0;

err_remove_domain:
	irq_domain_remove(tb10x_gpio->domain);
	return ret;
}

static int tb10x_gpio_remove(struct platform_device *pdev)
{
	struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);

	if (tb10x_gpio->gc.to_irq) {
		irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
					BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
		kfree(tb10x_gpio->domain->gc);
		irq_domain_remove(tb10x_gpio->domain);
	}

	return 0;
}

static const struct of_device_id tb10x_gpio_dt_ids[] = {
	{ .compatible = "abilis,tb10x-gpio" },
	{ }
};
MODULE_DEVICE_TABLE(of, tb10x_gpio_dt_ids);

static struct platform_driver tb10x_gpio_driver = {
	.probe		= tb10x_gpio_probe,
	.remove		= tb10x_gpio_remove,
	.driver = {
		.name	= "tb10x-gpio",
		.of_match_table = tb10x_gpio_dt_ids,
	}
};

module_platform_driver(tb10x_gpio_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TB10x GPIO driver");
linux-master
drivers/gpio/gpio-tb10x.c
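/*
 * A hedged consumer-side sketch (not part of gpio-tb10x.c): requesting an
 * interrupt on one of these GPIO lines. tb10x_gpio_irq_set_type() above
 * rejects anything but both-edge triggers, so the request must ask for
 * rising and falling edges together. All names here are illustrative.
 */
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t example_line_isr(int irq, void *data)
{
	/* Runs on every edge of the monitored line */
	return IRQ_HANDLED;
}

static int example_hook_line(struct device *dev, struct gpio_desc *line)
{
	int irq = gpiod_to_irq(line);	/* maps via tb10x_gpio_to_irq() */

	if (irq < 0)
		return irq;

	/* IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING == IRQ_TYPE_EDGE_BOTH */
	return devm_request_irq(dev, irq, example_line_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"example-line", NULL);
}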
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Wolfram Sang, Pengutronix
 *
 * Check max730x.c for further details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/spi/max7301.h>
#include <linux/slab.h>

static int max7300_i2c_write(struct device *dev, unsigned int reg,
			     unsigned int val)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_write_byte_data(client, reg, val);
}

static int max7300_i2c_read(struct device *dev, unsigned int reg)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_read_byte_data(client, reg);
}

static int max7300_probe(struct i2c_client *client)
{
	struct max7301 *ts;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	ts = devm_kzalloc(&client->dev, sizeof(struct max7301), GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->read = max7300_i2c_read;
	ts->write = max7300_i2c_write;
	ts->dev = &client->dev;

	return __max730x_probe(ts);
}

static void max7300_remove(struct i2c_client *client)
{
	__max730x_remove(&client->dev);
}

static const struct i2c_device_id max7300_id[] = {
	{ "max7300", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max7300_id);

static struct i2c_driver max7300_driver = {
	.driver = {
		.name = "max7300",
	},
	.probe = max7300_probe,
	.remove = max7300_remove,
	.id_table = max7300_id,
};

static int __init max7300_init(void)
{
	return i2c_add_driver(&max7300_driver);
}
subsys_initcall(max7300_init);

static void __exit max7300_exit(void)
{
	i2c_del_driver(&max7300_driver);
}
module_exit(max7300_exit);

MODULE_AUTHOR("Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX7300 GPIO-Expander");
linux-master
drivers/gpio/gpio-max7300.c
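/*
 * A hedged sketch (not from the kernel tree): the max730x core is
 * bus-agnostic and only ever reaches the chip through the ->read/->write
 * hooks wired up in probe, as gpio-max7300.c does for I2C above. A
 * hypothetical alternative transport would only have to provide the same
 * two single-register accessors; the bodies below are placeholders, not a
 * real bus implementation.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/spi/max7301.h>

static int example_bus_write(struct device *dev, unsigned int reg,
			     unsigned int val)
{
	/* Placeholder: issue one register write on the bus backing @dev */
	return -ENOSYS;
}

static int example_bus_read(struct device *dev, unsigned int reg)
{
	/* Placeholder: return the register value or a negative errno */
	return -ENOSYS;
}

static int example_bind(struct device *dev, struct max7301 *ts)
{
	/* Same wiring pattern as max7300_probe(), transport swapped out */
	ts->read = example_bus_read;
	ts->write = example_bus_write;
	ts->dev = dev;

	return __max730x_probe(ts);
}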