// SPDX-License-Identifier: GPL-2.0-only
/*
* timberdale.c - Timberdale FPGA MFD driver
* Copyright (c) 2009 Intel Corporation
*/
/* Supports:
* Timberdale FPGA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/timb_gpio.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-ocores.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/spi/max7301.h>
#include <linux/spi/mc33880.h>
#include <linux/platform_data/tsc2007.h>
#include <linux/platform_data/media/timb_radio.h>
#include <linux/platform_data/media/timb_video.h>
#include <linux/timb_dma.h>
#include <linux/ks8842.h>
#include "timberdale.h"
#define DRIVER_NAME "timberdale"
struct timberdale_device {
resource_size_t ctl_mapbase;
unsigned char __iomem *ctl_membase;
struct {
u32 major;
u32 minor;
u32 config;
} fw;
};
/*--------------------------------------------------------------------------*/
static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
.model = 2003,
.x_plate_ohms = 100
};
static struct i2c_board_info timberdale_i2c_board_info[] = {
{
I2C_BOARD_INFO("tsc2007", 0x48),
.platform_data = &timberdale_tsc2007_platform_data,
.irq = IRQ_TIMBERDALE_TSC_INT
},
};
static struct xiic_i2c_platform_data
timberdale_xiic_platform_data = {
.devices = timberdale_i2c_board_info,
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
static struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
.reg_shift = 2,
.clock_khz = 62500,
.devices = timberdale_i2c_board_info,
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
static const struct resource timberdale_xiic_resources[] = {
{
.start = XIICOFFSET,
.end = XIICEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_I2C,
.end = IRQ_TIMBERDALE_I2C,
.flags = IORESOURCE_IRQ,
},
};
static const struct resource timberdale_ocores_resources[] = {
{
.start = OCORESOFFSET,
.end = OCORESEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_I2C,
.end = IRQ_TIMBERDALE_I2C,
.flags = IORESOURCE_IRQ,
},
};
static const struct max7301_platform_data timberdale_max7301_platform_data = {
.base = 200
};
static const struct mc33880_platform_data timberdale_mc33880_platform_data = {
.base = 100
};
static struct spi_board_info timberdale_spi_16bit_board_info[] = {
{
.modalias = "max7301",
.max_speed_hz = 26000,
.chip_select = 2,
.mode = SPI_MODE_0,
.platform_data = &timberdale_max7301_platform_data
},
};
static struct spi_board_info timberdale_spi_8bit_board_info[] = {
{
.modalias = "mc33880",
.max_speed_hz = 4000,
.chip_select = 1,
.mode = SPI_MODE_1,
.platform_data = &timberdale_mc33880_platform_data
},
};
static struct xspi_platform_data timberdale_xspi_platform_data = {
.num_chipselect = 3,
/* bits_per_word and devices are filled in at runtime,
 * depending on the HW config (see timb_probe())
 */
};
static const struct resource timberdale_spi_resources[] = {
{
.start = SPIOFFSET,
.end = SPIEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_SPI,
.end = IRQ_TIMBERDALE_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct ks8842_platform_data
timberdale_ks8842_platform_data = {
.rx_dma_channel = DMA_ETH_RX,
.tx_dma_channel = DMA_ETH_TX
};
static const struct resource timberdale_eth_resources[] = {
{
.start = ETHOFFSET,
.end = ETHEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_ETHSW_IF,
.end = IRQ_TIMBERDALE_ETHSW_IF,
.flags = IORESOURCE_IRQ,
},
};
static struct timbgpio_platform_data
timberdale_gpio_platform_data = {
.gpio_base = 0,
.nr_pins = GPIO_NR_PINS,
.irq_base = 200,
};
static const struct resource timberdale_gpio_resources[] = {
{
.start = GPIOOFFSET,
.end = GPIOEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_GPIO,
.end = IRQ_TIMBERDALE_GPIO,
.flags = IORESOURCE_IRQ,
},
};
static const struct resource timberdale_mlogicore_resources[] = {
{
.start = MLCOREOFFSET,
.end = MLCOREEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_MLCORE,
.end = IRQ_TIMBERDALE_MLCORE,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_TIMBERDALE_MLCORE_BUF,
.end = IRQ_TIMBERDALE_MLCORE_BUF,
.flags = IORESOURCE_IRQ,
},
};
static const struct resource timberdale_uart_resources[] = {
{
.start = UARTOFFSET,
.end = UARTEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_UART,
.end = IRQ_TIMBERDALE_UART,
.flags = IORESOURCE_IRQ,
},
};
static const struct resource timberdale_uartlite_resources[] = {
{
.start = UARTLITEOFFSET,
.end = UARTLITEEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_UARTLITE,
.end = IRQ_TIMBERDALE_UARTLITE,
.flags = IORESOURCE_IRQ,
},
};
static struct i2c_board_info timberdale_adv7180_i2c_board_info = {
/* Requires jumper JP9 to be off */
I2C_BOARD_INFO("adv7180", 0x42 >> 1),
.irq = IRQ_TIMBERDALE_ADV7180
};
static struct timb_video_platform_data
timberdale_video_platform_data = {
.dma_channel = DMA_VIDEO_RX,
.i2c_adapter = 0,
.encoder = {
.info = &timberdale_adv7180_i2c_board_info
}
};
static const struct resource
timberdale_radio_resources[] = {
{
.start = RDSOFFSET,
.end = RDSEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_RDS,
.end = IRQ_TIMBERDALE_RDS,
.flags = IORESOURCE_IRQ,
},
};
static struct i2c_board_info timberdale_tef6868_i2c_board_info = {
I2C_BOARD_INFO("tef6862", 0x60)
};
static struct i2c_board_info timberdale_saa7706_i2c_board_info = {
I2C_BOARD_INFO("saa7706h", 0x1C)
};
static struct timb_radio_platform_data
timberdale_radio_platform_data = {
.i2c_adapter = 0,
.tuner = &timberdale_tef6868_i2c_board_info,
.dsp = &timberdale_saa7706_i2c_board_info
};
static const struct resource timberdale_video_resources[] = {
{
.start = LOGIWOFFSET,
.end = LOGIWEND,
.flags = IORESOURCE_MEM,
},
/*
 * Note that the "frame buffer" is located in the DMA area
 * starting at 0x1200000.
 */
};
static struct timb_dma_platform_data timb_dma_platform_data = {
.nr_channels = 10,
.channels = {
{
/* UART RX */
.rx = true,
.descriptors = 2,
.descriptor_elements = 1
},
{
/* UART TX */
.rx = false,
.descriptors = 2,
.descriptor_elements = 1
},
{
/* MLB RX */
.rx = true,
.descriptors = 2,
.descriptor_elements = 1
},
{
/* MLB TX */
.rx = false,
.descriptors = 2,
.descriptor_elements = 1
},
{
/* Video RX */
.rx = true,
.bytes_per_line = 1440,
.descriptors = 2,
.descriptor_elements = 16
},
{
/* Video framedrop */
},
{
/* SDHCI RX */
.rx = true,
},
{
/* SDHCI TX */
},
{
/* ETH RX */
.rx = true,
.descriptors = 2,
.descriptor_elements = 1
},
{
/* ETH TX */
.rx = false,
.descriptors = 2,
.descriptor_elements = 1
},
}
};
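/*
 * The channel order above follows the DMA_* channel numbering from
 * timberdale.h (UART 0/1, MLB 2/3, video 4/5, SDHCI 6/7, Ethernet 8/9),
 * so DMA_ETH_RX/DMA_ETH_TX in the ks8842 platform data above presumably
 * resolve to the last two entries.
 */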
static const struct resource timberdale_dma_resources[] = {
{
.start = DMAOFFSET,
.end = DMAEND,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_DMA,
.end = IRQ_TIMBERDALE_DMA,
.flags = IORESOURCE_IRQ,
},
};
static const struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
.platform_data = &timb_dma_platform_data,
.pdata_size = sizeof(timb_dma_platform_data),
},
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
.platform_data = &timberdale_xiic_platform_data,
.pdata_size = sizeof(timberdale_xiic_platform_data),
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
.platform_data = &timberdale_gpio_platform_data,
.pdata_size = sizeof(timberdale_gpio_platform_data),
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
.platform_data = &timberdale_video_platform_data,
.pdata_size = sizeof(timberdale_video_platform_data),
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
.platform_data = &timberdale_radio_platform_data,
.pdata_size = sizeof(timberdale_radio_platform_data),
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
.platform_data = &timberdale_xspi_platform_data,
.pdata_size = sizeof(timberdale_xspi_platform_data),
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
.platform_data = &timberdale_ks8842_platform_data,
.pdata_size = sizeof(timberdale_ks8842_platform_data),
},
};
static const struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
.platform_data = &timb_dma_platform_data,
.pdata_size = sizeof(timb_dma_platform_data),
},
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
.name = "uartlite",
.num_resources = ARRAY_SIZE(timberdale_uartlite_resources),
.resources = timberdale_uartlite_resources,
},
{
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
.platform_data = &timberdale_xiic_platform_data,
.pdata_size = sizeof(timberdale_xiic_platform_data),
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
.platform_data = &timberdale_gpio_platform_data,
.pdata_size = sizeof(timberdale_gpio_platform_data),
},
{
.name = "timb-mlogicore",
.num_resources = ARRAY_SIZE(timberdale_mlogicore_resources),
.resources = timberdale_mlogicore_resources,
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
.platform_data = &timberdale_video_platform_data,
.pdata_size = sizeof(timberdale_video_platform_data),
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
.platform_data = &timberdale_radio_platform_data,
.pdata_size = sizeof(timberdale_radio_platform_data),
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
.platform_data = &timberdale_xspi_platform_data,
.pdata_size = sizeof(timberdale_xspi_platform_data),
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
.platform_data = &timberdale_ks8842_platform_data,
.pdata_size = sizeof(timberdale_ks8842_platform_data),
},
};
static const struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
.platform_data = &timb_dma_platform_data,
.pdata_size = sizeof(timb_dma_platform_data),
},
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
.name = "xiic-i2c",
.num_resources = ARRAY_SIZE(timberdale_xiic_resources),
.resources = timberdale_xiic_resources,
.platform_data = &timberdale_xiic_platform_data,
.pdata_size = sizeof(timberdale_xiic_platform_data),
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
.platform_data = &timberdale_gpio_platform_data,
.pdata_size = sizeof(timberdale_gpio_platform_data),
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
.platform_data = &timberdale_video_platform_data,
.pdata_size = sizeof(timberdale_video_platform_data),
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
.platform_data = &timberdale_radio_platform_data,
.pdata_size = sizeof(timberdale_radio_platform_data),
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
.platform_data = &timberdale_xspi_platform_data,
.pdata_size = sizeof(timberdale_xspi_platform_data),
},
};
static const struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
.platform_data = &timb_dma_platform_data,
.pdata_size = sizeof(timb_dma_platform_data),
},
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
.name = "ocores-i2c",
.num_resources = ARRAY_SIZE(timberdale_ocores_resources),
.resources = timberdale_ocores_resources,
.platform_data = &timberdale_ocores_platform_data,
.pdata_size = sizeof(timberdale_ocores_platform_data),
},
{
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
.platform_data = &timberdale_gpio_platform_data,
.pdata_size = sizeof(timberdale_gpio_platform_data),
},
{
.name = "timb-video",
.num_resources = ARRAY_SIZE(timberdale_video_resources),
.resources = timberdale_video_resources,
.platform_data = &timberdale_video_platform_data,
.pdata_size = sizeof(timberdale_video_platform_data),
},
{
.name = "timb-radio",
.num_resources = ARRAY_SIZE(timberdale_radio_resources),
.resources = timberdale_radio_resources,
.platform_data = &timberdale_radio_platform_data,
.pdata_size = sizeof(timberdale_radio_platform_data),
},
{
.name = "xilinx_spi",
.num_resources = ARRAY_SIZE(timberdale_spi_resources),
.resources = timberdale_spi_resources,
.platform_data = &timberdale_xspi_platform_data,
.pdata_size = sizeof(timberdale_xspi_platform_data),
},
{
.name = "ks8842",
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
.platform_data = &timberdale_ks8842_platform_data,
.pdata_size = sizeof(timberdale_ks8842_platform_data),
},
};
static const struct resource timberdale_sdhc_resources[] = {
/* located in bar 1 and bar 2 */
{
.start = SDHC0OFFSET,
.end = SDHC0END,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TIMBERDALE_SDHC,
.end = IRQ_TIMBERDALE_SDHC,
.flags = IORESOURCE_IRQ,
},
};
static const struct mfd_cell timberdale_cells_bar1[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
.resources = timberdale_sdhc_resources,
},
};
static const struct mfd_cell timberdale_cells_bar2[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
.resources = timberdale_sdhc_resources,
},
};
static ssize_t fw_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct timberdale_device *priv = dev_get_drvdata(dev);
return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
priv->fw.config);
}
static DEVICE_ATTR_RO(fw_ver);
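/*
 * Reading the attribute gives "major.minor.config", e.g. (the device path
 * and values here are hypothetical):
 *
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/fw_ver
 *	1.3.2
 */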
/*--------------------------------------------------------------------------*/
static int timb_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct timberdale_device *priv;
int err, i;
resource_size_t mapbase;
struct msix_entry *msix_entries = NULL;
u8 ip_setup;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pci_set_drvdata(dev, priv);
err = pci_enable_device(dev);
if (err)
goto err_enable;
mapbase = pci_resource_start(dev, 0);
if (!mapbase) {
dev_err(&dev->dev, "No resource\n");
goto err_start;
}
/* create a resource for the PCI master register */
priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
dev_err(&dev->dev, "Failed to request ctl mem\n");
goto err_start;
}
priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
if (!priv->ctl_membase) {
dev_err(&dev->dev, "ioremap failed for ctl mem\n");
goto err_ioremap;
}
/* read the HW config */
priv->fw.major = ioread32(priv->ctl_membase + TIMB_REV_MAJOR);
priv->fw.minor = ioread32(priv->ctl_membase + TIMB_REV_MINOR);
priv->fw.config = ioread32(priv->ctl_membase + TIMB_HW_CONFIG);
if (priv->fw.major > TIMB_SUPPORTED_MAJOR) {
dev_err(&dev->dev, "The driver supports an older "
"version of the FPGA, please update the driver to "
"support %d.%d\n", priv->fw.major, priv->fw.minor);
goto err_config;
}
if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
priv->fw.minor < TIMB_REQUIRED_MINOR) {
dev_err(&dev->dev, "The FPGA image is too old (%d.%d), "
"please upgrade the FPGA to at least: %d.%d\n",
priv->fw.major, priv->fw.minor,
TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
goto err_config;
}
msix_entries = kcalloc(TIMBERDALE_NR_IRQS, sizeof(*msix_entries),
GFP_KERNEL);
if (!msix_entries)
goto err_config;
for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
msix_entries[i].entry = i;
err = pci_enable_msix_exact(dev, msix_entries, TIMBERDALE_NR_IRQS);
if (err) {
dev_err(&dev->dev,
"MSI-X init failed: %d, expected entries: %d\n",
err, TIMBERDALE_NR_IRQS);
goto err_msix;
}
err = device_create_file(&dev->dev, &dev_attr_fw_ver);
if (err)
goto err_create_file;
/* Reset all FPGA PLB peripherals */
iowrite32(0x1, priv->ctl_membase + TIMB_SW_RST);
/* translate the MSI-X table indices stored in .irq into allocated vectors */
for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
timberdale_i2c_board_info[i].irq =
msix_entries[timberdale_i2c_board_info[i].irq].vector;
/* Update the SPI configuration depending on the HW (8 or 16 bit) */
if (priv->fw.config & TIMB_HW_CONFIG_SPI_8BIT) {
timberdale_xspi_platform_data.bits_per_word = 8;
timberdale_xspi_platform_data.devices =
timberdale_spi_8bit_board_info;
timberdale_xspi_platform_data.num_devices =
ARRAY_SIZE(timberdale_spi_8bit_board_info);
} else {
timberdale_xspi_platform_data.bits_per_word = 16;
timberdale_xspi_platform_data.devices =
timberdale_spi_16bit_board_info;
timberdale_xspi_platform_data.num_devices =
ARRAY_SIZE(timberdale_spi_16bit_board_info);
}
ip_setup = priv->fw.config & TIMB_HW_VER_MASK;
switch (ip_setup) {
case TIMB_HW_VER0:
err = mfd_add_devices(&dev->dev, -1,
timberdale_cells_bar0_cfg0,
ARRAY_SIZE(timberdale_cells_bar0_cfg0),
&dev->resource[0], msix_entries[0].vector, NULL);
break;
case TIMB_HW_VER1:
err = mfd_add_devices(&dev->dev, -1,
timberdale_cells_bar0_cfg1,
ARRAY_SIZE(timberdale_cells_bar0_cfg1),
&dev->resource[0], msix_entries[0].vector, NULL);
break;
case TIMB_HW_VER2:
err = mfd_add_devices(&dev->dev, -1,
timberdale_cells_bar0_cfg2,
ARRAY_SIZE(timberdale_cells_bar0_cfg2),
&dev->resource[0], msix_entries[0].vector, NULL);
break;
case TIMB_HW_VER3:
err = mfd_add_devices(&dev->dev, -1,
timberdale_cells_bar0_cfg3,
ARRAY_SIZE(timberdale_cells_bar0_cfg3),
&dev->resource[0], msix_entries[0].vector, NULL);
break;
default:
dev_err(&dev->dev, "Unknown IP setup: %d.%d.%d\n",
priv->fw.major, priv->fw.minor, ip_setup);
err = -ENODEV;
goto err_mfd;
}
if (err) {
dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err);
goto err_mfd;
}
err = mfd_add_devices(&dev->dev, 0,
timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
&dev->resource[1], msix_entries[0].vector, NULL);
if (err) {
dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err);
goto err_mfd2;
}
/* only versions 0 and 3 have the iNand routed to SDHCI */
if (((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER0) ||
((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
ARRAY_SIZE(timberdale_cells_bar2),
&dev->resource[2], msix_entries[0].vector, NULL);
if (err) {
dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err);
goto err_mfd2;
}
}
kfree(msix_entries);
dev_info(&dev->dev,
"Found Timberdale Card. Rev: %d.%d, HW config: 0x%02x\n",
priv->fw.major, priv->fw.minor, priv->fw.config);
return 0;
err_mfd2:
mfd_remove_devices(&dev->dev);
err_mfd:
device_remove_file(&dev->dev, &dev_attr_fw_ver);
err_create_file:
pci_disable_msix(dev);
err_msix:
kfree(msix_entries);
err_config:
iounmap(priv->ctl_membase);
err_ioremap:
release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
err_start:
pci_disable_device(dev);
err_enable:
kfree(priv);
return -ENODEV;
}
static void timb_remove(struct pci_dev *dev)
{
struct timberdale_device *priv = pci_get_drvdata(dev);
mfd_remove_devices(&dev->dev);
device_remove_file(&dev->dev, &dev_attr_fw_ver);
iounmap(priv->ctl_membase);
release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
pci_disable_msix(dev);
pci_disable_device(dev);
kfree(priv);
}
static const struct pci_device_id timberdale_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
static struct pci_driver timberdale_pci_driver = {
.name = DRIVER_NAME,
.id_table = timberdale_pci_tbl,
.probe = timb_probe,
.remove = timb_remove,
};
module_pci_driver(timberdale_pci_driver);
MODULE_AUTHOR("Mocean Laboratories <[email protected]>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
/* end of linux-master/drivers/mfd/timberdale.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Core driver for TPS61050/61052 boost converters, used for white LED
* driving, audio power amplification, white LED flash, and generic
* boost conversion. Additionally it provides a 1-bit GPIO pin (out or in)
* and a flash synchronization pin to synchronize flash events when used as
* flashgun.
*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
static struct regmap_config tps6105x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TPS6105X_REG_3,
};
static int tps6105x_startup(struct tps6105x *tps6105x)
{
int ret;
unsigned int regval;
ret = regmap_read(tps6105x->regmap, TPS6105X_REG_0, ®val);
if (ret)
return ret;
switch (regval >> TPS6105X_REG0_MODE_SHIFT) {
case TPS6105X_REG0_MODE_SHUTDOWN:
dev_info(&tps6105x->client->dev,
"TPS6105x found in SHUTDOWN mode\n");
break;
case TPS6105X_REG0_MODE_TORCH:
dev_info(&tps6105x->client->dev,
"TPS6105x found in TORCH mode\n");
break;
case TPS6105X_REG0_MODE_TORCH_FLASH:
dev_info(&tps6105x->client->dev,
"TPS6105x found in FLASH mode\n");
break;
case TPS6105X_REG0_MODE_VOLTAGE:
dev_info(&tps6105x->client->dev,
"TPS6105x found in VOLTAGE mode\n");
break;
default:
break;
}
return ret;
}
/*
 * MFD cells - we always have a GPIO cell, plus one cell
 * selected by the configured operation mode.
 */
static struct mfd_cell tps6105x_gpio_cell = {
.name = "tps6105x-gpio",
};
static struct mfd_cell tps6105x_leds_cell = {
.name = "tps6105x-leds",
};
static struct mfd_cell tps6105x_flash_cell = {
.name = "tps6105x-flash",
};
static struct mfd_cell tps6105x_regulator_cell = {
.name = "tps6105x-regulator",
};
static int tps6105x_add_device(struct tps6105x *tps6105x,
struct mfd_cell *cell)
{
cell->platform_data = tps6105x;
cell->pdata_size = sizeof(*tps6105x);
return mfd_add_devices(&tps6105x->client->dev,
PLATFORM_DEVID_AUTO, cell, 1, NULL, 0, NULL);
}
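/*
 * Each cell receives the whole tps6105x state as its platform data, so a
 * child driver can fetch it in probe with a one-liner (a sketch, assuming
 * a standard platform_device probe function):
 *
 *	struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
 */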
static struct tps6105x_platform_data *tps6105x_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct tps6105x_platform_data *pdata;
struct device_node *child;
if (!np)
return ERR_PTR(-EINVAL);
if (of_get_available_child_count(np) > 1) {
dev_err(dev, "cannot support multiple operational modes");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
pdata->mode = TPS6105X_MODE_SHUTDOWN;
for_each_available_child_of_node(np, child) {
if (child->name && !of_node_cmp(child->name, "regulator"))
pdata->mode = TPS6105X_MODE_VOLTAGE;
else if (child->name && !of_node_cmp(child->name, "led"))
pdata->mode = TPS6105X_MODE_TORCH;
}
return pdata;
}
static int tps6105x_probe(struct i2c_client *client)
{
struct tps6105x *tps6105x;
struct tps6105x_platform_data *pdata;
int ret;
pdata = dev_get_platdata(&client->dev);
if (!pdata)
pdata = tps6105x_parse_dt(&client->dev);
if (IS_ERR(pdata)) {
dev_err(&client->dev, "No platform data or DT found");
return PTR_ERR(pdata);
}
tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL);
if (!tps6105x)
return -ENOMEM;
tps6105x->regmap = devm_regmap_init_i2c(client, &tps6105x_regmap_config);
if (IS_ERR(tps6105x->regmap))
return PTR_ERR(tps6105x->regmap);
i2c_set_clientdata(client, tps6105x);
tps6105x->client = client;
tps6105x->pdata = pdata;
ret = tps6105x_startup(tps6105x);
if (ret) {
dev_err(&client->dev, "chip initialization failed\n");
return ret;
}
ret = tps6105x_add_device(tps6105x, &tps6105x_gpio_cell);
if (ret)
return ret;
switch (pdata->mode) {
case TPS6105X_MODE_SHUTDOWN:
dev_info(&client->dev,
"present, not used for anything, only GPIO\n");
break;
case TPS6105X_MODE_TORCH:
ret = tps6105x_add_device(tps6105x, &tps6105x_leds_cell);
break;
case TPS6105X_MODE_TORCH_FLASH:
ret = tps6105x_add_device(tps6105x, &tps6105x_flash_cell);
break;
case TPS6105X_MODE_VOLTAGE:
ret = tps6105x_add_device(tps6105x, &tps6105x_regulator_cell);
break;
default:
dev_warn(&client->dev, "invalid mode: %d\n", pdata->mode);
break;
}
if (ret)
mfd_remove_devices(&client->dev);
return ret;
}
static void tps6105x_remove(struct i2c_client *client)
{
struct tps6105x *tps6105x = i2c_get_clientdata(client);
mfd_remove_devices(&client->dev);
/* Put chip in shutdown mode */
regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
}
static const struct i2c_device_id tps6105x_id[] = {
{ "tps61050", 0 },
{ "tps61052", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps6105x_id);
static const struct of_device_id tps6105x_of_match[] = {
{ .compatible = "ti,tps61050" },
{ .compatible = "ti,tps61052" },
{ },
};
MODULE_DEVICE_TABLE(of, tps6105x_of_match);
static struct i2c_driver tps6105x_driver = {
.driver = {
.name = "tps6105x",
.of_match_table = tps6105x_of_match,
},
.probe = tps6105x_probe,
.remove = tps6105x_remove,
.id_table = tps6105x_id,
};
static int __init tps6105x_init(void)
{
return i2c_add_driver(&tps6105x_driver);
}
subsys_initcall(tps6105x_init);
static void __exit tps6105x_exit(void)
{
i2c_del_driver(&tps6105x_driver);
}
module_exit(tps6105x_exit);
MODULE_AUTHOR("Linus Walleij");
MODULE_DESCRIPTION("TPS6105x White LED Boost Converter Driver");
MODULE_LICENSE("GPL v2");
/* end of linux-master/drivers/mfd/tps6105x.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Altera Arria10 DevKit System Resource MFD Driver
*
* Author: Thor Thayer <[email protected]>
*
* Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
*
* SPI access for Altera Arria10 MAX5 System Resource Chip
*
* Adapted from DA9052
*/
#include <linux/mfd/altera-a10sr.h>
#include <linux/mfd/core.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
static const struct mfd_cell altr_a10sr_subdev_info[] = {
{
.name = "altr_a10sr_gpio",
.of_compatible = "altr,a10sr-gpio",
},
{
.name = "altr_a10sr_reset",
.of_compatible = "altr,a10sr-reset",
},
};
static bool altr_a10sr_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
case ALTR_A10SR_VERSION_READ:
case ALTR_A10SR_LED_REG:
case ALTR_A10SR_PBDSW_REG:
case ALTR_A10SR_PBDSW_IRQ_REG:
case ALTR_A10SR_PWR_GOOD1_REG:
case ALTR_A10SR_PWR_GOOD2_REG:
case ALTR_A10SR_PWR_GOOD3_REG:
case ALTR_A10SR_FMCAB_REG:
case ALTR_A10SR_HPS_RST_REG:
case ALTR_A10SR_USB_QSPI_REG:
case ALTR_A10SR_SFPA_REG:
case ALTR_A10SR_SFPB_REG:
case ALTR_A10SR_I2C_M_REG:
case ALTR_A10SR_WARM_RST_REG:
case ALTR_A10SR_WR_KEY_REG:
case ALTR_A10SR_PMBUS_REG:
return true;
default:
return false;
}
}
static bool altr_a10sr_reg_writeable(struct device *dev, unsigned int reg)
{
switch (reg) {
case ALTR_A10SR_LED_REG:
case ALTR_A10SR_PBDSW_IRQ_REG:
case ALTR_A10SR_FMCAB_REG:
case ALTR_A10SR_HPS_RST_REG:
case ALTR_A10SR_USB_QSPI_REG:
case ALTR_A10SR_SFPA_REG:
case ALTR_A10SR_SFPB_REG:
case ALTR_A10SR_WARM_RST_REG:
case ALTR_A10SR_WR_KEY_REG:
case ALTR_A10SR_PMBUS_REG:
return true;
default:
return false;
}
}
static bool altr_a10sr_reg_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case ALTR_A10SR_PBDSW_REG:
case ALTR_A10SR_PBDSW_IRQ_REG:
case ALTR_A10SR_PWR_GOOD1_REG:
case ALTR_A10SR_PWR_GOOD2_REG:
case ALTR_A10SR_PWR_GOOD3_REG:
case ALTR_A10SR_HPS_RST_REG:
case ALTR_A10SR_I2C_M_REG:
case ALTR_A10SR_WARM_RST_REG:
case ALTR_A10SR_WR_KEY_REG:
case ALTR_A10SR_PMBUS_REG:
return true;
default:
return false;
}
}
static const struct regmap_config altr_a10sr_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
.use_single_read = true,
.use_single_write = true,
.read_flag_mask = 1,
.write_flag_mask = 0,
.max_register = ALTR_A10SR_WR_KEY_REG,
.readable_reg = altr_a10sr_reg_readable,
.writeable_reg = altr_a10sr_reg_writeable,
.volatile_reg = altr_a10sr_reg_volatile,
};
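/*
 * With reg_bits == 8 and read_flag_mask == 1 above, regmap ORs 0x01 into
 * the register-address byte on reads, which is presumably how the MAX5
 * System Resource chip tells read transactions apart from writes.
 */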
static int altr_a10sr_spi_probe(struct spi_device *spi)
{
int ret;
struct altr_a10sr *a10sr;
a10sr = devm_kzalloc(&spi->dev, sizeof(*a10sr),
GFP_KERNEL);
if (!a10sr)
return -ENOMEM;
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
a10sr->dev = &spi->dev;
spi_set_drvdata(spi, a10sr);
a10sr->regmap = devm_regmap_init_spi(spi, &altr_a10sr_regmap_config);
if (IS_ERR(a10sr->regmap)) {
ret = PTR_ERR(a10sr->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
}
ret = devm_mfd_add_devices(a10sr->dev, PLATFORM_DEVID_AUTO,
altr_a10sr_subdev_info,
ARRAY_SIZE(altr_a10sr_subdev_info),
NULL, 0, NULL);
if (ret)
dev_err(a10sr->dev, "Failed to register sub-devices: %d\n",
ret);
return ret;
}
static const struct of_device_id altr_a10sr_spi_of_match[] = {
{ .compatible = "altr,a10sr" },
{ },
};
MODULE_DEVICE_TABLE(of, altr_a10sr_spi_of_match);
static const struct spi_device_id altr_a10sr_spi_ids[] = {
{ .name = "a10sr" },
{ },
};
MODULE_DEVICE_TABLE(spi, altr_a10sr_spi_ids);
static struct spi_driver altr_a10sr_spi_driver = {
.probe = altr_a10sr_spi_probe,
.driver = {
.name = "altr_a10sr",
.of_match_table = altr_a10sr_spi_of_match,
},
.id_table = altr_a10sr_spi_ids,
};
builtin_driver(altr_a10sr_spi_driver, spi_register_driver)
/* end of linux-master/drivers/mfd/altera-a10sr.c */
// SPDX-License-Identifier: GPL-2.0
/*
* interface.c - contains everything related to the user interface
*
* Some code, especially the possible-resource dumping, is based on isapnp_proc.c (c) Jaroslav Kysela <[email protected]>
* Copyright 2002 Adam Belay <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/pnp.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "base.h"
struct pnp_info_buffer {
char *buffer; /* pointer to begin of buffer */
char *curr; /* current position in buffer */
unsigned long size; /* current size */
unsigned long len; /* total length of buffer */
int stop; /* stop flag */
int error; /* error code */
};
typedef struct pnp_info_buffer pnp_info_buffer_t;
__printf(2, 3)
static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...)
{
va_list args;
int res;
if (buffer->stop || buffer->error)
return 0;
va_start(args, fmt);
res = vsnprintf(buffer->curr, buffer->len - buffer->size, fmt, args);
va_end(args);
if (buffer->size + res >= buffer->len) {
buffer->stop = 1;
return 0;
}
buffer->curr += res;
buffer->size += res;
return res;
}
static void pnp_print_port(pnp_info_buffer_t * buffer, char *space,
struct pnp_port *port)
{
pnp_printf(buffer, "%sport %#llx-%#llx, align %#llx, size %#llx, "
"%i-bit address decoding\n", space,
(unsigned long long) port->min,
(unsigned long long) port->max,
port->align ? ((unsigned long long) port->align - 1) : 0,
(unsigned long long) port->size,
port->flags & IORESOURCE_IO_16BIT_ADDR ? 16 : 10);
}
static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
struct pnp_irq *irq)
{
int first = 1, i;
pnp_printf(buffer, "%sirq ", space);
for (i = 0; i < PNP_IRQ_NR; i++)
if (test_bit(i, irq->map.bits)) {
if (!first) {
pnp_printf(buffer, ",");
} else {
first = 0;
}
if (i == 2 || i == 9)
pnp_printf(buffer, "2/9");
else
pnp_printf(buffer, "%i", i);
}
if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
pnp_printf(buffer, "<none>");
if (irq->flags & IORESOURCE_IRQ_HIGHEDGE)
pnp_printf(buffer, " High-Edge");
if (irq->flags & IORESOURCE_IRQ_LOWEDGE)
pnp_printf(buffer, " Low-Edge");
if (irq->flags & IORESOURCE_IRQ_HIGHLEVEL)
pnp_printf(buffer, " High-Level");
if (irq->flags & IORESOURCE_IRQ_LOWLEVEL)
pnp_printf(buffer, " Low-Level");
if (irq->flags & IORESOURCE_IRQ_OPTIONAL)
pnp_printf(buffer, " (optional)");
pnp_printf(buffer, "\n");
}
static void pnp_print_dma(pnp_info_buffer_t * buffer, char *space,
struct pnp_dma *dma)
{
int first = 1, i;
char *s;
pnp_printf(buffer, "%sdma ", space);
for (i = 0; i < 8; i++)
if (dma->map & (1 << i)) {
if (!first) {
pnp_printf(buffer, ",");
} else {
first = 0;
}
pnp_printf(buffer, "%i", i);
}
if (!dma->map)
pnp_printf(buffer, "<none>");
switch (dma->flags & IORESOURCE_DMA_TYPE_MASK) {
case IORESOURCE_DMA_8BIT:
s = "8-bit";
break;
case IORESOURCE_DMA_8AND16BIT:
s = "8-bit&16-bit";
break;
default:
s = "16-bit";
}
pnp_printf(buffer, " %s", s);
if (dma->flags & IORESOURCE_DMA_MASTER)
pnp_printf(buffer, " master");
if (dma->flags & IORESOURCE_DMA_BYTE)
pnp_printf(buffer, " byte-count");
if (dma->flags & IORESOURCE_DMA_WORD)
pnp_printf(buffer, " word-count");
switch (dma->flags & IORESOURCE_DMA_SPEED_MASK) {
case IORESOURCE_DMA_TYPEA:
s = "type-A";
break;
case IORESOURCE_DMA_TYPEB:
s = "type-B";
break;
case IORESOURCE_DMA_TYPEF:
s = "type-F";
break;
default:
s = "compatible";
break;
}
pnp_printf(buffer, " %s\n", s);
}
static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space,
struct pnp_mem *mem)
{
char *s;
pnp_printf(buffer, "%sMemory %#llx-%#llx, align %#llx, size %#llx",
space, (unsigned long long) mem->min,
(unsigned long long) mem->max,
(unsigned long long) mem->align,
(unsigned long long) mem->size);
if (mem->flags & IORESOURCE_MEM_WRITEABLE)
pnp_printf(buffer, ", writeable");
if (mem->flags & IORESOURCE_MEM_CACHEABLE)
pnp_printf(buffer, ", cacheable");
if (mem->flags & IORESOURCE_MEM_RANGELENGTH)
pnp_printf(buffer, ", range-length");
if (mem->flags & IORESOURCE_MEM_SHADOWABLE)
pnp_printf(buffer, ", shadowable");
if (mem->flags & IORESOURCE_MEM_EXPANSIONROM)
pnp_printf(buffer, ", expansion ROM");
switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_8BIT:
s = "8-bit";
break;
case IORESOURCE_MEM_8AND16BIT:
s = "8-bit&16-bit";
break;
case IORESOURCE_MEM_32BIT:
s = "32-bit";
break;
default:
s = "16-bit";
}
pnp_printf(buffer, ", %s\n", s);
}
static void pnp_print_option(pnp_info_buffer_t * buffer, char *space,
struct pnp_option *option)
{
switch (option->type) {
case IORESOURCE_IO:
pnp_print_port(buffer, space, &option->u.port);
break;
case IORESOURCE_MEM:
pnp_print_mem(buffer, space, &option->u.mem);
break;
case IORESOURCE_IRQ:
pnp_print_irq(buffer, space, &option->u.irq);
break;
case IORESOURCE_DMA:
pnp_print_dma(buffer, space, &option->u.dma);
break;
}
}
static ssize_t options_show(struct device *dmdev, struct device_attribute *attr,
char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
struct pnp_option *option;
int ret, dep = 0, set = 0;
char *indent;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer->len = PAGE_SIZE;
buffer->buffer = buf;
buffer->curr = buffer->buffer;
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option)) {
indent = " ";
if (!dep || pnp_option_set(option) != set) {
set = pnp_option_set(option);
dep = 1;
pnp_printf(buffer, "Dependent: %02i - "
"Priority %s\n", set,
pnp_option_priority_name(option));
}
} else {
dep = 0;
indent = "";
}
pnp_print_option(buffer, indent, option);
}
ret = (buffer->curr - buf);
kfree(buffer);
return ret;
}
static DEVICE_ATTR_RO(options);
static ssize_t resources_show(struct device *dmdev,
struct device_attribute *attr, char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_info_buffer_t *buffer;
struct pnp_resource *pnp_res;
struct resource *res;
int ret;
if (!dev)
return -EINVAL;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer->len = PAGE_SIZE;
buffer->buffer = buf;
buffer->curr = buffer->buffer;
pnp_printf(buffer, "state = %s\n", dev->active ? "active" : "disabled");
list_for_each_entry(pnp_res, &dev->resources, list) {
res = &pnp_res->res;
pnp_printf(buffer, pnp_resource_type_name(res));
if (res->flags & IORESOURCE_DISABLED) {
pnp_printf(buffer, " disabled\n");
continue;
}
switch (pnp_resource_type(res)) {
case IORESOURCE_IO:
case IORESOURCE_MEM:
case IORESOURCE_BUS:
pnp_printf(buffer, " %#llx-%#llx%s\n",
(unsigned long long) res->start,
(unsigned long long) res->end,
res->flags & IORESOURCE_WINDOW ?
" window" : "");
break;
case IORESOURCE_IRQ:
case IORESOURCE_DMA:
pnp_printf(buffer, " %lld\n",
(unsigned long long) res->start);
break;
}
}
ret = (buffer->curr - buf);
kfree(buffer);
return ret;
}
static char *pnp_get_resource_value(char *buf,
unsigned long type,
resource_size_t *start,
resource_size_t *end,
unsigned long *flags)
{
if (start)
*start = 0;
if (end)
*end = 0;
if (flags)
*flags = 0;
/* TBD: allow for disabled resources */
buf = skip_spaces(buf);
if (start) {
*start = simple_strtoull(buf, &buf, 0);
if (end) {
buf = skip_spaces(buf);
if (*buf == '-') {
buf = skip_spaces(buf + 1);
*end = simple_strtoull(buf, &buf, 0);
} else
*end = *start;
}
}
/* TBD: allow for additional flags, e.g., IORESOURCE_WINDOW */
return buf;
}
static ssize_t resources_store(struct device *dmdev,
struct device_attribute *attr, const char *ubuf,
size_t count)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
char *buf = (void *)ubuf;
int retval = 0;
if (dev->status & PNP_ATTACHED) {
retval = -EBUSY;
dev_info(&dev->dev, "in use; can't configure\n");
goto done;
}
buf = skip_spaces(buf);
if (!strncasecmp(buf, "disable", 7)) {
retval = pnp_disable_dev(dev);
goto done;
}
if (!strncasecmp(buf, "activate", 8)) {
retval = pnp_activate_dev(dev);
goto done;
}
if (!strncasecmp(buf, "fill", 4)) {
if (dev->active)
goto done;
retval = pnp_auto_config_dev(dev);
goto done;
}
if (!strncasecmp(buf, "auto", 4)) {
if (dev->active)
goto done;
pnp_init_resources(dev);
retval = pnp_auto_config_dev(dev);
goto done;
}
if (!strncasecmp(buf, "clear", 5)) {
if (dev->active)
goto done;
pnp_init_resources(dev);
goto done;
}
if (!strncasecmp(buf, "get", 3)) {
mutex_lock(&pnp_res_mutex);
if (pnp_can_read(dev))
dev->protocol->get(dev);
mutex_unlock(&pnp_res_mutex);
goto done;
}
if (!strncasecmp(buf, "set", 3)) {
resource_size_t start;
resource_size_t end;
unsigned long flags;
if (dev->active)
goto done;
buf += 3;
pnp_init_resources(dev);
mutex_lock(&pnp_res_mutex);
while (1) {
buf = skip_spaces(buf);
if (!strncasecmp(buf, "io", 2)) {
buf = pnp_get_resource_value(buf + 2,
IORESOURCE_IO,
&start, &end,
&flags);
pnp_add_io_resource(dev, start, end, flags);
} else if (!strncasecmp(buf, "mem", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_MEM,
&start, &end,
&flags);
pnp_add_mem_resource(dev, start, end, flags);
} else if (!strncasecmp(buf, "irq", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_IRQ,
&start, NULL,
&flags);
pnp_add_irq_resource(dev, start, flags);
} else if (!strncasecmp(buf, "dma", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_DMA,
&start, NULL,
&flags);
pnp_add_dma_resource(dev, start, flags);
} else if (!strncasecmp(buf, "bus", 3)) {
buf = pnp_get_resource_value(buf + 3,
IORESOURCE_BUS,
&start, &end,
NULL);
pnp_add_bus_resource(dev, start, end);
} else
break;
}
mutex_unlock(&pnp_res_mutex);
goto done;
}
done:
if (retval < 0)
return retval;
return count;
}
static DEVICE_ATTR_RW(resources);
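/*
 * Illustrative use of the "resources" attribute parsed above (the device
 * id and values are hypothetical):
 *
 *	# echo disable > /sys/bus/pnp/devices/00:01/resources
 *	# echo "set io 0x220-0x22f irq 5 dma 1" > /sys/bus/pnp/devices/00:01/resources
 *	# echo activate > /sys/bus/pnp/devices/00:01/resources
 */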
static ssize_t id_show(struct device *dmdev, struct device_attribute *attr,
char *buf)
{
char *str = buf;
struct pnp_dev *dev = to_pnp_dev(dmdev);
struct pnp_id *pos = dev->id;
while (pos) {
str += sprintf(str, "%s\n", pos->id);
pos = pos->next;
}
return (str - buf);
}
static DEVICE_ATTR_RO(id);
static struct attribute *pnp_dev_attrs[] = {
&dev_attr_resources.attr,
&dev_attr_options.attr,
&dev_attr_id.attr,
NULL,
};
static const struct attribute_group pnp_dev_group = {
.attrs = pnp_dev_attrs,
};
const struct attribute_group *pnp_dev_groups[] = {
&pnp_dev_group,
NULL,
};
/* end of linux-master/drivers/pnp/interface.c */
// SPDX-License-Identifier: GPL-2.0
/*
* resource.c - Contains functions for registering and analyzing resource information
*
* based on isapnp.c resource management (c) Jaroslav Kysela <[email protected]>
* Copyright 2003 Adam Belay <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pnp.h>
#include "base.h"
static int pnp_reserve_irq[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some IRQ */
static int pnp_reserve_dma[8] = {[0 ... 7] = -1 }; /* reserve (don't use) some DMA */
static int pnp_reserve_io[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some I/O region */
static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some memory region */
/*
* option registration
*/
static struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
unsigned int option_flags)
{
struct pnp_option *option;
option = kzalloc(sizeof(struct pnp_option), GFP_KERNEL);
if (!option)
return NULL;
option->flags = option_flags;
option->type = type;
list_add_tail(&option->list, &dev->options);
return option;
}
int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags,
pnp_irq_mask_t *map, unsigned char flags)
{
struct pnp_option *option;
struct pnp_irq *irq;
option = pnp_build_option(dev, IORESOURCE_IRQ, option_flags);
if (!option)
return -ENOMEM;
irq = &option->u.irq;
irq->map = *map;
irq->flags = flags;
#ifdef CONFIG_PCI
{
int i;
for (i = 0; i < 16; i++)
if (test_bit(i, irq->map.bits))
pcibios_penalize_isa_irq(i, 0);
}
#endif
dbg_pnp_show_option(dev, option);
return 0;
}
int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags,
unsigned char map, unsigned char flags)
{
struct pnp_option *option;
struct pnp_dma *dma;
option = pnp_build_option(dev, IORESOURCE_DMA, option_flags);
if (!option)
return -ENOMEM;
dma = &option->u.dma;
dma->map = map;
dma->flags = flags;
dbg_pnp_show_option(dev, option);
return 0;
}
int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags,
resource_size_t min, resource_size_t max,
resource_size_t align, resource_size_t size,
unsigned char flags)
{
struct pnp_option *option;
struct pnp_port *port;
option = pnp_build_option(dev, IORESOURCE_IO, option_flags);
if (!option)
return -ENOMEM;
port = &option->u.port;
port->min = min;
port->max = max;
port->align = align;
port->size = size;
port->flags = flags;
dbg_pnp_show_option(dev, option);
return 0;
}
int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags,
resource_size_t min, resource_size_t max,
resource_size_t align, resource_size_t size,
unsigned char flags)
{
struct pnp_option *option;
struct pnp_mem *mem;
option = pnp_build_option(dev, IORESOURCE_MEM, option_flags);
if (!option)
return -ENOMEM;
mem = &option->u.mem;
mem->min = min;
mem->max = max;
mem->align = align;
mem->size = size;
mem->flags = flags;
dbg_pnp_show_option(dev, option);
return 0;
}
void pnp_free_options(struct pnp_dev *dev)
{
struct pnp_option *option, *tmp;
list_for_each_entry_safe(option, tmp, &dev->options, list) {
list_del(&option->list);
kfree(option);
}
}
/*
* resource validity checking
*/
#define length(start, end) (*(end) - *(start) + 1)
/* Two ranges conflict if one doesn't end before the other starts */
#define ranged_conflict(starta, enda, startb, endb) \
!((*(enda) < *(startb)) || (*(endb) < *(starta)))
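/*
 * e.g. [0x220..0x22f] and [0x22c..0x233] conflict, since neither range
 * ends before the other starts.
 */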
#define cannot_compare(flags) \
((flags) & IORESOURCE_DISABLED)
int pnp_check_port(struct pnp_dev *dev, struct resource *res)
{
int i;
struct pnp_dev *tdev;
struct resource *tres;
resource_size_t *port, *end, *tport, *tend;
port = &res->start;
end = &res->end;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(res->flags))
return 1;
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
if (!dev->active) {
if (!request_region(*port, length(port, end), "pnp"))
return 0;
release_region(*port, length(port, end));
}
/* check if the resource is reserved */
for (i = 0; i < 8; i++) {
int rport = pnp_reserve_io[i << 1];
int rend = pnp_reserve_io[(i << 1) + 1] + rport - 1;
if (ranged_conflict(port, end, &rport, &rend))
return 0;
}
/* check for internal conflicts */
for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
if (tres != res && tres->flags & IORESOURCE_IO) {
tport = &tres->start;
tend = &tres->end;
if (ranged_conflict(port, end, tport, tend))
return 0;
}
}
/* check for conflicts with other pnp devices */
pnp_for_each_dev(tdev) {
if (tdev == dev)
continue;
for (i = 0;
(tres = pnp_get_resource(tdev, IORESOURCE_IO, i));
i++) {
if (tres->flags & IORESOURCE_IO) {
if (cannot_compare(tres->flags))
continue;
if (tres->flags & IORESOURCE_WINDOW)
continue;
tport = &tres->start;
tend = &tres->end;
if (ranged_conflict(port, end, tport, tend))
return 0;
}
}
}
return 1;
}
int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
{
int i;
struct pnp_dev *tdev;
struct resource *tres;
resource_size_t *addr, *end, *taddr, *tend;
addr = &res->start;
end = &res->end;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(res->flags))
return 1;
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
if (!dev->active) {
if (!request_mem_region(*addr, length(addr, end), "pnp"))
return 0;
release_mem_region(*addr, length(addr, end));
}
/* check if the resource is reserved */
for (i = 0; i < 8; i++) {
int raddr = pnp_reserve_mem[i << 1];
int rend = pnp_reserve_mem[(i << 1) + 1] + raddr - 1;
if (ranged_conflict(addr, end, &raddr, &rend))
return 0;
}
/* check for internal conflicts */
for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
if (tres != res && tres->flags & IORESOURCE_MEM) {
taddr = &tres->start;
tend = &tres->end;
if (ranged_conflict(addr, end, taddr, tend))
return 0;
}
}
/* check for conflicts with other pnp devices */
pnp_for_each_dev(tdev) {
if (tdev == dev)
continue;
for (i = 0;
(tres = pnp_get_resource(tdev, IORESOURCE_MEM, i));
i++) {
if (tres->flags & IORESOURCE_MEM) {
if (cannot_compare(tres->flags))
continue;
if (tres->flags & IORESOURCE_WINDOW)
continue;
taddr = &tres->start;
tend = &tres->end;
if (ranged_conflict(addr, end, taddr, tend))
return 0;
}
}
}
return 1;
}
static irqreturn_t pnp_test_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
#ifdef CONFIG_PCI
static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
unsigned int irq)
{
u32 class;
u8 progif;
if (pci->irq == irq) {
pnp_dbg(&pnp->dev, " device %s using irq %d\n",
pci_name(pci), irq);
return 1;
}
/*
* See pci_setup_device() and ata_pci_sff_activate_host() for
* similar IDE legacy detection.
*/
pci_read_config_dword(pci, PCI_CLASS_REVISION, &class);
class >>= 8; /* discard revision ID */
progif = class & 0xff;
class >>= 8;
if (class == PCI_CLASS_STORAGE_IDE) {
/*
* Unless both channels are native-PCI mode only,
* treat the compatibility IRQs as busy.
*/
if ((progif & 0x5) != 0x5)
if (ATA_PRIMARY_IRQ(pci) == irq ||
ATA_SECONDARY_IRQ(pci) == irq) {
pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq);
return 1;
}
}
return 0;
}
#endif
static int pci_uses_irq(struct pnp_dev *pnp, unsigned int irq)
{
#ifdef CONFIG_PCI
struct pci_dev *pci = NULL;
for_each_pci_dev(pci) {
if (pci_dev_uses_irq(pnp, pci, irq)) {
pci_dev_put(pci);
return 1;
}
}
#endif
return 0;
}
int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
{
int i;
struct pnp_dev *tdev;
struct resource *tres;
resource_size_t *irq;
irq = &res->start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(res->flags))
return 1;
/* check if the resource is valid */
if (*irq > 15)
return 0;
/* check if the resource is reserved */
for (i = 0; i < 16; i++) {
if (pnp_reserve_irq[i] == *irq)
return 0;
}
/* check for internal conflicts */
for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
if (tres != res && tres->flags & IORESOURCE_IRQ) {
if (tres->start == *irq)
return 0;
}
}
/* check if the resource is being used by a pci device */
if (pci_uses_irq(dev, *irq))
return 0;
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
if (!dev->active) {
if (request_irq(*irq, pnp_test_handler,
IRQF_PROBE_SHARED, "pnp", NULL))
return 0;
free_irq(*irq, NULL);
}
/* check for conflicts with other pnp devices */
pnp_for_each_dev(tdev) {
if (tdev == dev)
continue;
for (i = 0;
(tres = pnp_get_resource(tdev, IORESOURCE_IRQ, i));
i++) {
if (tres->flags & IORESOURCE_IRQ) {
if (cannot_compare(tres->flags))
continue;
if (tres->start == *irq)
return 0;
}
}
}
return 1;
}
#ifdef CONFIG_ISA_DMA_API
int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
{
int i;
struct pnp_dev *tdev;
struct resource *tres;
resource_size_t *dma;
dma = &res->start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(res->flags))
return 1;
/* check if the resource is valid */
if (*dma == 4 || *dma > 7)
return 0;
/* check if the resource is reserved */
for (i = 0; i < 8; i++) {
if (pnp_reserve_dma[i] == *dma)
return 0;
}
/* check for internal conflicts */
for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) {
if (tres != res && tres->flags & IORESOURCE_DMA) {
if (tres->start == *dma)
return 0;
}
}
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
if (!dev->active) {
if (request_dma(*dma, "pnp"))
return 0;
free_dma(*dma);
}
/* check for conflicts with other pnp devices */
pnp_for_each_dev(tdev) {
if (tdev == dev)
continue;
for (i = 0;
(tres = pnp_get_resource(tdev, IORESOURCE_DMA, i));
i++) {
if (tres->flags & IORESOURCE_DMA) {
if (cannot_compare(tres->flags))
continue;
if (tres->start == *dma)
return 0;
}
}
}
return 1;
}
#endif /* CONFIG_ISA_DMA_API */
unsigned long pnp_resource_type(struct resource *res)
{
return res->flags & (IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_IRQ | IORESOURCE_DMA |
IORESOURCE_BUS);
}
struct resource *pnp_get_resource(struct pnp_dev *dev,
unsigned long type, unsigned int num)
{
struct pnp_resource *pnp_res;
struct resource *res;
list_for_each_entry(pnp_res, &dev->resources, list) {
res = &pnp_res->res;
if (pnp_resource_type(res) == type && num-- == 0)
return res;
}
return NULL;
}
EXPORT_SYMBOL(pnp_get_resource);
static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev)
{
struct pnp_resource *pnp_res;
pnp_res = kzalloc(sizeof(struct pnp_resource), GFP_KERNEL);
if (!pnp_res)
return NULL;
list_add_tail(&pnp_res->list, &dev->resources);
return pnp_res;
}
struct pnp_resource *pnp_add_resource(struct pnp_dev *dev,
struct resource *res)
{
struct pnp_resource *pnp_res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource %pR\n", res);
return NULL;
}
pnp_res->res = *res;
pnp_res->res.name = dev->name;
dev_dbg(&dev->dev, "%pR\n", res);
return pnp_res;
}
struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
int flags)
{
struct pnp_resource *pnp_res;
struct resource *res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource for IRQ %d\n", irq);
return NULL;
}
res = &pnp_res->res;
res->flags = IORESOURCE_IRQ | flags;
res->start = irq;
res->end = irq;
dev_dbg(&dev->dev, "%pR\n", res);
return pnp_res;
}
struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
int flags)
{
struct pnp_resource *pnp_res;
struct resource *res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource for DMA %d\n", dma);
return NULL;
}
res = &pnp_res->res;
res->flags = IORESOURCE_DMA | flags;
res->start = dma;
res->end = dma;
dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
resource_size_t start,
resource_size_t end, int flags)
{
struct pnp_resource *pnp_res;
struct resource *res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource for IO %#llx-%#llx\n",
(unsigned long long) start,
(unsigned long long) end);
return NULL;
}
res = &pnp_res->res;
res->flags = IORESOURCE_IO | flags;
res->start = start;
res->end = end;
dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
resource_size_t start,
resource_size_t end, int flags)
{
struct pnp_resource *pnp_res;
struct resource *res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource for MEM %#llx-%#llx\n",
(unsigned long long) start,
(unsigned long long) end);
return NULL;
}
res = &pnp_res->res;
res->flags = IORESOURCE_MEM | flags;
res->start = start;
res->end = end;
dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev,
resource_size_t start,
resource_size_t end)
{
struct pnp_resource *pnp_res;
struct resource *res;
pnp_res = pnp_new_resource(dev);
if (!pnp_res) {
dev_err(&dev->dev, "can't add resource for BUS %#llx-%#llx\n",
(unsigned long long) start,
(unsigned long long) end);
return NULL;
}
res = &pnp_res->res;
res->flags = IORESOURCE_BUS;
res->start = start;
res->end = end;
dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
/*
* Determine whether the specified resource is a possible configuration
* for this device.
*/
int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start,
resource_size_t size)
{
struct pnp_option *option;
struct pnp_port *port;
struct pnp_mem *mem;
struct pnp_irq *irq;
struct pnp_dma *dma;
list_for_each_entry(option, &dev->options, list) {
if (option->type != type)
continue;
switch (option->type) {
case IORESOURCE_IO:
port = &option->u.port;
if (port->min == start && port->size == size)
return 1;
break;
case IORESOURCE_MEM:
mem = &option->u.mem;
if (mem->min == start && mem->size == size)
return 1;
break;
case IORESOURCE_IRQ:
irq = &option->u.irq;
if (start < PNP_IRQ_NR &&
test_bit(start, irq->map.bits))
return 1;
break;
case IORESOURCE_DMA:
dma = &option->u.dma;
if (dma->map & (1 << start))
return 1;
break;
}
}
return 0;
}
EXPORT_SYMBOL(pnp_possible_config);
int pnp_range_reserved(resource_size_t start, resource_size_t end)
{
struct pnp_dev *dev;
struct pnp_resource *pnp_res;
resource_size_t *dev_start, *dev_end;
pnp_for_each_dev(dev) {
list_for_each_entry(pnp_res, &dev->resources, list) {
dev_start = &pnp_res->res.start;
dev_end = &pnp_res->res.end;
if (ranged_conflict(&start, &end, dev_start, dev_end))
return 1;
}
}
return 0;
}
EXPORT_SYMBOL(pnp_range_reserved);
/* format is: pnp_reserve_irq=irq1[,irq2] .... */
static int __init pnp_setup_reserve_irq(char *str)
{
int i;
for (i = 0; i < 16; i++)
if (get_option(&str, &pnp_reserve_irq[i]) != 2)
break;
return 1;
}
__setup("pnp_reserve_irq=", pnp_setup_reserve_irq);
/* format is: pnp_reserve_dma=dma1[,dma2] .... */
static int __init pnp_setup_reserve_dma(char *str)
{
int i;
for (i = 0; i < 8; i++)
if (get_option(&str, &pnp_reserve_dma[i]) != 2)
break;
return 1;
}
__setup("pnp_reserve_dma=", pnp_setup_reserve_dma);
/* format is: pnp_reserve_io=io1,size1[,io2,size2] .... */
static int __init pnp_setup_reserve_io(char *str)
{
int i;
for (i = 0; i < 16; i++)
if (get_option(&str, &pnp_reserve_io[i]) != 2)
break;
return 1;
}
__setup("pnp_reserve_io=", pnp_setup_reserve_io);
/* format is: pnp_reserve_mem=mem1,size1[,mem2,size2] .... */
static int __init pnp_setup_reserve_mem(char *str)
{
int i;
for (i = 0; i < 16; i++)
if (get_option(&str, &pnp_reserve_mem[i]) != 2)
break;
return 1;
}
__setup("pnp_reserve_mem=", pnp_setup_reserve_mem);
/* end of linux-master/drivers/pnp/resource.c */
// SPDX-License-Identifier: GPL-2.0
/*
* system.c - a driver for reserving pnp system resources
*
* Some code is based on pnpbios_core.c
* Copyright 2002 Adam Belay <[email protected]>
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
static const struct pnp_device_id pnp_dev_table[] = {
/* General ID for reserving resources */
{"PNP0c02", 0},
/* memory controller */
{"PNP0c01", 0},
{"", 0}
};
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
if (port)
res = request_region(start, end - start + 1, regionid);
else
res = request_mem_region(start, end - start + 1, regionid);
if (res)
res->flags &= ~IORESOURCE_BUSY;
else
kfree(regionid);
	/*
	 * Failures at this point are usually harmless: PCI quirks, for
	 * example, also reserve regions they know about, so double
	 * reservations can occur.
	 */
dev_info(&dev->dev, "%pR %s reserved\n", r,
res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
{
struct resource *res;
int i;
for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
if (res->flags & IORESOURCE_DISABLED)
continue;
if (res->start == 0)
continue; /* disabled */
if (res->start < 0x100)
/*
* Below 0x100 is only standard PC hardware
* (pics, kbd, timer, dma, ...)
* We should not get resource conflicts there,
* and the kernel reserves these anyway
* (see arch/i386/kernel/setup.c).
* So, do nothing
*/
continue;
if (res->end < res->start)
continue; /* invalid */
reserve_range(dev, res, 1);
}
for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
if (res->flags & IORESOURCE_DISABLED)
continue;
reserve_range(dev, res, 0);
}
}
static int system_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
{
reserve_resources_of_dev(dev);
return 0;
}
static struct pnp_driver system_pnp_driver = {
.name = "system",
.id_table = pnp_dev_table,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = system_pnp_probe,
};
static int __init pnp_system_init(void)
{
return pnp_register_driver(&system_pnp_driver);
}
/*
 * Reserve motherboard resources after PCI claims its BARs, but
 * before PCI assigns resources to uninitialized PCI devices.
 */
fs_initcall(pnp_system_init);
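/*
 * Illustrative result (format is a sketch, not copied from a real
 * system): a successful reservation shows up in /proc/ioports or
 * /proc/iomem under the device name, e.g.
 *
 *	0400-047f : pnp 00:01
 *
 * with the region left non-busy so a later driver can still claim it.
 */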
| linux-master | drivers/pnp/system.c |
// SPDX-License-Identifier: GPL-2.0
/*
* support.c - standard functions for the use of pnp protocol drivers
*
* Copyright 2003 Adam Belay <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/pnp.h>
#include "base.h"
/**
* pnp_is_active - Determines if a device is active based on its current
* resources
* @dev: pointer to the desired PnP device
*/
int pnp_is_active(struct pnp_dev *dev)
{
/*
* I don't think this is very reliable because pnp_disable_dev()
* only clears out auto-assigned resources.
*/
if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 &&
!pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 &&
pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1)
return 0;
else
return 1;
}
EXPORT_SYMBOL(pnp_is_active);
/*
* Functionally similar to acpi_ex_eisa_id_to_string(), but that's
* buried in the ACPI CA, and we can't depend on it being present.
*/
void pnp_eisa_id_to_string(u32 id, char *str)
{
id = be32_to_cpu(id);
/*
* According to the specs, the first three characters are five-bit
* compressed ASCII, and the left-over high order bit should be zero.
* However, the Linux ISAPNP code historically used six bits for the
* first character, and there seem to be IDs that depend on that,
* e.g., "nEC8241" in the Linux 8250_pnp serial driver and the
* FreeBSD sys/pc98/cbus/sio_cbus.c driver.
*/
str[0] = 'A' + ((id >> 26) & 0x3f) - 1;
str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
str[3] = hex_asc_hi(id >> 8);
str[4] = hex_asc_lo(id >> 8);
str[5] = hex_asc_hi(id);
str[6] = hex_asc_lo(id);
str[7] = '\0';
}
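/*
 * Worked example: "PNP0c02" packs 'P','N','P' as 5-bit letters
 * ('P' = 16, 'N' = 14), so the big-endian value is 0x41d00c02 and the
 * decode above recovers:
 *
 *	0x41d00c02 >> 26		= 16 -> 'P'
 *	(0x41d00c02 >> 21) & 0x1f	= 14 -> 'N'
 *	(0x41d00c02 >> 16) & 0x1f	= 16 -> 'P'
 *	low 16 bits 0x0c02		-> "0c02"
 *
 * yielding the string "PNP0c02".
 */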
char *pnp_resource_type_name(struct resource *res)
{
switch (pnp_resource_type(res)) {
case IORESOURCE_IO:
return "io";
case IORESOURCE_MEM:
return "mem";
case IORESOURCE_IRQ:
return "irq";
case IORESOURCE_DMA:
return "dma";
case IORESOURCE_BUS:
return "bus";
}
return "unknown";
}
void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
{
struct pnp_resource *pnp_res;
if (list_empty(&dev->resources))
pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
else {
pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
list_for_each_entry(pnp_res, &dev->resources, list)
pnp_dbg(&dev->dev, "%pr\n", &pnp_res->res);
}
}
char *pnp_option_priority_name(struct pnp_option *option)
{
switch (pnp_option_priority(option)) {
case PNP_RES_PRIORITY_PREFERRED:
return "preferred";
case PNP_RES_PRIORITY_ACCEPTABLE:
return "acceptable";
case PNP_RES_PRIORITY_FUNCTIONAL:
return "functional";
}
return "invalid";
}
void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
{
char buf[128];
int len = 0, i;
struct pnp_port *port;
struct pnp_mem *mem;
struct pnp_irq *irq;
struct pnp_dma *dma;
if (pnp_option_is_dependent(option))
len += scnprintf(buf + len, sizeof(buf) - len,
" dependent set %d (%s) ",
pnp_option_set(option),
pnp_option_priority_name(option));
else
len += scnprintf(buf + len, sizeof(buf) - len,
" independent ");
switch (option->type) {
case IORESOURCE_IO:
port = &option->u.port;
len += scnprintf(buf + len, sizeof(buf) - len, "io min %#llx "
"max %#llx align %lld size %lld flags %#x",
(unsigned long long) port->min,
(unsigned long long) port->max,
(unsigned long long) port->align,
(unsigned long long) port->size, port->flags);
break;
case IORESOURCE_MEM:
mem = &option->u.mem;
len += scnprintf(buf + len, sizeof(buf) - len, "mem min %#llx "
"max %#llx align %lld size %lld flags %#x",
(unsigned long long) mem->min,
(unsigned long long) mem->max,
(unsigned long long) mem->align,
(unsigned long long) mem->size, mem->flags);
break;
case IORESOURCE_IRQ:
irq = &option->u.irq;
len += scnprintf(buf + len, sizeof(buf) - len, "irq");
if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
len += scnprintf(buf + len, sizeof(buf) - len,
" <none>");
else {
for (i = 0; i < PNP_IRQ_NR; i++)
if (test_bit(i, irq->map.bits))
len += scnprintf(buf + len,
sizeof(buf) - len,
" %d", i);
}
len += scnprintf(buf + len, sizeof(buf) - len, " flags %#x",
irq->flags);
if (irq->flags & IORESOURCE_IRQ_OPTIONAL)
len += scnprintf(buf + len, sizeof(buf) - len,
" (optional)");
break;
case IORESOURCE_DMA:
dma = &option->u.dma;
len += scnprintf(buf + len, sizeof(buf) - len, "dma");
if (!dma->map)
len += scnprintf(buf + len, sizeof(buf) - len,
" <none>");
else {
for (i = 0; i < 8; i++)
if (dma->map & (1 << i))
len += scnprintf(buf + len,
sizeof(buf) - len,
" %d", i);
}
len += scnprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) "
"flags %#x", dma->map, dma->flags);
break;
}
pnp_dbg(&dev->dev, "%s\n", buf);
}
| linux-master | drivers/pnp/support.c |
// SPDX-License-Identifier: GPL-2.0
/*
* core.c - contains all core device and protocol registration functions
*
* Copyright 2002 Adam Belay <[email protected]>
*/
#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include "base.h"
static LIST_HEAD(pnp_protocols);
LIST_HEAD(pnp_global);
DEFINE_MUTEX(pnp_lock);
/*
* ACPI or PNPBIOS should tell us about all platform devices, so we can
* skip some blind probes. ISAPNP typically enumerates only plug-in ISA
* devices, not built-in things like COM ports.
*/
int pnp_platform_devices;
EXPORT_SYMBOL(pnp_platform_devices);
static void pnp_remove_protocol(struct pnp_protocol *protocol)
{
mutex_lock(&pnp_lock);
list_del(&protocol->protocol_list);
mutex_unlock(&pnp_lock);
}
/**
* pnp_register_protocol - adds a pnp protocol to the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*
 * Example protocols: ISAPNP, PNPBIOS, etc.
*/
int pnp_register_protocol(struct pnp_protocol *protocol)
{
struct list_head *pos;
int nodenum, ret;
INIT_LIST_HEAD(&protocol->devices);
INIT_LIST_HEAD(&protocol->cards);
nodenum = 0;
mutex_lock(&pnp_lock);
/* assign the lowest unused number */
list_for_each(pos, &pnp_protocols) {
struct pnp_protocol *cur = to_pnp_protocol(pos);
if (cur->number == nodenum) {
pos = &pnp_protocols;
nodenum++;
}
}
protocol->number = nodenum;
dev_set_name(&protocol->dev, "pnp%d", nodenum);
list_add_tail(&protocol->protocol_list, &pnp_protocols);
mutex_unlock(&pnp_lock);
ret = device_register(&protocol->dev);
if (ret)
pnp_remove_protocol(protocol);
return ret;
}
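/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * protocol driver fills in a struct pnp_protocol and registers it;
 * the first protocol registered gets number 0 and appears as "pnp0".
 */
#if 0
static struct pnp_protocol example_protocol = {
	.name	 = "Example Plug and Play",
	.get	 = example_get_resources,	/* hypothetical callbacks */
	.set	 = example_set_resources,
	.disable = example_disable_resources,
};
/* ... then pnp_register_protocol(&example_protocol) from an initcall */
#endif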
/**
* pnp_unregister_protocol - removes a pnp protocol from the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*/
void pnp_unregister_protocol(struct pnp_protocol *protocol)
{
pnp_remove_protocol(protocol);
device_unregister(&protocol->dev);
}
static void pnp_free_ids(struct pnp_dev *dev)
{
struct pnp_id *id;
struct pnp_id *next;
id = dev->id;
while (id) {
next = id->next;
kfree(id);
id = next;
}
}
void pnp_free_resource(struct pnp_resource *pnp_res)
{
list_del(&pnp_res->list);
kfree(pnp_res);
}
void pnp_free_resources(struct pnp_dev *dev)
{
struct pnp_resource *pnp_res, *tmp;
list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
pnp_free_resource(pnp_res);
}
}
static void pnp_release_device(struct device *dmdev)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
pnp_free_ids(dev);
pnp_free_resources(dev);
pnp_free_options(dev);
kfree(dev);
}
struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
const char *pnpid)
{
struct pnp_dev *dev;
struct pnp_id *dev_id;
dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
if (!dev)
return NULL;
INIT_LIST_HEAD(&dev->resources);
INIT_LIST_HEAD(&dev->options);
dev->protocol = protocol;
dev->number = id;
dev->dma_mask = DMA_BIT_MASK(24);
dev->dev.parent = &dev->protocol->dev;
dev->dev.bus = &pnp_bus_type;
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.coherent_dma_mask = dev->dma_mask;
dev->dev.release = &pnp_release_device;
dev_id = pnp_add_id(dev, pnpid);
if (!dev_id) {
kfree(dev);
return NULL;
}
dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
return dev;
}
static void pnp_delist_device(struct pnp_dev *dev)
{
mutex_lock(&pnp_lock);
list_del(&dev->global_list);
list_del(&dev->protocol_list);
mutex_unlock(&pnp_lock);
}
int __pnp_add_device(struct pnp_dev *dev)
{
int ret;
pnp_fixup_device(dev);
dev->status = PNP_READY;
mutex_lock(&pnp_lock);
list_add_tail(&dev->global_list, &pnp_global);
list_add_tail(&dev->protocol_list, &dev->protocol->devices);
mutex_unlock(&pnp_lock);
ret = device_register(&dev->dev);
if (ret)
pnp_delist_device(dev);
else if (dev->protocol->can_wakeup)
device_set_wakeup_capable(&dev->dev,
dev->protocol->can_wakeup(dev));
return ret;
}
/*
* pnp_add_device - adds a pnp device to the pnp layer
* @dev: pointer to dev to add
*
* adds to driver model, name database, fixups, interface, etc.
*/
int pnp_add_device(struct pnp_dev *dev)
{
int ret;
char buf[128];
int len = 0;
struct pnp_id *id;
if (dev->card)
return -EINVAL;
ret = __pnp_add_device(dev);
if (ret)
return ret;
buf[0] = '\0';
for (id = dev->id; id; id = id->next)
len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
dev_dbg(&dev->dev, "%s device, IDs%s (%s)\n", dev->protocol->name, buf,
dev->active ? "active" : "disabled");
return 0;
}
void __pnp_remove_device(struct pnp_dev *dev)
{
pnp_delist_device(dev);
device_unregister(&dev->dev);
}
static int __init pnp_init(void)
{
return bus_register(&pnp_bus_type);
}
subsys_initcall(pnp_init);
int pnp_debug;
#if defined(CONFIG_PNP_DEBUG_MESSAGES)
module_param_named(debug, pnp_debug, int, 0644);
#endif
| linux-master | drivers/pnp/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* card.c - contains functions for managing groups of PnP devices
*
* Copyright 2002 Adam Belay <[email protected]>
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/dma-mapping.h>
#include "base.h"
LIST_HEAD(pnp_cards);
static LIST_HEAD(pnp_card_drivers);
static const struct pnp_card_device_id *match_card(struct pnp_card_driver *drv,
struct pnp_card *card)
{
const struct pnp_card_device_id *drv_id = drv->id_table;
while (*drv_id->id) {
if (compare_pnp_id(card->id, drv_id->id)) {
int i = 0;
for (;;) {
int found;
struct pnp_dev *dev;
if (i == PNP_MAX_DEVICES ||
!*drv_id->devs[i].id)
return drv_id;
found = 0;
card_for_each_dev(card, dev) {
if (compare_pnp_id(dev->id,
drv_id->devs[i].id)) {
found = 1;
break;
}
}
if (!found)
break;
i++;
}
}
drv_id++;
}
return NULL;
}
static void card_remove(struct pnp_dev *dev)
{
dev->card_link = NULL;
}
static void card_remove_first(struct pnp_dev *dev)
{
struct pnp_card_driver *drv = to_pnp_card_driver(dev->driver);
if (!dev->card || !drv)
return;
if (drv->remove)
drv->remove(dev->card_link);
drv->link.remove = &card_remove;
kfree(dev->card_link);
card_remove(dev);
}
static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
{
const struct pnp_card_device_id *id;
struct pnp_card_link *clink;
struct pnp_dev *dev;
if (!drv->probe)
return 0;
id = match_card(drv, card);
if (!id)
return 0;
clink = kzalloc(sizeof(*clink), GFP_KERNEL);
if (!clink)
return 0;
clink->card = card;
clink->driver = drv;
clink->pm_state = PMSG_ON;
if (drv->probe(clink, id) >= 0)
return 1;
/* Recovery */
card_for_each_dev(card, dev) {
if (dev->card_link == clink)
pnp_release_card_device(dev);
}
kfree(clink);
return 0;
}
/**
* pnp_add_card_id - adds an EISA id to the specified card
* @id: pointer to a pnp_id structure
* @card: pointer to the desired card
*/
static struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id)
{
struct pnp_id *dev_id, *ptr;
dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
if (!dev_id)
return NULL;
dev_id->id[0] = id[0];
dev_id->id[1] = id[1];
dev_id->id[2] = id[2];
dev_id->id[3] = tolower(id[3]);
dev_id->id[4] = tolower(id[4]);
dev_id->id[5] = tolower(id[5]);
dev_id->id[6] = tolower(id[6]);
dev_id->id[7] = '\0';
dev_id->next = NULL;
ptr = card->id;
while (ptr && ptr->next)
ptr = ptr->next;
if (ptr)
ptr->next = dev_id;
else
card->id = dev_id;
return dev_id;
}
static void pnp_free_card_ids(struct pnp_card *card)
{
struct pnp_id *id;
struct pnp_id *next;
id = card->id;
while (id) {
next = id->next;
kfree(id);
id = next;
}
}
static void pnp_release_card(struct device *dmdev)
{
struct pnp_card *card = to_pnp_card(dmdev);
pnp_free_card_ids(card);
kfree(card);
}
struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnpid)
{
struct pnp_card *card;
struct pnp_id *dev_id;
card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL);
if (!card)
return NULL;
card->protocol = protocol;
card->number = id;
card->dev.parent = &card->protocol->dev;
dev_set_name(&card->dev, "%02x:%02x", card->protocol->number, card->number);
card->dev.coherent_dma_mask = DMA_BIT_MASK(24);
card->dev.dma_mask = &card->dev.coherent_dma_mask;
dev_id = pnp_add_card_id(card, pnpid);
if (!dev_id) {
kfree(card);
return NULL;
}
return card;
}
static ssize_t name_show(struct device *dmdev,
struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
str += sprintf(str, "%s\n", card->name);
return (str - buf);
}
static DEVICE_ATTR_RO(name);
static ssize_t card_id_show(struct device *dmdev,
struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
struct pnp_id *pos = card->id;
while (pos) {
str += sprintf(str, "%s\n", pos->id);
pos = pos->next;
}
return (str - buf);
}
static DEVICE_ATTR_RO(card_id);
static int pnp_interface_attach_card(struct pnp_card *card)
{
int rc = device_create_file(&card->dev, &dev_attr_name);
if (rc)
return rc;
rc = device_create_file(&card->dev, &dev_attr_card_id);
if (rc)
goto err_name;
return 0;
err_name:
device_remove_file(&card->dev, &dev_attr_name);
return rc;
}
/**
* pnp_add_card - adds a PnP card to the PnP Layer
* @card: pointer to the card to add
*/
int pnp_add_card(struct pnp_card *card)
{
int error;
struct list_head *pos, *temp;
card->dev.bus = NULL;
card->dev.release = &pnp_release_card;
error = device_register(&card->dev);
if (error) {
dev_err(&card->dev, "could not register (err=%d)\n", error);
put_device(&card->dev);
return error;
}
pnp_interface_attach_card(card);
mutex_lock(&pnp_lock);
list_add_tail(&card->global_list, &pnp_cards);
list_add_tail(&card->protocol_list, &card->protocol->cards);
mutex_unlock(&pnp_lock);
	/* We wait until now to add the devices so that drivers can use
	 * all of the related devices on the card as soon as they bind,
	 * without waiting an unreasonable length of time. */
list_for_each(pos, &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(pos);
__pnp_add_device(dev);
}
/* match with card drivers */
list_for_each_safe(pos, temp, &pnp_card_drivers) {
struct pnp_card_driver *drv =
list_entry(pos, struct pnp_card_driver,
global_list);
card_probe(card, drv);
}
return 0;
}
/**
* pnp_remove_card - removes a PnP card from the PnP Layer
* @card: pointer to the card to remove
*/
void pnp_remove_card(struct pnp_card *card)
{
struct list_head *pos, *temp;
device_unregister(&card->dev);
mutex_lock(&pnp_lock);
list_del(&card->global_list);
list_del(&card->protocol_list);
mutex_unlock(&pnp_lock);
list_for_each_safe(pos, temp, &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(pos);
pnp_remove_card_device(dev);
}
}
/**
* pnp_add_card_device - adds a device to the specified card
* @card: pointer to the card to add to
* @dev: pointer to the device to add
*/
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
{
dev->dev.parent = &card->dev;
dev->card_link = NULL;
dev_set_name(&dev->dev, "%02x:%02x.%02x",
dev->protocol->number, card->number, dev->number);
mutex_lock(&pnp_lock);
dev->card = card;
list_add_tail(&dev->card_list, &card->devices);
mutex_unlock(&pnp_lock);
return 0;
}
/**
* pnp_remove_card_device- removes a device from the specified card
* @dev: pointer to the device to remove
*/
void pnp_remove_card_device(struct pnp_dev *dev)
{
mutex_lock(&pnp_lock);
dev->card = NULL;
list_del(&dev->card_list);
mutex_unlock(&pnp_lock);
__pnp_remove_device(dev);
}
/**
* pnp_request_card_device - Searches for a PnP device under the specified card
* @clink: pointer to the card link, cannot be NULL
* @id: pointer to a PnP ID structure that explains the rules for finding the device
* @from: Starting place to search from. If NULL it will start from the beginning.
*/
struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
const char *id, struct pnp_dev *from)
{
struct list_head *pos;
struct pnp_dev *dev;
struct pnp_card_driver *drv;
struct pnp_card *card;
if (!clink || !id)
return NULL;
card = clink->card;
drv = clink->driver;
if (!from) {
pos = card->devices.next;
} else {
if (from->card != card)
return NULL;
pos = from->card_list.next;
}
while (pos != &card->devices) {
dev = card_to_pnp_dev(pos);
if ((!dev->card_link) && compare_pnp_id(dev->id, id))
goto found;
pos = pos->next;
}
return NULL;
found:
dev->card_link = clink;
dev->dev.driver = &drv->link.driver;
if (pnp_bus_type.probe(&dev->dev))
goto err_out;
if (device_bind_driver(&dev->dev))
goto err_out;
return dev;
err_out:
dev->dev.driver = NULL;
dev->card_link = NULL;
return NULL;
}
EXPORT_SYMBOL(pnp_request_card_device);
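/*
 * Illustrative sketch (hypothetical ID and names): a card driver's
 * probe callback typically requests the logical devices it needs,
 * e.g.
 */
#if 0
static int example_card_probe(struct pnp_card_link *clink,
			      const struct pnp_card_device_id *id)
{
	struct pnp_dev *audio;

	audio = pnp_request_card_device(clink, "CTL0031", NULL);
	if (!audio)
		return -ENODEV;
	/* ... configure; pnp_release_card_device(audio) on failure ... */
	return 0;
}
#endif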
/**
* pnp_release_card_device - call this when the driver no longer needs the device
* @dev: pointer to the PnP device structure
*/
void pnp_release_card_device(struct pnp_dev *dev)
{
struct pnp_card_driver *drv = dev->card_link->driver;
drv->link.remove = &card_remove;
device_release_driver(&dev->dev);
drv->link.remove = &card_remove_first;
}
EXPORT_SYMBOL(pnp_release_card_device);
/*
* suspend/resume callbacks
*/
static int card_suspend(struct pnp_dev *dev, pm_message_t state)
{
struct pnp_card_link *link = dev->card_link;
if (link->pm_state.event == state.event)
return 0;
link->pm_state = state;
return link->driver->suspend(link, state);
}
static int card_resume(struct pnp_dev *dev)
{
struct pnp_card_link *link = dev->card_link;
if (link->pm_state.event == PM_EVENT_ON)
return 0;
link->pm_state = PMSG_ON;
link->driver->resume(link);
return 0;
}
/**
* pnp_register_card_driver - registers a PnP card driver with the PnP Layer
* @drv: pointer to the driver to register
*/
int pnp_register_card_driver(struct pnp_card_driver *drv)
{
int error;
struct list_head *pos, *temp;
drv->link.name = drv->name;
drv->link.id_table = NULL; /* this will disable auto matching */
drv->link.flags = drv->flags;
drv->link.probe = NULL;
drv->link.remove = &card_remove_first;
drv->link.suspend = drv->suspend ? card_suspend : NULL;
drv->link.resume = drv->resume ? card_resume : NULL;
error = pnp_register_driver(&drv->link);
if (error < 0)
return error;
mutex_lock(&pnp_lock);
list_add_tail(&drv->global_list, &pnp_card_drivers);
mutex_unlock(&pnp_lock);
list_for_each_safe(pos, temp, &pnp_cards) {
struct pnp_card *card =
list_entry(pos, struct pnp_card, global_list);
card_probe(card, drv);
}
return 0;
}
EXPORT_SYMBOL(pnp_register_card_driver);
/**
* pnp_unregister_card_driver - unregisters a PnP card driver from the PnP Layer
* @drv: pointer to the driver to unregister
*/
void pnp_unregister_card_driver(struct pnp_card_driver *drv)
{
mutex_lock(&pnp_lock);
list_del(&drv->global_list);
mutex_unlock(&pnp_lock);
pnp_unregister_driver(&drv->link);
}
EXPORT_SYMBOL(pnp_unregister_card_driver);
| linux-master | drivers/pnp/card.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains quirk handling code for PnP devices.
* Some devices do not report all their resources, and need to have extra
* resources added. This is most easily accomplished at initialisation time
* when building up the resource structure for the first time.
*
* Copyright (c) 2000 Peter Denison <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*
* Heavily based on PCI quirks handling which is
*
* Copyright (c) 1999 Martin Mares <[email protected]>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
struct pnp_option *option,
unsigned int offset)
{
struct pnp_option *new_option;
new_option = kmalloc(sizeof(struct pnp_option), GFP_KERNEL);
if (!new_option) {
dev_err(&dev->dev, "couldn't add ioport region to option set "
"%d\n", pnp_option_set(option));
return;
}
*new_option = *option;
new_option->u.port.min += offset;
new_option->u.port.max += offset;
list_add(&new_option->list, &option->list);
dev_info(&dev->dev, "added ioport region %#llx-%#llx to set %d\n",
(unsigned long long) new_option->u.port.min,
(unsigned long long) new_option->u.port.max,
pnp_option_set(option));
}
static void quirk_awe32_resources(struct pnp_dev *dev)
{
struct pnp_option *option;
unsigned int set = ~0;
/*
* Add two extra ioport regions (at offset 0x400 and 0x800 from the
* one given) to every dependent option set.
*/
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option) &&
pnp_option_set(option) != set) {
set = pnp_option_set(option);
quirk_awe32_add_ports(dev, option, 0x800);
quirk_awe32_add_ports(dev, option, 0x400);
}
}
}
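/*
 * For a classic AWE32 this turns a dependent option describing the
 * EMU8000 base port (e.g. 0x620) into the three regions the hardware
 * actually decodes: 0x620, 0xa20 and 0xe20 for that base.
 */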
static void quirk_cmi8330_resources(struct pnp_dev *dev)
{
struct pnp_option *option;
struct pnp_irq *irq;
struct pnp_dma *dma;
list_for_each_entry(option, &dev->options, list) {
if (!pnp_option_is_dependent(option))
continue;
if (option->type == IORESOURCE_IRQ) {
irq = &option->u.irq;
bitmap_zero(irq->map.bits, PNP_IRQ_NR);
__set_bit(5, irq->map.bits);
__set_bit(7, irq->map.bits);
__set_bit(10, irq->map.bits);
dev_info(&dev->dev, "set possible IRQs in "
"option set %d to 5, 7, 10\n",
pnp_option_set(option));
} else if (option->type == IORESOURCE_DMA) {
dma = &option->u.dma;
if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) ==
IORESOURCE_DMA_8BIT &&
dma->map != 0x0A) {
dev_info(&dev->dev, "changing possible "
"DMA channel mask in option set %d "
"from %#02x to 0x0A (1, 3)\n",
pnp_option_set(option), dma->map);
dma->map = 0x0A;
}
}
}
}
static void quirk_sb16audio_resources(struct pnp_dev *dev)
{
struct pnp_option *option;
unsigned int prev_option_flags = ~0, n = 0;
struct pnp_port *port;
/*
* The default range on the OPL port for these devices is 0x388-0x388.
* Here we increase that range so that two such cards can be
* auto-configured.
*/
list_for_each_entry(option, &dev->options, list) {
if (prev_option_flags != option->flags) {
prev_option_flags = option->flags;
n = 0;
}
if (pnp_option_is_dependent(option) &&
option->type == IORESOURCE_IO) {
n++;
port = &option->u.port;
if (n == 3 && port->min == port->max) {
port->max += 0x70;
dev_info(&dev->dev, "increased option port "
"range from %#llx-%#llx to "
"%#llx-%#llx\n",
(unsigned long long) port->min,
(unsigned long long) port->min,
(unsigned long long) port->min,
(unsigned long long) port->max);
}
}
}
}
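/*
 * Example of the effect: a third dependent port option of 0x388-0x388
 * becomes 0x388-0x3f8, giving the allocator room to place a second
 * card's OPL port.  Note that the dev_info() above prints port->min
 * for the old maximum too, which is correct because the quirk only
 * fires when port->min == port->max.
 */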
static struct pnp_option *pnp_clone_dependent_set(struct pnp_dev *dev,
unsigned int set)
{
struct pnp_option *tail = NULL, *first_new_option = NULL;
struct pnp_option *option, *new_option;
unsigned int flags;
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option))
tail = option;
}
if (!tail) {
dev_err(&dev->dev, "no dependent option sets\n");
return NULL;
}
flags = pnp_new_dependent_set(dev, PNP_RES_PRIORITY_FUNCTIONAL);
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option) &&
pnp_option_set(option) == set) {
new_option = kmalloc(sizeof(struct pnp_option),
GFP_KERNEL);
if (!new_option) {
dev_err(&dev->dev, "couldn't clone dependent "
"set %d\n", set);
return NULL;
}
*new_option = *option;
new_option->flags = flags;
if (!first_new_option)
first_new_option = new_option;
list_add(&new_option->list, &tail->list);
tail = new_option;
}
}
return first_new_option;
}
static void quirk_add_irq_optional_dependent_sets(struct pnp_dev *dev)
{
struct pnp_option *new_option;
unsigned int num_sets, i, set;
struct pnp_irq *irq;
num_sets = dev->num_dependent_sets;
for (i = 0; i < num_sets; i++) {
new_option = pnp_clone_dependent_set(dev, i);
if (!new_option)
return;
set = pnp_option_set(new_option);
while (new_option && pnp_option_set(new_option) == set) {
if (new_option->type == IORESOURCE_IRQ) {
irq = &new_option->u.irq;
irq->flags |= IORESOURCE_IRQ_OPTIONAL;
}
dbg_pnp_show_option(dev, new_option);
new_option = list_entry(new_option->list.next,
struct pnp_option, list);
}
dev_info(&dev->dev, "added dependent option set %d (same as "
"set %d except IRQ optional)\n", set, i);
}
}
static void quirk_ad1815_mpu_resources(struct pnp_dev *dev)
{
struct pnp_option *option;
struct pnp_irq *irq = NULL;
unsigned int independent_irqs = 0;
list_for_each_entry(option, &dev->options, list) {
if (option->type == IORESOURCE_IRQ &&
!pnp_option_is_dependent(option)) {
independent_irqs++;
irq = &option->u.irq;
}
}
if (independent_irqs != 1)
return;
irq->flags |= IORESOURCE_IRQ_OPTIONAL;
dev_info(&dev->dev, "made independent IRQ optional\n");
}
static void quirk_system_pci_resources(struct pnp_dev *dev)
{
struct pci_dev *pdev = NULL;
struct resource *res, *r;
int i, j;
/*
* Some BIOSes have PNP motherboard devices with resources that
* partially overlap PCI BARs. The PNP system driver claims these
* motherboard resources, which prevents the normal PCI driver from
* requesting them later.
*
* This patch disables the PNP resources that conflict with PCI BARs
* so they won't be claimed by the PNP system driver.
*/
for_each_pci_dev(pdev) {
pci_dev_for_each_resource(pdev, r, i) {
unsigned long type = resource_type(r);
if (!(type == IORESOURCE_IO || type == IORESOURCE_MEM) ||
resource_size(r) == 0)
continue;
if (r->flags & IORESOURCE_UNSET)
continue;
for (j = 0;
(res = pnp_get_resource(dev, type, j)); j++) {
if (res->start == 0 && res->end == 0)
continue;
/*
* If the PNP region doesn't overlap the PCI
* region at all, there's no problem.
*/
if (!resource_overlaps(res, r))
continue;
/*
* If the PNP region completely encloses (or is
* at least as large as) the PCI region, that's
* also OK. For example, this happens when the
* PNP device describes a bridge with PCI
* behind it.
*/
if (res->start <= r->start && res->end >= r->end)
continue;
/*
* Otherwise, the PNP region overlaps *part* of
* the PCI region, and that might prevent a PCI
* driver from requesting its resources.
*/
dev_warn(&dev->dev,
"disabling %pR because it overlaps %s BAR %d %pR\n",
res, pci_name(pdev), i, r);
res->flags |= IORESOURCE_DISABLED;
}
}
}
}
#ifdef CONFIG_AMD_NB
#include <asm/amd_nb.h>
static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
{
resource_size_t start, end;
struct pnp_resource *pnp_res;
struct resource *res;
struct resource mmconfig_res, *mmconfig;
mmconfig = amd_get_mmconfig_range(&mmconfig_res);
if (!mmconfig)
return;
list_for_each_entry(pnp_res, &dev->resources, list) {
res = &pnp_res->res;
if (res->end < mmconfig->start || res->start > mmconfig->end ||
(res->start == mmconfig->start && res->end == mmconfig->end))
continue;
dev_info(&dev->dev, FW_BUG
"%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
res, mmconfig);
if (mmconfig->start < res->start) {
start = mmconfig->start;
end = res->start - 1;
pnp_add_mem_resource(dev, start, end, 0);
}
if (mmconfig->end > res->end) {
start = res->end + 1;
end = mmconfig->end;
pnp_add_mem_resource(dev, start, end, 0);
}
break;
}
}
#endif
#ifdef CONFIG_PCI
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
0x0a04, /* Haswell-ULT */
0x0c00, /* Haswell */
0x1604, /* Broadwell */
};
static struct pci_dev *get_intel_host(void)
{
int i;
struct pci_dev *host;
for (i = 0; i < ARRAY_SIZE(mch_quirk_devices); i++) {
host = pci_get_device(PCI_VENDOR_ID_INTEL, mch_quirk_devices[i],
NULL);
if (host)
return host;
}
return NULL;
}
static void quirk_intel_mch(struct pnp_dev *dev)
{
struct pci_dev *host;
u32 addr_lo, addr_hi;
struct pci_bus_region region;
struct resource mch;
struct pnp_resource *pnp_res;
struct resource *res;
host = get_intel_host();
if (!host)
return;
/*
* MCHBAR is not an architected PCI BAR, so MCH space is usually
* reported as a PNP0C02 resource. The MCH space was originally
* 16KB, but is 32KB in newer parts. Some BIOSes still report a
* PNP0C02 resource that is only 16KB, which means the rest of the
* MCH space is consumed but unreported.
*/
/*
* Read MCHBAR for Host Member Mapped Register Range Base
* https://www-ssl.intel.com/content/www/us/en/processors/core/4th-gen-core-family-desktop-vol-2-datasheet
* Sec 3.1.12.
*/
pci_read_config_dword(host, 0x48, &addr_lo);
	region.start = addr_lo & ~0x7fff;	/* MCHBAR is 32KB-aligned; low bits carry flags */
pci_read_config_dword(host, 0x4c, &addr_hi);
region.start |= (u64) addr_hi << 32;
region.end = region.start + 32*1024 - 1;
memset(&mch, 0, sizeof(mch));
mch.flags = IORESOURCE_MEM;
	pcibios_bus_to_resource(host->bus, &mch, &region);
list_for_each_entry(pnp_res, &dev->resources, list) {
res = &pnp_res->res;
if (res->end < mch.start || res->start > mch.end)
continue; /* no overlap */
if (res->start == mch.start && res->end == mch.end)
continue; /* exact match */
dev_info(&dev->dev, FW_BUG "PNP resource %pR covers only part of %s Intel MCH; extending to %pR\n",
res, pci_name(host), &mch);
res->start = mch.start;
res->end = mch.end;
break;
}
pci_dev_put(host);
}
#endif
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
*/
static struct pnp_fixup pnp_fixups[] = {
/* Soundblaster awe io port quirk */
{"CTL0021", quirk_awe32_resources},
{"CTL0022", quirk_awe32_resources},
{"CTL0023", quirk_awe32_resources},
/* CMI 8330 interrupt and dma fix */
{"@X@0001", quirk_cmi8330_resources},
/* Soundblaster audio device io port range quirk */
{"CTL0001", quirk_sb16audio_resources},
{"CTL0031", quirk_sb16audio_resources},
{"CTL0041", quirk_sb16audio_resources},
{"CTL0042", quirk_sb16audio_resources},
{"CTL0043", quirk_sb16audio_resources},
{"CTL0044", quirk_sb16audio_resources},
{"CTL0045", quirk_sb16audio_resources},
/* Add IRQ-optional MPU options */
{"ADS7151", quirk_ad1815_mpu_resources},
{"ADS7181", quirk_add_irq_optional_dependent_sets},
{"AZT0002", quirk_add_irq_optional_dependent_sets},
/* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
#ifdef CONFIG_AMD_NB
{"PNP0c01", quirk_amd_mmconfig_area},
#endif
#ifdef CONFIG_PCI
{"PNP0c02", quirk_intel_mch},
#endif
{""}
};
void pnp_fixup_device(struct pnp_dev *dev)
{
struct pnp_fixup *f;
for (f = pnp_fixups; *f->id; f++) {
if (!compare_pnp_id(dev->id, f->id))
continue;
pnp_dbg(&dev->dev, "%s: calling %pS\n", f->id,
f->quirk_function);
f->quirk_function(dev);
}
}
| linux-master | drivers/pnp/quirks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* manager.c - Resource Management, Conflict Resolution, Activation and Disabling of Devices
*
* based on isapnp.c resource management (c) Jaroslav Kysela <[email protected]>
* Copyright 2003 Adam Belay <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pnp.h>
#include <linux/bitmap.h>
#include <linux/mutex.h>
#include "base.h"
DEFINE_MUTEX(pnp_res_mutex);
static struct resource *pnp_find_resource(struct pnp_dev *dev,
unsigned char rule,
unsigned long type,
unsigned int bar)
{
struct resource *res = pnp_get_resource(dev, type, bar);
/* when the resource already exists, set its resource bits from rule */
if (res) {
res->flags &= ~IORESOURCE_BITS;
res->flags |= rule & IORESOURCE_BITS;
}
return res;
}
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
struct resource *res, local_res;
res = pnp_find_resource(dev, rule->flags, IORESOURCE_IO, idx);
if (res) {
pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = 0;
res->end = 0;
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " io %d disabled\n", idx);
goto __add;
}
res->start = rule->min;
res->end = res->start + rule->size - 1;
while (!pnp_check_port(dev, res)) {
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
pnp_dbg(&dev->dev, " couldn't assign io %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
return -EBUSY;
}
}
__add:
pnp_add_io_resource(dev, res->start, res->end, res->flags);
return 0;
}
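/*
 * Example: with min 0x220, size 8, align 0x20, the loop above tries
 * 0x220-0x227, then 0x240-0x247, and so on, until pnp_check_port()
 * accepts a range or rule->max is exceeded.
 */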
static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
{
struct resource *res, local_res;
res = pnp_find_resource(dev, rule->flags, IORESOURCE_MEM, idx);
if (res) {
pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = 0;
res->end = 0;
/* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
res->flags |= IORESOURCE_READONLY;
if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
res->flags |= IORESOURCE_RANGELENGTH;
if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
res->flags |= IORESOURCE_SHADOWABLE;
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " mem %d disabled\n", idx);
goto __add;
}
res->start = rule->min;
res->end = res->start + rule->size - 1;
while (!pnp_check_mem(dev, res)) {
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
pnp_dbg(&dev->dev, " couldn't assign mem %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
return -EBUSY;
}
}
__add:
pnp_add_mem_resource(dev, res->start, res->end, res->flags);
return 0;
}
static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
struct resource *res, local_res;
int i;
/* IRQ priority: this table is good for i386 */
static unsigned short xtab[16] = {
5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
};
res = pnp_find_resource(dev, rule->flags, IORESOURCE_IRQ, idx);
if (res) {
pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = -1;
res->end = -1;
if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " irq %d disabled\n", idx);
goto __add;
}
/* TBD: need check for >16 IRQ */
res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16);
if (res->start < PNP_IRQ_NR) {
res->end = res->start;
goto __add;
}
for (i = 0; i < 16; i++) {
if (test_bit(xtab[i], rule->map.bits)) {
res->start = res->end = xtab[i];
if (pnp_check_irq(dev, res))
goto __add;
}
}
if (rule->flags & IORESOURCE_IRQ_OPTIONAL) {
res->start = -1;
res->end = -1;
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
goto __add;
}
pnp_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
return -EBUSY;
__add:
pnp_add_irq_resource(dev, res->start, res->flags);
return 0;
}
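/*
 * Example: if rule->map allows IRQs 3, 5 and 7, the xtab ordering
 * above means 5 is tried first, then 7, then 3; the first candidate
 * that passes pnp_check_irq() is added to the resource table.
 */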
#ifdef CONFIG_ISA_DMA_API
static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
{
struct resource *res, local_res;
int i;
/* DMA priority: this table is good for i386 */
static unsigned short xtab[8] = {
1, 3, 5, 6, 7, 0, 2, 4
};
res = pnp_find_resource(dev, rule->flags, IORESOURCE_DMA, idx);
if (res) {
pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
res = &local_res;
res->flags = rule->flags | IORESOURCE_AUTO;
res->start = -1;
res->end = -1;
if (!rule->map) {
res->flags |= IORESOURCE_DISABLED;
pnp_dbg(&dev->dev, " dma %d disabled\n", idx);
goto __add;
}
for (i = 0; i < 8; i++) {
if (rule->map & (1 << xtab[i])) {
res->start = res->end = xtab[i];
if (pnp_check_dma(dev, res))
goto __add;
}
}
pnp_dbg(&dev->dev, " couldn't assign dma %d\n", idx);
return -EBUSY;
__add:
pnp_add_dma_resource(dev, res->start, res->flags);
return 0;
}
#endif /* CONFIG_ISA_DMA_API */
void pnp_init_resources(struct pnp_dev *dev)
{
pnp_free_resources(dev);
}
static void pnp_clean_resource_table(struct pnp_dev *dev)
{
struct pnp_resource *pnp_res, *tmp;
list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
if (pnp_res->res.flags & IORESOURCE_AUTO)
pnp_free_resource(pnp_res);
}
}
/**
* pnp_assign_resources - assigns resources to the device based on the specified dependent number
* @dev: pointer to the desired device
* @set: the dependent function number
*/
static int pnp_assign_resources(struct pnp_dev *dev, int set)
{
struct pnp_option *option;
int nport = 0, nmem = 0, nirq = 0;
int ndma __maybe_unused = 0;
int ret = 0;
pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(dev);
list_for_each_entry(option, &dev->options, list) {
if (pnp_option_is_dependent(option) &&
pnp_option_set(option) != set)
continue;
switch (option->type) {
case IORESOURCE_IO:
ret = pnp_assign_port(dev, &option->u.port, nport++);
break;
case IORESOURCE_MEM:
ret = pnp_assign_mem(dev, &option->u.mem, nmem++);
break;
case IORESOURCE_IRQ:
ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
break;
#ifdef CONFIG_ISA_DMA_API
case IORESOURCE_DMA:
ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
break;
#endif
default:
ret = -EINVAL;
break;
}
if (ret < 0)
break;
}
mutex_unlock(&pnp_res_mutex);
if (ret < 0) {
pnp_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
pnp_clean_resource_table(dev);
} else
dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded");
return ret;
}
/**
* pnp_auto_config_dev - automatically assigns resources to a device
* @dev: pointer to the desired device
*/
int pnp_auto_config_dev(struct pnp_dev *dev)
{
int i, ret;
if (!pnp_can_configure(dev)) {
pnp_dbg(&dev->dev, "configuration not supported\n");
return -ENODEV;
}
ret = pnp_assign_resources(dev, 0);
if (ret == 0)
return 0;
for (i = 1; i < dev->num_dependent_sets; i++) {
ret = pnp_assign_resources(dev, i);
if (ret == 0)
return 0;
}
dev_err(&dev->dev, "unable to assign resources\n");
return ret;
}
/**
* pnp_start_dev - low-level start of the PnP device
* @dev: pointer to the desired device
*
* assumes that resources have already been allocated
*/
int pnp_start_dev(struct pnp_dev *dev)
{
if (!pnp_can_write(dev)) {
pnp_dbg(&dev->dev, "activation not supported\n");
return -EINVAL;
}
dbg_pnp_show_resources(dev, "pnp_start_dev");
if (dev->protocol->set(dev) < 0) {
dev_err(&dev->dev, "activation failed\n");
return -EIO;
}
dev_info(&dev->dev, "activated\n");
return 0;
}
EXPORT_SYMBOL(pnp_start_dev);
/**
* pnp_stop_dev - low-level disable of the PnP device
* @dev: pointer to the desired device
*
* does not free resources
*/
int pnp_stop_dev(struct pnp_dev *dev)
{
if (!pnp_can_disable(dev)) {
pnp_dbg(&dev->dev, "disabling not supported\n");
return -EINVAL;
}
if (dev->protocol->disable(dev) < 0) {
dev_err(&dev->dev, "disable failed\n");
return -EIO;
}
dev_info(&dev->dev, "disabled\n");
return 0;
}
EXPORT_SYMBOL(pnp_stop_dev);
/**
* pnp_activate_dev - activates a PnP device for use
* @dev: pointer to the desired device
*
* does not validate or set resources so be careful.
*/
int pnp_activate_dev(struct pnp_dev *dev)
{
int error;
if (dev->active)
return 0;
/* ensure resources are allocated */
if (pnp_auto_config_dev(dev))
return -EBUSY;
error = pnp_start_dev(dev);
if (error)
return error;
dev->active = 1;
return 0;
}
EXPORT_SYMBOL(pnp_activate_dev);
/**
* pnp_disable_dev - disables device
* @dev: pointer to the desired device
*
* inform the correct pnp protocol so that resources can be used by other devices
*/
int pnp_disable_dev(struct pnp_dev *dev)
{
int error;
if (!dev->active)
return 0;
error = pnp_stop_dev(dev);
if (error)
return error;
dev->active = 0;
/* release the resources so that other devices can use them */
mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(dev);
mutex_unlock(&pnp_res_mutex);
return 0;
}
EXPORT_SYMBOL(pnp_disable_dev);
| linux-master | drivers/pnp/manager.c |
// SPDX-License-Identifier: GPL-2.0
/*
* driver.c - device id matching, driver model, etc.
*
* Copyright 2002 Adam Belay <[email protected]>
*/
#include <linux/string.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/pnp.h>
#include "base.h"
static int compare_func(const char *ida, const char *idb)
{
int i;
/* we only need to compare the last 4 chars */
for (i = 3; i < 7; i++) {
if (ida[i] != 'X' &&
idb[i] != 'X' && toupper(ida[i]) != toupper(idb[i]))
return 0;
}
return 1;
}
int compare_pnp_id(struct pnp_id *pos, const char *id)
{
if (!pos || !id || (strlen(id) != 7))
return 0;
if (memcmp(id, "ANYDEVS", 7) == 0)
return 1;
while (pos) {
if (memcmp(pos->id, id, 3) == 0)
if (compare_func(pos->id, id) == 1)
return 1;
pos = pos->next;
}
return 0;
}
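/*
 * Matching examples: comparison of the four product digits is
 * case-insensitive and an 'X' in a driver's id_table entry acts as a
 * wildcard, so "CTL0041" matches "CTL00XX" but not "CTL0051"; the
 * string "ANYDEVS" matches any device.  The three-character vendor
 * part must match byte-for-byte.
 */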
static const struct pnp_device_id *match_device(struct pnp_driver *drv,
struct pnp_dev *dev)
{
const struct pnp_device_id *drv_id = drv->id_table;
if (!drv_id)
return NULL;
while (*drv_id->id) {
if (compare_pnp_id(dev->id, drv_id->id))
return drv_id;
drv_id++;
}
return NULL;
}
int pnp_device_attach(struct pnp_dev *pnp_dev)
{
mutex_lock(&pnp_lock);
if (pnp_dev->status != PNP_READY) {
mutex_unlock(&pnp_lock);
return -EBUSY;
}
pnp_dev->status = PNP_ATTACHED;
mutex_unlock(&pnp_lock);
return 0;
}
EXPORT_SYMBOL(pnp_device_attach);
void pnp_device_detach(struct pnp_dev *pnp_dev)
{
mutex_lock(&pnp_lock);
if (pnp_dev->status == PNP_ATTACHED)
pnp_dev->status = PNP_READY;
mutex_unlock(&pnp_lock);
}
EXPORT_SYMBOL(pnp_device_detach);
static int pnp_device_probe(struct device *dev)
{
int error;
struct pnp_driver *pnp_drv;
struct pnp_dev *pnp_dev;
const struct pnp_device_id *dev_id = NULL;
pnp_dev = to_pnp_dev(dev);
pnp_drv = to_pnp_driver(dev->driver);
error = pnp_device_attach(pnp_dev);
if (error < 0)
return error;
if (pnp_dev->active == 0) {
if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE)) {
error = pnp_activate_dev(pnp_dev);
if (error < 0)
return error;
}
} else if ((pnp_drv->flags & PNP_DRIVER_RES_DISABLE)
== PNP_DRIVER_RES_DISABLE) {
error = pnp_disable_dev(pnp_dev);
if (error < 0)
return error;
}
error = 0;
if (pnp_drv->probe) {
dev_id = match_device(pnp_drv, pnp_dev);
if (dev_id != NULL)
error = pnp_drv->probe(pnp_dev, dev_id);
}
if (error >= 0) {
pnp_dev->driver = pnp_drv;
error = 0;
} else
goto fail;
return error;
fail:
pnp_device_detach(pnp_dev);
return error;
}
static void pnp_device_remove(struct device *dev)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *drv = pnp_dev->driver;
if (drv) {
if (drv->remove)
drv->remove(pnp_dev);
pnp_dev->driver = NULL;
}
if (pnp_dev->active &&
(!drv || !(drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE)))
pnp_disable_dev(pnp_dev);
pnp_device_detach(pnp_dev);
}
static void pnp_device_shutdown(struct device *dev)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *drv = pnp_dev->driver;
if (drv && drv->shutdown)
drv->shutdown(pnp_dev);
}
static int pnp_bus_match(struct device *dev, struct device_driver *drv)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *pnp_drv = to_pnp_driver(drv);
if (match_device(pnp_drv, pnp_dev) == NULL)
return 0;
return 1;
}
static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *pnp_drv = pnp_dev->driver;
int error;
if (!pnp_drv)
return 0;
if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
error = pnp_drv->driver.pm->suspend(dev);
suspend_report_result(dev, pnp_drv->driver.pm->suspend, error);
if (error)
return error;
}
if (pnp_drv->suspend) {
error = pnp_drv->suspend(pnp_dev, state);
if (error)
return error;
}
/* can_write is necessary to be able to re-start the device on resume */
if (pnp_can_disable(pnp_dev) && pnp_can_write(pnp_dev)) {
error = pnp_stop_dev(pnp_dev);
if (error)
return error;
}
if (pnp_can_suspend(pnp_dev))
pnp_dev->protocol->suspend(pnp_dev, state);
return 0;
}
static int pnp_bus_suspend(struct device *dev)
{
return __pnp_bus_suspend(dev, PMSG_SUSPEND);
}
static int pnp_bus_freeze(struct device *dev)
{
return __pnp_bus_suspend(dev, PMSG_FREEZE);
}
static int pnp_bus_poweroff(struct device *dev)
{
return __pnp_bus_suspend(dev, PMSG_HIBERNATE);
}
static int pnp_bus_resume(struct device *dev)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *pnp_drv = pnp_dev->driver;
int error;
if (!pnp_drv)
return 0;
if (pnp_dev->protocol->resume) {
error = pnp_dev->protocol->resume(pnp_dev);
if (error)
return error;
}
if (pnp_can_write(pnp_dev)) {
error = pnp_start_dev(pnp_dev);
if (error)
return error;
}
if (pnp_drv->driver.pm && pnp_drv->driver.pm->resume) {
error = pnp_drv->driver.pm->resume(dev);
if (error)
return error;
}
if (pnp_drv->resume) {
error = pnp_drv->resume(pnp_dev);
if (error)
return error;
}
return 0;
}
static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
/* Suspend callbacks */
.suspend = pnp_bus_suspend,
.resume = pnp_bus_resume,
/* Hibernate callbacks */
.freeze = pnp_bus_freeze,
.thaw = pnp_bus_resume,
.poweroff = pnp_bus_poweroff,
.restore = pnp_bus_resume,
};
struct bus_type pnp_bus_type = {
.name = "pnp",
.match = pnp_bus_match,
.probe = pnp_device_probe,
.remove = pnp_device_remove,
.shutdown = pnp_device_shutdown,
.pm = &pnp_bus_dev_pm_ops,
.dev_groups = pnp_dev_groups,
};
int pnp_register_driver(struct pnp_driver *drv)
{
drv->driver.name = drv->name;
drv->driver.bus = &pnp_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(pnp_register_driver);
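/*
 * Illustrative sketch (hypothetical names and ID, not part of this
 * file): the minimal shape of a driver built on this interface.
 */
#if 0
static const struct pnp_device_id example_ids[] = {
	{ .id = "ABC1234", .driver_data = 0 },	/* hypothetical ID */
	{ .id = "" }
};

static struct pnp_driver example_pnp_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,	/* hypothetical callbacks */
	.remove   = example_remove,
};

module_pnp_driver(example_pnp_driver);
#endif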
void pnp_unregister_driver(struct pnp_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(pnp_unregister_driver);
/**
* pnp_add_id - adds an EISA id to the specified device
* @dev: pointer to the desired device
* @id: pointer to an EISA id string
*/
struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id)
{
struct pnp_id *dev_id, *ptr;
dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
if (!dev_id)
return NULL;
dev_id->id[0] = id[0];
dev_id->id[1] = id[1];
dev_id->id[2] = id[2];
dev_id->id[3] = tolower(id[3]);
dev_id->id[4] = tolower(id[4]);
dev_id->id[5] = tolower(id[5]);
dev_id->id[6] = tolower(id[6]);
dev_id->id[7] = '\0';
dev_id->next = NULL;
ptr = dev->id;
while (ptr && ptr->next)
ptr = ptr->next;
if (ptr)
ptr->next = dev_id;
else
dev->id = dev_id;
return dev_id;
}
| linux-master | drivers/pnp/driver.c |
// SPDX-License-Identifier: GPL-2.0
/*
* compat.c - A series of functions to make it easier to convert drivers that use
* the old isapnp APIs. If possible use the new APIs instead.
*
* Copyright 2002 Adam Belay <[email protected]>
*/
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/string.h>
static void pnp_convert_id(char *buf, unsigned short vendor,
unsigned short device)
{
sprintf(buf, "%c%c%c%x%x%x%x",
'A' + ((vendor >> 2) & 0x3f) - 1,
'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
'A' + ((vendor >> 8) & 0x1f) - 1,
(device >> 4) & 0x0f, device & 0x0f,
(device >> 12) & 0x0f, (device >> 8) & 0x0f);
}
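/*
 * Worked example (arithmetic checked against the shifts above): the
 * ISAPnP encoding packs 'C','T','L' into vendor 0x8c0e and stores the
 * function number byte-swapped, so
 *
 *	pnp_convert_id(id, 0x8c0e, 0x4100);
 *
 * produces "CTL0041".
 */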
struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
unsigned short function, struct pnp_dev *from)
{
char id[8];
char any[8];
pnp_convert_id(id, vendor, function);
pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
if (card == NULL) { /* look for a logical device from all cards */
struct list_head *list;
list = pnp_global.next;
if (from)
list = from->global_list.next;
while (list != &pnp_global) {
struct pnp_dev *dev = global_to_pnp_dev(list);
if (compare_pnp_id(dev->id, id) ||
(memcmp(id, any, 7) == 0))
return dev;
list = list->next;
}
} else {
struct list_head *list;
list = card->devices.next;
if (from) {
list = from->card_list.next;
if (from->card != card) /* something is wrong */
return NULL;
}
while (list != &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(list);
if (compare_pnp_id(dev->id, id))
return dev;
list = list->next;
}
}
return NULL;
}
EXPORT_SYMBOL(pnp_find_dev);
| linux-master | drivers/pnp/isapnp/compat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ISA Plug & Play support
* Copyright (c) by Jaroslav Kysela <[email protected]>
*
* Changelog:
* 2000-01-01 Added quirks handling for buggy hardware
* Peter Denison <[email protected]>
* 2000-06-14 Added isapnp_probe_devs() and isapnp_activate_dev()
* Christoph Hellwig <[email protected]>
* 2001-06-03 Added release_region calls to correspond with
* request_region calls when a failure occurs. Also
* added KERN_* constants to printk() calls.
* 2001-11-07 Added isapnp_{,un}register_driver calls along the lines
* of the pci driver interface
* Kai Germaschewski <[email protected]>
* 2002-06-06 Made the use of dma channel 0 configurable
* Gerald Teschl <[email protected]>
* 2002-10-06 Ported to PnP Layer - Adam Belay <[email protected]>
* 2003-08-11 Resource Management Updates - Adam Belay <[email protected]>
*/
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/isapnp.h>
#include <linux/mutex.h>
#include <asm/io.h>
#include "../base.h"
#if 0
#define ISAPNP_REGION_OK
#endif
int isapnp_disable; /* Disable ISA PnP */
static int isapnp_rdp; /* Read Data Port */
static int isapnp_reset = 1; /* reset all PnP cards (deactivate) */
static int isapnp_verbose = 1; /* verbose mode */
module_param(isapnp_disable, int, 0);
MODULE_PARM_DESC(isapnp_disable, "ISA Plug & Play disable");
module_param(isapnp_rdp, int, 0);
MODULE_PARM_DESC(isapnp_rdp, "ISA Plug & Play read data port");
module_param(isapnp_reset, int, 0);
MODULE_PARM_DESC(isapnp_reset, "ISA Plug & Play reset all cards");
module_param(isapnp_verbose, int, 0);
MODULE_PARM_DESC(isapnp_verbose, "ISA Plug & Play verbose mode");
#define _PIDXR 0x279
#define _PNPWRP 0xa79
/* short tags */
#define _STAG_PNPVERNO 0x01
#define _STAG_LOGDEVID 0x02
#define _STAG_COMPATDEVID 0x03
#define _STAG_IRQ 0x04
#define _STAG_DMA 0x05
#define _STAG_STARTDEP 0x06
#define _STAG_ENDDEP 0x07
#define _STAG_IOPORT 0x08
#define _STAG_FIXEDIO 0x09
#define _STAG_VENDOR 0x0e
#define _STAG_END 0x0f
/* long tags */
#define _LTAG_MEMRANGE 0x81
#define _LTAG_ANSISTR 0x82
#define _LTAG_UNICODESTR 0x83
#define _LTAG_VENDOR 0x84
#define _LTAG_MEM32RANGE 0x85
#define _LTAG_FIXEDMEM32RANGE 0x86
/* Logical device control and configuration registers */
#define ISAPNP_CFG_ACTIVATE 0x30 /* byte */
#define ISAPNP_CFG_MEM 0x40 /* 4 * dword */
#define ISAPNP_CFG_PORT 0x60 /* 8 * word */
#define ISAPNP_CFG_IRQ 0x70 /* 2 * word */
#define ISAPNP_CFG_DMA 0x74 /* 2 * byte */
/*
* Sizes of ISAPNP logical device configuration register sets.
* See PNP-ISA-v1.0a.pdf, Appendix A.
*/
#define ISAPNP_MAX_MEM 4
#define ISAPNP_MAX_PORT 8
#define ISAPNP_MAX_IRQ 2
#define ISAPNP_MAX_DMA 2
static unsigned char isapnp_checksum_value;
static DEFINE_MUTEX(isapnp_cfg_mutex);
static int isapnp_csn_count;
/* low-level register access helpers */
static inline void write_data(unsigned char x)
{
outb(x, _PNPWRP);
}
static inline void write_address(unsigned char x)
{
outb(x, _PIDXR);
udelay(20);
}
static inline unsigned char read_data(void)
{
unsigned char val = inb(isapnp_rdp);
return val;
}
unsigned char isapnp_read_byte(unsigned char idx)
{
write_address(idx);
return read_data();
}
static unsigned short isapnp_read_word(unsigned char idx)
{
unsigned short val;
val = isapnp_read_byte(idx);
val = (val << 8) + isapnp_read_byte(idx + 1);
return val;
}
void isapnp_write_byte(unsigned char idx, unsigned char val)
{
write_address(idx);
write_data(val);
}
static void isapnp_write_word(unsigned char idx, unsigned short val)
{
isapnp_write_byte(idx, val >> 8);
isapnp_write_byte(idx + 1, val);
}
static void isapnp_key(void)
{
unsigned char code = 0x6a, msb;
int i;
mdelay(1);
write_address(0x00);
write_address(0x00);
write_address(code);
for (i = 1; i < 32; i++) {
msb = ((code & 0x01) ^ ((code & 0x02) >> 1)) << 7;
code = (code >> 1) | msb;
write_address(code);
}
}
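/*
 * The loop above is the ISAPnP initiation-key LFSR: starting from
 * 0x6a, each step shifts right and feeds bit 0 XOR bit 1 back into
 * the top bit, generating the well-known 32-byte key sequence
 * 0x6a, 0xb5, 0xda, 0xed, 0xf6, ... that moves cards out of
 * wait-for-key state.
 */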
/* place all pnp cards in wait-for-key state */
static void isapnp_wait(void)
{
isapnp_write_byte(0x02, 0x02);
}
static void isapnp_wake(unsigned char csn)
{
isapnp_write_byte(0x03, csn);
}
static void isapnp_device(unsigned char logdev)
{
isapnp_write_byte(0x07, logdev);
}
static void isapnp_activate(unsigned char logdev)
{
isapnp_device(logdev);
isapnp_write_byte(ISAPNP_CFG_ACTIVATE, 1);
udelay(250);
}
static void isapnp_deactivate(unsigned char logdev)
{
isapnp_device(logdev);
isapnp_write_byte(ISAPNP_CFG_ACTIVATE, 0);
udelay(500);
}
static void __init isapnp_peek(unsigned char *data, int bytes)
{
int i, j;
unsigned char d = 0;
for (i = 1; i <= bytes; i++) {
for (j = 0; j < 20; j++) {
d = isapnp_read_byte(0x05);
if (d & 1)
break;
udelay(100);
}
if (!(d & 1)) {
if (data != NULL)
*data++ = 0xff;
continue;
}
d = isapnp_read_byte(0x04); /* PRESDI */
isapnp_checksum_value += d;
if (data != NULL)
*data++ = d;
}
}
#define RDP_STEP 32 /* minimum is 4 */
static int isapnp_next_rdp(void)
{
int rdp = isapnp_rdp;
static int old_rdp = 0;
if (old_rdp) {
release_region(old_rdp, 1);
old_rdp = 0;
}
while (rdp <= 0x3ff) {
/*
* We cannot use NE2000 probe spaces for ISAPnP or we
* will lock up machines.
*/
if ((rdp < 0x280 || rdp > 0x380)
&& request_region(rdp, 1, "ISAPnP")) {
isapnp_rdp = rdp;
old_rdp = rdp;
return 0;
}
rdp += RDP_STEP;
}
return -1;
}
/* Set read port address */
static inline void isapnp_set_rdp(void)
{
isapnp_write_byte(0x00, isapnp_rdp >> 2);
udelay(100);
}
/*
* Perform an isolation. The port selection code now tries to avoid
* "dangerous to read" ports.
*/
static int __init isapnp_isolate_rdp_select(void)
{
isapnp_wait();
isapnp_key();
/* Control: reset CSN and conditionally everything else too */
isapnp_write_byte(0x02, isapnp_reset ? 0x05 : 0x04);
mdelay(2);
isapnp_wait();
isapnp_key();
isapnp_wake(0x00);
if (isapnp_next_rdp() < 0) {
isapnp_wait();
return -1;
}
isapnp_set_rdp();
udelay(1000);
write_address(0x01);
udelay(1000);
return 0;
}
/*
 * Isolate (assign a unique CSN to) each ISA PnP device.
 */
static int __init isapnp_isolate(void)
{
unsigned char checksum = 0x6a;
unsigned char chksum = 0x00;
unsigned char bit = 0x00;
int data;
int csn = 0;
int i;
int iteration = 1;
isapnp_rdp = 0x213;
if (isapnp_isolate_rdp_select() < 0)
return -1;
while (1) {
for (i = 1; i <= 64; i++) {
data = read_data() << 8;
udelay(250);
data = data | read_data();
udelay(250);
if (data == 0x55aa)
bit = 0x01;
checksum =
((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
| (checksum >> 1);
bit = 0x00;
}
for (i = 65; i <= 72; i++) {
data = read_data() << 8;
udelay(250);
data = data | read_data();
udelay(250);
if (data == 0x55aa)
chksum |= (1 << (i - 65));
}
if (checksum != 0x00 && checksum == chksum) {
csn++;
isapnp_write_byte(0x06, csn);
udelay(250);
iteration++;
isapnp_wake(0x00);
isapnp_set_rdp();
udelay(1000);
write_address(0x01);
udelay(1000);
goto __next;
}
if (iteration == 1) {
isapnp_rdp += RDP_STEP;
if (isapnp_isolate_rdp_select() < 0)
return -1;
} else if (iteration > 1) {
break;
}
__next:
if (csn == 255)
break;
checksum = 0x6a;
chksum = 0x00;
bit = 0x00;
}
isapnp_wait();
isapnp_csn_count = csn;
return csn;
}
/*
* Read one tag from stream.
*/
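/*
 * Tag byte layout, per the ISA PnP resource data format: small items
 * have bit 7 clear, the item type in bits 6:3 and the body length in
 * bits 2:0; large items have bit 7 set, use the whole byte as the type,
 * and are followed by a 16-bit little-endian length.
 */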
static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
{
unsigned char tag, tmp[2];
isapnp_peek(&tag, 1);
if (tag == 0) /* invalid tag */
return -1;
if (tag & 0x80) { /* large item */
*type = tag;
isapnp_peek(tmp, 2);
*size = (tmp[1] << 8) | tmp[0];
} else {
*type = (tag >> 3) & 0x0f;
*size = tag & 0x07;
}
if (*type == 0xff && *size == 0xffff) /* probably invalid data */
return -1;
return 0;
}
/*
* Skip specified number of bytes from stream.
*/
static void __init isapnp_skip_bytes(int count)
{
isapnp_peek(NULL, count);
}
/*
* Parse logical device tag.
*/
static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
int size, int number)
{
unsigned char tmp[6];
struct pnp_dev *dev;
u32 eisa_id;
char id[8];
isapnp_peek(tmp, size);
eisa_id = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
dev = pnp_alloc_dev(&isapnp_protocol, number, id);
if (!dev)
return NULL;
dev->card = card;
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
dev->capabilities |= PNP_WRITE;
dev->capabilities |= PNP_DISABLE;
pnp_init_resources(dev);
return dev;
}
/*
* Add IRQ resource to resources list.
*/
static void __init isapnp_parse_irq_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[3];
unsigned long bits;
pnp_irq_mask_t map;
unsigned char flags = IORESOURCE_IRQ_HIGHEDGE;
isapnp_peek(tmp, size);
bits = (tmp[1] << 8) | tmp[0];
bitmap_zero(map.bits, PNP_IRQ_NR);
bitmap_copy(map.bits, &bits, 16);
if (size > 2)
flags = tmp[2];
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
/*
* Add DMA resource to resources list.
*/
static void __init isapnp_parse_dma_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[2];
isapnp_peek(tmp, size);
pnp_register_dma_resource(dev, option_flags, tmp[0], tmp[1]);
}
/*
* Add port resource to resources list.
*/
static void __init isapnp_parse_port_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[7];
resource_size_t min, max, align, len;
unsigned char flags;
isapnp_peek(tmp, size);
min = (tmp[2] << 8) | tmp[1];
max = (tmp[4] << 8) | tmp[3];
align = tmp[5];
len = tmp[6];
flags = tmp[0] ? IORESOURCE_IO_16BIT_ADDR : 0;
pnp_register_port_resource(dev, option_flags,
min, max, align, len, flags);
}
/*
* Add fixed port resource to resources list.
*/
static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[3];
resource_size_t base, len;
isapnp_peek(tmp, size);
base = (tmp[1] << 8) | tmp[0];
len = tmp[2];
pnp_register_port_resource(dev, option_flags, base, base, 0, len,
IORESOURCE_IO_FIXED);
}
/*
* Add memory resource to resources list.
*/
static void __init isapnp_parse_mem_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[9];
resource_size_t min, max, align, len;
unsigned char flags;
isapnp_peek(tmp, size);
min = ((tmp[2] << 8) | tmp[1]) << 8;
max = ((tmp[4] << 8) | tmp[3]) << 8;
align = (tmp[6] << 8) | tmp[5];
len = ((tmp[8] << 8) | tmp[7]) << 8;
flags = tmp[0];
pnp_register_mem_resource(dev, option_flags,
min, max, align, len, flags);
}
/*
* Add 32-bit memory resource to resources list.
*/
static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[17];
resource_size_t min, max, align, len;
unsigned char flags;
isapnp_peek(tmp, size);
min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9];
len = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
flags = tmp[0];
pnp_register_mem_resource(dev, option_flags,
min, max, align, len, flags);
}
/*
* Add 32-bit fixed memory resource to resources list.
*/
static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev,
unsigned int option_flags,
int size)
{
unsigned char tmp[9];
resource_size_t base, len;
unsigned char flags;
isapnp_peek(tmp, size);
base = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
len = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
flags = tmp[0];
pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags);
}
/*
* Parse card name for ISA PnP device.
*/
static void __init
isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size)
{
if (name[0] == '\0') {
unsigned short size1 =
*size >= name_max ? (name_max - 1) : *size;
isapnp_peek(name, size1);
name[size1] = '\0';
*size -= size1;
/* clean whitespace from end of string */
while (size1 > 0 && name[--size1] == ' ')
name[size1] = '\0';
}
}
/*
* Parse resource map for logical device.
*/
static int __init isapnp_create_device(struct pnp_card *card,
unsigned short size)
{
int number = 0, skip = 0, priority, compat = 0;
unsigned char type, tmp[17];
unsigned int option_flags;
struct pnp_dev *dev;
u32 eisa_id;
char id[8];
if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
return 1;
option_flags = 0;
pnp_add_card_device(card, dev);
while (1) {
if (isapnp_read_tag(&type, &size) < 0)
return 1;
if (skip && type != _STAG_LOGDEVID && type != _STAG_END)
goto __skip;
switch (type) {
case _STAG_LOGDEVID:
if (size >= 5 && size <= 6) {
if ((dev =
isapnp_parse_device(card, size,
number++)) == NULL)
return 1;
size = 0;
skip = 0;
option_flags = 0;
pnp_add_card_device(card, dev);
} else {
skip = 1;
}
compat = 0;
break;
case _STAG_COMPATDEVID:
if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) {
isapnp_peek(tmp, 4);
eisa_id = tmp[0] | tmp[1] << 8 |
tmp[2] << 16 | tmp[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
pnp_add_id(dev, id);
compat++;
size = 0;
}
break;
case _STAG_IRQ:
if (size < 2 || size > 3)
goto __skip;
isapnp_parse_irq_resource(dev, option_flags, size);
size = 0;
break;
case _STAG_DMA:
if (size != 2)
goto __skip;
isapnp_parse_dma_resource(dev, option_flags, size);
size = 0;
break;
case _STAG_STARTDEP:
if (size > 1)
goto __skip;
priority = PNP_RES_PRIORITY_ACCEPTABLE;
if (size > 0) {
isapnp_peek(tmp, size);
priority = tmp[0];
size = 0;
}
option_flags = pnp_new_dependent_set(dev, priority);
break;
case _STAG_ENDDEP:
if (size != 0)
goto __skip;
option_flags = 0;
break;
case _STAG_IOPORT:
if (size != 7)
goto __skip;
isapnp_parse_port_resource(dev, option_flags, size);
size = 0;
break;
case _STAG_FIXEDIO:
if (size != 3)
goto __skip;
isapnp_parse_fixed_port_resource(dev, option_flags,
size);
size = 0;
break;
case _STAG_VENDOR:
break;
case _LTAG_MEMRANGE:
if (size != 9)
goto __skip;
isapnp_parse_mem_resource(dev, option_flags, size);
size = 0;
break;
case _LTAG_ANSISTR:
isapnp_parse_name(dev->name, sizeof(dev->name), &size);
break;
case _LTAG_UNICODESTR:
			/* silently ignore */
			/* who uses Unicode for hardware identification? */
break;
case _LTAG_VENDOR:
break;
case _LTAG_MEM32RANGE:
if (size != 17)
goto __skip;
isapnp_parse_mem32_resource(dev, option_flags, size);
size = 0;
break;
case _LTAG_FIXEDMEM32RANGE:
if (size != 9)
goto __skip;
isapnp_parse_fixed_mem32_resource(dev, option_flags,
size);
size = 0;
break;
case _STAG_END:
if (size > 0)
isapnp_skip_bytes(size);
return 1;
default:
dev_err(&dev->dev, "unknown tag %#x (card %i), "
"ignored\n", type, card->number);
}
__skip:
if (size > 0)
isapnp_skip_bytes(size);
}
return 0;
}
/*
* Parse resource map for ISA PnP card.
*/
static void __init isapnp_parse_resource_map(struct pnp_card *card)
{
unsigned char type, tmp[17];
unsigned short size;
while (1) {
if (isapnp_read_tag(&type, &size) < 0)
return;
switch (type) {
case _STAG_PNPVERNO:
if (size != 2)
goto __skip;
isapnp_peek(tmp, 2);
card->pnpver = tmp[0];
card->productver = tmp[1];
size = 0;
break;
case _STAG_LOGDEVID:
if (size >= 5 && size <= 6) {
if (isapnp_create_device(card, size) == 1)
return;
size = 0;
}
break;
case _STAG_VENDOR:
break;
case _LTAG_ANSISTR:
isapnp_parse_name(card->name, sizeof(card->name),
&size);
break;
case _LTAG_UNICODESTR:
			/* silently ignore */
			/* who uses Unicode for hardware identification? */
break;
case _LTAG_VENDOR:
break;
case _STAG_END:
if (size > 0)
isapnp_skip_bytes(size);
return;
default:
dev_err(&card->dev, "unknown tag %#x, ignored\n",
type);
}
__skip:
if (size > 0)
isapnp_skip_bytes(size);
}
}
/*
* Build device list for all present ISA PnP devices.
*/
static int __init isapnp_build_device_list(void)
{
int csn;
unsigned char header[9];
struct pnp_card *card;
u32 eisa_id;
char id[8];
isapnp_wait();
isapnp_key();
for (csn = 1; csn <= isapnp_csn_count; csn++) {
isapnp_wake(csn);
isapnp_peek(header, 9);
eisa_id = header[0] | header[1] << 8 |
header[2] << 16 | header[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
card = pnp_alloc_card(&isapnp_protocol, csn, id);
if (!card)
continue;
INIT_LIST_HEAD(&card->devices);
card->serial =
(header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
header[4];
isapnp_checksum_value = 0x00;
isapnp_parse_resource_map(card);
if (isapnp_checksum_value != 0x00)
dev_err(&card->dev, "invalid checksum %#x\n",
isapnp_checksum_value);
card->checksum = isapnp_checksum_value;
pnp_add_card(card);
}
isapnp_wait();
return 0;
}
/*
* Basic configuration routines.
*/
int isapnp_present(void)
{
struct pnp_card *card;
pnp_for_each_card(card) {
if (card->protocol == &isapnp_protocol)
return 1;
}
return 0;
}
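/*
 * isapnp_cfg_begin()/isapnp_cfg_end() bracket every configuration
 * register access: begin takes the global config mutex, replays the
 * initiation key and wakes the target CSN; end puts all cards back into
 * wait-for-key state and drops the mutex.
 */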
int isapnp_cfg_begin(int csn, int logdev)
{
if (csn < 1 || csn > isapnp_csn_count || logdev > 10)
return -EINVAL;
mutex_lock(&isapnp_cfg_mutex);
isapnp_wait();
isapnp_key();
isapnp_wake(csn);
#if 0
/* to avoid malfunction when the isapnptools package is used */
/* we must set RDP to our value again */
/* it is possible to set RDP only in the isolation phase */
/* Jens Thoms Toerring <[email protected]> */
isapnp_write_byte(0x02, 0x04); /* clear CSN of card */
mdelay(2); /* is this necessary? */
isapnp_wake(csn); /* bring card into sleep state */
isapnp_wake(0); /* bring card into isolation state */
isapnp_set_rdp(); /* reset the RDP port */
udelay(1000); /* delay 1000us */
isapnp_write_byte(0x06, csn); /* reset CSN to previous value */
udelay(250); /* is this necessary? */
#endif
if (logdev >= 0)
isapnp_device(logdev);
return 0;
}
int isapnp_cfg_end(void)
{
isapnp_wait();
mutex_unlock(&isapnp_cfg_mutex);
return 0;
}
/*
* Initialization.
*/
EXPORT_SYMBOL(isapnp_protocol);
EXPORT_SYMBOL(isapnp_present);
EXPORT_SYMBOL(isapnp_cfg_begin);
EXPORT_SYMBOL(isapnp_cfg_end);
EXPORT_SYMBOL(isapnp_write_byte);
static int isapnp_get_resources(struct pnp_dev *dev)
{
int i, ret;
pnp_dbg(&dev->dev, "get resources\n");
pnp_init_resources(dev);
isapnp_cfg_begin(dev->card->number, dev->number);
dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE);
if (!dev->active)
goto __end;
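	/*
	 * Per the ISA PnP register map, descriptors sit at fixed strides
	 * in the logical device's config space: I/O base registers are 2
	 * bytes apart, memory descriptors 8 bytes apart (the base is kept
	 * in 256-byte units, hence the << 8), IRQ selects 2 bytes apart
	 * and DMA selects 1 byte apart.  A DMA select of 4 means "no DMA
	 * assigned".
	 */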
for (i = 0; i < ISAPNP_MAX_PORT; i++) {
ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1));
pnp_add_io_resource(dev, ret, ret,
ret == 0 ? IORESOURCE_DISABLED : 0);
}
for (i = 0; i < ISAPNP_MAX_MEM; i++) {
ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8;
pnp_add_mem_resource(dev, ret, ret,
ret == 0 ? IORESOURCE_DISABLED : 0);
}
for (i = 0; i < ISAPNP_MAX_IRQ; i++) {
ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8;
pnp_add_irq_resource(dev, ret,
ret == 0 ? IORESOURCE_DISABLED : 0);
}
for (i = 0; i < ISAPNP_MAX_DMA; i++) {
ret = isapnp_read_byte(ISAPNP_CFG_DMA + i);
pnp_add_dma_resource(dev, ret,
ret == 4 ? IORESOURCE_DISABLED : 0);
}
__end:
isapnp_cfg_end();
return 0;
}
static int isapnp_set_resources(struct pnp_dev *dev)
{
struct resource *res;
int tmp;
pnp_dbg(&dev->dev, "set resources\n");
isapnp_cfg_begin(dev->card->number, dev->number);
dev->active = 1;
for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_IO, tmp);
if (pnp_resource_enabled(res)) {
pnp_dbg(&dev->dev, " set io %d to %#llx\n",
tmp, (unsigned long long) res->start);
isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
res->start);
}
}
for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_IRQ, tmp);
if (pnp_resource_enabled(res)) {
int irq = res->start;
if (irq == 2)
irq = 9;
pnp_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq);
isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
}
}
for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_DMA, tmp);
if (pnp_resource_enabled(res)) {
pnp_dbg(&dev->dev, " set dma %d to %lld\n",
tmp, (unsigned long long) res->start);
isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start);
}
}
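	/*
	 * Memory base registers hold address bits 23:8 (256-byte
	 * granularity), so only (start >> 8) is written below.
	 */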
for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_MEM, tmp);
if (pnp_resource_enabled(res)) {
pnp_dbg(&dev->dev, " set mem %d to %#llx\n",
tmp, (unsigned long long) res->start);
isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
(res->start >> 8) & 0xffff);
}
}
/* FIXME: We aren't handling 32bit mems properly here */
isapnp_activate(dev->number);
isapnp_cfg_end();
return 0;
}
static int isapnp_disable_resources(struct pnp_dev *dev)
{
if (!dev->active)
return -EINVAL;
isapnp_cfg_begin(dev->card->number, dev->number);
isapnp_deactivate(dev->number);
dev->active = 0;
isapnp_cfg_end();
return 0;
}
struct pnp_protocol isapnp_protocol = {
.name = "ISA Plug and Play",
.get = isapnp_get_resources,
.set = isapnp_set_resources,
.disable = isapnp_disable_resources,
};
static int __init isapnp_init(void)
{
int cards;
struct pnp_card *card;
struct pnp_dev *dev;
if (isapnp_disable) {
printk(KERN_INFO "isapnp: ISA Plug & Play support disabled\n");
return 0;
}
#ifdef CONFIG_PPC
if (check_legacy_ioport(_PIDXR) || check_legacy_ioport(_PNPWRP))
return -EINVAL;
#endif
#ifdef ISAPNP_REGION_OK
if (!request_region(_PIDXR, 1, "isapnp index")) {
printk(KERN_ERR "isapnp: Index Register 0x%x already used\n",
_PIDXR);
return -EBUSY;
}
#endif
if (!request_region(_PNPWRP, 1, "isapnp write")) {
printk(KERN_ERR
"isapnp: Write Data Register 0x%x already used\n",
_PNPWRP);
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
return -EBUSY;
}
if (pnp_register_protocol(&isapnp_protocol) < 0)
return -EBUSY;
/*
* Print a message. The existing ISAPnP code is hanging machines
* so let the user know where.
*/
printk(KERN_INFO "isapnp: Scanning for PnP cards...\n");
if (isapnp_rdp >= 0x203 && isapnp_rdp <= 0x3ff) {
isapnp_rdp |= 3;
if (!request_region(isapnp_rdp, 1, "isapnp read")) {
printk(KERN_ERR
"isapnp: Read Data Register 0x%x already used\n",
isapnp_rdp);
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
release_region(_PNPWRP, 1);
return -EBUSY;
}
isapnp_set_rdp();
}
if (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff) {
cards = isapnp_isolate();
if (cards < 0 || (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff)) {
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
release_region(_PNPWRP, 1);
printk(KERN_INFO
"isapnp: No Plug & Play device found\n");
return 0;
}
request_region(isapnp_rdp, 1, "isapnp read");
}
isapnp_build_device_list();
cards = 0;
protocol_for_each_card(&isapnp_protocol, card) {
cards++;
if (isapnp_verbose) {
dev_info(&card->dev, "card '%s'\n",
card->name[0] ? card->name : "unknown");
if (isapnp_verbose < 2)
continue;
card_for_each_dev(card, dev) {
dev_info(&card->dev, "device '%s'\n",
dev->name[0] ? dev->name : "unknown");
}
}
}
if (cards)
printk(KERN_INFO
"isapnp: %i Plug & Play card%s detected total\n", cards,
cards > 1 ? "s" : "");
else
printk(KERN_INFO "isapnp: No Plug & Play card found\n");
isapnp_proc_init();
return 0;
}
device_initcall(isapnp_init);
/* format is: noisapnp */
static int __init isapnp_setup_disable(char *str)
{
isapnp_disable = 1;
return 1;
}
__setup("noisapnp", isapnp_setup_disable);
/* format is: isapnp=rdp,reset,skip_pci_scan,verbose */
static int __init isapnp_setup_isapnp(char *str)
{
(void)((get_option(&str, &isapnp_rdp) == 2) &&
(get_option(&str, &isapnp_reset) == 2) &&
(get_option(&str, &isapnp_verbose) == 2));
return 1;
}
__setup("isapnp=", isapnp_setup_isapnp);
| linux-master | drivers/pnp/isapnp/core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ISA Plug & Play support
* Copyright (c) by Jaroslav Kysela <[email protected]>
*/
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/uaccess.h>
extern struct pnp_protocol isapnp_protocol;
static struct proc_dir_entry *isapnp_proc_bus_dir = NULL;
static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
{
return fixed_size_llseek(file, off, whence, 256);
}
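/*
 * Each ISAPnP logical device exposes a 256-byte configuration register
 * space, so the proc file is a fixed 256-byte window onto it.
 */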
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
struct pnp_dev *dev = pde_data(file_inode(file));
int pos = *ppos;
int cnt, size = 256;
if (pos >= size)
return 0;
if (nbytes >= size)
nbytes = size;
if (pos + nbytes > size)
nbytes = size - pos;
cnt = nbytes;
if (!access_ok(buf, cnt))
return -EINVAL;
isapnp_cfg_begin(dev->card->number, dev->number);
for (; pos < 256 && cnt > 0; pos++, buf++, cnt--) {
unsigned char val;
val = isapnp_read_byte(pos);
__put_user(val, buf);
}
isapnp_cfg_end();
*ppos = pos;
return nbytes;
}
static const struct proc_ops isapnp_proc_bus_proc_ops = {
.proc_lseek = isapnp_proc_bus_lseek,
.proc_read = isapnp_proc_bus_read,
};
static int isapnp_proc_attach_device(struct pnp_dev *dev)
{
struct pnp_card *bus = dev->card;
char name[16];
if (!bus->procdir) {
sprintf(name, "%02x", bus->number);
bus->procdir = proc_mkdir(name, isapnp_proc_bus_dir);
if (!bus->procdir)
return -ENOMEM;
}
sprintf(name, "%02x", dev->number);
dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, bus->procdir,
&isapnp_proc_bus_proc_ops, dev);
if (!dev->procent)
return -ENOMEM;
proc_set_size(dev->procent, 256);
return 0;
}
int __init isapnp_proc_init(void)
{
struct pnp_dev *dev;
isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL);
protocol_for_each_dev(&isapnp_protocol, dev) {
isapnp_proc_attach_device(dev);
}
return 0;
}
| linux-master | drivers/pnp/isapnp/proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* rsparser.c - parses and encodes pnpbios resource data streams
*/
#include <linux/ctype.h>
#include <linux/pnp.h>
#include <linux/string.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#else
inline void pcibios_penalize_isa_irq(int irq, int active)
{
}
#endif /* CONFIG_PCI */
#include "../base.h"
#include "pnpbios.h"
/* standard resource tags */
#define SMALL_TAG_PNPVERNO 0x01
#define SMALL_TAG_LOGDEVID 0x02
#define SMALL_TAG_COMPATDEVID 0x03
#define SMALL_TAG_IRQ 0x04
#define SMALL_TAG_DMA 0x05
#define SMALL_TAG_STARTDEP 0x06
#define SMALL_TAG_ENDDEP 0x07
#define SMALL_TAG_PORT 0x08
#define SMALL_TAG_FIXEDPORT 0x09
#define SMALL_TAG_VENDOR 0x0e
#define SMALL_TAG_END 0x0f
#define LARGE_TAG 0x80
#define LARGE_TAG_MEM 0x81
#define LARGE_TAG_ANSISTR 0x82
#define LARGE_TAG_UNICODESTR 0x83
#define LARGE_TAG_VENDOR 0x84
#define LARGE_TAG_MEM32 0x85
#define LARGE_TAG_FIXEDMEM32 0x86
/*
* Resource Data Stream Format:
*
* Allocated Resources (required)
* end tag ->
* Resource Configuration Options (optional)
* end tag ->
 * Compatible Device IDs (optional)
* final end tag ->
*/
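/*
 * Worked example of the tag encoding parsed below: a tag byte of 0x47
 * is a small item (bit 7 clear) of type (0x47 >> 3) & 0x0f = 0x08, an
 * I/O port descriptor, with a (0x47 & 0x07) = 7 byte body; 0x81 is a
 * large item announcing a memory range descriptor whose 16-bit length
 * follows in the next two bytes.
 */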
/*
* Allocated Resources
*/
static void pnpbios_parse_allocated_ioresource(struct pnp_dev *dev,
int start, int len)
{
int flags = 0;
int end = start + len - 1;
if (len <= 0 || end >= 0x10003)
flags |= IORESOURCE_DISABLED;
pnp_add_io_resource(dev, start, end, flags);
}
static void pnpbios_parse_allocated_memresource(struct pnp_dev *dev,
int start, int len)
{
int flags = 0;
int end = start + len - 1;
if (len <= 0)
flags |= IORESOURCE_DISABLED;
pnp_add_mem_resource(dev, start, end, flags);
}
static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev,
unsigned char *p,
unsigned char *end)
{
unsigned int len, tag;
int io, size, mask, i, flags;
if (!p)
return NULL;
pnp_dbg(&dev->dev, "parse allocated resources\n");
pnp_init_resources(dev);
while ((char *)p < (char *)end) {
/* determine the type of tag */
if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
} else { /* small tag */
len = p[0] & 0x07;
tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
case LARGE_TAG_MEM:
if (len != 9)
goto len_err;
io = *(short *)&p[4];
size = *(short *)&p[10];
pnpbios_parse_allocated_memresource(dev, io, size);
break;
case LARGE_TAG_ANSISTR:
/* ignore this for now */
break;
case LARGE_TAG_VENDOR:
/* do nothing */
break;
case LARGE_TAG_MEM32:
if (len != 17)
goto len_err;
io = *(int *)&p[4];
size = *(int *)&p[16];
pnpbios_parse_allocated_memresource(dev, io, size);
break;
case LARGE_TAG_FIXEDMEM32:
if (len != 9)
goto len_err;
io = *(int *)&p[4];
size = *(int *)&p[8];
pnpbios_parse_allocated_memresource(dev, io, size);
break;
case SMALL_TAG_IRQ:
if (len < 2 || len > 3)
goto len_err;
flags = 0;
io = -1;
mask = p[1] + p[2] * 256;
for (i = 0; i < 16; i++, mask = mask >> 1)
if (mask & 0x01)
io = i;
if (io != -1)
pcibios_penalize_isa_irq(io, 1);
else
flags = IORESOURCE_DISABLED;
pnp_add_irq_resource(dev, io, flags);
break;
case SMALL_TAG_DMA:
if (len != 2)
goto len_err;
flags = 0;
io = -1;
mask = p[1];
for (i = 0; i < 8; i++, mask = mask >> 1)
if (mask & 0x01)
io = i;
if (io == -1)
flags = IORESOURCE_DISABLED;
pnp_add_dma_resource(dev, io, flags);
break;
case SMALL_TAG_PORT:
if (len != 7)
goto len_err;
io = p[2] + p[3] * 256;
size = p[7];
pnpbios_parse_allocated_ioresource(dev, io, size);
break;
case SMALL_TAG_VENDOR:
/* do nothing */
break;
case SMALL_TAG_FIXEDPORT:
if (len != 3)
goto len_err;
io = p[1] + p[2] * 256;
size = p[3];
pnpbios_parse_allocated_ioresource(dev, io, size);
break;
case SMALL_TAG_END:
p = p + 2;
return (unsigned char *)p;
break;
default: /* an unknown tag */
len_err:
dev_err(&dev->dev, "unknown tag %#x length %d\n",
tag, len);
break;
}
/* continue to the next tag */
if (p[0] & LARGE_TAG)
p += len + 3;
else
p += len + 1;
}
dev_err(&dev->dev, "no end tag in resource structure\n");
return NULL;
}
/*
* Resource Configuration Options
*/
static __init void pnpbios_parse_mem_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
resource_size_t min, max, align, len;
unsigned char flags;
min = ((p[5] << 8) | p[4]) << 8;
max = ((p[7] << 8) | p[6]) << 8;
align = (p[9] << 8) | p[8];
len = ((p[11] << 8) | p[10]) << 8;
flags = p[3];
pnp_register_mem_resource(dev, option_flags, min, max, align, len,
flags);
}
static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
resource_size_t min, max, align, len;
unsigned char flags;
min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
len = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
flags = p[3];
pnp_register_mem_resource(dev, option_flags, min, max, align, len,
flags);
}
static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
resource_size_t base, len;
unsigned char flags;
base = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
len = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
flags = p[3];
pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags);
}
static __init void pnpbios_parse_irq_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
unsigned long bits;
pnp_irq_mask_t map;
unsigned char flags = IORESOURCE_IRQ_HIGHEDGE;
bits = (p[2] << 8) | p[1];
bitmap_zero(map.bits, PNP_IRQ_NR);
bitmap_copy(map.bits, &bits, 16);
if (size > 2)
flags = p[3];
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
static __init void pnpbios_parse_dma_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
pnp_register_dma_resource(dev, option_flags, p[1], p[2]);
}
static __init void pnpbios_parse_port_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
resource_size_t min, max, align, len;
unsigned char flags;
min = (p[3] << 8) | p[2];
max = (p[5] << 8) | p[4];
align = p[6];
len = p[7];
flags = p[1] ? IORESOURCE_IO_16BIT_ADDR : 0;
pnp_register_port_resource(dev, option_flags, min, max, align, len,
flags);
}
static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev,
unsigned char *p, int size,
unsigned int option_flags)
{
resource_size_t base, len;
base = (p[2] << 8) | p[1];
len = p[3];
pnp_register_port_resource(dev, option_flags, base, base, 0, len,
IORESOURCE_IO_FIXED);
}
static __init unsigned char *
pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
struct pnp_dev *dev)
{
unsigned int len, tag;
int priority;
unsigned int option_flags;
if (!p)
return NULL;
pnp_dbg(&dev->dev, "parse resource options\n");
option_flags = 0;
while ((char *)p < (char *)end) {
/* determine the type of tag */
if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
} else { /* small tag */
len = p[0] & 0x07;
tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
case LARGE_TAG_MEM:
if (len != 9)
goto len_err;
pnpbios_parse_mem_option(dev, p, len, option_flags);
break;
case LARGE_TAG_MEM32:
if (len != 17)
goto len_err;
pnpbios_parse_mem32_option(dev, p, len, option_flags);
break;
case LARGE_TAG_FIXEDMEM32:
if (len != 9)
goto len_err;
pnpbios_parse_fixed_mem32_option(dev, p, len,
option_flags);
break;
case SMALL_TAG_IRQ:
if (len < 2 || len > 3)
goto len_err;
pnpbios_parse_irq_option(dev, p, len, option_flags);
break;
case SMALL_TAG_DMA:
if (len != 2)
goto len_err;
pnpbios_parse_dma_option(dev, p, len, option_flags);
break;
case SMALL_TAG_PORT:
if (len != 7)
goto len_err;
pnpbios_parse_port_option(dev, p, len, option_flags);
break;
case SMALL_TAG_VENDOR:
/* do nothing */
break;
case SMALL_TAG_FIXEDPORT:
if (len != 3)
goto len_err;
pnpbios_parse_fixed_port_option(dev, p, len,
option_flags);
break;
case SMALL_TAG_STARTDEP:
if (len > 1)
goto len_err;
priority = PNP_RES_PRIORITY_ACCEPTABLE;
if (len > 0)
priority = p[1];
option_flags = pnp_new_dependent_set(dev, priority);
break;
case SMALL_TAG_ENDDEP:
if (len != 0)
goto len_err;
option_flags = 0;
break;
case SMALL_TAG_END:
return p + 2;
default: /* an unknown tag */
len_err:
dev_err(&dev->dev, "unknown tag %#x length %d\n",
tag, len);
break;
}
/* continue to the next tag */
if (p[0] & LARGE_TAG)
p += len + 3;
else
p += len + 1;
}
dev_err(&dev->dev, "no end tag in resource structure\n");
return NULL;
}
/*
* Compatible Device IDs
*/
static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
unsigned char *end,
struct pnp_dev *dev)
{
int len, tag;
u32 eisa_id;
char id[8];
struct pnp_id *dev_id;
if (!p)
return NULL;
while ((char *)p < (char *)end) {
/* determine the type of tag */
if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
} else { /* small tag */
len = p[0] & 0x07;
tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
case LARGE_TAG_ANSISTR:
strncpy(dev->name, p + 3,
len >= PNP_NAME_LEN ? PNP_NAME_LEN - 2 : len);
dev->name[len >=
PNP_NAME_LEN ? PNP_NAME_LEN - 1 : len] = '\0';
break;
case SMALL_TAG_COMPATDEVID: /* compatible ID */
if (len != 4)
goto len_err;
eisa_id = p[1] | p[2] << 8 | p[3] << 16 | p[4] << 24;
pnp_eisa_id_to_string(eisa_id & PNP_EISA_ID_MASK, id);
dev_id = pnp_add_id(dev, id);
if (!dev_id)
return NULL;
break;
case SMALL_TAG_END:
p = p + 2;
return (unsigned char *)p;
break;
default: /* an unknown tag */
len_err:
dev_err(&dev->dev, "unknown tag %#x length %d\n",
tag, len);
break;
}
/* continue to the next tag */
if (p[0] & LARGE_TAG)
p += len + 3;
else
p += len + 1;
}
dev_err(&dev->dev, "no end tag in resource structure\n");
return NULL;
}
/*
* Allocated Resource Encoding
*/
static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base;
unsigned long len;
if (pnp_resource_enabled(res)) {
base = res->start;
len = resource_size(res);
} else {
base = 0;
len = 0;
}
p[4] = (base >> 8) & 0xff;
p[5] = ((base >> 8) >> 8) & 0xff;
p[6] = (base >> 8) & 0xff;
p[7] = ((base >> 8) >> 8) & 0xff;
p[10] = (len >> 8) & 0xff;
p[11] = ((len >> 8) >> 8) & 0xff;
pnp_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base;
unsigned long len;
if (pnp_resource_enabled(res)) {
base = res->start;
len = resource_size(res);
} else {
base = 0;
len = 0;
}
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[6] = (base >> 16) & 0xff;
p[7] = (base >> 24) & 0xff;
p[8] = base & 0xff;
p[9] = (base >> 8) & 0xff;
p[10] = (base >> 16) & 0xff;
p[11] = (base >> 24) & 0xff;
p[16] = len & 0xff;
p[17] = (len >> 8) & 0xff;
p[18] = (len >> 16) & 0xff;
p[19] = (len >> 24) & 0xff;
pnp_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base;
unsigned long len;
if (pnp_resource_enabled(res)) {
base = res->start;
len = resource_size(res);
} else {
base = 0;
len = 0;
}
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[6] = (base >> 16) & 0xff;
p[7] = (base >> 24) & 0xff;
p[8] = len & 0xff;
p[9] = (len >> 8) & 0xff;
p[10] = (len >> 16) & 0xff;
p[11] = (len >> 24) & 0xff;
pnp_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base,
base + len - 1);
}
static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long map;
if (pnp_resource_enabled(res))
map = 1 << res->start;
else
map = 0;
p[1] = map & 0xff;
p[2] = (map >> 8) & 0xff;
pnp_dbg(&dev->dev, " encode irq mask %#lx\n", map);
}
static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long map;
if (pnp_resource_enabled(res))
map = 1 << res->start;
else
map = 0;
p[1] = map & 0xff;
pnp_dbg(&dev->dev, " encode dma mask %#lx\n", map);
}
static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base;
unsigned long len;
if (pnp_resource_enabled(res)) {
base = res->start;
len = resource_size(res);
} else {
base = 0;
len = 0;
}
p[2] = base & 0xff;
p[3] = (base >> 8) & 0xff;
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[7] = len & 0xff;
pnp_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base = res->start;
unsigned long len = resource_size(res);
if (pnp_resource_enabled(res)) {
base = res->start;
len = resource_size(res);
} else {
base = 0;
len = 0;
}
p[1] = base & 0xff;
p[2] = (base >> 8) & 0xff;
p[3] = len & 0xff;
pnp_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base,
base + len - 1);
}
static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev
*dev,
unsigned char *p,
unsigned char *end)
{
unsigned int len, tag;
int port = 0, irq = 0, dma = 0, mem = 0;
if (!p)
return NULL;
while ((char *)p < (char *)end) {
/* determine the type of tag */
if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
} else { /* small tag */
len = p[0] & 0x07;
tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
case LARGE_TAG_MEM:
if (len != 9)
goto len_err;
pnpbios_encode_mem(dev, p,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case LARGE_TAG_MEM32:
if (len != 17)
goto len_err;
pnpbios_encode_mem32(dev, p,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case LARGE_TAG_FIXEDMEM32:
if (len != 9)
goto len_err;
pnpbios_encode_fixed_mem32(dev, p,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case SMALL_TAG_IRQ:
if (len < 2 || len > 3)
goto len_err;
pnpbios_encode_irq(dev, p,
pnp_get_resource(dev, IORESOURCE_IRQ, irq));
irq++;
break;
case SMALL_TAG_DMA:
if (len != 2)
goto len_err;
pnpbios_encode_dma(dev, p,
pnp_get_resource(dev, IORESOURCE_DMA, dma));
dma++;
break;
case SMALL_TAG_PORT:
if (len != 7)
goto len_err;
pnpbios_encode_port(dev, p,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case SMALL_TAG_VENDOR:
/* do nothing */
break;
case SMALL_TAG_FIXEDPORT:
if (len != 3)
goto len_err;
pnpbios_encode_fixed_port(dev, p,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case SMALL_TAG_END:
p = p + 2;
return (unsigned char *)p;
break;
default: /* an unknown tag */
len_err:
dev_err(&dev->dev, "unknown tag %#x length %d\n",
tag, len);
break;
}
/* continue to the next tag */
if (p[0] & LARGE_TAG)
p += len + 3;
else
p += len + 1;
}
dev_err(&dev->dev, "no end tag in resource structure\n");
return NULL;
}
/*
* Core Parsing Functions
*/
int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
p = pnpbios_parse_allocated_resource_data(dev, p, end);
if (!p)
return -EIO;
p = pnpbios_parse_resource_option_data(p, end, dev);
if (!p)
return -EIO;
p = pnpbios_parse_compatible_ids(p, end, dev);
if (!p)
return -EIO;
return 0;
}
int pnpbios_read_resources_from_node(struct pnp_dev *dev,
struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
p = pnpbios_parse_allocated_resource_data(dev, p, end);
if (!p)
return -EIO;
return 0;
}
int pnpbios_write_resources_to_node(struct pnp_dev *dev,
struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
p = pnpbios_encode_allocated_resource_data(dev, p, end);
if (!p)
return -EIO;
return 0;
}
| linux-master | drivers/pnp/pnpbios/rsparser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pnpbios -- PnP BIOS driver
*
* This driver provides access to Plug-'n'-Play services provided by
* the PnP BIOS firmware, described in the following documents:
* Plug and Play BIOS Specification, Version 1.0A, 5 May 1994
* Plug and Play BIOS Clarification Paper, 6 October 1994
* Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corp.
*
* Originally (C) 1998 Christian Schmidt <[email protected]>
* Modifications (C) 1998 Tom Lees <[email protected]>
* Minor reorganizations by David Hinds <[email protected]>
* Further modifications (C) 2001, 2002 by:
* Alan Cox <[email protected]>
* Thomas Hood
* Brian Gerst <[email protected]>
*
* Ported to the PnP Layer and several additional improvements (C) 2002
* by Adam Belay <[email protected]>
*/
/* Change Log
*
* Adam Belay - <[email protected]> - March 16, 2003
* rev 1.01 Only call pnp_bios_dev_node_info once
* Added pnpbios_print_status
* Added several new error messages and info messages
* Added pnpbios_interface_attach_device
* integrated core and proc init system
* Introduced PNPMODE flags
* Removed some useless includes
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <linux/kmod.h>
#include <linux/kthread.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/byteorder.h>
#include "../base.h"
#include "pnpbios.h"
/*
*
* PnP BIOS INTERFACE
*
*/
static union pnp_bios_install_struct *pnp_bios_install = NULL;
int pnp_bios_present(void)
{
return (pnp_bios_install != NULL);
}
struct pnp_dev_node_info node_info;
/*
*
* DOCKING FUNCTIONS
*
*/
static struct completion unload_sem;
/*
* (Much of this belongs in a shared routine somewhere)
*/
static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
{
static char const sbin_pnpbios[] = "/sbin/pnpbios";
char *argv[3], **envp, *buf, *scratch;
int i = 0, value;
if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
return -ENOMEM;
if (!(buf = kzalloc(256, GFP_KERNEL))) {
kfree(envp);
return -ENOMEM;
}
/* FIXME: if there are actual users of this, it should be
* integrated into the driver core and use the usual infrastructure
* like sysfs and uevents
*/
argv[0] = (char *)sbin_pnpbios;
argv[1] = "dock";
argv[2] = NULL;
/* minimal command environment */
envp[i++] = "HOME=/";
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
#ifdef DEBUG
/* hint that policy agent should enter no-stdout debug mode */
envp[i++] = "DEBUG=kernel";
#endif
/* extensible set of named bus-specific parameters,
* supporting multiple driver selection algorithms.
*/
scratch = buf;
/* action: add, remove */
envp[i++] = scratch;
scratch += sprintf(scratch, "ACTION=%s", dock ? "add" : "remove") + 1;
/* Report the ident for the dock */
envp[i++] = scratch;
scratch += sprintf(scratch, "DOCK=%x/%x/%x",
info->location_id, info->serial, info->capabilities);
envp[i] = NULL;
value = call_usermodehelper(sbin_pnpbios, argv, envp, UMH_WAIT_EXEC);
kfree(buf);
kfree(envp);
return 0;
}
/*
* Poll the PnP docking at regular intervals
*/
static int pnp_dock_thread(void *unused)
{
static struct pnp_docking_station_info now;
int docked = -1, d = 0;
set_freezable();
while (1) {
int status;
/*
* Poll every 2 seconds
*/
msleep_interruptible(2000);
if (try_to_freeze())
continue;
status = pnp_bios_dock_station_info(&now);
switch (status) {
/*
* No dock to manage
*/
case PNP_FUNCTION_NOT_SUPPORTED:
kthread_complete_and_exit(&unload_sem, 0);
case PNP_SYSTEM_NOT_DOCKED:
d = 0;
break;
case PNP_SUCCESS:
d = 1;
break;
default:
pnpbios_print_status("pnp_dock_thread", status);
printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
kthread_complete_and_exit(&unload_sem, 0);
}
if (d != docked) {
if (pnp_dock_event(d, &now) == 0) {
docked = d;
#if 0
printk(KERN_INFO
"PnPBIOS: Docking station %stached\n",
docked ? "at" : "de");
#endif
}
}
}
kthread_complete_and_exit(&unload_sem, 0);
}
static int pnpbios_get_resources(struct pnp_dev *dev)
{
u8 nodenum = dev->number;
struct pnp_bios_node *node;
if (!pnpbios_is_dynamic(dev))
return -EPERM;
pnp_dbg(&dev->dev, "get resources\n");
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
pnpbios_read_resources_from_node(dev, node);
dev->active = pnp_is_active(dev);
kfree(node);
return 0;
}
static int pnpbios_set_resources(struct pnp_dev *dev)
{
u8 nodenum = dev->number;
struct pnp_bios_node *node;
int ret;
if (!pnpbios_is_dynamic(dev))
return -EPERM;
pnp_dbg(&dev->dev, "set resources\n");
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
if (pnpbios_write_resources_to_node(dev, node) < 0) {
kfree(node);
return -1;
}
ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node);
kfree(node);
if (ret > 0)
ret = -1;
return ret;
}
static void pnpbios_zero_data_stream(struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
unsigned int len;
int i;
while ((char *)p < (char *)end) {
if (p[0] & 0x80) { /* large tag */
len = (p[2] << 8) | p[1];
p += 3;
} else {
if (((p[0] >> 3) & 0x0f) == 0x0f)
return;
len = p[0] & 0x07;
p += 1;
}
for (i = 0; i < len; i++)
p[i] = 0;
p += len;
}
printk(KERN_ERR
"PnPBIOS: Resource structure did not contain an end tag.\n");
}
static int pnpbios_disable_resources(struct pnp_dev *dev)
{
struct pnp_bios_node *node;
u8 nodenum = dev->number;
int ret;
if (dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
return -EPERM;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
pnpbios_zero_data_stream(node);
ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node);
kfree(node);
if (ret > 0)
ret = -1;
return ret;
}
/* PnP Layer support */
struct pnp_protocol pnpbios_protocol = {
.name = "Plug and Play BIOS",
.get = pnpbios_get_resources,
.set = pnpbios_set_resources,
.disable = pnpbios_disable_resources,
};
static int __init insert_device(struct pnp_bios_node *node)
{
struct pnp_dev *dev;
char id[8];
int error;
/* check if the device is already added */
list_for_each_entry(dev, &pnpbios_protocol.devices, protocol_list) {
if (dev->number == node->handle)
return -EEXIST;
}
pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
if (!dev)
return -ENOMEM;
pnpbios_parse_data_stream(dev, node);
dev->active = pnp_is_active(dev);
dev->flags = node->flags;
if (!(dev->flags & PNPBIOS_NO_CONFIG))
dev->capabilities |= PNP_CONFIGURABLE;
if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev))
dev->capabilities |= PNP_DISABLE;
dev->capabilities |= PNP_READ;
if (pnpbios_is_dynamic(dev))
dev->capabilities |= PNP_WRITE;
if (dev->flags & PNPBIOS_REMOVABLE)
dev->capabilities |= PNP_REMOVABLE;
/* clear out the damaged flags */
if (!dev->active)
pnp_init_resources(dev);
error = pnp_add_device(dev);
if (error) {
put_device(&dev->dev);
return error;
}
pnpbios_interface_attach_device(node);
return 0;
}
static void __init build_devlist(void)
{
u8 nodenum;
unsigned int nodes_got = 0;
unsigned int devs = 0;
struct pnp_bios_node *node;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return;
for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
/* eventually we will want to use PNPMODE_STATIC here but for now
		 * dynamic will help us catch buggy BIOSes to add to the blacklist.
*/
if (!pnpbios_dont_use_current_config) {
if (pnp_bios_get_dev_node
(&nodenum, (char)PNPMODE_DYNAMIC, node))
break;
} else {
if (pnp_bios_get_dev_node
(&nodenum, (char)PNPMODE_STATIC, node))
break;
}
nodes_got++;
if (insert_device(node) == 0)
devs++;
if (nodenum <= thisnodenum) {
printk(KERN_ERR
"PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
break;
}
}
kfree(node);
printk(KERN_INFO
"PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n",
nodes_got, nodes_got != 1 ? "s" : "", devs);
}
/*
*
* INIT AND EXIT
*
*/
static int pnpbios_disabled;
int pnpbios_dont_use_current_config;
static int __init pnpbios_setup(char *str)
{
int invert;
while ((str != NULL) && (*str != '\0')) {
if (strncmp(str, "off", 3) == 0)
pnpbios_disabled = 1;
if (strncmp(str, "on", 2) == 0)
pnpbios_disabled = 0;
invert = (strncmp(str, "no-", 3) == 0);
if (invert)
str += 3;
if (strncmp(str, "curr", 4) == 0)
pnpbios_dont_use_current_config = invert;
str = strchr(str, ',');
if (str != NULL)
str += strspn(str, ", \t");
}
return 1;
}
__setup("pnpbios=", pnpbios_setup);
/* PnP BIOS signature: "$PnP" */
#define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24))
static int __init pnpbios_probe_system(void)
{
union pnp_bios_install_struct *check;
u8 sum;
int length, i;
printk(KERN_INFO "PnPBIOS: Scanning system for PnP BIOS support...\n");
/*
* Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
	 * structure and, if one is found, set up the selectors and
* entry points
*/
for (check = (union pnp_bios_install_struct *)__va(0xf0000);
check < (union pnp_bios_install_struct *)__va(0xffff0);
check = (void *)check + 16) {
if (check->fields.signature != PNP_SIGNATURE)
continue;
printk(KERN_INFO
"PnPBIOS: Found PnP BIOS installation structure at 0x%p\n",
check);
length = check->fields.length;
if (!length) {
printk(KERN_ERR
"PnPBIOS: installation structure is invalid, skipping\n");
continue;
}
for (sum = 0, i = 0; i < length; i++)
sum += check->chars[i];
if (sum) {
printk(KERN_ERR
"PnPBIOS: installation structure is corrupted, skipping\n");
continue;
}
if (check->fields.version < 0x10) {
printk(KERN_WARNING
"PnPBIOS: PnP BIOS version %d.%d is not supported\n",
check->fields.version >> 4,
check->fields.version & 15);
continue;
}
printk(KERN_INFO
"PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n",
check->fields.version >> 4, check->fields.version & 15,
check->fields.pm16cseg, check->fields.pm16offset,
check->fields.pm16dseg);
pnp_bios_install = check;
return 1;
}
printk(KERN_INFO "PnPBIOS: PnP BIOS support was not detected.\n");
return 0;
}
static int __init exploding_pnp_bios(const struct dmi_system_id *d)
{
printk(KERN_WARNING "%s detected. Disabling PnPBIOS\n", d->ident);
return 0;
}
static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
{ /* PnPBIOS GPF on boot */
.callback = exploding_pnp_bios,
.ident = "Higraded P14H",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
DMI_MATCH(DMI_BIOS_VERSION, "07.00T"),
DMI_MATCH(DMI_SYS_VENDOR, "Higraded"),
DMI_MATCH(DMI_PRODUCT_NAME, "P14H"),
},
},
{ /* PnPBIOS GPF on boot */
.callback = exploding_pnp_bios,
.ident = "ASUS P4P800",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_BOARD_NAME, "P4P800"),
},
},
{}
};
static int __init pnpbios_init(void)
{
int ret;
if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
arch_pnpbios_disabled()) {
printk(KERN_INFO "PnPBIOS: Disabled\n");
return -ENODEV;
}
#ifdef CONFIG_PNPACPI
if (!acpi_disabled && !pnpacpi_disabled) {
pnpbios_disabled = 1;
printk(KERN_INFO "PnPBIOS: Disabled by ACPI PNP\n");
return -ENODEV;
}
#endif /* CONFIG_ACPI */
/* scan the system for pnpbios support */
if (!pnpbios_probe_system())
return -ENODEV;
/* make preparations for bios calls */
pnpbios_calls_init(pnp_bios_install);
/* read the node info */
ret = pnp_bios_dev_node_info(&node_info);
if (ret) {
printk(KERN_ERR
"PnPBIOS: Unable to get node info. Aborting.\n");
return ret;
}
/* register with the pnp layer */
ret = pnp_register_protocol(&pnpbios_protocol);
if (ret) {
printk(KERN_ERR
"PnPBIOS: Unable to register driver. Aborting.\n");
return ret;
}
/* start the proc interface */
ret = pnpbios_proc_init();
if (ret)
printk(KERN_ERR "PnPBIOS: Failed to create proc interface.\n");
/* scan for pnpbios devices */
build_devlist();
pnp_platform_devices = 1;
return 0;
}
fs_initcall(pnpbios_init);
static int __init pnpbios_thread_init(void)
{
struct task_struct *task;
if (pnpbios_disabled)
return 0;
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
device_initcall(pnpbios_thread_init);
EXPORT_SYMBOL(pnpbios_protocol);
| linux-master | drivers/pnp/pnpbios/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* /proc/bus/pnp interface for Plug and Play devices
*
* Written by David Hinds, [email protected]
* Modified by Thomas Hood
*
* The .../devices and .../<node> and .../boot/<node> files are
* utilized by the lspnp and setpnp utilities, supplied with the
* pcmcia-cs package.
* http://pcmcia-cs.sourceforge.net
*
* The .../escd file is utilized by the lsescd utility written by
* Gunther Mayer.
*
* The .../legacy_device_resources file is not used yet.
*
* The other files are human-readable.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/pnp.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include "pnpbios.h"
static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
static int pnpconfig_proc_show(struct seq_file *m, void *v)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
seq_printf(m, "structure_revision %d\n"
"number_of_CSNs %d\n"
"ISA_read_data_port 0x%x\n",
pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
return 0;
}
static int escd_info_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
seq_printf(m, "min_ESCD_write_size %d\n"
"ESCD_size %d\n"
"NVRAM_base 0x%x\n",
escd.min_escd_write_size,
escd.escd_size, escd.nv_storage_base);
return 0;
}
#define MAX_SANE_ESCD_SIZE (32*1024)
static int escd_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
char *tmpbuf;
int escd_size;
if (pnp_bios_escd_info(&escd))
return -EIO;
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR
"PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
return -EFBIG;
}
tmpbuf = kzalloc(escd.escd_size, GFP_KERNEL);
if (!tmpbuf)
return -ENOMEM;
if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
kfree(tmpbuf);
return -EIO;
}
escd_size =
(unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1]) * 256;
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
" BIOS read_escd call is too great\n", __func__);
kfree(tmpbuf);
return -EFBIG;
}
seq_write(m, tmpbuf, escd_size);
kfree(tmpbuf);
return 0;
}
static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
{
void *buf;
buf = kmalloc(65536, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (pnp_bios_get_stat_res(buf)) {
kfree(buf);
return -EIO;
}
seq_write(m, buf, 65536);
kfree(buf);
return 0;
}
static int pnp_devices_proc_show(struct seq_file *m, void *v)
{
struct pnp_bios_node *node;
u8 nodenum;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
seq_printf(m, "%02x\t%08x\t%3phC\t%04x\n",
node->handle, node->eisa_id,
node->type_code, node->flags);
if (nodenum <= thisnodenum) {
printk(KERN_ERR
"%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
"PnPBIOS: proc_read_devices:",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
break;
}
}
kfree(node);
return 0;
}
static int pnpbios_proc_show(struct seq_file *m, void *v)
{
void *data = m->private;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
int len;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
kfree(node);
return -EIO;
}
len = node->size - sizeof(struct pnp_bios_node);
seq_write(m, node->data, len);
kfree(node);
return 0;
}
static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, pnpbios_proc_show, pde_data(inode));
}
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
void *data = pde_data(file_inode(file));
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
int ret = count;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
ret = -EIO;
goto out;
}
if (count != node->size - sizeof(struct pnp_bios_node)) {
ret = -EINVAL;
goto out;
}
if (copy_from_user(node->data, buf, count)) {
ret = -EFAULT;
goto out;
}
if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) {
ret = -EINVAL;
goto out;
}
ret = count;
out:
kfree(node);
return ret;
}
static const struct proc_ops pnpbios_proc_ops = {
.proc_open = pnpbios_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = pnpbios_proc_write,
};
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
sprintf(name, "%02x", node->handle);
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_ops,
(void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_ops,
(void *)(long)(node->handle + 0x100)))
return 0;
return -EIO;
}
/*
* When this is called, pnpbios functions are assumed to
* work and the pnpbios_dont_use_current_config flag
* should already have been set to the appropriate value
*/
int __init pnpbios_proc_init(void)
{
proc_pnp = proc_mkdir("bus/pnp", NULL);
if (!proc_pnp)
return -EIO;
proc_pnp_boot = proc_mkdir("boot", proc_pnp);
if (!proc_pnp_boot)
return -EIO;
proc_create_single("devices", 0, proc_pnp, pnp_devices_proc_show);
proc_create_single("configuration_info", 0, proc_pnp,
pnpconfig_proc_show);
proc_create_single("escd_info", 0, proc_pnp, escd_info_proc_show);
proc_create_single("escd", S_IRUSR, proc_pnp, escd_proc_show);
proc_create_single("legacy_device_resources", 0, proc_pnp,
pnp_legacyres_proc_show);
return 0;
}
void __exit pnpbios_proc_exit(void)
{
int i;
char name[3];
if (!proc_pnp)
return;
for (i = 0; i < 0xff; i++) {
sprintf(name, "%02x", i);
if (!pnpbios_dont_use_current_config)
remove_proc_entry(name, proc_pnp);
remove_proc_entry(name, proc_pnp_boot);
}
remove_proc_entry("legacy_device_resources", proc_pnp);
remove_proc_entry("escd", proc_pnp);
remove_proc_entry("escd_info", proc_pnp);
remove_proc_entry("configuration_info", proc_pnp);
remove_proc_entry("devices", proc_pnp);
remove_proc_entry("boot", proc_pnp);
remove_proc_entry("bus/pnp", NULL);
}
| linux-master | drivers/pnp/pnpbios/proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bioscalls.c - the lowlevel layer of the PnPBIOS driver
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/byteorder.h>
#include "pnpbios.h"
__visible struct {
u16 offset;
u16 segment;
} pnp_bios_callpoint;
/*
* These are some opcodes for a "static asmlinkage"
* As this code is *not* executed inside the linux kernel segment, but in a
* alias at offset 0, we need a far return that can not be compiled by
* default (please, prove me wrong! this is *really* ugly!)
* This is the only way to get the bios to return into the kernel code,
* because the bios code runs in 16 bit protected mode and therefore can only
* return to the caller if the call is within the first 64kB, and the linux
* kernel begins at offset 3GB...
*/
asmlinkage __visible void pnp_bios_callfunc(void);
__asm__(".text \n"
__ALIGN_STR "\n"
".globl pnp_bios_callfunc\n"
"pnp_bios_callfunc:\n"
" pushl %edx \n"
" pushl %ecx \n"
" pushl %ebx \n"
" pushl %eax \n"
" lcallw *pnp_bios_callpoint\n"
" addl $16, %esp \n"
" lret \n"
".previous \n");
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
struct desc_struct *gdt = get_cpu_gdt_rw((cpu)); \
set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \
set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
} while(0)
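/*
 * Some BIOSes dereference segment 0x40 (the legacy BIOS data area
 * selector), so for the duration of a call we install this sane,
 * writable descriptor covering the BDA into GDT slot 0x40 and restore
 * the original entry afterwards.
 */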
static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
/*
* At some point we want to use this stack frame pointer to unwind
* after PnP BIOS oopses.
*/
__visible u32 pnp_bios_fault_esp;
__visible u32 pnp_bios_fault_eip;
__visible u32 pnp_bios_is_utter_crap = 0;
static DEFINE_SPINLOCK(pnp_bios_lock);
/*
* Support Functions
*/
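/*
 * The seven 16-bit BIOS arguments are packed two per 32-bit register
 * (eax, ebx, ecx, edx) and unpacked onto the stack by
 * pnp_bios_callfunc before the far call into the 16-bit entry point.
 * PNP_TS1/PNP_TS2 are transfer segments temporarily pointed at the
 * caller's buffers via Q2_SET_SEL().
 */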
static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
u16 arg4, u16 arg5, u16 arg6, u16 arg7,
void *ts1_base, u32 ts1_size,
void *ts2_base, u32 ts2_size)
{
unsigned long flags;
u16 status;
struct desc_struct save_desc_40;
int cpu;
/*
* PnP BIOSes are generally not terribly re-entrant.
* Also, don't rely on them to save everything correctly.
*/
if (pnp_bios_is_utter_crap)
return PNP_FUNCTION_NOT_SUPPORTED;
cpu = get_cpu();
save_desc_40 = get_cpu_gdt_rw(cpu)[0x40 / 8];
get_cpu_gdt_rw(cpu)[0x40 / 8] = bad_bios_desc;
/* On some boxes IRQ's during PnP BIOS calls are deadly. */
spin_lock_irqsave(&pnp_bios_lock, flags);
/* The lock prevents us bouncing CPU here */
if (ts1_size)
Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size);
if (ts2_size)
Q2_SET_SEL(smp_processor_id(), PNP_TS2, ts2_base, ts2_size);
__asm__ __volatile__("pushl %%ebp\n\t"
"pushl %%edi\n\t"
"pushl %%esi\n\t"
"pushl %%ds\n\t"
"pushl %%es\n\t"
"pushl %%fs\n\t"
"pushl %%gs\n\t"
"pushfl\n\t"
"movl %%esp, pnp_bios_fault_esp\n\t"
"movl $1f, pnp_bios_fault_eip\n\t"
"lcall %5,%6\n\t"
"1:popfl\n\t"
"popl %%gs\n\t"
"popl %%fs\n\t"
"popl %%es\n\t"
"popl %%ds\n\t"
"popl %%esi\n\t"
"popl %%edi\n\t"
"popl %%ebp\n\t":"=a"(status)
:"0"((func) | (((u32) arg1) << 16)),
"b"((arg2) | (((u32) arg3) << 16)),
"c"((arg4) | (((u32) arg5) << 16)),
"d"((arg6) | (((u32) arg7) << 16)),
"i"(PNP_CS32), "i"(0)
:"memory");
spin_unlock_irqrestore(&pnp_bios_lock, flags);
get_cpu_gdt_rw(cpu)[0x40 / 8] = save_desc_40;
put_cpu();
/* If we get here and this is set then the PnP BIOS faulted on us. */
if (pnp_bios_is_utter_crap) {
printk(KERN_ERR
"PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
printk(KERN_ERR
"PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
printk(KERN_ERR
"PnPBIOS: Check with your vendor for an updated BIOS\n");
}
return status;
}
void pnpbios_print_status(const char *module, u16 status)
{
switch (status) {
case PNP_SUCCESS:
printk(KERN_ERR "PnPBIOS: %s: function successful\n", module);
break;
case PNP_NOT_SET_STATICALLY:
printk(KERN_ERR "PnPBIOS: %s: unable to set static resources\n",
module);
break;
case PNP_UNKNOWN_FUNCTION:
printk(KERN_ERR "PnPBIOS: %s: invalid function number passed\n",
module);
break;
case PNP_FUNCTION_NOT_SUPPORTED:
printk(KERN_ERR
"PnPBIOS: %s: function not supported on this system\n",
module);
break;
case PNP_INVALID_HANDLE:
printk(KERN_ERR "PnPBIOS: %s: invalid handle\n", module);
break;
case PNP_BAD_PARAMETER:
printk(KERN_ERR "PnPBIOS: %s: invalid parameters were passed\n",
module);
break;
case PNP_SET_FAILED:
printk(KERN_ERR "PnPBIOS: %s: unable to set resources\n",
module);
break;
case PNP_EVENTS_NOT_PENDING:
printk(KERN_ERR "PnPBIOS: %s: no events are pending\n", module);
break;
case PNP_SYSTEM_NOT_DOCKED:
printk(KERN_ERR "PnPBIOS: %s: the system is not docked\n",
module);
break;
case PNP_NO_ISA_PNP_CARDS:
printk(KERN_ERR
"PnPBIOS: %s: no isapnp cards are installed on this system\n",
module);
break;
case PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES:
printk(KERN_ERR
"PnPBIOS: %s: cannot determine the capabilities of the docking station\n",
module);
break;
case PNP_CONFIG_CHANGE_FAILED_NO_BATTERY:
printk(KERN_ERR
"PnPBIOS: %s: unable to undock, the system does not have a battery\n",
module);
break;
case PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT:
printk(KERN_ERR
"PnPBIOS: %s: could not dock due to resource conflicts\n",
module);
break;
case PNP_BUFFER_TOO_SMALL:
printk(KERN_ERR "PnPBIOS: %s: the buffer passed is too small\n",
module);
break;
case PNP_USE_ESCD_SUPPORT:
printk(KERN_ERR "PnPBIOS: %s: use ESCD instead\n", module);
break;
case PNP_MESSAGE_NOT_SUPPORTED:
printk(KERN_ERR "PnPBIOS: %s: the message is unsupported\n",
module);
break;
case PNP_HARDWARE_ERROR:
printk(KERN_ERR "PnPBIOS: %s: a hardware failure has occurred\n",
module);
break;
default:
printk(KERN_ERR "PnPBIOS: %s: unexpected status 0x%x\n", module,
status);
break;
}
}
/*
* PnP BIOS Low Level Calls
*/
#define PNP_GET_NUM_SYS_DEV_NODES 0x00
#define PNP_GET_SYS_DEV_NODE 0x01
#define PNP_SET_SYS_DEV_NODE 0x02
#define PNP_GET_EVENT 0x03
#define PNP_SEND_MESSAGE 0x04
#define PNP_GET_DOCKING_STATION_INFORMATION 0x05
#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09
#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a
#define PNP_GET_APM_ID_TABLE 0x0b
#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40
#define PNP_GET_ESCD_INFO 0x41
#define PNP_READ_ESCD 0x42
#define PNP_WRITE_ESCD 0x43
/*
* Call PnP BIOS with function 0x00, "get number of system device nodes"
*/
static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2,
PNP_TS1, PNP_DS, 0, 0, data,
sizeof(struct pnp_dev_node_info), NULL, 0);
data->no_nodes &= 0xff;
return status;
}
int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
int status = __pnp_bios_dev_node_info(data);
if (status)
pnpbios_print_status("dev_node_info", status);
return status;
}
/*
* Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible
* death if they are asked to access the "current" configuration.
* Therefore, if it's a matter of indifference, it's better to call
* get_dev_node() and set_dev_node() with boot=1 rather than with boot=0.
*/
/*
* Call PnP BIOS with function 0x01, "get system device node"
* Input: *nodenum = desired node,
* boot = whether to get nonvolatile boot (!=0)
* or volatile current (0) config
* Output: *nodenum=next node or 0xff if no more nodes
*/
static int __pnp_bios_get_dev_node(u8 *nodenum, char boot,
struct pnp_bios_node *data)
{
u16 status;
u16 tmp_nodenum;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
if (!boot && pnpbios_dont_use_current_config)
return PNP_FUNCTION_NOT_SUPPORTED;
tmp_nodenum = *nodenum;
status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2,
boot ? 2 : 1, PNP_DS, 0, &tmp_nodenum,
sizeof(tmp_nodenum), data, 65536);
*nodenum = tmp_nodenum;
return status;
}
int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
{
int status;
status = __pnp_bios_get_dev_node(nodenum, boot, data);
if (status)
pnpbios_print_status("get_dev_node", status);
return status;
}
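/*
 * Typical use (a sketch, assuming "node" points to a large-enough
 * struct pnp_bios_node buffer): walk every device node, letting the
 * BIOS advance the node number until it reports 0xff:
 *
 *	u8 nodenum = 0;
 *	while (nodenum != 0xff) {
 *		u8 this_node = nodenum;
 *		if (pnp_bios_get_dev_node(&nodenum, 1, node))
 *			break;
 *		// ... parse "node", which describes device this_node ...
 *	}
 */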
/*
* Call PnP BIOS with function 0x02, "set system device node"
* Input: *nodenum = desired node,
* boot = whether to set nonvolatile boot (!=0)
* or volatile current (0) config
*/
static int __pnp_bios_set_dev_node(u8 nodenum, char boot,
struct pnp_bios_node *data)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
if (!boot && pnpbios_dont_use_current_config)
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1,
boot ? 2 : 1, PNP_DS, 0, 0, data, 65536, NULL,
0);
return status;
}
int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
{
int status;
status = __pnp_bios_set_dev_node(nodenum, boot, data);
if (status) {
pnpbios_print_status("set_dev_node", status);
return status;
}
if (!boot) { /* Update devlist */
status = pnp_bios_get_dev_node(&nodenum, boot, data);
if (status)
return status;
}
return status;
}
/*
* Call PnP BIOS with function 0x05, "get docking station information"
*/
int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1,
PNP_DS, 0, 0, 0, 0, data,
sizeof(struct pnp_docking_station_info), NULL,
0);
return status;
}
/*
* Call PnP BIOS with function 0x0a, "get statically allocated resource
* information"
*/
static int __pnp_bios_get_stat_res(char *info)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1,
PNP_DS, 0, 0, 0, 0, info, 65536, NULL, 0);
return status;
}
int pnp_bios_get_stat_res(char *info)
{
int status;
status = __pnp_bios_get_stat_res(info);
if (status)
pnpbios_print_status("get_stat_res", status);
return status;
}
/*
* Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
*/
static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS,
0, 0, 0, 0, data,
sizeof(struct pnp_isa_config_struc), NULL, 0);
return status;
}
int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
int status;
status = __pnp_bios_isapnp_config(data);
if (status)
pnpbios_print_status("isapnp_config", status);
return status;
}
/*
* Call PnP BIOS with function 0x41, "get ESCD info"
*/
static int __pnp_bios_escd_info(struct escd_info_struc *data)
{
u16 status;
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4,
PNP_TS1, PNP_DS, data,
sizeof(struct escd_info_struc), NULL, 0);
return status;
}
int pnp_bios_escd_info(struct escd_info_struc *data)
{
int status;
status = __pnp_bios_escd_info(data);
if (status)
pnpbios_print_status("escd_info", status);
return status;
}
/*
* Call PnP BIOS function 0x42, "read ESCD"
* nvram_base is determined by calling escd_info
*/
static int __pnp_bios_read_escd(char *data, u32 nvram_base)
{
u16 status;
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0,
0, data, 65536, __va(nvram_base), 65536);
return status;
}
int pnp_bios_read_escd(char *data, u32 nvram_base)
{
int status;
status = __pnp_bios_read_escd(data, nvram_base);
if (status)
pnpbios_print_status("read_escd", status);
return status;
}
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
int i;
pnp_bios_callpoint.offset = header->fields.pm16offset;
pnp_bios_callpoint.segment = PNP_CS16;
for_each_possible_cpu(i) {
struct desc_struct *gdt = get_cpu_gdt_rw(i);
if (!gdt)
continue;
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
(unsigned long)&pnp_bios_callfunc);
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
(unsigned long)__va(header->fields.pm16cseg));
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
(unsigned long)__va(header->fields.pm16dseg));
}
}
| linux-master | drivers/pnp/pnpbios/bioscalls.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pnpacpi -- PnP ACPI driver
*
* Copyright (c) 2004 Matthieu Castet <[email protected]>
* Copyright (c) 2004 Li Shaohua <[email protected]>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include "../base.h"
#include "pnpacpi.h"
static void decode_irq_flags(struct pnp_dev *dev, int flags, u8 *triggering,
u8 *polarity, u8 *shareable)
{
switch (flags & (IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL |
IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE)) {
case IORESOURCE_IRQ_LOWLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
case IORESOURCE_IRQ_HIGHLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
case IORESOURCE_IRQ_LOWEDGE:
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
case IORESOURCE_IRQ_HIGHEDGE:
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
default:
dev_err(&dev->dev, "can't encode invalid IRQ mode %#x\n",
flags);
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
}
if (flags & IORESOURCE_IRQ_SHAREABLE)
*shareable = ACPI_SHARED;
else
*shareable = ACPI_EXCLUSIVE;
}
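/*
 * Mapping examples (a sketch): IORESOURCE_IRQ_LOWLEVEL becomes
 * level-triggered/active-low (the classic shareable PCI-style IRQ),
 * while IORESOURCE_IRQ_HIGHEDGE becomes edge-triggered/active-high
 * (the classic ISA IRQ).  Unrecognized combinations fall back to
 * edge/high after logging an error.
 */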
static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
int transfer)
{
int flags = 0;
if (bus_master)
flags |= IORESOURCE_DMA_MASTER;
switch (type) {
case ACPI_COMPATIBILITY:
flags |= IORESOURCE_DMA_COMPATIBLE;
break;
case ACPI_TYPE_A:
flags |= IORESOURCE_DMA_TYPEA;
break;
case ACPI_TYPE_B:
flags |= IORESOURCE_DMA_TYPEB;
break;
case ACPI_TYPE_F:
flags |= IORESOURCE_DMA_TYPEF;
break;
default:
/* Set a default value ? */
flags |= IORESOURCE_DMA_COMPATIBLE;
dev_err(&dev->dev, "invalid DMA type %d\n", type);
}
switch (transfer) {
case ACPI_TRANSFER_8:
flags |= IORESOURCE_DMA_8BIT;
break;
case ACPI_TRANSFER_8_16:
flags |= IORESOURCE_DMA_8AND16BIT;
break;
case ACPI_TRANSFER_16:
flags |= IORESOURCE_DMA_16BIT;
break;
default:
/* Set a default value ? */
flags |= IORESOURCE_DMA_8AND16BIT;
dev_err(&dev->dev, "invalid DMA transfer type %d\n", transfer);
}
return flags;
}
/*
* Allocated Resources
*/
static void pnpacpi_add_irqresource(struct pnp_dev *dev, struct resource *r)
{
if (!(r->flags & IORESOURCE_DISABLED))
pcibios_penalize_isa_irq(r->start, 1);
pnp_add_resource(dev, r);
}
/*
* Device CSRs that do not appear in PCI config space should be described
* via ACPI. This would normally be done with Address Space Descriptors
* marked as "consumer-only," but old versions of Windows and Linux ignore
* the producer/consumer flag, so HP invented a vendor-defined resource to
* describe the location and size of CSR space.
*/
static struct acpi_vendor_uuid hp_ccsr_uuid = {
.subtype = 2,
.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};
static int vendor_resource_matches(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor,
struct acpi_vendor_uuid *match,
int expected_len)
{
int uuid_len = sizeof(vendor->uuid);
u8 uuid_subtype = vendor->uuid_subtype;
u8 *uuid = vendor->uuid;
int actual_len;
/* byte_length includes uuid_subtype and uuid */
actual_len = vendor->byte_length - uuid_len - 1;
if (uuid_subtype == match->subtype &&
uuid_len == sizeof(match->data) &&
memcmp(uuid, match->data, uuid_len) == 0) {
if (expected_len && expected_len != actual_len) {
dev_err(&dev->dev,
"wrong vendor descriptor size; expected %d, found %d bytes\n",
expected_len, actual_len);
return 0;
}
return 1;
}
return 0;
}
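/*
 * Worked example (a sketch): for the HP CCSR vendor resource matched in
 * pnpacpi_parse_allocated_vendor() below, byte_length counts the subtype
 * byte, the 16-byte UUID, and the payload.  A descriptor carrying a
 * 16-byte {base, length} pair therefore has byte_length = 1 + 16 + 16 =
 * 33, so actual_len = 33 - 16 - 1 = 16, matching the caller's
 * expected_len of 16.
 */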
static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor)
{
if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
u64 start, length;
memcpy(&start, vendor->byte_data, sizeof(start));
memcpy(&length, vendor->byte_data + 8, sizeof(length));
pnp_add_mem_resource(dev, start, start + length - 1, 0);
}
}
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
struct pnp_dev *dev = data;
struct acpi_resource_dma *dma;
struct acpi_resource_vendor_typed *vendor_typed;
struct acpi_resource_gpio *gpio;
struct resource_win win = {{0}, 0};
struct resource *r = &win.res;
int i, flags;
if (acpi_dev_resource_address_space(res, &win)
|| acpi_dev_resource_ext_address_space(res, &win)) {
pnp_add_resource(dev, &win.res);
return AE_OK;
}
r->flags = 0;
if (acpi_dev_resource_interrupt(res, 0, r)) {
pnpacpi_add_irqresource(dev, r);
for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++)
pnpacpi_add_irqresource(dev, r);
if (i > 1) {
/*
* The IRQ encoder puts a single interrupt in each
* descriptor, so if a _CRS descriptor has more than
* one interrupt, we won't be able to re-encode it.
*/
if (pnp_can_write(dev)) {
dev_warn(&dev->dev,
"multiple interrupts in _CRS descriptor; configuration can't be changed\n");
dev->capabilities &= ~PNP_WRITE;
}
}
return AE_OK;
} else if (acpi_gpio_get_irq_resource(res, &gpio)) {
/*
* If the resource is GpioInt() type then extract the IRQ
* from GPIO resource and fill it into IRQ resource type.
*/
i = acpi_dev_gpio_irq_get(dev->data, 0);
if (i >= 0) {
flags = acpi_dev_irq_flags(gpio->triggering,
gpio->polarity,
gpio->shareable,
gpio->wake_capable);
} else {
flags = IORESOURCE_DISABLED;
}
pnp_add_irq_resource(dev, i, flags);
return AE_OK;
} else if (r->flags & IORESOURCE_DISABLED) {
pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
return AE_OK;
}
switch (res->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
if (acpi_dev_resource_memory(res, r))
pnp_add_resource(dev, r);
break;
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
if (acpi_dev_resource_io(res, r))
pnp_add_resource(dev, r);
break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
flags = dma_flags(dev, dma->type, dma->bus_master,
dma->transfer);
else
flags = IORESOURCE_DISABLED;
pnp_add_dma_resource(dev, dma->channels[0], flags);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
break;
case ACPI_RESOURCE_TYPE_VENDOR:
vendor_typed = &res->data.vendor_typed;
pnpacpi_parse_allocated_vendor(dev, vendor_typed);
break;
case ACPI_RESOURCE_TYPE_END_TAG:
break;
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
/* serial bus connections (I2C/SPI/UART) are not pnp */
break;
default:
dev_warn(&dev->dev, "unknown resource type %d in _CRS\n",
res->type);
return AE_ERROR;
}
return AE_OK;
}
int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
acpi_status status;
pnp_dbg(&dev->dev, "parse allocated resources\n");
pnp_init_resources(dev);
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_allocated_resource, dev);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
dev_err(&dev->dev, "can't evaluate _CRS: %d", status);
return -EPERM;
}
return 0;
}
static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_dma *p)
{
int i;
unsigned char map = 0, flags;
for (i = 0; i < p->channel_count; i++)
map |= 1 << p->channels[i];
flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(dev, option_flags, map, flags);
}
static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_irq *p)
{
int i;
pnp_irq_mask_t map;
unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_extended_irq *p)
{
int i;
pnp_irq_mask_t map;
unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++) {
if (p->interrupts[i]) {
if (p->interrupts[i] < PNP_IRQ_NR)
__set_bit(p->interrupts[i], map.bits);
else
dev_err(&dev->dev,
"ignoring IRQ %d option (too large for %d entry bitmap)\n",
p->interrupts[i], PNP_IRQ_NR);
}
}
flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_io *io)
{
unsigned char flags = 0;
if (io->io_decode == ACPI_DECODE_16)
flags = IORESOURCE_IO_16BIT_ADDR;
pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
io->alignment, io->address_length, flags);
}
static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_io *io)
{
pnp_register_port_resource(dev, option_flags, io->address, io->address,
0, io->address_length, IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_memory24 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_memory32 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_memory32 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address, p->address,
0, p->address_length, flags);
}
static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
unsigned char flags = 0;
status = acpi_resource_to_address64(r, p);
if (ACPI_FAILURE(status)) {
dev_warn(&dev->dev, "can't convert resource type %d\n",
r->type);
return;
}
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address.minimum,
p->address.minimum, 0, p->address.address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->address.minimum,
p->address.minimum, 0, p->address.address_length,
IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource *r)
{
struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
unsigned char flags = 0;
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address.minimum,
p->address.minimum, 0, p->address.address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->address.minimum,
p->address.minimum, 0, p->address.address_length,
IORESOURCE_IO_FIXED);
}
struct acpipnp_parse_option_s {
struct pnp_dev *dev;
unsigned int option_flags;
};
static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
void *data)
{
int priority;
struct acpipnp_parse_option_s *parse_data = data;
struct pnp_dev *dev = parse_data->dev;
unsigned int option_flags = parse_data->option_flags;
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
pnpacpi_parse_irq_option(dev, option_flags, &res->data.irq);
break;
case ACPI_RESOURCE_TYPE_DMA:
pnpacpi_parse_dma_option(dev, option_flags, &res->data.dma);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
switch (res->data.start_dpf.compatibility_priority) {
case ACPI_GOOD_CONFIGURATION:
priority = PNP_RES_PRIORITY_PREFERRED;
break;
case ACPI_ACCEPTABLE_CONFIGURATION:
priority = PNP_RES_PRIORITY_ACCEPTABLE;
break;
case ACPI_SUB_OPTIMAL_CONFIGURATION:
priority = PNP_RES_PRIORITY_FUNCTIONAL;
break;
default:
priority = PNP_RES_PRIORITY_INVALID;
break;
}
parse_data->option_flags = pnp_new_dependent_set(dev, priority);
break;
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
parse_data->option_flags = 0;
break;
case ACPI_RESOURCE_TYPE_IO:
pnpacpi_parse_port_option(dev, option_flags, &res->data.io);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnpacpi_parse_fixed_port_option(dev, option_flags,
&res->data.fixed_io);
break;
case ACPI_RESOURCE_TYPE_VENDOR:
case ACPI_RESOURCE_TYPE_END_TAG:
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnpacpi_parse_mem24_option(dev, option_flags,
&res->data.memory24);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnpacpi_parse_mem32_option(dev, option_flags,
&res->data.memory32);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnpacpi_parse_fixed_mem32_option(dev, option_flags,
&res->data.fixed_memory32);
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
pnpacpi_parse_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
pnpacpi_parse_ext_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnpacpi_parse_ext_irq_option(dev, option_flags,
&res->data.extended_irq);
break;
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
default:
dev_warn(&dev->dev, "unknown resource type %d in _PRS\n",
res->type);
return AE_ERROR;
}
return AE_OK;
}
int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
acpi_status status;
struct acpipnp_parse_option_s parse_data;
pnp_dbg(&dev->dev, "parse resource options\n");
parse_data.dev = dev;
parse_data.option_flags = 0;
status = acpi_walk_resources(handle, METHOD_NAME__PRS,
pnpacpi_option_resource, &parse_data);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
dev_err(&dev->dev, "can't evaluate _PRS: %d", status);
return -EPERM;
}
return 0;
}
static int pnpacpi_supported_resource(struct acpi_resource *res)
{
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
case ACPI_RESOURCE_TYPE_DMA:
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
return 1;
}
return 0;
}
/*
* Set resource
*/
static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
void *data)
{
int *res_cnt = data;
if (pnpacpi_supported_resource(res))
(*res_cnt)++;
return AE_OK;
}
static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
{
struct acpi_resource **resource = data;
if (pnpacpi_supported_resource(res)) {
(*resource)->type = res->type;
(*resource)->length = sizeof(struct acpi_resource);
if (res->type == ACPI_RESOURCE_TYPE_IRQ)
(*resource)->data.irq.descriptor_length =
res->data.irq.descriptor_length;
(*resource)++;
}
return AE_OK;
}
int pnpacpi_build_resource_template(struct pnp_dev *dev,
struct acpi_buffer *buffer)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
struct acpi_resource *resource;
int res_cnt = 0;
acpi_status status;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_count_resources, &res_cnt);
if (ACPI_FAILURE(status)) {
dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
return -EINVAL;
}
if (!res_cnt)
return -EINVAL;
buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
if (!buffer->pointer)
return -ENOMEM;
resource = (struct acpi_resource *)buffer->pointer;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_type_resources, &resource);
if (ACPI_FAILURE(status)) {
kfree(buffer->pointer);
dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
return -EINVAL;
}
/* resource now points at the trailing END_TAG entry */
resource->type = ACPI_RESOURCE_TYPE_END_TAG;
resource->length = sizeof(struct acpi_resource);
return 0;
}
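/*
 * Sizing, illustrated (a sketch): with res_cnt == 3 supported resources
 * the template needs those 3 entries plus one END_TAG.  buffer->length
 * is set to 4 * sizeof(struct acpi_resource) + 1, but the "+ 1" and the
 * "- 1" in the kzalloc() cancel out, so exactly four entries are
 * allocated while buffer->length advertises one byte more.
 */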
static void pnpacpi_encode_irq(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_irq *irq = &resource->data.irq;
u8 triggering, polarity, shareable;
if (!pnp_resource_enabled(p)) {
irq->interrupt_count = 0;
pnp_dbg(&dev->dev, " encode irq (%s)\n",
p ? "disabled" : "missing");
return;
}
decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
irq->triggering = triggering;
irq->polarity = polarity;
irq->shareable = shareable;
irq->interrupt_count = 1;
irq->interrupts[0] = p->start;
pnp_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n",
(int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
irq->shareable == ACPI_SHARED ? "shared" : "exclusive",
irq->descriptor_length);
}
static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq;
u8 triggering, polarity, shareable;
if (!pnp_resource_enabled(p)) {
extended_irq->interrupt_count = 0;
pnp_dbg(&dev->dev, " encode extended irq (%s)\n",
p ? "disabled" : "missing");
return;
}
decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
extended_irq->producer_consumer = ACPI_CONSUMER;
extended_irq->triggering = triggering;
extended_irq->polarity = polarity;
extended_irq->shareable = shareable;
extended_irq->interrupt_count = 1;
extended_irq->interrupts[0] = p->start;
pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
extended_irq->shareable == ACPI_SHARED ? "shared" : "exclusive");
}
static void pnpacpi_encode_dma(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_dma *dma = &resource->data.dma;
if (!pnp_resource_enabled(p)) {
dma->channel_count = 0;
pnp_dbg(&dev->dev, " encode dma (%s)\n",
p ? "disabled" : "missing");
return;
}
/* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
case IORESOURCE_DMA_TYPEA:
dma->type = ACPI_TYPE_A;
break;
case IORESOURCE_DMA_TYPEB:
dma->type = ACPI_TYPE_B;
break;
case IORESOURCE_DMA_TYPEF:
dma->type = ACPI_TYPE_F;
break;
default:
dma->type = ACPI_COMPATIBILITY;
}
switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
case IORESOURCE_DMA_8BIT:
dma->transfer = ACPI_TRANSFER_8;
break;
case IORESOURCE_DMA_8AND16BIT:
dma->transfer = ACPI_TRANSFER_8_16;
break;
default:
dma->transfer = ACPI_TRANSFER_16;
}
dma->bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
dma->channel_count = 1;
dma->channels[0] = p->start;
pnp_dbg(&dev->dev, " encode dma %d "
"type %#x transfer %#x master %d\n",
(int) p->start, dma->type, dma->transfer, dma->bus_master);
}
static void pnpacpi_encode_io(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_io *io = &resource->data.io;
if (pnp_resource_enabled(p)) {
/* Note: pnp_assign_port copies pnp_port->flags into p->flags */
io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ?
ACPI_DECODE_16 : ACPI_DECODE_10;
io->minimum = p->start;
io->maximum = p->end;
io->alignment = 0; /* Correct? */
io->address_length = resource_size(p);
} else {
io->minimum = 0;
io->address_length = 0;
}
pnp_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
io->minimum + io->address_length - 1, io->io_decode);
}
static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io;
if (pnp_resource_enabled(p)) {
fixed_io->address = p->start;
fixed_io->address_length = resource_size(p);
} else {
fixed_io->address = 0;
fixed_io->address_length = 0;
}
pnp_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
fixed_io->address + fixed_io->address_length - 1);
}
static void pnpacpi_encode_mem24(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_memory24 *memory24 = &resource->data.memory24;
if (pnp_resource_enabled(p)) {
/* Note: pnp_assign_mem copies pnp_mem->flags into p->flags */
memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
memory24->minimum = p->start;
memory24->maximum = p->end;
memory24->alignment = 0;
memory24->address_length = resource_size(p);
} else {
memory24->minimum = 0;
memory24->address_length = 0;
}
pnp_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
memory24->minimum,
memory24->minimum + memory24->address_length - 1,
memory24->write_protect);
}
static void pnpacpi_encode_mem32(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_memory32 *memory32 = &resource->data.memory32;
if (pnp_resource_enabled(p)) {
memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
memory32->minimum = p->start;
memory32->maximum = p->end;
memory32->alignment = 0;
memory32->address_length = resource_size(p);
} else {
memory32->minimum = 0;
memory32->address_length = 0;
}
pnp_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
memory32->minimum,
memory32->minimum + memory32->address_length - 1,
memory32->write_protect);
}
static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32;
if (pnp_resource_enabled(p)) {
fixed_memory32->write_protect =
p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
fixed_memory32->address = p->start;
fixed_memory32->address_length = resource_size(p);
} else {
fixed_memory32->address = 0;
fixed_memory32->address_length = 0;
}
pnp_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
fixed_memory32->address,
fixed_memory32->address + fixed_memory32->address_length - 1,
fixed_memory32->write_protect);
}
int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
{
int i = 0;
/* pnpacpi_build_resource_template allocates extra mem */
int res_cnt = (buffer->length - 1) / sizeof(struct acpi_resource) - 1;
struct acpi_resource *resource = buffer->pointer;
unsigned int port = 0, irq = 0, dma = 0, mem = 0;
pnp_dbg(&dev->dev, "encode %d resources\n", res_cnt);
while (i < res_cnt) {
switch (resource->type) {
case ACPI_RESOURCE_TYPE_IRQ:
pnpacpi_encode_irq(dev, resource,
pnp_get_resource(dev, IORESOURCE_IRQ, irq));
irq++;
break;
case ACPI_RESOURCE_TYPE_DMA:
pnpacpi_encode_dma(dev, resource,
pnp_get_resource(dev, IORESOURCE_DMA, dma));
dma++;
break;
case ACPI_RESOURCE_TYPE_IO:
pnpacpi_encode_io(dev, resource,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnpacpi_encode_fixed_io(dev, resource,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnpacpi_encode_mem24(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnpacpi_encode_mem32(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnpacpi_encode_fixed_mem32(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnpacpi_encode_ext_irq(dev, resource,
pnp_get_resource(dev, IORESOURCE_IRQ, irq));
irq++;
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
case ACPI_RESOURCE_TYPE_VENDOR:
case ACPI_RESOURCE_TYPE_END_TAG:
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
default: /* other type */
dev_warn(&dev->dev,
"can't encode unknown resource type %d\n",
resource->type);
return -EINVAL;
}
resource++;
i++;
}
return 0;
}
| linux-master | drivers/pnp/pnpacpi/rsparser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pnpacpi -- PnP ACPI driver
*
* Copyright (c) 2004 Matthieu Castet <[email protected]>
* Copyright (c) 2004 Li Shaohua <[email protected]>
*/
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include "../base.h"
#include "pnpacpi.h"
static int num;
/*
* Compatible Device IDs
*/
#define TEST_HEX(c) \
if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \
return 0
#define TEST_ALPHA(c) \
if (!('A' <= (c) && (c) <= 'Z')) \
return 0
static int __init ispnpidacpi(const char *id)
{
TEST_ALPHA(id[0]);
TEST_ALPHA(id[1]);
TEST_ALPHA(id[2]);
TEST_HEX(id[3]);
TEST_HEX(id[4]);
TEST_HEX(id[5]);
TEST_HEX(id[6]);
if (id[7] != '\0')
return 0;
return 1;
}
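/*
 * Examples (a sketch): a valid PNP ACPI ID is three uppercase letters
 * followed by four uppercase hex digits, seven characters total:
 *
 *	ispnpidacpi("PNP0A03") == 1
 *	ispnpidacpi("IFX0102") == 1
 *	ispnpidacpi("pnp0a03") == 0	// lowercase letters rejected
 *	ispnpidacpi("PNP0A3")  == 0	// too short
 */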
static int pnpacpi_get_resources(struct pnp_dev *dev)
{
pnp_dbg(&dev->dev, "get resources\n");
return pnpacpi_parse_allocated_resource(dev);
}
static int pnpacpi_set_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_handle handle;
int ret = 0;
pnp_dbg(&dev->dev, "set resources\n");
acpi_dev = ACPI_COMPANION(&dev->dev);
if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
handle = acpi_dev->handle;
if (acpi_has_method(handle, METHOD_NAME__SRS)) {
struct acpi_buffer buffer;
ret = pnpacpi_build_resource_template(dev, &buffer);
if (ret)
return ret;
ret = pnpacpi_encode_resources(dev, &buffer);
if (!ret) {
acpi_status status;
status = acpi_set_current_resources(handle, &buffer);
if (ACPI_FAILURE(status))
ret = -EIO;
}
kfree(buffer.pointer);
}
if (!ret && acpi_device_power_manageable(acpi_dev))
ret = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return ret;
}
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
acpi_dev = ACPI_COMPANION(&dev->dev);
if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
if (acpi_device_power_manageable(acpi_dev))
acpi_device_set_power(acpi_dev, ACPI_STATE_D3_COLD);
/* continue even if acpi_device_set_power() fails */
status = acpi_evaluate_object(acpi_dev->handle, "_DIS", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
return -ENODEV;
return 0;
}
#ifdef CONFIG_ACPI_SLEEP
static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
}
return acpi_bus_can_wakeup(acpi_dev->handle);
}
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
if (device_can_wakeup(&dev->dev)) {
error = acpi_pm_set_device_wakeup(&dev->dev,
device_may_wakeup(&dev->dev));
if (error)
return error;
}
if (acpi_device_power_manageable(acpi_dev)) {
int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
ACPI_STATE_D3_COLD);
if (power_state < 0)
power_state = (state.event == PM_EVENT_ON) ?
ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
/*
* acpi_device_set_power() can fail (keyboard port can't be
* powered-down?), and in any case, our return value is ignored
* by pnp_bus_suspend(). Hence we don't revert the wakeup
* setting if the set_power fails.
*/
error = acpi_device_set_power(acpi_dev, power_state);
}
return error;
}
static int pnpacpi_resume(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
if (device_may_wakeup(&dev->dev))
acpi_pm_set_device_wakeup(&dev->dev, false);
if (acpi_device_power_manageable(acpi_dev))
error = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return error;
}
#endif
struct pnp_protocol pnpacpi_protocol = {
.name = "Plug and Play ACPI",
.get = pnpacpi_get_resources,
.set = pnpacpi_set_resources,
.disable = pnpacpi_disable_resources,
#ifdef CONFIG_ACPI_SLEEP
.can_wakeup = pnpacpi_can_wakeup,
.suspend = pnpacpi_suspend,
.resume = pnpacpi_resume,
#endif
};
EXPORT_SYMBOL(pnpacpi_protocol);
static const char *__init pnpacpi_get_id(struct acpi_device *device)
{
struct acpi_hardware_id *id;
list_for_each_entry(id, &device->pnp.ids, list) {
if (ispnpidacpi(id->id))
return id->id;
}
return NULL;
}
static int __init pnpacpi_add_device(struct acpi_device *device)
{
struct pnp_dev *dev;
const char *pnpid;
struct acpi_hardware_id *id;
int error;
/* Skip devices that are already bound */
if (device->physical_node_count)
return 0;
/*
* If a PnP ACPI device is not present (it has no _CRS method),
* no device should be registered for it.
*/
if (!acpi_has_method(device->handle, "_CRS"))
return 0;
pnpid = pnpacpi_get_id(device);
if (!pnpid)
return 0;
if (!device->status.present)
return 0;
dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
if (!dev)
return -ENOMEM;
ACPI_COMPANION_SET(&dev->dev, device);
dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
if (acpi_has_method(device->handle, "_SRS"))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
if (acpi_has_method(device->handle, "_DIS"))
dev->capabilities |= PNP_DISABLE;
if (strlen(acpi_device_name(device)))
strncpy(dev->name, acpi_device_name(device), sizeof(dev->name));
else
strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name));
/* Handle possible string truncation */
dev->name[sizeof(dev->name) - 1] = '\0';
if (dev->active)
pnpacpi_parse_allocated_resource(dev);
if (dev->capabilities & PNP_CONFIGURABLE)
pnpacpi_parse_resource_option_data(dev);
list_for_each_entry(id, &device->pnp.ids, list) {
if (!strcmp(id->id, pnpid))
continue;
if (!ispnpidacpi(id->id))
continue;
pnp_add_id(dev, id->id);
}
/* clear out the damaged flags */
if (!dev->active)
pnp_init_resources(dev);
error = pnp_add_device(dev);
if (error) {
put_device(&dev->dev);
return error;
}
num++;
return 0;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
u32 lvl, void *context,
void **rv)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (!device)
return AE_CTRL_DEPTH;
if (acpi_is_pnp_device(device))
pnpacpi_add_device(device);
return AE_OK;
}
int pnpacpi_disabled __initdata;
static int __init pnpacpi_init(void)
{
if (acpi_disabled || pnpacpi_disabled) {
printk(KERN_INFO "pnp: PnP ACPI: disabled\n");
return 0;
}
printk(KERN_INFO "pnp: PnP ACPI init\n");
pnp_register_protocol(&pnpacpi_protocol);
acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
pnp_platform_devices = 1;
return 0;
}
fs_initcall(pnpacpi_init);
static int __init pnpacpi_setup(char *str)
{
if (str == NULL)
return 1;
if (!strncmp(str, "off", 3))
pnpacpi_disabled = 1;
return 1;
}
__setup("pnpacpi=", pnpacpi_setup);
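/*
 * Usage note (a sketch): booting with "pnpacpi=off" on the kernel
 * command line sets pnpacpi_disabled, so pnpacpi_init() prints
 * "PnP ACPI: disabled" and returns without registering the protocol.
 */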
| linux-master | drivers/pnp/pnpacpi/core.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Device core Trace Support
* Copyright (C) 2021, Intel Corporation
*
* Author: Andy Shevchenko <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/base/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* syscore.c - Execution of system core operations.
*
* Copyright (C) 2011 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
/**
* register_syscore_ops - Register a set of system core operations.
* @ops: System core operations to register.
*/
void register_syscore_ops(struct syscore_ops *ops)
{
mutex_lock(&syscore_ops_lock);
list_add_tail(&ops->node, &syscore_ops_list);
mutex_unlock(&syscore_ops_lock);
}
EXPORT_SYMBOL_GPL(register_syscore_ops);
/**
* unregister_syscore_ops - Unregister a set of system core operations.
* @ops: System core operations to unregister.
*/
void unregister_syscore_ops(struct syscore_ops *ops)
{
mutex_lock(&syscore_ops_lock);
list_del(&ops->node);
mutex_unlock(&syscore_ops_lock);
}
EXPORT_SYMBOL_GPL(unregister_syscore_ops);
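/*
 * Typical use (a sketch with hypothetical callbacks): a subsystem that
 * needs work done with interrupts off around suspend/resume registers a
 * static ops structure once at init time:
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend	= foo_suspend,	// int (*)(void)
 *		.resume		= foo_resume,	// void (*)(void)
 *		.shutdown	= foo_shutdown,	// void (*)(void)
 *	};
 *
 *	register_syscore_ops(&foo_syscore_ops);
 *
 * The callbacks run on one online CPU with IRQs disabled, so they must
 * not sleep or re-enable interrupts.
 */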
#ifdef CONFIG_PM_SLEEP
/**
* syscore_suspend - Execute all the registered system core suspend callbacks.
*
* This function is executed with one CPU on-line and disabled interrupts.
*/
int syscore_suspend(void)
{
struct syscore_ops *ops;
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
pm_pr_dbg("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
if (pm_wakeup_pending())
return -EBUSY;
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->suspend) {
pm_pr_dbg("Calling %pS\n", ops->suspend);
ret = ops->suspend();
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pS\n", ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
list_for_each_entry_continue(ops, &syscore_ops_list, node)
if (ops->resume)
ops->resume();
return ret;
}
EXPORT_SYMBOL_GPL(syscore_suspend);
/**
* syscore_resume - Execute all the registered system core resume callbacks.
*
* This function is executed with one CPU on-line and disabled interrupts.
*/
void syscore_resume(void)
{
struct syscore_ops *ops;
trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
list_for_each_entry(ops, &syscore_ops_list, node)
if (ops->resume) {
pm_pr_dbg("Calling %pS\n", ops->resume);
ops->resume();
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pS\n", ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
EXPORT_SYMBOL_GPL(syscore_resume);
#endif /* CONFIG_PM_SLEEP */
/**
* syscore_shutdown - Execute all the registered system core shutdown callbacks.
*/
void syscore_shutdown(void)
{
struct syscore_ops *ops;
mutex_lock(&syscore_ops_lock);
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->shutdown) {
if (initcall_debug)
pr_info("PM: Calling %pS\n", ops->shutdown);
ops->shutdown();
}
mutex_unlock(&syscore_ops_lock);
}
| linux-master | drivers/base/syscore.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Memory subsystem support
*
* Written by Matt Tolentino <[email protected]>
* Dave Hansen <[email protected]>
*
* This file provides the necessary infrastructure to represent
* a SPARSEMEM-memory-model system's physical memory in /sysfs.
* All arch-independent code that assumes MEMORY_HOTPLUG requires
* SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#define MEMORY_CLASS_NAME "memory"
static const char *const online_type_to_str[] = {
[MMOP_OFFLINE] = "offline",
[MMOP_ONLINE] = "online",
[MMOP_ONLINE_KERNEL] = "online_kernel",
[MMOP_ONLINE_MOVABLE] = "online_movable",
};
int mhp_online_type_from_str(const char *str)
{
int i;
for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
if (sysfs_streq(str, online_type_to_str[i]))
return i;
}
return -EINVAL;
}
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
static int sections_per_block;
static inline unsigned long memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
return memory_block_id(pfn_to_section_nr(pfn));
}
static inline unsigned long phys_to_block_id(unsigned long phys)
{
return pfn_to_block_id(PFN_DOWN(phys));
}
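/*
 * Worked example (a sketch): with 128 MiB sections and a 2 GiB memory
 * block size, sections_per_block == 16, so sections 0..15 all map to
 * block 0 and a pfn in the 17th section maps to block 1.  With a block
 * size equal to the section size, the mapping is the identity.
 */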
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
.dev_name = MEMORY_CLASS_NAME,
.online = memory_subsys_online,
.offline = memory_subsys_offline,
};
/*
* Memory blocks are cached in a local radix tree to avoid
* a costly linear search for the corresponding device on
* the subsystem bus.
*/
static DEFINE_XARRAY(memory_blocks);
/*
* Memory groups, indexed by memory group id (mgid).
*/
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC XA_MARK_1
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);
void unregister_memory_notifier(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
static void memory_block_release(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
/* Verify that the altmap is freed */
WARN_ON(mem->altmap);
kfree(mem);
}
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}
/*
* Legacy interface that we cannot remove. Always indicate "removable"
* with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
*/
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}
/*
* online, offline, going offline, etc.
*/
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct memory_block *mem = to_memory_block(dev);
const char *output;
/*
* We can probably put these states in a nice little array
* so that they're not open-coded
*/
switch (mem->state) {
case MEM_ONLINE:
output = "online";
break;
case MEM_OFFLINE:
output = "offline";
break;
case MEM_GOING_OFFLINE:
output = "going-offline";
break;
default:
WARN_ON(1);
return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
}
return sysfs_emit(buf, "%s\n", output);
}
int memory_notify(unsigned long val, void *v)
{
return blocking_notifier_call_chain(&memory_chain, val, v);
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
static unsigned long memblk_nr_poison(struct memory_block *mem);
#else
static inline unsigned long memblk_nr_poison(struct memory_block *mem)
{
return 0;
}
#endif
static int memory_block_online(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
struct zone *zone;
int ret;
if (memblk_nr_poison(mem))
return -EHWPOISON;
zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
start_pfn, nr_pages);
/*
* Although vmemmap pages have a different lifecycle than the pages
* they describe (they remain until the memory is unplugged), doing
* their initialization and accounting at memory onlining/offlining
* stage helps to keep accounting easier to follow - e.g., vmemmap
* pages belong to the same zone as the memory they describe.
*/
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
return ret;
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
nr_pages - nr_vmemmap_pages, zone, mem->group);
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
return ret;
}
/*
* Account once onlining succeeded. If the zone was unpopulated, it is
* now already properly populated.
*/
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
mem->zone = zone;
return ret;
}
static int memory_block_offline(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
int ret;
if (!mem->zone)
return -EINVAL;
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn),
mem->group, nr_vmemmap_pages);
return ret;
}
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
return ret;
}
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
int ret;
switch (action) {
case MEM_ONLINE:
ret = memory_block_online(mem);
break;
case MEM_OFFLINE:
ret = memory_block_offline(mem);
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
"%ld\n", __func__, mem->start_section_nr, action, action);
ret = -EINVAL;
}
return ret;
}
static int memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
if (mem->state != from_state_req)
return -EINVAL;
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
ret = memory_block_action(mem, to_state);
mem->state = ret ? from_state_req : to_state;
return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
int ret;
if (mem->state == MEM_ONLINE)
return 0;
/*
* When called via device_online() without configuring the online_type,
* we want to default to MMOP_ONLINE.
*/
if (mem->online_type == MMOP_OFFLINE)
mem->online_type = MMOP_ONLINE;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
mem->online_type = MMOP_OFFLINE;
return ret;
}
static int memory_subsys_offline(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
if (mem->state == MEM_OFFLINE)
return 0;
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
const int online_type = mhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
int ret;
if (online_type < 0)
return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
case MMOP_ONLINE:
/* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
case MMOP_OFFLINE:
ret = device_offline(&mem->dev);
break;
default:
ret = -EINVAL; /* should never happen */
}
unlock_device_hotplug();
if (ret < 0)
return ret;
if (ret)
return -EINVAL;
return count;
}
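/*
 * Userspace interface, illustrated (a sketch):
 *
 *	echo online_movable > /sys/devices/system/memory/memory42/state
 *
 * parses "online_movable" via mhp_online_type_from_str(), stashes it in
 * mem->online_type under device_hotplug_lock, and then goes through
 * device_online() so the driver core's online/offline bookkeeping stays
 * in sync with the memory block state.
 */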
/*
* Legacy interface that we cannot remove: s390x exposes the storage increment
* covered by a memory block, allowing for identifying which memory blocks
* comprise a storage increment. Since a memory block spans complete
* storage increments nowadays, this interface is basically unused. Other
* archs never exposed != 0.
*/
static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
return sysfs_emit(buf, "%d\n",
arch_get_memory_phys_device(start_pfn));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
struct memory_group *group,
unsigned long start_pfn, unsigned long nr_pages,
int online_type, struct zone *default_zone)
{
struct zone *zone;
zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
if (zone == default_zone)
return 0;
return sysfs_emit_at(buf, len, " %s", zone->name);
}
static ssize_t valid_zones_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct memory_group *group = mem->group;
struct zone *default_zone;
int nid = mem->nid;
int len = 0;
/*
* Check the existing zone. Make sure that we do that only on the
* online nodes otherwise the page_zone is not reliable
*/
if (mem->state == MEM_ONLINE) {
/*
* If !mem->zone, the memory block spans multiple zones and
* cannot get offlined.
*/
default_zone = mem->zone;
if (!default_zone)
return sysfs_emit(buf, "%s\n", "none");
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
goto out;
}
default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
start_pfn, nr_pages);
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
out:
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
* Show the memory block size (shared by all memory blocks).
*/
static ssize_t block_size_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}
static DEVICE_ATTR_RO(block_size_bytes);
/*
* Memory auto online policy.
*/
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
online_type_to_str[mhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
const int online_type = mhp_online_type_from_str(buf);
if (online_type < 0)
return -EINVAL;
mhp_default_online_type = online_type;
return count;
}
static DEVICE_ATTR_RW(auto_online_blocks);
#ifdef CONFIG_CRASH_HOTPLUG
#include <linux/kexec.h>
static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif
/*
* Some architectures will have custom drivers to do this, and
* will not need to do it from userspace. The fake hot-add code
* as well as ppc64 will do all of their discovery in userspace
* and will require this interface.
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
int nid, ret;
unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
ret = kstrtoull(buf, 0, &phys_addr);
if (ret)
return ret;
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
nid = memory_add_physaddr_to_nid(phys_addr);
ret = __add_memory(nid, phys_addr,
MIN_MEMORY_BLOCK_SIZE * sections_per_block,
MHP_NONE);
if (ret)
goto out;
ret = count;
out:
unlock_device_hotplug();
return ret;
}
static DEVICE_ATTR_WO(probe);
#endif
#ifdef CONFIG_MEMORY_FAILURE
/*
* Support for offlining pages of memory
*/
/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = soft_offline_page(pfn, 0);
return ret == 0 ? count : ret;
}
/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, MF_SW_SIMULATED);
if (ret == -EOPNOTSUPP)
ret = 0;
return ret ? ret : count;
}
static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
return 0;
}
/*
* A reference for the returned memory block device is acquired.
*
* Called under device_hotplug_lock.
*/
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
mem = xa_load(&memory_blocks, block_id);
if (mem)
get_device(&mem->dev);
return mem;
}
/*
* Called under device_hotplug_lock.
*/
struct memory_block *find_memory_block(unsigned long section_nr)
{
unsigned long block_id = memory_block_id(section_nr);
return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
&dev_attr_phys_index.attr,
&dev_attr_state.attr,
&dev_attr_phys_device.attr,
&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
&dev_attr_valid_zones.attr,
#endif
NULL
};
static const struct attribute_group memory_memblk_attr_group = {
.attrs = memory_memblk_attrs,
};
static const struct attribute_group *memory_memblk_attr_groups[] = {
&memory_memblk_attr_group,
NULL,
};
static int __add_memory_block(struct memory_block *memory)
{
int ret;
memory->dev.bus = &memory_subsys;
memory->dev.id = memory->start_section_nr / sections_per_block;
memory->dev.release = memory_block_release;
memory->dev.groups = memory_memblk_attr_groups;
memory->dev.offline = memory->state == MEM_OFFLINE;
ret = device_register(&memory->dev);
if (ret) {
put_device(&memory->dev);
return ret;
}
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
GFP_KERNEL));
if (ret)
device_unregister(&memory->dev);
return ret;
}
static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
int nid)
{
const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct zone *zone, *matching_zone = NULL;
pg_data_t *pgdat = NODE_DATA(nid);
int i;
/*
* This logic only works for early memory, when the applicable zones
* already span the memory block. We don't expect overlapping zones on
* a single node for early memory. So if we're told that some PFNs
* of a node fall into this memory block, we can assume that all node
* zones that intersect with the memory block are actually applicable.
* No need to look at the memmap.
*/
for (i = 0; i < MAX_NR_ZONES; i++) {
zone = pgdat->node_zones + i;
if (!populated_zone(zone))
continue;
if (!zone_intersects(zone, start_pfn, nr_pages))
continue;
if (!matching_zone) {
matching_zone = zone;
continue;
}
/* Spans multiple zones ... */
matching_zone = NULL;
break;
}
return matching_zone;
}
#ifdef CONFIG_NUMA
/**
* memory_block_add_nid() - Indicate that system RAM falling into this memory
* block device (partially) belongs to the given node.
* @mem: The memory block device.
* @nid: The node id.
* @context: The memory initialization context.
*
* Indicate that system RAM falling into this memory block (partially) belongs
* to the given node. If the context indicates ("early") that we are adding the
* node during node device subsystem initialization, this will also properly
* set/adjust mem->zone based on the zone ranges of the given node.
*/
void memory_block_add_nid(struct memory_block *mem, int nid,
enum meminit_context context)
{
if (context == MEMINIT_EARLY && mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
* dealing with a single zone. So if we're setting the node id
* the first time, determine if there is a single zone. If we're
* setting the node id a second time to a different node,
* invalidate the single detected zone.
*/
if (mem->nid == NUMA_NO_NODE)
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
}
/*
* If this memory block spans multiple nodes, we only indicate
* the last processed node. If we span multiple nodes (not applicable
* to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and, consequently, unplugging.
*/
mem->nid = nid;
}
#endif
static int add_memory_block(unsigned long block_id, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
mem = find_memory_block_by_id(block_id);
if (mem) {
put_device(&mem->dev);
return -EEXIST;
}
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
mem->nid = NUMA_NO_NODE;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
#ifndef CONFIG_NUMA
if (state == MEM_ONLINE)
/*
* MEM_ONLINE at this point implies early memory. With NUMA,
* we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
* manually when memory onlining/offlining succeeds.
*/
mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */
ret = __add_memory_block(mem);
if (ret)
return ret;
if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
return 0;
}
static int __init add_boot_memory_block(unsigned long base_section_nr)
{
int section_count = 0;
unsigned long nr;
for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
nr++)
if (present_section_nr(nr))
section_count++;
if (section_count == 0)
return 0;
return add_memory_block(memory_block_id(base_section_nr),
MEM_ONLINE, NULL, NULL);
}
static int add_hotplug_memory_block(unsigned long block_id,
struct vmem_altmap *altmap,
struct memory_group *group)
{
return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
}
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
if (memory->group) {
list_del(&memory->group_next);
memory->group = NULL;
}
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
/*
* Create memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* will be initialized as offline.
*
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
unsigned long block_id;
int ret = 0;
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
!IS_ALIGNED(size, memory_block_size_bytes())))
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
ret = add_hotplug_memory_block(block_id, altmap, group);
if (ret)
break;
}
if (ret) {
end_block_id = block_id;
for (block_id = start_block_id; block_id != end_block_id;
block_id++) {
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
remove_memory_block(mem);
}
}
return ret;
}
/*
* Remove memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* have to be offline.
*
* Called under device_hotplug_lock.
*/
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
unsigned long block_id;
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
!IS_ALIGNED(size, memory_block_size_bytes())))
return;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
unregister_memory_block_under_nodes(mem);
remove_memory_block(mem);
}
}
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
#endif
#ifdef CONFIG_MEMORY_FAILURE
&dev_attr_soft_offline_page.attr,
&dev_attr_hard_offline_page.attr,
#endif
&dev_attr_block_size_bytes.attr,
&dev_attr_auto_online_blocks.attr,
#ifdef CONFIG_CRASH_HOTPLUG
&dev_attr_crash_hotplug.attr,
#endif
NULL
};
static const struct attribute_group memory_root_attr_group = {
.attrs = memory_root_attrs,
};
static const struct attribute_group *memory_root_attr_groups[] = {
&memory_root_attr_group,
NULL,
};
/*
* Initialize the sysfs support for memory devices. At the time this function
* is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
*/
void __init memory_dev_init(void)
{
int ret;
unsigned long block_sz, nr;
/* Validate the configured memory block size */
block_sz = memory_block_size_bytes();
if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
panic("Memory block size not suitable: 0x%lx\n", block_sz);
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
* Create entries for memory sections that were found
* during boot and have been initialized
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
ret = add_boot_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
}
}
/**
* walk_memory_blocks - walk through all present memory blocks overlapped
* by the range [start, start + size)
*
* @start: start address of the memory range
* @size: size of the memory range
* @arg: argument passed to func
* @func: callback for each memory section walked
*
* This function walks through all present memory blocks overlapped by the
* range [start, start + size), calling func on each memory block.
*
* In case func() returns an error, walking is aborted and the error is
* returned.
*
* Called under device_hotplug_lock.
*/
int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func)
{
const unsigned long start_block_id = phys_to_block_id(start);
const unsigned long end_block_id = phys_to_block_id(start + size - 1);
struct memory_block *mem;
unsigned long block_id;
int ret = 0;
if (!size)
return 0;
for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
mem = find_memory_block_by_id(block_id);
if (!mem)
continue;
ret = func(mem, arg);
put_device(&mem->dev);
if (ret)
break;
}
return ret;
}
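/*
 * Example (illustrative sketch, not part of the original file): a caller
 * counting how many memory blocks overlapping a physical range are online.
 * The callback name and surrounding context are hypothetical; only
 * walk_memory_blocks() and struct memory_block are taken from this file.
 *
 *	static int count_online_cb(struct memory_block *mem, void *arg)
 *	{
 *		unsigned int *online = arg;
 *
 *		if (mem->state == MEM_ONLINE)
 *			(*online)++;
 *		return 0;
 *	}
 *
 *	unsigned int online = 0;
 *
 *	walk_memory_blocks(start, size, &online, count_online_cb);
 */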
struct for_each_memory_block_cb_data {
walk_memory_blocks_func_t func;
void *arg;
};
static int for_each_memory_block_cb(struct device *dev, void *data)
{
struct memory_block *mem = to_memory_block(dev);
struct for_each_memory_block_cb_data *cb_data = data;
return cb_data->func(mem, cb_data->arg);
}
/**
* for_each_memory_block - walk through all present memory blocks
*
* @arg: argument passed to func
* @func: callback for each memory block walked
*
* This function walks through all present memory blocks, calling func on
* each memory block.
*
* In case func() returns an error, walking is aborted and the error is
* returned.
*/
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
struct for_each_memory_block_cb_data cb_data = {
.func = func,
.arg = arg,
};
return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
for_each_memory_block_cb);
}
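/*
 * Illustrative note: the callback shape is the same as for
 * walk_memory_blocks() above, so the hypothetical count_online_cb() from
 * that sketch could equally be run over every present memory block with
 * for_each_memory_block(&online, count_online_cb).
 */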
/*
* This is an internal helper to unify allocation and initialization of
* memory groups. Note that the passed memory group will be copied to a
* dynamically allocated memory group. After this call, the passed
* memory group should no longer be used.
*/
static int memory_group_register(struct memory_group group)
{
struct memory_group *new_group;
uint32_t mgid;
int ret;
if (!node_possible(group.nid))
return -EINVAL;
new_group = kzalloc(sizeof(group), GFP_KERNEL);
if (!new_group)
return -ENOMEM;
*new_group = group;
INIT_LIST_HEAD(&new_group->memory_blocks);
ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
GFP_KERNEL);
if (ret) {
kfree(new_group);
return ret;
} else if (group.is_dynamic) {
xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
}
return mgid;
}
/**
* memory_group_register_static() - Register a static memory group.
* @nid: The node id.
* @max_pages: The maximum number of pages we'll have in this static memory
* group.
*
* Register a new static memory group and return the memory group id.
* All memory in the group belongs to a single unit, such as a DIMM. All
* memory belonging to a static memory group is added in one go to be removed
* in one go -- it's static.
*
* Returns an error if out of memory, if the node id is invalid, if no new
* memory groups can be registered, or if max_pages is invalid (0). Otherwise,
* returns the new memory group id.
*/
int memory_group_register_static(int nid, unsigned long max_pages)
{
struct memory_group group = {
.nid = nid,
.s = {
.max_pages = max_pages,
},
};
if (!max_pages)
return -EINVAL;
return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
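/*
 * Illustrative sketch (loosely modeled on drivers/dax/kmem.c; names and
 * error handling are simplified, so treat this as an assumption rather
 * than the canonical sequence): a driver registers a static group for a
 * DIMM-like range, then passes the group id in place of a nid via
 * MHP_NID_IS_MGID:
 *
 *	mgid = memory_group_register_static(nid, PFN_UP(total_len));
 *	if (mgid < 0)
 *		return mgid;
 *
 *	rc = add_memory_driver_managed(mgid, res->start, resource_size(res),
 *				       "System RAM (demo)", MHP_NID_IS_MGID);
 *	if (rc)
 *		memory_group_unregister(mgid);
 */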
/**
* memory_group_register_dynamic() - Register a dynamic memory group.
* @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this dynamic
* memory group.
*
* Register a new dynamic memory group and return the memory group id.
* Memory within a dynamic memory group is added/removed dynamically
* in unit_pages.
*
* Returns an error if out of memory, if the node id is invalid, if no new
* memory groups can be registered, or if unit_pages is invalid (0, not a
* power of two, smaller than a single memory block). Otherwise, returns the
* new memory group id.
*/
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
struct memory_group group = {
.nid = nid,
.is_dynamic = true,
.d = {
.unit_pages = unit_pages,
},
};
if (!unit_pages || !is_power_of_2(unit_pages) ||
unit_pages < PHYS_PFN(memory_block_size_bytes()))
return -EINVAL;
return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
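/*
 * Illustrative sketch (hypothetical, loosely following how virtio-mem
 * uses dynamic groups): register once, then add/remove memory in
 * multiples of unit_pages, passing the group id via MHP_NID_IS_MGID just
 * like in the static example above:
 *
 *	mgid = memory_group_register_dynamic(nid, unit_pages);
 *	if (mgid < 0)
 *		return mgid;
 */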
/**
* memory_group_unregister() - Unregister a memory group.
* @mgid: the memory group id
*
* Unregister a memory group. If any memory block still belongs to this
* memory group, unregistering will fail.
*
* Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
* memory blocks still belong to this memory group and returns 0 if
* unregistering succeeded.
*/
int memory_group_unregister(int mgid)
{
struct memory_group *group;
if (mgid < 0)
return -EINVAL;
group = xa_load(&memory_groups, mgid);
if (!group)
return -EINVAL;
if (!list_empty(&group->memory_blocks))
return -EBUSY;
xa_erase(&memory_groups, mgid);
kfree(group);
return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);
/*
* This is an internal helper only to be used in core memory hotplug code to
* lookup a memory group. We don't care about locking, as we don't expect a
* memory group to get unregistered while adding memory to it -- because
* the group and the memory is managed by the same driver.
*/
struct memory_group *memory_group_find_by_id(int mgid)
{
return xa_load(&memory_groups, mgid);
}
/*
* This is an internal helper only to be used in core memory hotplug code to
* walk all dynamic memory groups excluding a given memory group, either
* belonging to a specific node, or belonging to any node.
*/
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg)
{
struct memory_group *group;
unsigned long index;
int ret = 0;
xa_for_each_marked(&memory_groups, index, group,
MEMORY_GROUP_MARK_DYNAMIC) {
if (group == excluded)
continue;
#ifdef CONFIG_NUMA
if (nid != NUMA_NO_NODE && group->nid != nid)
continue;
#endif /* CONFIG_NUMA */
ret = func(group, arg);
if (ret)
break;
}
return ret;
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
void memblk_nr_poison_inc(unsigned long pfn)
{
const unsigned long block_id = pfn_to_block_id(pfn);
struct memory_block *mem = find_memory_block_by_id(block_id);
if (mem)
atomic_long_inc(&mem->nr_hwpoison);
}
void memblk_nr_poison_sub(unsigned long pfn, long i)
{
const unsigned long block_id = pfn_to_block_id(pfn);
struct memory_block *mem = find_memory_block_by_id(block_id);
if (mem)
atomic_long_sub(i, &mem->nr_hwpoison);
}
static unsigned long memblk_nr_poison(struct memory_block *mem)
{
return atomic_long_read(&mem->nr_hwpoison);
}
#endif
| linux-master | drivers/base/memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* class.c - basic device class management
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2003-2004 Greg Kroah-Hartman
* Copyright (c) 2003-2004 IBM Corp.
*/
#include <linux/device/class.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
/* /sys/class */
static struct kset *class_kset;
#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
/**
* class_to_subsys - Turn a struct class into a struct subsys_private
*
 * @class: pointer to the struct class to look up
*
* The driver core internals need to work on the subsys_private structure, not
* the external struct class pointer. This function walks the list of
* registered classes in the system and finds the matching one and returns the
* internal struct subsys_private that relates to that class.
*
* Note, the reference count of the return value is INCREMENTED if it is not
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
struct subsys_private *class_to_subsys(const struct class *class)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
if (!class || !class_kset)
return NULL;
spin_lock(&class_kset->list_lock);
if (list_empty(&class_kset->list))
goto done;
list_for_each_entry(kobj, &class_kset->list, entry) {
struct kset *kset = container_of(kobj, struct kset, kobj);
sp = container_of_const(kset, struct subsys_private, subsys);
if (sp->class == class)
goto done;
}
sp = NULL;
done:
sp = subsys_get(sp);
spin_unlock(&class_kset->list_lock);
return sp;
}
static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct class_attribute *class_attr = to_class_attr(attr);
struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->show)
ret = class_attr->show(cp->class, class_attr, buf);
return ret;
}
static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct class_attribute *class_attr = to_class_attr(attr);
struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->store)
ret = class_attr->store(cp->class, class_attr, buf, count);
return ret;
}
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
const struct class *class = cp->class;
pr_debug("class '%s': release.\n", class->name);
if (class->class_release)
class->class_release(class);
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
lockdep_unregister_key(&cp->lock_key);
kfree(cp);
}
static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
{
const struct subsys_private *cp = to_subsys_private(kobj);
const struct class *class = cp->class;
return class->ns_type;
}
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
};
static const struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
.child_ns_type = class_child_ns_type,
};
int class_create_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
struct subsys_private *sp = class_to_subsys(cls);
int error;
if (!sp)
return -EINVAL;
error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(class_create_file_ns);
void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr,
const void *ns)
{
struct subsys_private *sp = class_to_subsys(cls);
if (!sp)
return;
sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_remove_file_ns);
static struct device *klist_class_to_dev(struct klist_node *n)
{
struct device_private *p = to_device_private_class(n);
return p->device;
}
static void klist_class_dev_get(struct klist_node *n)
{
struct device *dev = klist_class_to_dev(n);
get_device(dev);
}
static void klist_class_dev_put(struct klist_node *n)
{
struct device *dev = klist_class_to_dev(n);
put_device(dev);
}
int class_register(const struct class *cls)
{
struct subsys_private *cp;
struct lock_class_key *key;
int error;
pr_debug("device class '%s': registering\n", cls->name);
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->interfaces);
kset_init(&cp->glue_dirs);
key = &cp->lock_key;
lockdep_register_key(key);
__mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error) {
kfree(cp);
return error;
}
cp->subsys.kobj.kset = class_kset;
cp->subsys.kobj.ktype = &class_ktype;
cp->class = cls;
error = kset_register(&cp->subsys);
if (error)
goto err_out;
error = sysfs_create_groups(&cp->subsys.kobj, cls->class_groups);
if (error) {
kobject_del(&cp->subsys.kobj);
kfree_const(cp->subsys.kobj.name);
goto err_out;
}
return 0;
err_out:
kfree(cp);
return error;
}
EXPORT_SYMBOL_GPL(class_register);
void class_unregister(const struct class *cls)
{
struct subsys_private *sp = class_to_subsys(cls);
if (!sp)
return;
pr_debug("device class '%s': unregistering\n", cls->name);
sysfs_remove_groups(&sp->subsys.kobj, cls->class_groups);
kset_unregister(&sp->subsys);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_unregister);
static void class_create_release(const struct class *cls)
{
pr_debug("%s called for %s\n", __func__, cls->name);
kfree(cls);
}
/**
* class_create - create a struct class structure
* @name: pointer to a string for the name of this class.
*
* This is used to create a struct class pointer that can then be used
* in calls to device_create().
*
* Returns &struct class pointer on success, or ERR_PTR() on error.
*
* Note, the pointer created here is to be destroyed when finished by
* making a call to class_destroy().
*/
struct class *class_create(const char *name)
{
struct class *cls;
int retval;
cls = kzalloc(sizeof(*cls), GFP_KERNEL);
if (!cls) {
retval = -ENOMEM;
goto error;
}
cls->name = name;
cls->class_release = class_create_release;
retval = class_register(cls);
if (retval)
goto error;
return cls;
error:
kfree(cls);
return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(class_create);
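/*
 * Illustrative sketch (hypothetical module): the usual pairing of
 * class_create() in module init with class_destroy() in module exit.
 *
 *	static struct class *demo_class;
 *
 *	static int __init demo_init(void)
 *	{
 *		demo_class = class_create("demo");
 *		if (IS_ERR(demo_class))
 *			return PTR_ERR(demo_class);
 *		return 0;
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		class_destroy(demo_class);
 *	}
 */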
/**
* class_destroy - destroys a struct class structure
* @cls: pointer to the struct class that is to be destroyed
*
* Note, the pointer to be destroyed must have been created with a call
* to class_create().
*/
void class_destroy(const struct class *cls)
{
if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
}
EXPORT_SYMBOL_GPL(class_destroy);
/**
* class_dev_iter_init - initialize class device iterator
* @iter: class iterator to initialize
 * @class: the class we want to iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
* Initialize class iterator @iter such that it iterates over devices
* of @class. If @start is set, the list iteration will start there,
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
const struct device *start, const struct device_type *type)
{
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
if (!sp)
return;
if (start)
start_knode = &start->p->knode_class;
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
iter->sp = sp;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);
/**
* class_dev_iter_next - iterate to the next device
* @iter: class iterator to proceed
*
* Proceed @iter to the next device and return it. Returns NULL if
* iteration is complete.
*
 * The returned device is referenced and won't be released until the
 * iterator proceeds to the next device or is exited. The caller is
* free to do whatever it wants to do with the device including
* calling back into class code.
*/
struct device *class_dev_iter_next(struct class_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
dev = klist_class_to_dev(knode);
if (!iter->type || iter->type == dev->type)
return dev;
}
}
EXPORT_SYMBOL_GPL(class_dev_iter_next);
/**
* class_dev_iter_exit - finish iteration
* @iter: class iterator to finish
*
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
void class_dev_iter_exit(struct class_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
subsys_put(iter->sp);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);
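/*
 * Illustrative usage of the iterator trio (visit() is a hypothetical
 * helper); this is exactly the pattern class_for_each_device() below is
 * built on:
 *
 *	struct class_dev_iter iter;
 *	struct device *dev;
 *
 *	class_dev_iter_init(&iter, class, NULL, NULL);
 *	while ((dev = class_dev_iter_next(&iter)))
 *		visit(dev);
 *	class_dev_iter_exit(&iter);
 */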
/**
* class_for_each_device - device iterator
* @class: the class we're iterating
* @start: the device to start with in the list, if any.
* @data: data for the callback
* @fn: function to be called for each device
*
* Iterate over @class's list of devices, and call @fn for each,
* passing it @data. If @start is set, the list iteration will start
* there, otherwise if it is NULL, the iteration starts at the
* beginning of the list.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
* @fn is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
void *data, int (*fn)(struct device *, void *))
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
int error = 0;
if (!class)
return -EINVAL;
if (!sp) {
WARN(1, "%s called for class '%s' before it was initialized",
__func__, class->name);
return -EINVAL;
}
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
error = fn(dev, data);
if (error)
break;
}
class_dev_iter_exit(&iter);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(class_for_each_device);
/**
* class_find_device - device iterator for locating a particular device
* @class: the class we're iterating
* @start: Device to begin with
* @data: data for the match function
* @match: function to check device
*
 * This is similar to the class_for_each_device() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*
* Note, you will need to drop the reference with put_device() after use.
*
* @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
const void *data,
int (*match)(struct device *, const void *))
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
struct device *dev;
if (!class)
return NULL;
if (!sp) {
WARN(1, "%s called for class '%s' before it was initialized",
__func__, class->name);
return NULL;
}
class_dev_iter_init(&iter, class, start, NULL);
while ((dev = class_dev_iter_next(&iter))) {
if (match(dev, data)) {
get_device(dev);
break;
}
}
class_dev_iter_exit(&iter);
subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);
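/*
 * Illustrative sketch (hypothetical match function): looking up a device
 * by name. The reference returned on a match must be dropped with
 * put_device() when done.
 *
 *	static int match_name(struct device *dev, const void *data)
 *	{
 *		return sysfs_streq(dev_name(dev), data);
 *	}
 *
 *	dev = class_find_device(class, NULL, "demo0", match_name);
 *	if (dev) {
 *		...
 *		put_device(dev);
 *	}
 */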
int class_interface_register(struct class_interface *class_intf)
{
struct subsys_private *sp;
const struct class *parent;
struct class_dev_iter iter;
struct device *dev;
if (!class_intf || !class_intf->class)
return -ENODEV;
parent = class_intf->class;
sp = class_to_subsys(parent);
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the interface is removed in the call to class_interface_unregister()
*/
mutex_lock(&sp->mutex);
list_add_tail(&class_intf->node, &sp->interfaces);
if (class_intf->add_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev);
class_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(class_interface_register);
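/*
 * Illustrative sketch (hypothetical names; the add_dev/remove_dev
 * signatures are assumed from the single-argument calls made above): an
 * interface that is notified about every device already in a statically
 * defined, registered class and about every device added later.
 *
 *	static int demo_add_dev(struct device *dev)
 *	{
 *		dev_info(dev, "joined demo class\n");
 *		return 0;
 *	}
 *
 *	static void demo_remove_dev(struct device *dev)
 *	{
 *		dev_info(dev, "left demo class\n");
 *	}
 *
 *	static struct class_interface demo_intf = {
 *		.class = &demo_class,
 *		.add_dev = demo_add_dev,
 *		.remove_dev = demo_remove_dev,
 *	};
 *
 *	ret = class_interface_register(&demo_intf);
 */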
void class_interface_unregister(struct class_interface *class_intf)
{
struct subsys_private *sp;
const struct class *parent = class_intf->class;
struct class_dev_iter iter;
struct device *dev;
if (!parent)
return;
sp = class_to_subsys(parent);
if (!sp)
return;
mutex_lock(&sp->mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->remove_dev(dev);
class_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
/*
* Decrement the reference count twice, once for the class_to_subsys()
 * call at the start of this function, and the second for the
 * reference increment in class_interface_register().
*/
subsys_put(sp);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(class_interface_unregister);
ssize_t show_class_attr_string(const struct class *class,
const struct class_attribute *attr, char *buf)
{
struct class_attribute_string *cs;
cs = container_of(attr, struct class_attribute_string, attr);
return sysfs_emit(buf, "%s\n", cs->str);
}
EXPORT_SYMBOL_GPL(show_class_attr_string);
struct class_compat {
struct kobject *kobj;
};
/**
* class_compat_register - register a compatibility class
* @name: the name of the class
*
 * Compatibility classes are meant as a temporary user-space compatibility
 * workaround when converting a family of class devices to bus devices.
*/
struct class_compat *class_compat_register(const char *name)
{
struct class_compat *cls;
cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
if (!cls)
return NULL;
cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
if (!cls->kobj) {
kfree(cls);
return NULL;
}
return cls;
}
EXPORT_SYMBOL_GPL(class_compat_register);
/**
* class_compat_unregister - unregister a compatibility class
* @cls: the class to unregister
*/
void class_compat_unregister(struct class_compat *cls)
{
kobject_put(cls->kobj);
kfree(cls);
}
EXPORT_SYMBOL_GPL(class_compat_unregister);
/**
* class_compat_create_link - create a compatibility class device link to
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
* @device_link: an optional device to which a "device" link should be created
*/
int class_compat_create_link(struct class_compat *cls, struct device *dev,
struct device *device_link)
{
int error;
error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
if (error)
return error;
/*
* Optionally add a "device" link (typically to the parent), as a
* class device would have one and we want to provide as much
* backwards compatibility as possible.
*/
if (device_link) {
error = sysfs_create_link(&dev->kobj, &device_link->kobj,
"device");
if (error)
sysfs_remove_link(cls->kobj, dev_name(dev));
}
return error;
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
/**
* class_compat_remove_link - remove a compatibility class device link to
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
* @device_link: an optional device to which a "device" link was previously
* created
*/
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
struct device *device_link)
{
if (device_link)
sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
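/*
 * Illustrative sketch (hypothetical names): keeping a legacy
 * /sys/class/demo/ entry alive while the devices themselves live on a
 * bus. Typically paired per device in probe/remove:
 *
 *	demo_compat = class_compat_register("demo");
 *
 *	probe:  class_compat_create_link(demo_compat, dev, dev->parent);
 *	remove: class_compat_remove_link(demo_compat, dev, dev->parent);
 */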
/**
* class_is_registered - determine if at this moment in time, a class is
* registered in the driver core or not.
* @class: the class to check
*
* Returns a boolean to state if the class is registered in the driver core
* or not. Note that the value could switch right after this call is made,
* so only use this in places where you "know" it is safe to do so (usually
* to determine if the specific class has been registered yet or not).
*
* Be careful in using this.
*/
bool class_is_registered(const struct class *class)
{
struct subsys_private *sp = class_to_subsys(class);
bool is_initialized = false;
if (sp) {
is_initialized = true;
subsys_put(sp);
}
return is_initialized;
}
EXPORT_SYMBOL_GPL(class_is_registered);
int __init classes_init(void)
{
class_kset = kset_create_and_add("class", NULL, NULL);
if (!class_kset)
return -ENOMEM;
return 0;
}
| linux-master | drivers/base/class.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CPU subsystem support
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>
#include <linux/sched/isolation.h>
#include "base.h"
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
/* ACPI style match is the only one that may succeed. */
if (acpi_driver_match_device(dev, drv))
return 1;
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
unsigned int from_nid, unsigned int to_nid)
{
int cpuid = cpu->dev.id;
unregister_cpu_under_node(cpuid, from_nid);
register_cpu_under_node(cpuid, to_nid);
cpu->node_id = to_nid;
}
static int cpu_subsys_online(struct device *dev)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
int from_nid, to_nid;
int ret;
int retries = 0;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
return -ENODEV;
retry:
ret = cpu_device_up(dev);
/*
* If -EBUSY is returned, it is likely that hotplug is temporarily
* disabled when cpu_hotplug_disable() was called. This condition is
* transient. So we retry after waiting for an exponentially
* increasing delay up to a total of at least 620ms as some PCI
* device initialization can take quite a while.
*/
if (ret == -EBUSY) {
retries++;
if (retries > 5)
return ret;
msleep(10 * (1 << retries));
goto retry;
}
/*
	 * When hot adding memory to a memoryless node and enabling a cpu
	 * on the node, the node number of the cpu may internally change.
*/
to_nid = cpu_to_node(cpuid);
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
return ret;
}
static int cpu_subsys_offline(struct device *dev)
{
return cpu_device_down(dev);
}
void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
ssize_t cnt;
int ret;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
cnt = arch_cpu_probe(buf, count);
unlock_device_hotplug();
return cnt;
}
static ssize_t cpu_release_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
ssize_t cnt;
int ret;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
cnt = arch_cpu_release(buf, count);
unlock_device_hotplug();
return cnt;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
unsigned long long addr;
int cpunum;
cpunum = cpu->dev.id;
/*
* Might be reading other cpu's data based on which cpu read thread
* has been scheduled. But cpu data (memory) is allocated once during
	 * boot up and this data does not change thereafter. Hence this
* operation should be safe. No locking required.
*/
addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
return sysfs_emit(buf, "%llx\n", addr);
}
static DEVICE_ATTR_ADMIN_RO(crash_notes);
static ssize_t crash_notes_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
}
static DEVICE_ATTR_ADMIN_RO(crash_notes_size);
static struct attribute *crash_note_cpu_attrs[] = {
&dev_attr_crash_notes.attr,
&dev_attr_crash_notes_size.attr,
NULL
};
static const struct attribute_group crash_note_cpu_attr_group = {
.attrs = crash_note_cpu_attrs,
};
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
NULL
};
/*
* Print cpu online, possible, present, and system maps
*/
struct cpu_attr {
struct device_attribute attr;
const struct cpumask *const map;
};
static ssize_t show_cpus_attr(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
return cpumap_print_to_pagebuf(true, buf, ca->map);
}
#define _CPU_ATTR(name, map) \
{ __ATTR(name, 0444, show_cpus_attr, NULL), map }
/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &__cpu_online_mask),
_CPU_ATTR(possible, &__cpu_possible_mask),
_CPU_ATTR(present, &__cpu_present_mask),
};
/*
* Print values for NR_CPUS and offlined cpus
*/
static ssize_t print_cpus_kernel_max(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;
static ssize_t print_cpus_offline(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len = 0;
cpumask_var_t offline;
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline));
free_cpumask_var(offline);
/* display offline cpus >= nr_cpu_ids */
if (total_cpus && nr_cpu_ids < total_cpus) {
len += sysfs_emit_at(buf, len, ",");
if (nr_cpu_ids == total_cpus-1)
len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);
else
len += sysfs_emit_at(buf, len, "%u-%d",
nr_cpu_ids, total_cpus - 1);
}
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len;
cpumask_var_t isolated;
if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
return -ENOMEM;
cpumask_andnot(isolated, cpu_possible_mask,
housekeeping_cpumask(HK_TYPE_DOMAIN));
len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
return len;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support());
}
static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
#endif
static void cpu_device_release(struct device *dev)
{
/*
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
* code reworked to properly handle this in the near future, so this
* function will then be changed to correctly free up the memory held
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
* on the linux-kernel list, you have been warned.
*/
}
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int len = 0;
u32 i;
len += sysfs_emit_at(buf, len,
"cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
CPU_FEATURE_TYPEVAL);
for (i = 0; i < MAX_CPU_FEATURES; i++)
if (cpu_have_feature(i)) {
if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
WARN(1, "CPU features overflow page\n");
break;
}
len += sysfs_emit_at(buf, len, ",%04X", i);
}
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (buf) {
print_cpu_modalias(NULL, NULL, buf);
add_uevent_var(env, "MODALIAS=%s", buf);
kfree(buf);
}
return 0;
}
#endif
struct bus_type cpu_subsys = {
.name = "cpu",
.dev_name = "cpu",
.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
.online = cpu_subsys_online,
.offline = cpu_subsys_offline,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
.uevent = cpu_uevent,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
/*
* register_cpu - Setup a sysfs device for a CPU.
 * @cpu: cpu->hotpluggable field set to 1 will generate a control file in
 *	 sysfs for this CPU.
 * @num: CPU number to use when creating the device.
*
* Initialize and register the CPU device.
*/
int register_cpu(struct cpu *cpu, int num)
{
int error;
cpu->node_id = cpu_to_node(num);
memset(&cpu->dev, 0x00, sizeof(struct device));
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
error = device_register(&cpu->dev);
if (error) {
put_device(&cpu->dev);
return error;
}
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
return 0;
}
struct device *get_cpu_device(unsigned int cpu)
{
if (cpu < nr_cpu_ids && cpu_possible(cpu))
return per_cpu(cpu_sys_devices, cpu);
else
return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);
static void device_create_release(struct device *dev)
{
kfree(dev);
}
__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, va_list args)
{
struct device *dev = NULL;
int retval = -ENOMEM;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto error;
device_initialize(dev);
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
device_set_pm_not_required(dev);
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
if (retval)
goto error;
retval = device_add(dev);
if (retval)
goto error;
return dev;
error:
put_device(dev);
return ERR_PTR(retval);
}
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...)
{
va_list vargs;
struct device *dev;
va_start(vargs, fmt);
dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
va_end(vargs);
return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
&dev_attr_probe.attr,
&dev_attr_release.attr,
#endif
&cpu_attrs[0].attr.attr,
&cpu_attrs[1].attr.attr,
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_CRASH_HOTPLUG
&dev_attr_crash_hotplug.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
};
static const struct attribute_group cpu_root_attr_group = {
.attrs = cpu_root_attrs,
};
static const struct attribute_group *cpu_root_attr_groups[] = {
&cpu_root_attr_group,
NULL,
};
bool cpu_is_hotpluggable(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
return dev && container_of(dev, struct cpu, dev)->hotpluggable
&& tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
int i;
for_each_possible_cpu(i) {
if (register_cpu(&per_cpu(cpu_devices, i), i))
panic("Failed to register CPU device");
}
#endif
}
#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
static ssize_t cpu_show_not_affected(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
#define CPU_SHOW_VULN_FALLBACK(func) \
ssize_t cpu_show_##func(struct device *, \
struct device_attribute *, char *) \
__attribute__((weak, alias("cpu_show_not_affected")))
CPU_SHOW_VULN_FALLBACK(meltdown);
CPU_SHOW_VULN_FALLBACK(spectre_v1);
CPU_SHOW_VULN_FALLBACK(spectre_v2);
CPU_SHOW_VULN_FALLBACK(spec_store_bypass);
CPU_SHOW_VULN_FALLBACK(l1tf);
CPU_SHOW_VULN_FALLBACK(mds);
CPU_SHOW_VULN_FALLBACK(tsx_async_abort);
CPU_SHOW_VULN_FALLBACK(itlb_multihit);
CPU_SHOW_VULN_FALLBACK(srbds);
CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
&dev_attr_mds.attr,
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
NULL
};
static const struct attribute_group cpu_root_vulnerabilities_group = {
.name = "vulnerabilities",
.attrs = cpu_root_vulnerabilities_attrs,
};
static void __init cpu_register_vulnerabilities(void)
{
struct device *dev = bus_get_dev_root(&cpu_subsys);
if (dev) {
if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group))
pr_err("Unable to register CPU vulnerabilities\n");
put_device(dev);
}
}
#else
static inline void cpu_register_vulnerabilities(void) { }
#endif
void __init cpu_dev_init(void)
{
if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
panic("Failed to register CPU subsystem");
cpu_dev_register_generic();
cpu_register_vulnerabilities();
}
| linux-master | drivers/base/cpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* firmware.c - firmware subsystem hoohaw.
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2007 Novell Inc.
*/
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include "base.h"
struct kobject *firmware_kobj;
EXPORT_SYMBOL_GPL(firmware_kobj);
int __init firmware_init(void)
{
firmware_kobj = kobject_create_and_add("firmware", NULL);
if (!firmware_kobj)
return -ENOMEM;
return 0;
}
| linux-master | drivers/base/firmware.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/backing-dev.h>
#include "base.h"
/**
* driver_init - initialize driver model.
*
* Call the driver model init functions to initialize their
* subsystems. Called early from init/main.c.
*/
void __init driver_init(void)
{
/* These are the core pieces */
bdi_init(&noop_backing_dev_info);
devtmpfs_init();
devices_init();
buses_init();
classes_init();
firmware_init();
hypervisor_init();
/* These are also core pieces, but must come after the
* core core pieces.
*/
of_core_init();
platform_bus_init();
auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
node_dev_init();
container_dev_init();
}
| linux-master | drivers/base/init.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* driver/base/topology.c - Populate sysfs with cpu topology information
*
* Written by: Zhang Yanmin, Intel Corporation
*
* Copyright (C) 2006, Intel Corp.
*
* All rights reserved.
*/
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/topology.h>
#define define_id_show_func(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
}
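/*
 * For reference, define_id_show_func(core_id, "%d") expands to:
 *
 *	static ssize_t core_id_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", topology_core_id(dev->id));
 *	}
 */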
#define define_siblings_read_func(name, mask) \
static ssize_t name##_read(struct file *file, struct kobject *kobj, \
struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
\
return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
off, count); \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
\
return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
off, count); \
}
define_id_show_func(physical_package_id, "%d");
static DEVICE_ATTR_RO(physical_package_id);
#ifdef TOPOLOGY_DIE_SYSFS
define_id_show_func(die_id, "%d");
static DEVICE_ATTR_RO(die_id);
#endif
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_id_show_func(cluster_id, "%d");
static DEVICE_ATTR_RO(cluster_id);
#endif
define_id_show_func(core_id, "%d");
static DEVICE_ATTR_RO(core_id);
define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
static struct bin_attribute *bin_attrs[] = {
&bin_attr_core_cpus,
&bin_attr_core_cpus_list,
&bin_attr_thread_siblings,
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
#ifdef TOPOLOGY_CLUSTER_SYSFS
&bin_attr_cluster_cpus,
&bin_attr_cluster_cpus_list,
#endif
#ifdef TOPOLOGY_DIE_SYSFS
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
#endif
&bin_attr_package_cpus,
&bin_attr_package_cpus_list,
#ifdef TOPOLOGY_BOOK_SYSFS
&bin_attr_book_siblings,
&bin_attr_book_siblings_list,
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
&bin_attr_drawer_siblings,
&bin_attr_drawer_siblings_list,
#endif
NULL
};
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
#ifdef TOPOLOGY_DIE_SYSFS
&dev_attr_die_id.attr,
#endif
#ifdef TOPOLOGY_CLUSTER_SYSFS
&dev_attr_cluster_id.attr,
#endif
&dev_attr_core_id.attr,
#ifdef TOPOLOGY_BOOK_SYSFS
&dev_attr_book_id.attr,
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
&dev_attr_ppin.attr,
NULL
};
static umode_t topology_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
return 0;
return attr->mode;
}
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.bin_attrs = bin_attrs,
.is_visible = topology_is_visible,
.name = "topology"
};
/* Add/Remove cpu_topology interface for CPU device */
static int topology_add_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
static int topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
return 0;
}
static int __init topology_sysfs_init(void)
{
return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
"base/topology:prepare", topology_add_dev,
topology_remove_dev);
}
device_initcall(topology_sysfs_init);
| linux-master | drivers/base/topology.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/dd.c - The core device/driver interactions.
*
* This file contains the (sometimes tricky) code that controls the
* interactions between devices and drivers, which primarily includes
* driver binding and unbinding.
*
* All of this code used to exist in drivers/base/bus.c, but was
* relocated to here in the name of compartmentalization (since it wasn't
* strictly code just for the 'struct bus_type'.
*
* Copyright (c) 2002-5 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007-2009 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2007-2009 Novell Inc.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/slab.h>
#include "base.h"
#include "power/power.h"
/*
* Deferred Probe infrastructure.
*
* Sometimes driver probe order matters, but the kernel doesn't always have
* dependency information which means some drivers will get probed before a
* resource it depends on is available. For example, an SDHCI driver may
* first need a GPIO line from an i2c GPIO controller before it can be
* initialized. If a required resource is not available yet, a driver can
* request probing to be deferred by returning -EPROBE_DEFER from its probe hook.
*
* Deferred probe maintains two lists of devices, a pending list and an active
* list. A driver returning -EPROBE_DEFER causes the device to be added to the
* pending list. A successful driver probe will trigger moving all devices
* from the pending to the active list so that the workqueue will eventually
* retry them.
*
* The deferred_probe_mutex must be held any time the deferred_probe_*_list
* or the (struct device*)->p->deferred_probe pointers are manipulated.
*/
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool initcalls_done;
/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN 256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
static bool async_probe_default;
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
* Once defer_all_probes is true, all driver probes will be forcibly deferred.
*/
static bool defer_all_probes;
static void __device_set_deferred_probe_reason(const struct device *dev, char *reason)
{
kfree(dev->p->deferred_probe_reason);
dev->p->deferred_probe_reason = reason;
}
/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
{
struct device *dev;
struct device_private *private;
/*
* This block processes every device in the deferred 'active' list.
* Each device is removed from the active list and passed to
* bus_probe_device() to re-attempt the probe. The loop continues
* until every device in the active list is removed and retried.
*
* Note: Once the device is removed from the list and the mutex is
* released, it is possible for the device to get freed by another thread
* and cause an illegal pointer dereference. This code uses
* get/put_device() to ensure the device structure cannot disappear
* from under our feet.
*/
mutex_lock(&deferred_probe_mutex);
while (!list_empty(&deferred_probe_active_list)) {
private = list_first_entry(&deferred_probe_active_list,
typeof(*dev->p), deferred_probe);
dev = private->device;
list_del_init(&private->deferred_probe);
get_device(dev);
__device_set_deferred_probe_reason(dev, NULL);
/*
* Drop the mutex while probing each device; the probe path may
* manipulate the deferred list
*/
mutex_unlock(&deferred_probe_mutex);
/*
* Force the device to the end of the dpm_list since
* the PM code assumes that the order we add things to
* the list is a good order for suspend but deferred
* probe makes that very unsafe.
*/
device_pm_move_to_tail(dev);
dev_dbg(dev, "Retrying from deferred list\n");
bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
put_device(dev);
}
mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
void driver_deferred_probe_add(struct device *dev)
{
if (!dev->can_match)
return;
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Added to deferred list\n");
list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
}
mutex_unlock(&deferred_probe_mutex);
}
void driver_deferred_probe_del(struct device *dev)
{
mutex_lock(&deferred_probe_mutex);
if (!list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Removed from deferred list\n");
list_del_init(&dev->p->deferred_probe);
__device_set_deferred_probe_reason(dev, NULL);
}
mutex_unlock(&deferred_probe_mutex);
}
static bool driver_deferred_probe_enable;
/**
* driver_deferred_probe_trigger() - Kick off re-probing deferred devices
*
* This function moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
*
* Note, there is a race condition in multi-threaded probe. In the case where
* more than one device is probing at the same time, it is possible for one
* probe to complete successfully while another is about to defer. If the second
* depends on the first, then it will get put on the pending list after the
* trigger event has already occurred and will be stuck there.
*
* The atomic 'deferred_trigger_count' is used to determine if a successful
* trigger has occurred in the midst of probing a driver. If the trigger count
* changes in the midst of a probe, then deferred processing should be triggered
* again.
*/
void driver_deferred_probe_trigger(void)
{
if (!driver_deferred_probe_enable)
return;
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
* into the active list so they can be retried by the workqueue.
*/
mutex_lock(&deferred_probe_mutex);
atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
/*
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
queue_work(system_unbound_wq, &deferred_probe_work);
}
/**
* device_block_probing() - Block/defer device's probes
*
* It will disable probing of devices and defer their probes instead.
*/
void device_block_probing(void)
{
defer_all_probes = true;
/* sync with probes to avoid races. */
wait_for_device_probe();
}
/**
* device_unblock_probing() - Unblock/enable device's probes
*
* It will restore normal behavior and trigger re-probing of deferred
* devices.
*/
void device_unblock_probing(void)
{
defer_all_probes = false;
driver_deferred_probe_trigger();
}
/**
* device_set_deferred_probe_reason() - Set defer probe reason message for device
* @dev: the pointer to the struct device
* @vaf: the pointer to va_format structure with message
*/
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
{
const char *drv = dev_driver_string(dev);
char *reason;
mutex_lock(&deferred_probe_mutex);
reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
__device_set_deferred_probe_reason(dev, reason);
mutex_unlock(&deferred_probe_mutex);
}
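/*
 * Illustrative sketch (not part of the original file): drivers normally
 * record a deferral reason indirectly through dev_err_probe(), which
 * calls into this helper for -EPROBE_DEFER. The foo_* names below are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
 *
 *		if (IS_ERR(clk))
 *			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
 *					     "failed to get clock\n");
 *		return 0;
 *	}
 */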
/*
* deferred_devs_show() - Show the devices in the deferred probe pending list.
*/
static int deferred_devs_show(struct seq_file *s, void *data)
{
struct device_private *curr;
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\t%s", dev_name(curr->device),
curr->device->p->deferred_probe_reason ?: "\n");
mutex_unlock(&deferred_probe_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
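/*
 * Illustrative sketch (assumption, not from the original file): the
 * resulting debugfs file lists one device per line, with the recorded
 * reason after a tab when one exists. The device and driver names are
 * hypothetical:
 *
 *	# cat /sys/kernel/debug/devices_deferred
 *	1-0050	foo_touch: supplier gpio-expander not ready
 *	foo.0
 */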
#ifdef CONFIG_MODULES
static int driver_deferred_probe_timeout = 10;
#else
static int driver_deferred_probe_timeout;
#endif
static int __init deferred_probe_timeout_setup(char *str)
{
int timeout;
if (!kstrtoint(str, 10, &timeout))
driver_deferred_probe_timeout = timeout;
return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
/**
* driver_deferred_probe_check_state() - Check deferred probe state
* @dev: device to check
*
* Return:
* * -ENODEV if initcalls have completed and modules are disabled.
* * -ETIMEDOUT if the deferred probe timeout was set and has expired
* and modules are enabled.
* * -EPROBE_DEFER in other cases.
*
 * Drivers or subsystems can opt in to calling this function instead of directly
* returning -EPROBE_DEFER.
*/
int driver_deferred_probe_check_state(struct device *dev)
{
if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
return -ENODEV;
}
if (!driver_deferred_probe_timeout && initcalls_done) {
dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
return -ETIMEDOUT;
}
return -EPROBE_DEFER;
}
EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
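/*
 * Illustrative sketch (hypothetical foo_* names): a subsystem that wants
 * the timeout/no-modules handling above instead of deferring forever:
 *
 *	supplier = foo_find_supplier(dev);
 *	if (!supplier)
 *		return driver_deferred_probe_check_state(dev);
 */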
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
void deferred_probe_extend_timeout(void)
{
/*
* If the work hasn't been queued yet or if the work expired, don't
* start a new one.
*/
if (cancel_delayed_work(&deferred_probe_timeout_work)) {
schedule_delayed_work(&deferred_probe_timeout_work,
driver_deferred_probe_timeout * HZ);
pr_debug("Extended deferred probe timeout by %d secs\n",
driver_deferred_probe_timeout);
}
}
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
* We don't want to get in the way when the bulk of drivers are getting probed.
* Instead, this initcall makes sure that deferred probing is delayed until
* late_initcall time.
*/
static int deferred_probe_initcall(void)
{
debugfs_create_file("devices_deferred", 0444, NULL, NULL,
&deferred_devs_fops);
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
initcalls_done = true;
if (!IS_ENABLED(CONFIG_MODULES))
fw_devlink_drivers_done();
/*
* Trigger deferred probe again, this time we won't defer anything
* that is optional
*/
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
if (driver_deferred_probe_timeout > 0) {
schedule_delayed_work(&deferred_probe_timeout_work,
driver_deferred_probe_timeout * HZ);
}
if (!IS_ENABLED(CONFIG_MODULES))
fw_devlink_probing_done();
return 0;
}
late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
debugfs_lookup_and_remove("devices_deferred", NULL);
}
__exitcall(deferred_probe_exit);
/**
* device_is_bound() - Check if device is bound to a driver
* @dev: device to check
*
* Returns true if passed device has already finished probing successfully
* against a driver.
*
* This function must be called with the device lock held.
*/
bool device_is_bound(struct device *dev)
{
return dev->p && klist_node_attached(&dev->p->knode_driver);
}
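/*
 * Illustrative sketch: because the device lock must be held, a typical
 * caller looks like this (bar() is a hypothetical helper):
 *
 *	device_lock(dev);
 *	if (device_is_bound(dev))
 *		bar(dev);
 *	device_unlock(dev);
 */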
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
pr_warn("%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
}
pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
/*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
*/
driver_deferred_probe_del(dev);
driver_deferred_probe_trigger();
bus_notify(dev, BUS_NOTIFY_BOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_BIND);
}
static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
device_lock(dev);
dev->driver->coredump(dev);
device_unlock(dev);
return count;
}
static DEVICE_ATTR_WO(coredump);
static int driver_sysfs_add(struct device *dev)
{
int ret;
bus_notify(dev, BUS_NOTIFY_BIND_DRIVER);
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
kobject_name(&dev->kobj));
if (ret)
goto fail;
ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
"driver");
if (ret)
goto rm_dev;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump)
return 0;
ret = device_create_file(dev, &dev_attr_coredump);
if (!ret)
return 0;
sysfs_remove_link(&dev->kobj, "driver");
rm_dev:
sysfs_remove_link(&dev->driver->p->kobj,
kobject_name(&dev->kobj));
fail:
return ret;
}
static void driver_sysfs_remove(struct device *dev)
{
struct device_driver *drv = dev->driver;
if (drv) {
if (drv->coredump)
device_remove_file(dev, &dev_attr_coredump);
sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
sysfs_remove_link(&dev->kobj, "driver");
}
}
/**
* device_bind_driver - bind a driver to one device.
* @dev: device.
*
* Allow manual attachment of a driver to a device.
* Caller must have already set @dev->driver.
*
* Note that this does not modify the bus reference count.
* Please verify that is accounted for before calling this.
* (It is ok to call with no other effort from a driver's probe() method.)
*
* This function must be called with the device lock held.
*
* Callers should prefer to use device_driver_attach() instead.
*/
int device_bind_driver(struct device *dev)
{
int ret;
ret = driver_sysfs_add(dev);
if (!ret) {
device_links_force_bind(dev);
driver_bound(dev);
} else
bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static ssize_t state_synced_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = 0;
if (strcmp("1", buf))
return -EINVAL;
device_lock(dev);
if (!dev->state_synced) {
dev->state_synced = true;
dev_sync_state(dev);
} else {
ret = -EINVAL;
}
device_unlock(dev);
return ret ? ret : count;
}
static ssize_t state_synced_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
bool val;
device_lock(dev);
val = dev->state_synced;
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RW(state_synced);
static void device_unbind_cleanup(struct device *dev)
{
devres_release_all(dev);
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
dev_pm_set_driver_flags(dev, 0);
}
static void device_remove(struct device *dev)
{
device_remove_file(dev, &dev_attr_state_synced);
device_remove_groups(dev, dev->driver->dev_groups);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
else if (dev->driver->remove)
dev->driver->remove(dev);
}
static int call_driver_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
if (dev->bus->probe)
ret = dev->bus->probe(dev);
else if (drv->probe)
ret = drv->probe(dev);
switch (ret) {
case 0:
break;
case -EPROBE_DEFER:
/* Driver requested deferred probing */
dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
break;
case -ENODEV:
case -ENXIO:
pr_debug("%s: probe of %s rejects match %d\n",
drv->name, dev_name(dev), ret);
break;
default:
/* driver matched but the probe failed */
pr_warn("%s: probe of %s failed with error %d\n",
drv->name, dev_name(dev), ret);
break;
}
return ret;
}
static int really_probe(struct device *dev, struct device_driver *drv)
{
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
int ret, link_ret;
if (defer_all_probes) {
/*
* Value of defer_all_probes can be set only by
* device_block_probing() which, in turn, will call
* wait_for_device_probe() right after that to avoid any races.
*/
dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
return -EPROBE_DEFER;
}
link_ret = device_links_check_suppliers(dev);
if (link_ret == -EPROBE_DEFER)
return link_ret;
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
ret = -EBUSY;
goto done;
}
re_probe:
dev->driver = drv;
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
if (ret)
goto pinctrl_bind_failed;
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
goto pinctrl_bind_failed;
}
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
goto sysfs_failed;
}
if (dev->pm_domain && dev->pm_domain->activate) {
ret = dev->pm_domain->activate(dev);
if (ret)
goto probe_failed;
}
ret = call_driver_probe(dev, drv);
if (ret) {
/*
* If fw_devlink_best_effort is active (denoted by -EAGAIN), the
* device might actually probe properly once some of its missing
* suppliers have probed. So, treat this as if the driver
* returned -EPROBE_DEFER.
*/
if (link_ret == -EAGAIN)
ret = -EPROBE_DEFER;
/*
* Return probe errors as positive values so that the callers
* can distinguish them from other errors.
*/
ret = -ret;
goto probe_failed;
}
ret = device_add_groups(dev, drv->dev_groups);
if (ret) {
dev_err(dev, "device_add_groups() failed\n");
goto dev_groups_failed;
}
if (dev_has_sync_state(dev)) {
ret = device_create_file(dev, &dev_attr_state_synced);
if (ret) {
dev_err(dev, "state_synced sysfs add failed\n");
goto dev_sysfs_state_synced_failed;
}
}
if (test_remove) {
test_remove = false;
device_remove(dev);
driver_sysfs_remove(dev);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
device_unbind_cleanup(dev);
goto re_probe;
}
pinctrl_init_done(dev);
if (dev->pm_domain && dev->pm_domain->sync)
dev->pm_domain->sync(dev);
driver_bound(dev);
pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
goto done;
dev_sysfs_state_synced_failed:
dev_groups_failed:
device_remove(dev);
probe_failed:
driver_sysfs_remove(dev);
sysfs_failed:
bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
device_unbind_cleanup(dev);
done:
return ret;
}
/*
* For initcall_debug, show the driver probe time.
*/
static int really_probe_debug(struct device *dev, struct device_driver *drv)
{
ktime_t calltime, rettime;
int ret;
calltime = ktime_get();
ret = really_probe(dev, drv);
rettime = ktime_get();
/*
* Don't change this to pr_debug() because that requires
* CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
* kernel commandline to print this all the time at the debug level.
*/
printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
dev_name(dev), ret, ktime_us_delta(rettime, calltime));
return ret;
}
/**
* driver_probe_done
* Determine if the probe sequence is finished or not.
*
* Should somehow figure out how to use a semaphore, not an atomic variable...
*/
bool __init driver_probe_done(void)
{
int local_probe_count = atomic_read(&probe_count);
pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
return !local_probe_count;
}
/**
* wait_for_device_probe
* Wait for device probing to be completed.
*/
void wait_for_device_probe(void)
{
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
static int __driver_probe_device(struct device_driver *drv, struct device *dev)
{
int ret = 0;
if (dev->p->dead || !device_is_registered(dev))
return -ENODEV;
if (dev->driver)
return -EBUSY;
dev->can_match = true;
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
pm_runtime_barrier(dev);
if (initcall_debug)
ret = really_probe_debug(dev, drv);
else
ret = really_probe(dev, drv);
pm_request_idle(dev);
if (dev->parent)
pm_runtime_put(dev->parent);
pm_runtime_put_suppliers(dev);
return ret;
}
/**
* driver_probe_device - attempt to bind device & driver together
* @drv: driver to bind a device to
* @dev: device to try to bind to the driver
*
* This function returns -ENODEV if the device is not registered, -EBUSY if it
* already has a driver, 0 if the device is bound successfully and a positive
* (inverted) error code for failures from the ->probe method.
*
* This function must be called with @dev lock held. When called for a
* USB interface, @dev->parent lock must be held as well.
*
* If the device has a parent, runtime-resume the parent before driver probing.
*/
static int driver_probe_device(struct device_driver *drv, struct device *dev)
{
int trigger_count = atomic_read(&deferred_trigger_count);
int ret;
atomic_inc(&probe_count);
ret = __driver_probe_device(drv, dev);
if (ret == -EPROBE_DEFER || ret == EPROBE_DEFER) {
driver_deferred_probe_add(dev);
/*
* Did a trigger occur while probing? Need to re-trigger if yes
*/
if (trigger_count != atomic_read(&deferred_trigger_count) &&
!defer_all_probes)
driver_deferred_probe_trigger();
}
atomic_dec(&probe_count);
wake_up_all(&probe_waitqueue);
return ret;
}
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
bool async_drv;
async_drv = parse_option_str(async_probe_drv_names, drv_name);
return (async_probe_default != async_drv);
}
/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
static int __init save_async_options(char *buf)
{
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
async_probe_default = parse_option_str(async_probe_drv_names, "*");
return 1;
}
__setup("driver_async_probe=", save_async_options);
static bool driver_allows_async_probing(struct device_driver *drv)
{
switch (drv->probe_type) {
case PROBE_PREFER_ASYNCHRONOUS:
return true;
case PROBE_FORCE_SYNCHRONOUS:
return false;
default:
if (cmdline_requested_async_probing(drv->name))
return true;
if (module_requested_async_probing(drv->owner))
return true;
return false;
}
}
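/*
 * Illustrative sketch (hypothetical foo driver): opting in to
 * asynchronous probing at registration time via the probe_type field:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe = foo_probe,
 *		.driver = {
 *			.name = "foo",
 *			.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 *		},
 *	};
 */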
struct device_attach_data {
struct device *dev;
/*
* Indicates whether we are considering asynchronous probing or
* not. Only initial binding after device or driver registration
* (including deferral processing) may be done asynchronously, the
* rest is always synchronous, as we expect it is being done by
* request from userspace.
*/
bool check_async;
/*
* Indicates if we are binding synchronous or asynchronous drivers.
* When asynchronous probing is enabled we'll execute 2 passes
* over drivers: first pass doing synchronous probing and second
* doing asynchronous probing (if synchronous did not succeed -
* most likely because there was no driver requiring synchronous
 * probing - and we found an asynchronous driver during the first pass).
 * The 2 passes are done because we can't issue an asynchronous
 * probe for a given device and driver from bus_for_each_drv() since
 * the driver pointer is not guaranteed to stay valid once
* bus_for_each_drv() iterates to the next driver on the bus.
*/
bool want_async;
/*
* We'll set have_async to 'true' if, while scanning for matching
* driver, we'll encounter one that requests asynchronous probing.
*/
bool have_async;
};
static int __device_attach_driver(struct device_driver *drv, void *_data)
{
struct device_attach_data *data = _data;
struct device *dev = data->dev;
bool async_allowed;
int ret;
ret = driver_match_device(drv, dev);
if (ret == 0) {
/* no match */
return 0;
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
/*
* Device can't match with a driver right now, so don't attempt
* to match or bind with other drivers on the bus.
*/
return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
async_allowed = driver_allows_async_probing(drv);
if (async_allowed)
data->have_async = true;
if (data->check_async && async_allowed != data->want_async)
return 0;
/*
* Ignore errors returned by ->probe so that the next driver can try
* its luck.
*/
ret = driver_probe_device(drv, dev);
if (ret < 0)
return ret;
return ret == 0;
}
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
struct device_attach_data data = {
.dev = dev,
.check_async = true,
.want_async = true,
};
device_lock(dev);
/*
* Check if device has already been removed or claimed. This may
 * happen when driver loading, device discovery/registration,
 * and deferred probe processing run concurrently across
* multiple threads.
*/
if (dev->p->dead || dev->driver)
goto out_unlock;
if (dev->parent)
pm_runtime_get_sync(dev->parent);
bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
dev_dbg(dev, "async probe completed\n");
pm_request_idle(dev);
if (dev->parent)
pm_runtime_put(dev->parent);
out_unlock:
device_unlock(dev);
put_device(dev);
}
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
bool async = false;
device_lock(dev);
if (dev->p->dead) {
goto out_unlock;
} else if (dev->driver) {
if (device_is_bound(dev)) {
ret = 1;
goto out_unlock;
}
ret = device_bind_driver(dev);
if (ret == 0)
ret = 1;
else {
dev->driver = NULL;
ret = 0;
}
} else {
struct device_attach_data data = {
.dev = dev,
.check_async = allow_async,
.want_async = false,
};
if (dev->parent)
pm_runtime_get_sync(dev->parent);
ret = bus_for_each_drv(dev->bus, NULL, &data,
__device_attach_driver);
if (!ret && allow_async && data.have_async) {
/*
* If we could not find appropriate driver
* synchronously and we are allowed to do
* async probes and there are drivers that
* want to probe asynchronously, we'll
* try them.
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
async = true;
} else {
pm_request_idle(dev);
}
if (dev->parent)
pm_runtime_put(dev->parent);
}
out_unlock:
device_unlock(dev);
if (async)
async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
/**
* device_attach - try to attach device to a driver.
* @dev: device.
*
* Walk the list of drivers that the bus has and call
* driver_probe_device() for each pair. If a compatible
* pair is found, break out and return.
*
* Returns 1 if the device was bound to a driver;
* 0 if no matching driver was found;
* -ENODEV if the device is not registered.
*
* When called for a USB interface, @dev->parent lock must be held.
*/
int device_attach(struct device *dev)
{
return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
__device_attach(dev, true);
}
/*
* __device_driver_lock - acquire locks needed to manipulate dev->drv
* @dev: Device we will update driver info for
* @parent: Parent device. Needed if the bus requires parent lock
*
* This function will take the required locks for manipulating dev->drv.
* Normally this will just be the @dev lock, but when called for a USB
* interface, @parent lock will be held as well.
*/
static void __device_driver_lock(struct device *dev, struct device *parent)
{
if (parent && dev->bus->need_parent_lock)
device_lock(parent);
device_lock(dev);
}
/*
* __device_driver_unlock - release locks needed to manipulate dev->drv
* @dev: Device we will update driver info for
* @parent: Parent device. Needed if the bus requires parent lock
*
* This function will release the required locks for manipulating dev->drv.
* Normally this will just be the @dev lock, but when called for a
* USB interface, @parent lock will be released as well.
*/
static void __device_driver_unlock(struct device *dev, struct device *parent)
{
device_unlock(dev);
if (parent && dev->bus->need_parent_lock)
device_unlock(parent);
}
/**
* device_driver_attach - attach a specific driver to a specific device
* @drv: Driver to attach
* @dev: Device to attach it to
*
* Manually attach driver to a device. Will acquire both @dev lock and
* @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
*/
int device_driver_attach(struct device_driver *drv, struct device *dev)
{
int ret;
__device_driver_lock(dev, dev->parent);
ret = __driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
/* also return probe errors as normal negative errnos */
if (ret > 0)
ret = -ret;
if (ret == -EPROBE_DEFER)
return -EAGAIN;
return ret;
}
EXPORT_SYMBOL_GPL(device_driver_attach);
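/*
 * Illustrative note (assumption based on the sysfs bind interface): this
 * is the path exercised when userspace writes a device name to a
 * driver's "bind" file, e.g. (hypothetical device and driver):
 *
 *	echo 0000:00:1f.3 > /sys/bus/pci/drivers/foo/bind
 */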
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
struct device_driver *drv;
int ret;
__device_driver_lock(dev, dev->parent);
drv = dev->p->async_driver;
dev->p->async_driver = NULL;
ret = driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
put_device(dev);
}
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
bool async = false;
int ret;
/*
* Lock device and try to bind to it. We drop the error
* here and always return 0, because we need to keep trying
* to bind to devices and some drivers will return an error
 * simply because they don't support the device.
*
* driver_probe_device() will spit a warning if there
* is an error.
*/
ret = driver_match_device(drv, dev);
if (ret == 0) {
/* no match */
return 0;
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
/*
* Driver could not match with device, but may match with
* another device on the bus.
*/
return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
/*
* Driver could not match with device, but may match with
* another device on the bus.
*/
return 0;
} /* ret > 0 means positive match */
if (driver_allows_async_probing(drv)) {
/*
* Instead of probing the device synchronously we will
* probe it asynchronously to allow for more parallelism.
*
* We only take the device lock here in order to guarantee
* that the dev->driver and async_driver fields are protected
*/
dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
device_lock(dev);
if (!dev->driver && !dev->p->async_driver) {
get_device(dev);
dev->p->async_driver = drv;
async = true;
}
device_unlock(dev);
if (async)
async_schedule_dev(__driver_attach_async_helper, dev);
return 0;
}
__device_driver_lock(dev, dev->parent);
driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
return 0;
}
/**
* driver_attach - try to bind driver to devices.
* @drv: driver.
*
* Walk the list of devices that the bus has on it and try to
* match the driver with each one. If driver_probe_device()
* returns 0 and the @dev->driver is set, we've found a
* compatible pair.
*/
int driver_attach(struct device_driver *drv)
{
return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
/*
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
drv = dev->driver;
if (drv) {
pm_runtime_get_sync(dev);
while (device_links_busy(dev)) {
__device_driver_unlock(dev, parent);
device_links_unbind_consumers(dev);
__device_driver_lock(dev, parent);
/*
* A concurrent invocation of the same function might
* have released the driver successfully while this one
* was waiting, so check for that.
*/
if (dev->driver != drv) {
pm_runtime_put(dev);
return;
}
}
driver_sysfs_remove(dev);
bus_notify(dev, BUS_NOTIFY_UNBIND_DRIVER);
pm_runtime_put_sync(dev);
device_remove(dev);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
device_links_driver_cleanup(dev);
device_unbind_cleanup(dev);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
bus_notify(dev, BUS_NOTIFY_UNBOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent)
{
__device_driver_lock(dev, parent);
if (!drv || drv == dev->driver)
__device_release_driver(dev, parent);
__device_driver_unlock(dev, parent);
}
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
*
* If this function is to be called with @dev->parent lock held, ensure that
* the device's consumers are unbound in advance or that their locks can be
* acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
/*
* If anyone calls device_release_driver() recursively from
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
/**
* device_driver_detach - detach driver from a specific device
* @dev: device to detach driver from
*
* Detach driver from device. Will acquire both @dev lock and @dev->parent
* lock if needed.
*/
void device_driver_detach(struct device *dev)
{
device_release_driver_internal(dev, NULL, dev->parent);
}
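/*
 * Illustrative note (assumption based on the sysfs unbind interface):
 * this is the counterpart used when userspace writes a device name to a
 * driver's "unbind" file, e.g. (hypothetical device and driver):
 *
 *	echo 0000:00:1f.3 > /sys/bus/pci/drivers/foo/unbind
 */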
/**
* driver_detach - detach driver from all devices it controls.
* @drv: driver.
*/
void driver_detach(struct device_driver *drv)
{
struct device_private *dev_prv;
struct device *dev;
if (driver_allows_async_probing(drv))
async_synchronize_full();
for (;;) {
spin_lock(&drv->p->klist_devices.k_lock);
if (list_empty(&drv->p->klist_devices.k_list)) {
spin_unlock(&drv->p->klist_devices.k_lock);
break;
}
dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
struct device_private,
knode_driver.n_node);
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
| linux-master | drivers/base/dd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* module.c - module sysfs fun for drivers
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "base.h"
static char *make_driver_name(struct device_driver *drv)
{
char *driver_name;
driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
if (!driver_name)
return NULL;
return driver_name;
}
static void module_create_drivers_dir(struct module_kobject *mk)
{
static DEFINE_MUTEX(drivers_dir_mutex);
mutex_lock(&drivers_dir_mutex);
if (mk && !mk->drivers_dir)
mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
mutex_unlock(&drivers_dir_mutex);
}
void module_add_driver(struct module *mod, struct device_driver *drv)
{
char *driver_name;
int no_warn;
struct module_kobject *mk = NULL;
if (!drv)
return;
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
struct kobject *mkobj;
/* Lookup built-in module entry in /sys/modules */
mkobj = kset_find_obj(module_kset, drv->mod_name);
if (mkobj) {
mk = container_of(mkobj, struct module_kobject, kobj);
/* remember our module structure */
drv->p->mkobj = mk;
/* kset_find_obj took a reference */
kobject_put(mkobj);
}
}
if (!mk)
return;
/* Don't check return codes; these calls are idempotent */
no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
driver_name = make_driver_name(drv);
if (driver_name) {
module_create_drivers_dir(mk);
no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
driver_name);
kfree(driver_name);
}
}
void module_remove_driver(struct device_driver *drv)
{
struct module_kobject *mk = NULL;
char *driver_name;
if (!drv)
return;
sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner)
mk = &drv->owner->mkobj;
else if (drv->p->mkobj)
mk = drv->p->mkobj;
if (mk && mk->drivers_dir) {
driver_name = make_driver_name(drv);
if (driver_name) {
sysfs_remove_link(mk->drivers_dir, driver_name);
kfree(driver_name);
}
}
}
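/*
 * Illustrative layout (hypothetical usb "hub" driver built into usbcore)
 * of the links maintained by module_add_driver()/module_remove_driver():
 *
 *	/sys/module/usbcore/drivers/usb:hub -> /sys/bus/usb/drivers/hub
 *	/sys/bus/usb/drivers/hub/module -> /sys/module/usbcore
 */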
| linux-master | drivers/base/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* property.c - Unified device property interface.
*
* Copyright (C) 2014, Intel Corporation
* Authors: Rafael J. Wysocki <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/phy.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
}
EXPORT_SYMBOL_GPL(__dev_fwnode);
const struct fwnode_handle *__dev_fwnode_const(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
}
EXPORT_SYMBOL_GPL(__dev_fwnode_const);
/**
* device_property_present - check if a property of a device is present
* @dev: Device whose property is being checked
* @propname: Name of the property
*
* Check if property @propname is present in the device firmware description.
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
bool device_property_present(const struct device *dev, const char *propname)
{
return fwnode_property_present(dev_fwnode(dev), propname);
}
EXPORT_SYMBOL_GPL(device_property_present);
/**
* fwnode_property_present - check if a property of a firmware node is present
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
bool ret;
if (IS_ERR_OR_NULL(fwnode))
return false;
ret = fwnode_call_bool_op(fwnode, property_present, propname);
if (ret)
return ret;
return fwnode_call_bool_op(fwnode->secondary, property_present, propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u8 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u8() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval)
{
return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u8_array);
/**
* device_property_read_u16_array - return a u16 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u16 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u16() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval)
{
return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u16_array);
/**
* device_property_read_u32_array - return a u32 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u32 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u32() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval)
{
return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u32_array);
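/*
 * Illustrative sketch (hypothetical "foo-thresholds" property):
 *
 *	u32 thresholds[4];
 *	int ret;
 *
 *	ret = device_property_read_u32_array(dev, "foo-thresholds",
 *					     thresholds,
 *					     ARRAY_SIZE(thresholds));
 *	if (ret)
 *		return ret;
 */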
/**
* device_property_read_u64_array - return a u64 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of u64 properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_count_u64() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval)
{
return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_u64_array);
/**
* device_property_read_string_array - return a string array property of device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Function reads an array of string properties with @propname from the device
* firmware description and stores them to @val if found.
*
* It's recommended to call device_property_string_array_count() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval)
{
return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
}
EXPORT_SYMBOL_GPL(device_property_read_string_array);
/**
* device_property_read_string - return a string property of a device
* @dev: Device to get the property of
* @propname: Name of the property
* @val: The value is stored here
*
* Function reads property @propname from the device firmware description and
* stores the value into @val if found. The value is checked to be a string.
*
* Return: %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property type is not a string.
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string(const struct device *dev, const char *propname,
const char **val)
{
return fwnode_property_read_string(dev_fwnode(dev), propname, val);
}
EXPORT_SYMBOL_GPL(device_property_read_string);
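/*
 * Illustrative sketch (hypothetical "label" property):
 *
 *	const char *label;
 *
 *	if (!device_property_read_string(dev, "label", &label))
 *		dev_info(dev, "label: %s\n", label);
 */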
/**
* device_property_match_string - find a string in an array and return index
* @dev: Device to get the property of
* @propname: Name of the property holding the array
* @string: String to look for
*
* Find a given string in a string array and if it is found return the
* index back.
*
* Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
int device_property_match_string(const struct device *dev, const char *propname,
const char *string)
{
return fwnode_property_match_string(dev_fwnode(dev), propname, string);
}
EXPORT_SYMBOL_GPL(device_property_match_string);
static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -EINVAL;
ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
elem_size, val, nval);
if (ret != -EINVAL)
return ret;
return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname,
elem_size, val, nval);
}
/**
* fwnode_property_read_u8_array - return a u8 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read an array of u8 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u8() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
/**
* fwnode_property_read_u16_array - return a u16 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u16 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u16() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
/**
* fwnode_property_read_u32_array - return a u32 array property of firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read an array of u32 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u32() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
/**
* fwnode_property_read_u64_array - return a u64 array property firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
* Read an array of u64 properties with @propname from @fwnode and store them to
* @val if found.
*
* It's recommended to call fwnode_property_count_u64() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values if @val was %NULL,
* %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
/**
* fwnode_property_read_string_array - return string array property of a node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The values are stored here or %NULL to return the number of values
* @nval: Size of the @val array
*
 * Read a string list property @propname from the given firmware node and store
* them to @val if found.
*
* It's recommended to call fwnode_property_string_array_count() instead of calling
* this function with @val equals %NULL and @nval equals 0.
*
* Return: number of values read on success if @val is non-NULL,
* number of values available on success if @val is NULL,
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -EINVAL;
ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
val, nval);
if (ret != -EINVAL)
return ret;
return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname,
val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
/**
* fwnode_property_read_string - return a string property of a firmware node
* @fwnode: Firmware node to get the property of
* @propname: Name of the property
* @val: The value is stored here
*
* Read property @propname from the given firmware node and store the value into
* @val if found. The value is checked to be a string.
*
* Return: %0 if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
/**
* fwnode_property_match_string - find a string in an array and return index
* @fwnode: Firmware node to get the property of
* @propname: Name of the property holding the array
* @string: String to look for
*
* Find a given string in a string array and if it is found return the
* index back.
*
* Return: index, starting from %0, if the property was found (success),
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
int nval, ret;
nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
if (nval < 0)
return nval;
if (nval == 0)
return -ENODATA;
values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
if (!values)
return -ENOMEM;
ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
if (ret < 0)
goto out_free;
ret = match_string(values, nval, string);
if (ret < 0)
ret = -ENODATA;
out_free:
kfree(values);
return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
/**
* fwnode_property_get_reference_args() - Find a reference with arguments
* @fwnode: Firmware node where to look for the reference
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
* otherwise @nargs is ignored. Only relevant on OF.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
*
* Obtain a reference based on a named property in an fwnode, with
* integer arguments.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* @args->fwnode pointer.
*
* Return: %0 on success
* %-ENOENT when the index is out of bounds, the index has an empty
* reference or the property was not found
* %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
int ret;
if (IS_ERR_OR_NULL(fwnode))
return -ENOENT;
ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
nargs, index, args);
if (ret == 0)
return ret;
if (IS_ERR_OR_NULL(fwnode->secondary))
return ret;
return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop,
nargs, index, args);
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
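/*
 * Illustrative sketch (using the common "io-channels" binding as an
 * assumed example): fetch the first reference and its arguments, then
 * drop the reference the API hands back:
 *
 *	struct fwnode_reference_args args;
 *	int ret;
 *
 *	ret = fwnode_property_get_reference_args(fwnode, "io-channels",
 *						 "#io-channel-cells",
 *						 0, 0, &args);
 *	if (!ret) {
 *		// args.fwnode + args.args[0] identify the channel
 *		fwnode_handle_put(args.fwnode);
 *	}
 */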
/**
* fwnode_find_reference - Find named reference to a fwnode_handle
* @fwnode: Firmware node where to look for the reference
* @name: The name of the reference
* @index: Index of the reference
*
* @index can be used when the named reference holds a table of references.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: a pointer to the reference fwnode, when found. Otherwise,
* returns an error pointer.
*/
struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
unsigned int index)
{
struct fwnode_reference_args args;
int ret;
ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
&args);
return ret ? ERR_PTR(ret) : args.fwnode;
}
EXPORT_SYMBOL_GPL(fwnode_find_reference);
/**
* fwnode_get_name - Return the name of a node
* @fwnode: The firmware node
*
* Return: a pointer to the node name, or %NULL.
*/
const char *fwnode_get_name(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_name);
}
EXPORT_SYMBOL_GPL(fwnode_get_name);
/**
* fwnode_get_name_prefix - Return the prefix of node for printing purposes
* @fwnode: The firmware node
*
* Return: the prefix of a node, intended to be printed right before the node.
* The prefix works also as a separator between the nodes.
*/
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_name_prefix);
}
/**
 * fwnode_get_parent - Return parent firmware node
 * @fwnode: Firmware node whose parent is retrieved
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_parent);
}
EXPORT_SYMBOL_GPL(fwnode_get_parent);
/**
* fwnode_get_next_parent - Iterate to the node's parent
 * @fwnode: Firmware node whose parent is retrieved
*
* This is like fwnode_get_parent() except that it drops the refcount
* on the passed node, making it suitable for iterating through a
* node's parents.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @fwnode
* unconditionally.
*
* Return: parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent = fwnode_get_parent(fwnode);
fwnode_handle_put(fwnode);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
* fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
* @fwnode: firmware node
*
* Given a firmware node (@fwnode), this function finds its closest ancestor
* firmware node that has a corresponding struct device and returns that struct
* device.
*
* The caller is responsible for calling put_device() on the returned device
* pointer.
*
* Return: a pointer to the device of the @fwnode's closest ancestor.
*/
struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
struct device *dev;
fwnode_for_each_parent_node(fwnode, parent) {
dev = get_dev_from_fwnode(parent);
if (dev) {
fwnode_handle_put(parent);
return dev;
}
}
return NULL;
}
/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
* Return: the number of parents a node has.
*/
unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
unsigned int count = 0;
fwnode_for_each_parent_node(fwnode, parent)
count++;
return count;
}
EXPORT_SYMBOL_GPL(fwnode_count_parents);
/**
* fwnode_get_nth_parent - Return an nth parent of a node
* @fwnode: The node the parent of which is requested
* @depth: Distance of the parent from the node
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
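 * For instance, depth 2 yields the grandparent (sketch; use() is a
 * placeholder)::
 *
 *	struct fwnode_handle *gp = fwnode_get_nth_parent(fwnode, 2);
 *
 *	if (gp) {
 *		use(gp);
 *		fwnode_handle_put(gp);
 *	}
 *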
* Return: the nth parent of a node. If there is no parent at the requested
* @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
* fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
*/
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
unsigned int depth)
{
struct fwnode_handle *parent;
if (depth == 0)
return fwnode_handle_get(fwnode);
fwnode_for_each_parent_node(fwnode, parent) {
if (--depth == 0)
return parent;
}
return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
* fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
 * @ancestor: Firmware node which is tested for being an ancestor
 * @child: Firmware node which is tested for being the child
*
* A node is considered an ancestor of itself too.
*
* Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
*/
bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
{
struct fwnode_handle *parent;
if (IS_ERR_OR_NULL(ancestor))
return false;
if (child == ancestor)
return true;
fwnode_for_each_parent_node(child, parent) {
if (parent == ancestor) {
fwnode_handle_put(parent);
return true;
}
}
return false;
}
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
}
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
/**
* fwnode_get_next_available_child_node - Return the next available child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
*/
struct fwnode_handle *
fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
struct fwnode_handle *next_child = child;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
do {
next_child = fwnode_get_next_child_node(fwnode, next_child);
if (!next_child)
return NULL;
} while (!fwnode_device_is_available(next_child));
return next_child;
}
EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a %NULL handle.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @child
* unconditionally.
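 *
 * A hand-rolled iteration sketch (handle_child() is a placeholder;
 * device_for_each_child_node() wraps this same pattern)::
 *
 *	struct fwnode_handle *child = NULL;
 *
 *	while ((child = device_get_next_child_node(dev, child)))
 *		handle_child(child);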
*/
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child)
{
const struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *next;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
return fwnode_get_next_child_node(fwnode->secondary, child);
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
/**
* fwnode_get_named_child_node - Return first matching named child node handle
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
}
EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
/**
* device_get_named_child_node - Return first matching named child node handle
* @dev: Device to find the named child node for.
* @childname: String to match child node name against.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname)
{
return fwnode_get_named_child_node(dev_fwnode(dev), childname);
}
EXPORT_SYMBOL_GPL(device_get_named_child_node);
/**
* fwnode_handle_get - Obtain a reference to a device node
* @fwnode: Pointer to the device node to obtain the reference to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the fwnode handle.
*/
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
{
if (!fwnode_has_op(fwnode, get))
return fwnode;
return fwnode_call_ptr_op(fwnode, get);
}
EXPORT_SYMBOL_GPL(fwnode_handle_get);
/**
* fwnode_handle_put - Drop reference to a device node
* @fwnode: Pointer to the device node to drop the reference to.
*
* This has to be used when terminating device_for_each_child_node() iteration
* with break or return to prevent stale device node references from being left
* behind.
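 *
 * For example (sketch; is_the_one_we_want() stands in for driver logic)::
 *
 *	struct fwnode_handle *child;
 *
 *	device_for_each_child_node(dev, child) {
 *		if (is_the_one_we_want(child)) {
 *			fwnode_handle_put(child);
 *			break;
 *		}
 *	}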
*/
void fwnode_handle_put(struct fwnode_handle *fwnode)
{
fwnode_call_void_op(fwnode, put);
}
EXPORT_SYMBOL_GPL(fwnode_handle_put);
/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
* Return: true if device is available for use. Otherwise, returns false.
*
* For fwnode node types that don't implement the .device_is_available()
* operation, this function returns true.
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (IS_ERR_OR_NULL(fwnode))
return false;
if (!fwnode_has_op(fwnode, device_is_available))
return true;
return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
* device_get_child_node_count - return the number of child nodes for device
 * @dev: Device to count the child nodes for
*
* Return: the number of child nodes for a given device.
*/
unsigned int device_get_child_node_count(const struct device *dev)
{
struct fwnode_handle *child;
unsigned int count = 0;
device_for_each_child_node(dev, child)
count++;
return count;
}
EXPORT_SYMBOL_GPL(device_get_child_node_count);
bool device_dma_supported(const struct device *dev)
{
return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported);
}
EXPORT_SYMBOL_GPL(device_dma_supported);
enum dev_dma_attr device_get_dma_attr(const struct device *dev)
{
if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr))
return DEV_DMA_NOT_SUPPORTED;
return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr);
}
EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
* fwnode_get_phy_mode - Get phy mode for given firmware node
* @fwnode: Pointer to the given node
*
 * The function gets the phy interface string from the 'phy-mode' or
 * 'phy-connection-type' property and returns its index in the phy_modes
 * table, or a negative errno on error.
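 *
 * A usage sketch (configure_rgmii() is a placeholder)::
 *
 *	int mode = fwnode_get_phy_mode(fwnode);
 *
 *	if (mode < 0)
 *		return mode;
 *	if (mode == PHY_INTERFACE_MODE_RGMII)
 *		configure_rgmii();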
*/
int fwnode_get_phy_mode(const struct fwnode_handle *fwnode)
{
const char *pm;
int err, i;
err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
if (err < 0)
err = fwnode_property_read_string(fwnode,
"phy-connection-type", &pm);
if (err < 0)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
/**
* device_get_phy_mode - Get phy mode for given device
* @dev: Pointer to the given device
*
 * The function gets the phy interface string from the 'phy-mode' or
 * 'phy-connection-type' property and returns its index in the phy_modes
 * table, or a negative errno on error.
*/
int device_get_phy_mode(struct device *dev)
{
return fwnode_get_phy_mode(dev_fwnode(dev));
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
/**
* fwnode_iomap - Maps the memory mapped IO for a given fwnode
* @fwnode: Pointer to the firmware node
* @index: Index of the IO range
*
* Return: a pointer to the mapped memory.
*/
void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
{
return fwnode_call_ptr_op(fwnode, iomap, index);
}
EXPORT_SYMBOL(fwnode_iomap);
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
* @index: Zero-based index of the IRQ
*
* Return: Linux IRQ number on success. Negative errno on failure.
*/
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
{
int ret;
ret = fwnode_call_int_op(fwnode, irq_get, index);
	/* We treat mapping errors as an invalid case */
if (ret == 0)
return -EINVAL;
return ret;
}
EXPORT_SYMBOL(fwnode_irq_get);
/**
* fwnode_irq_get_byname - Get IRQ from a fwnode using its name
* @fwnode: Pointer to the firmware node
* @name: IRQ name
*
* Description:
* Find a match to the string @name in the 'interrupt-names' string array
* in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ
* number of the IRQ resource corresponding to the index of the matched
* string.
*
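 * For example, with firmware describing interrupt-names = "wakeup", "alert"
 * (hypothetical names), the "alert" IRQ could be fetched as follows::
 *
 *	int irq = fwnode_irq_get_byname(fwnode, "alert");
 *
 *	if (irq < 0)
 *		return irq;
 *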
* Return: Linux IRQ number on success, or negative errno otherwise.
*/
int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
{
int index;
if (!name)
return -EINVAL;
index = fwnode_property_match_string(fwnode, "interrupt-names", name);
if (index < 0)
return index;
return fwnode_irq_get(fwnode, index);
}
EXPORT_SYMBOL(fwnode_irq_get_byname);
/**
* fwnode_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer. Note that this function also puts a reference to @prev
* unconditionally.
*
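 * Typically used via the fwnode_graph_for_each_endpoint() iterator, which
 * wraps this function (sketch; handle_endpoint() is a placeholder)::
 *
 *	struct fwnode_handle *ep;
 *
 *	fwnode_graph_for_each_endpoint(fwnode, ep)
 *		handle_endpoint(ep);
 *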
* Return: an endpoint firmware node pointer or %NULL if no more endpoints
* are available.
*/
struct fwnode_handle *
fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
struct fwnode_handle *ep, *port_parent = NULL;
const struct fwnode_handle *parent;
/*
* If this function is in a loop and the previous iteration returned
* an endpoint from fwnode->secondary, then we need to use the secondary
* as parent rather than @fwnode.
*/
if (prev) {
port_parent = fwnode_graph_get_port_parent(prev);
parent = port_parent;
} else {
parent = fwnode;
}
if (IS_ERR_OR_NULL(parent))
return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
if (ep)
goto out_put_port_parent;
ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
out_put_port_parent:
fwnode_handle_put(port_parent);
return ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
/**
* fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
* @endpoint: Endpoint firmware node of the port
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
{
struct fwnode_handle *port, *parent;
port = fwnode_get_parent(endpoint);
parent = fwnode_call_ptr_op(port, graph_get_port_parent);
fwnode_handle_put(port);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
/**
* fwnode_graph_get_remote_port_parent - Return fwnode of a remote device
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote device the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint, *parent;
endpoint = fwnode_graph_get_remote_endpoint(fwnode);
parent = fwnode_graph_get_port_parent(endpoint);
fwnode_handle_put(endpoint);
return parent;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
/**
* fwnode_graph_get_remote_port - Return fwnode of a remote port
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote port the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
{
return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
/**
* fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint
* @fwnode: Endpoint firmware node pointing to the remote endpoint
*
* Extracts firmware node of a remote endpoint the @fwnode points to.
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*/
struct fwnode_handle *
fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
{
struct fwnode_handle *dev_node;
bool available;
dev_node = fwnode_graph_get_remote_port_parent(ep);
available = fwnode_device_is_available(dev_node);
fwnode_handle_put(dev_node);
return available;
}
/**
* fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
* @fwnode: parent fwnode_handle containing the graph
* @port: identifier of the port node
* @endpoint: identifier of the endpoint node under the port node
* @flags: fwnode lookup flags
*
* The caller is responsible for calling fwnode_handle_put() on the returned
* fwnode pointer.
*
 * Return: the fwnode handle of the local endpoint corresponding to the port and
* endpoint IDs or %NULL if not found.
*
* If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
* has not been found, look for the closest endpoint ID greater than the
* specified one and return the endpoint that corresponds to it, if present.
*
* Does not return endpoints that belong to disabled devices or endpoints that
* are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
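 *
 * For instance, looking up endpoint 0 of port 1 (sketch; use() is a
 * placeholder)::
 *
 *	struct fwnode_handle *ep;
 *
 *	ep = fwnode_graph_get_endpoint_by_id(fwnode, 1, 0, 0);
 *	if (ep) {
 *		use(ep);
 *		fwnode_handle_put(ep);
 *	}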
*/
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags)
{
struct fwnode_handle *ep, *best_ep = NULL;
unsigned int best_ep_id = 0;
bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
fwnode_graph_for_each_endpoint(fwnode, ep) {
struct fwnode_endpoint fwnode_ep = { 0 };
int ret;
if (enabled_only && !fwnode_graph_remote_available(ep))
continue;
ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
if (ret < 0)
continue;
if (fwnode_ep.port != port)
continue;
if (fwnode_ep.id == endpoint)
return ep;
if (!endpoint_next)
continue;
/*
* If the endpoint that has just been found is not the first
* matching one and the ID of the one found previously is closer
* to the requested endpoint ID, skip it.
*/
if (fwnode_ep.id < endpoint ||
(best_ep && best_ep_id < fwnode_ep.id))
continue;
fwnode_handle_put(best_ep);
best_ep = fwnode_handle_get(ep);
best_ep_id = fwnode_ep.id;
}
return best_ep;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
/**
* fwnode_graph_get_endpoint_count - Count endpoints on a device node
* @fwnode: The node related to a device
 * @flags: fwnode lookup flags
 *
 * Count endpoints in a device node.
*
* If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints
* and endpoints connected to disabled devices are counted.
*/
unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
unsigned long flags)
{
struct fwnode_handle *ep;
unsigned int count = 0;
fwnode_graph_for_each_endpoint(fwnode, ep) {
if (flags & FWNODE_GRAPH_DEVICE_DISABLED ||
fwnode_graph_remote_available(ep))
count++;
}
return count;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count);
/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
*
* Parse @fwnode representing a graph endpoint node and store the
* information in @endpoint. The caller must hold a reference to
* @fwnode.
*/
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
memset(endpoint, 0, sizeof(*endpoint));
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
static unsigned int fwnode_graph_devcon_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches,
unsigned int matches_len)
{
struct fwnode_handle *node;
struct fwnode_handle *ep;
unsigned int count = 0;
void *ret;
fwnode_graph_for_each_endpoint(fwnode, ep) {
if (matches && count >= matches_len) {
fwnode_handle_put(ep);
break;
}
node = fwnode_graph_get_remote_port_parent(ep);
if (!fwnode_device_is_available(node)) {
fwnode_handle_put(node);
continue;
}
ret = match(node, con_id, data);
fwnode_handle_put(node);
if (ret) {
if (matches)
matches[count] = ret;
count++;
}
}
return count;
}
static unsigned int fwnode_devcon_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches,
unsigned int matches_len)
{
struct fwnode_handle *node;
unsigned int count = 0;
unsigned int i;
void *ret;
for (i = 0; ; i++) {
if (matches && count >= matches_len)
break;
node = fwnode_find_reference(fwnode, con_id, i);
if (IS_ERR(node))
break;
ret = match(node, NULL, data);
fwnode_handle_put(node);
if (ret) {
if (matches)
matches[count] = ret;
count++;
}
}
return count;
}
/**
* fwnode_connection_find_match - Find connection from a device node
* @fwnode: Device node with the connection
* @con_id: Identifier for the connection
* @data: Data for the match function
* @match: Function to check and convert the connection description
*
* Find a connection with unique identifier @con_id between @fwnode and another
* device node. @match will be used to convert the connection description to
* data the caller is expecting to be returned.
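 *
 * A sketch of a @match callback and a lookup (all names here are
 * illustrative)::
 *
 *	static void *my_match(const struct fwnode_handle *fwnode,
 *			      const char *con_id, void *data)
 *	{
 *		return my_find_instance(fwnode);
 *	}
 *
 *	struct my_thing *t = fwnode_connection_find_match(fwnode, "my-con",
 *							  NULL, my_match);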
*/
void *fwnode_connection_find_match(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match)
{
unsigned int count;
void *ret;
if (!fwnode || !match)
return NULL;
count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
if (count)
return ret;
count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
return count ? ret : NULL;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
/**
* fwnode_connection_find_matches - Find connections from a device node
* @fwnode: Device node with the connection
* @con_id: Identifier for the connection
* @data: Data for the match function
* @match: Function to check and convert the connection description
* @matches: (Optional) array of pointers to fill with matches
* @matches_len: Length of @matches
*
* Find up to @matches_len connections with unique identifier @con_id between
* @fwnode and other device nodes. @match will be used to convert the
* connection description to data the caller is expecting to be returned
* through the @matches array.
*
 * If @matches is %NULL, @matches_len is ignored and the total number of resolved
* matches is returned.
*
* Return: Number of matches resolved, or negative errno.
*/
int fwnode_connection_find_matches(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match,
void **matches, unsigned int matches_len)
{
unsigned int count_graph;
unsigned int count_ref;
if (!fwnode || !match)
return -EINVAL;
count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
matches, matches_len);
if (matches) {
matches += count_graph;
matches_len -= count_graph;
}
count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
matches, matches_len);
return count_graph + count_ref;
}
EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
| linux-master | drivers/base/property.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Componentized device handling.
*/
#include <linux/component.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
/**
* DOC: overview
*
* The component helper allows drivers to collect a pile of sub-devices,
* including their bound drivers, into an aggregate driver. Various subsystems
* already provide functions to get hold of such components, e.g.
* of_clk_get_by_name(). The component helper can be used when such a
* subsystem-specific way to find a device is not available: The component
* helper fills the niche of aggregate drivers for specific hardware, where
* further standardization into a subsystem would not be practical. The common
* example is when a logical device (e.g. a DRM display driver) is spread around
* the SoC on various components (scanout engines, blending blocks, transcoders
* for various outputs and so on).
*
* The component helper also doesn't solve runtime dependencies, e.g. for system
* suspend and resume operations. See also :ref:`device links<device_link>`.
*
* Components are registered using component_add() and unregistered with
* component_del(), usually from the driver's probe and disconnect functions.
*
* Aggregate drivers first assemble a component match list of what they need
* using component_match_add(). This is then registered as an aggregate driver
* using component_master_add_with_match(), and unregistered using
* component_master_del().
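 *
 * A condensed registration sketch (my_master_ops, my_component_ops and the
 * compare arguments are illustrative)::
 *
 *	struct component_match *match = NULL;
 *
 *	component_match_add(parent, &match, compare_fn, compare_data);
 *	component_master_add_with_match(parent, &my_master_ops, match);
 *
 * while each sub-device driver calls component_add(dev, &my_component_ops)
 * from its probe function.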
*/
struct component;
struct component_match_array {
void *data;
int (*compare)(struct device *, void *);
int (*compare_typed)(struct device *, int, void *);
void (*release)(struct device *, void *);
struct component *component;
bool duplicate;
};
struct component_match {
size_t alloc;
size_t num;
struct component_match_array *compare;
};
struct aggregate_device {
struct list_head node;
bool bound;
const struct component_master_ops *ops;
struct device *parent;
struct component_match *match;
};
struct component {
struct list_head node;
struct aggregate_device *adev;
bool bound;
const struct component_ops *ops;
int subcomponent;
struct device *dev;
};
static DEFINE_MUTEX(component_mutex);
static LIST_HEAD(component_list);
static LIST_HEAD(aggregate_devices);
#ifdef CONFIG_DEBUG_FS
static struct dentry *component_debugfs_dir;
static int component_devices_show(struct seq_file *s, void *data)
{
struct aggregate_device *m = s->private;
struct component_match *match = m->match;
size_t i;
mutex_lock(&component_mutex);
seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
seq_printf(s, "%-40s %20s\n\n",
dev_name(m->parent), m->bound ? "bound" : "not bound");
seq_printf(s, "%-40s %20s\n", "device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
struct component *component = match->compare[i].component;
seq_printf(s, "%-40s %20s\n",
component ? dev_name(component->dev) : "(unknown)",
component ? (component->bound ? "bound" : "not bound") : "not registered");
}
mutex_unlock(&component_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(component_devices);
static int __init component_debug_init(void)
{
component_debugfs_dir = debugfs_create_dir("device_component", NULL);
return 0;
}
core_initcall(component_debug_init);
static void component_debugfs_add(struct aggregate_device *m)
{
debugfs_create_file(dev_name(m->parent), 0444, component_debugfs_dir, m,
&component_devices_fops);
}
static void component_debugfs_del(struct aggregate_device *m)
{
debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
}
#else
static void component_debugfs_add(struct aggregate_device *m)
{ }
static void component_debugfs_del(struct aggregate_device *m)
{ }
#endif
static struct aggregate_device *__aggregate_find(struct device *parent,
const struct component_master_ops *ops)
{
struct aggregate_device *m;
list_for_each_entry(m, &aggregate_devices, node)
if (m->parent == parent && (!ops || m->ops == ops))
return m;
return NULL;
}
static struct component *find_component(struct aggregate_device *adev,
struct component_match_array *mc)
{
struct component *c;
list_for_each_entry(c, &component_list, node) {
if (c->adev && c->adev != adev)
continue;
if (mc->compare && mc->compare(c->dev, mc->data))
return c;
if (mc->compare_typed &&
mc->compare_typed(c->dev, c->subcomponent, mc->data))
return c;
}
return NULL;
}
static int find_components(struct aggregate_device *adev)
{
struct component_match *match = adev->match;
size_t i;
int ret = 0;
/*
* Scan the array of match functions and attach
* any components which are found to this adev.
*/
for (i = 0; i < match->num; i++) {
struct component_match_array *mc = &match->compare[i];
struct component *c;
dev_dbg(adev->parent, "Looking for component %zu\n", i);
if (match->compare[i].component)
continue;
c = find_component(adev, mc);
if (!c) {
ret = -ENXIO;
break;
}
dev_dbg(adev->parent, "found component %s, duplicate %u\n",
dev_name(c->dev), !!c->adev);
/* Attach this component to the adev */
match->compare[i].duplicate = !!c->adev;
match->compare[i].component = c;
c->adev = adev;
}
return ret;
}
/* Detach component from associated aggregate_device */
static void remove_component(struct aggregate_device *adev, struct component *c)
{
size_t i;
/* Detach the component from this adev. */
for (i = 0; i < adev->match->num; i++)
if (adev->match->compare[i].component == c)
adev->match->compare[i].component = NULL;
}
/*
* Try to bring up an aggregate device. If component is NULL, we're interested
* in this aggregate device, otherwise it's a component which must be present
* to try and bring up the aggregate device.
*
 * Returns 1 for successful bringup, 0 if not ready, or a negative errno.
*/
static int try_to_bring_up_aggregate_device(struct aggregate_device *adev,
struct component *component)
{
int ret;
dev_dbg(adev->parent, "trying to bring up adev\n");
if (find_components(adev)) {
dev_dbg(adev->parent, "master has incomplete components\n");
return 0;
}
if (component && component->adev != adev) {
dev_dbg(adev->parent, "master is not for this component (%s)\n",
dev_name(component->dev));
return 0;
}
if (!devres_open_group(adev->parent, adev, GFP_KERNEL))
return -ENOMEM;
/* Found all components */
ret = adev->ops->bind(adev->parent);
if (ret < 0) {
devres_release_group(adev->parent, NULL);
if (ret != -EPROBE_DEFER)
dev_info(adev->parent, "adev bind failed: %d\n", ret);
return ret;
}
devres_close_group(adev->parent, NULL);
adev->bound = true;
return 1;
}
static int try_to_bring_up_masters(struct component *component)
{
struct aggregate_device *adev;
int ret = 0;
list_for_each_entry(adev, &aggregate_devices, node) {
if (!adev->bound) {
ret = try_to_bring_up_aggregate_device(adev, component);
if (ret != 0)
break;
}
}
return ret;
}
static void take_down_aggregate_device(struct aggregate_device *adev)
{
if (adev->bound) {
adev->ops->unbind(adev->parent);
devres_release_group(adev->parent, adev);
adev->bound = false;
}
}
/**
* component_compare_of - A common component compare function for of_node
* @dev: component device
* @data: @compare_data from component_match_add_release()
*
 * A common compare function when compare_data is the device's of_node, e.g.
* component_match_add_release(masterdev, &match, component_release_of,
* component_compare_of, component_dev_of_node)
*/
int component_compare_of(struct device *dev, void *data)
{
return device_match_of_node(dev, data);
}
EXPORT_SYMBOL_GPL(component_compare_of);
/**
* component_release_of - A common component release function for of_node
* @dev: component device
* @data: @compare_data from component_match_add_release()
*
 * For an example, please see component_compare_of().
*/
void component_release_of(struct device *dev, void *data)
{
of_node_put(data);
}
EXPORT_SYMBOL_GPL(component_release_of);
/**
* component_compare_dev - A common component compare function for dev
* @dev: component device
* @data: @compare_data from component_match_add_release()
*
 * A common compare function when compare_data is a struct device pointer, e.g.
* component_match_add(masterdev, &match, component_compare_dev, component_dev)
*/
int component_compare_dev(struct device *dev, void *data)
{
return dev == data;
}
EXPORT_SYMBOL_GPL(component_compare_dev);
/**
* component_compare_dev_name - A common component compare function for device name
* @dev: component device
* @data: @compare_data from component_match_add_release()
*
 * A common compare function when compare_data is a device name string, e.g.
* component_match_add(masterdev, &match, component_compare_dev_name,
* "component_dev_name")
*/
int component_compare_dev_name(struct device *dev, void *data)
{
return device_match_name(dev, data);
}
EXPORT_SYMBOL_GPL(component_compare_dev_name);
static void devm_component_match_release(struct device *parent, void *res)
{
struct component_match *match = res;
unsigned int i;
for (i = 0; i < match->num; i++) {
struct component_match_array *mc = &match->compare[i];
if (mc->release)
mc->release(parent, mc->data);
}
kfree(match->compare);
}
static int component_match_realloc(struct component_match *match, size_t num)
{
struct component_match_array *new;
if (match->alloc == num)
return 0;
new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (match->compare) {
memcpy(new, match->compare, sizeof(*new) *
min(match->num, num));
kfree(match->compare);
}
match->compare = new;
match->alloc = num;
return 0;
}
static void __component_match_add(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *),
int (*compare_typed)(struct device *, int, void *),
void *compare_data)
{
struct component_match *match = *matchptr;
if (IS_ERR(match))
return;
if (!match) {
match = devres_alloc(devm_component_match_release,
sizeof(*match), GFP_KERNEL);
if (!match) {
*matchptr = ERR_PTR(-ENOMEM);
return;
}
devres_add(parent, match);
*matchptr = match;
}
if (match->num == match->alloc) {
size_t new_size = match->alloc + 16;
int ret;
ret = component_match_realloc(match, new_size);
if (ret) {
*matchptr = ERR_PTR(ret);
return;
}
}
match->compare[match->num].compare = compare;
match->compare[match->num].compare_typed = compare_typed;
match->compare[match->num].release = release;
match->compare[match->num].data = compare_data;
match->compare[match->num].component = NULL;
match->num++;
}
/**
* component_match_add_release - add a component match entry with release callback
* @parent: parent device of the aggregate driver
* @matchptr: pointer to the list of component matches
* @release: release function for @compare_data
* @compare: compare function to match against all components
* @compare_data: opaque pointer passed to the @compare function
*
* Adds a new component match to the list stored in @matchptr, which the
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add().
*
* The allocated match list in @matchptr is automatically released using devm
* actions, where upon @release will be called to free any references held by
* @compare_data, e.g. when @compare_data is a &device_node that must be
* released with of_node_put().
*
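 * For example, matching on a child's of_node (sketch; child_np is a
 * hypothetical &device_node pointer)::
 *
 *	component_match_add_release(parent, &match, component_release_of,
 *				    component_compare_of,
 *				    of_node_get(child_np));
 *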
* See also component_match_add() and component_match_add_typed().
*/
void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data)
{
__component_match_add(parent, matchptr, release, compare, NULL,
compare_data);
}
EXPORT_SYMBOL(component_match_add_release);
/**
* component_match_add_typed - add a component match entry for a typed component
* @parent: parent device of the aggregate driver
* @matchptr: pointer to the list of component matches
* @compare_typed: compare function to match against all typed components
* @compare_data: opaque pointer passed to the @compare function
*
* Adds a new component match to the list stored in @matchptr, which the
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add_typed().
*
* The allocated match list in @matchptr is automatically released using devm
* actions.
*
* See also component_match_add_release() and component_match_add_typed().
*/
void component_match_add_typed(struct device *parent,
struct component_match **matchptr,
int (*compare_typed)(struct device *, int, void *), void *compare_data)
{
__component_match_add(parent, matchptr, NULL, NULL, compare_typed,
compare_data);
}
EXPORT_SYMBOL(component_match_add_typed);
static void free_aggregate_device(struct aggregate_device *adev)
{
struct component_match *match = adev->match;
int i;
component_debugfs_del(adev);
list_del(&adev->node);
if (match) {
for (i = 0; i < match->num; i++) {
struct component *c = match->compare[i].component;
if (c)
c->adev = NULL;
}
}
kfree(adev);
}
/**
* component_master_add_with_match - register an aggregate driver
* @parent: parent device of the aggregate driver
* @ops: callbacks for the aggregate driver
* @match: component match list for the aggregate driver
*
* Registers a new aggregate driver consisting of the components added to @match
* by calling one of the component_match_add() functions. Once all components in
* @match are available, it will be assembled by calling
* &component_master_ops.bind from @ops. Must be unregistered by calling
* component_master_del().
*/
int component_master_add_with_match(struct device *parent,
const struct component_master_ops *ops,
struct component_match *match)
{
struct aggregate_device *adev;
int ret;
/* Reallocate the match array for its true size */
ret = component_match_realloc(match, match->num);
if (ret)
return ret;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
return -ENOMEM;
adev->parent = parent;
adev->ops = ops;
adev->match = match;
component_debugfs_add(adev);
/* Add to the list of available aggregate devices. */
mutex_lock(&component_mutex);
list_add(&adev->node, &aggregate_devices);
ret = try_to_bring_up_aggregate_device(adev, NULL);
if (ret < 0)
free_aggregate_device(adev);
mutex_unlock(&component_mutex);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(component_master_add_with_match);
/**
* component_master_del - unregister an aggregate driver
* @parent: parent device of the aggregate driver
* @ops: callbacks for the aggregate driver
*
* Unregisters an aggregate driver registered with
* component_master_add_with_match(). If necessary the aggregate driver is first
* disassembled by calling &component_master_ops.unbind from @ops.
*/
void component_master_del(struct device *parent,
const struct component_master_ops *ops)
{
struct aggregate_device *adev;
mutex_lock(&component_mutex);
adev = __aggregate_find(parent, ops);
if (adev) {
take_down_aggregate_device(adev);
free_aggregate_device(adev);
}
mutex_unlock(&component_mutex);
}
EXPORT_SYMBOL_GPL(component_master_del);
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
WARN_ON(!component->bound);
if (component->ops && component->ops->unbind)
component->ops->unbind(component->dev, adev->parent, data);
component->bound = false;
/* Release all resources claimed in the binding of this component */
devres_release_group(component->dev, component);
}
/**
* component_unbind_all - unbind all components of an aggregate driver
* @parent: parent device of the aggregate driver
* @data: opaque pointer, passed to all components
*
* Unbinds all components of the aggregate device by passing @data to their
* &component_ops.unbind functions. Should be called from
* &component_master_ops.unbind.
*/
void component_unbind_all(struct device *parent, void *data)
{
struct aggregate_device *adev;
struct component *c;
size_t i;
WARN_ON(!mutex_is_locked(&component_mutex));
adev = __aggregate_find(parent, NULL);
if (!adev)
return;
/* Unbind components in reverse order */
for (i = adev->match->num; i--; )
if (!adev->match->compare[i].duplicate) {
c = adev->match->compare[i].component;
component_unbind(c, adev, data);
}
}
EXPORT_SYMBOL_GPL(component_unbind_all);
static int component_bind(struct component *component, struct aggregate_device *adev,
void *data)
{
int ret;
/*
* Each component initialises inside its own devres group.
* This allows us to roll-back a failed component without
* affecting anything else.
*/
if (!devres_open_group(adev->parent, NULL, GFP_KERNEL))
return -ENOMEM;
/*
* Also open a group for the device itself: this allows us
* to release the resources claimed against the sub-device
* at the appropriate moment.
*/
if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
devres_release_group(adev->parent, NULL);
return -ENOMEM;
}
dev_dbg(adev->parent, "binding %s (ops %ps)\n",
dev_name(component->dev), component->ops);
ret = component->ops->bind(component->dev, adev->parent, data);
if (!ret) {
component->bound = true;
/*
* Close the component device's group so that resources
* allocated in the binding are encapsulated for removal
* at unbind. Remove the group on the DRM device as we
* can clean those resources up independently.
*/
devres_close_group(component->dev, NULL);
devres_remove_group(adev->parent, NULL);
dev_info(adev->parent, "bound %s (ops %ps)\n",
dev_name(component->dev), component->ops);
} else {
devres_release_group(component->dev, NULL);
devres_release_group(adev->parent, NULL);
if (ret != -EPROBE_DEFER)
dev_err(adev->parent, "failed to bind %s (ops %ps): %d\n",
dev_name(component->dev), component->ops, ret);
}
return ret;
}
/**
* component_bind_all - bind all components of an aggregate driver
* @parent: parent device of the aggregate driver
* @data: opaque pointer, passed to all components
*
 * Binds all components of the aggregate @parent by passing @data to their
* &component_ops.bind functions. Should be called from
* &component_master_ops.bind.
*/
int component_bind_all(struct device *parent, void *data)
{
struct aggregate_device *adev;
struct component *c;
size_t i;
int ret = 0;
WARN_ON(!mutex_is_locked(&component_mutex));
adev = __aggregate_find(parent, NULL);
if (!adev)
return -EINVAL;
/* Bind components in match order */
for (i = 0; i < adev->match->num; i++)
if (!adev->match->compare[i].duplicate) {
c = adev->match->compare[i].component;
ret = component_bind(c, adev, data);
if (ret)
break;
}
if (ret != 0) {
for (; i > 0; i--)
if (!adev->match->compare[i - 1].duplicate) {
c = adev->match->compare[i - 1].component;
component_unbind(c, adev, data);
}
}
return ret;
}
EXPORT_SYMBOL_GPL(component_bind_all);
static int __component_add(struct device *dev, const struct component_ops *ops,
int subcomponent)
{
struct component *component;
int ret;
component = kzalloc(sizeof(*component), GFP_KERNEL);
if (!component)
return -ENOMEM;
component->ops = ops;
component->dev = dev;
component->subcomponent = subcomponent;
dev_dbg(dev, "adding component (ops %ps)\n", ops);
mutex_lock(&component_mutex);
list_add_tail(&component->node, &component_list);
ret = try_to_bring_up_masters(component);
if (ret < 0) {
if (component->adev)
remove_component(component->adev, component);
list_del(&component->node);
kfree(component);
}
mutex_unlock(&component_mutex);
return ret < 0 ? ret : 0;
}
/**
* component_add_typed - register a component
* @dev: component device
* @ops: component callbacks
* @subcomponent: nonzero identifier for subcomponents
*
 * Register a new component for @dev. Functions in @ops will be called when the
* aggregate driver is ready to bind the overall driver by calling
* component_bind_all(). See also &struct component_ops.
*
 * @subcomponent must be nonzero and is used to differentiate between multiple
 * components registered on the same device @dev. These components are matched
 * using component_match_add_typed().
*
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
* See also component_add().
*/
int component_add_typed(struct device *dev, const struct component_ops *ops,
int subcomponent)
{
if (WARN_ON(subcomponent == 0))
return -EINVAL;
return __component_add(dev, ops, subcomponent);
}
EXPORT_SYMBOL_GPL(component_add_typed);
/**
* component_add - register a component
* @dev: component device
* @ops: component callbacks
*
* Register a new component for @dev. Functions in @ops will be called when the
* aggregate driver is ready to bind the overall driver by calling
* component_bind_all(). See also &struct component_ops.
*
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
 * See also component_add_typed() for a variant that allows multiple different
* components on the same device.
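 *
 * A typical probe-time sketch (my_component_ops is illustrative)::
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		return component_add(&pdev->dev, &my_component_ops);
 *	}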
*/
int component_add(struct device *dev, const struct component_ops *ops)
{
return __component_add(dev, ops, 0);
}
EXPORT_SYMBOL_GPL(component_add);
/**
* component_del - unregister a component
* @dev: component device
* @ops: component callbacks
*
* Unregister a component added with component_add(). If the component is bound
* into an aggregate driver, this will force the entire aggregate driver, including
* all its components, to be unbound.
*/
void component_del(struct device *dev, const struct component_ops *ops)
{
struct component *c, *component = NULL;
mutex_lock(&component_mutex);
list_for_each_entry(c, &component_list, node)
if (c->dev == dev && c->ops == ops) {
list_del(&c->node);
component = c;
break;
}
if (component && component->adev) {
take_down_aggregate_device(component->adev);
remove_component(component->adev, component);
}
mutex_unlock(&component_mutex);
WARN_ON(!component);
kfree(component);
}
EXPORT_SYMBOL_GPL(component_del);
| linux-master | drivers/base/component.c |
// SPDX-License-Identifier: GPL-2.0
/*
* devtmpfs - kernel-maintained tmpfs-based /dev
*
* Copyright (C) 2009, Kay Sievers <[email protected]>
*
* During bootup, before any driver core device is registered,
* devtmpfs, a tmpfs-based filesystem is created. Every driver-core
* device which requests a device node, will add a node in this
* filesystem.
* By default, all devices are named after the name of the device,
* owned by root and have a default mode of 0600. Subsystems can
* overwrite the default setting if needed.
*/
#define pr_fmt(fmt) "devtmpfs: " fmt
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/init_syscalls.h>
#include <uapi/linux/mount.h>
#include "base.h"
#ifdef CONFIG_DEVTMPFS_SAFE
#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
#else
#define DEVTMPFS_MFLAGS (MS_SILENT)
#endif
static struct task_struct *thread;
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
static DEFINE_SPINLOCK(req_lock);
static struct req {
struct req *next;
struct completion done;
int err;
const char *name;
umode_t mode; /* 0 => delete */
kuid_t uid;
kgid_t gid;
struct device *dev;
} *requests;
static int __init mount_param(char *str)
{
mount_dev = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
struct super_block *s = mnt->mnt_sb;
int err;
atomic_inc(&s->s_active);
down_write(&s->s_umount);
err = reconfigure_single(s, flags, data);
if (err < 0) {
deactivate_locked_super(s);
return ERR_PTR(err);
}
return dget(s->s_root);
}
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
.init_fs_context = shmem_init_fs_context,
#else
.init_fs_context = ramfs_init_fs_context,
#endif
.kill_sb = kill_litter_super,
};
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
.mount = public_dev_mount,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
init_completion(&req->done);
spin_lock(&req_lock);
req->next = requests;
requests = req;
spin_unlock(&req_lock);
wake_up_process(thread);
wait_for_completion(&req->done);
kfree(tmp);
return req->err;
}
int devtmpfs_create_node(struct device *dev)
{
const char *tmp = NULL;
struct req req;
if (!thread)
return 0;
req.mode = 0;
req.uid = GLOBAL_ROOT_UID;
req.gid = GLOBAL_ROOT_GID;
req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
if (!req.name)
return -ENOMEM;
if (req.mode == 0)
req.mode = 0600;
if (is_blockdev(dev))
req.mode |= S_IFBLK;
else
req.mode |= S_IFCHR;
req.dev = dev;
return devtmpfs_submit_req(&req, tmp);
}
int devtmpfs_delete_node(struct device *dev)
{
const char *tmp = NULL;
struct req req;
if (!thread)
return 0;
req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
if (!req.name)
return -ENOMEM;
req.mode = 0;
req.dev = dev;
return devtmpfs_submit_req(&req, tmp);
}
static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
return err;
}
static int create_path(const char *nodepath)
{
char *path;
char *s;
int err = 0;
/* parent directories do not exist, create them */
path = kstrdup(nodepath, GFP_KERNEL);
if (!path)
return -ENOMEM;
s = path;
for (;;) {
s = strchr(s, '/');
if (!s)
break;
s[0] = '\0';
err = dev_mkdir(path, 0755);
if (err && err != -EEXIST)
break;
s[0] = '/';
s++;
}
kfree(path);
return err;
}
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
kgid_t gid, struct device *dev)
{
struct dentry *dentry;
struct path path;
int err;
dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
dev->devt);
if (!err) {
struct iattr newattrs;
newattrs.ia_mode = mode;
newattrs.ia_uid = uid;
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
done_path_create(&path, dentry);
return err;
}
static int dev_rmdir(const char *name)
{
struct path parent;
struct dentry *dentry;
int err;
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
dentry);
else
err = -EPERM;
} else {
err = -ENOENT;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
return err;
}
static int delete_path(const char *nodepath)
{
char *path;
int err = 0;
path = kstrdup(nodepath, GFP_KERNEL);
if (!path)
return -ENOMEM;
for (;;) {
char *base;
base = strrchr(path, '/');
if (!base)
break;
base[0] = '\0';
err = dev_rmdir(path);
if (err)
break;
}
kfree(path);
return err;
}
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
/* did we create it */
if (inode->i_private != &thread)
return 0;
/* does the dev_t match */
if (is_blockdev(dev)) {
if (!S_ISBLK(stat->mode))
return 0;
} else {
if (!S_ISCHR(stat->mode))
return 0;
}
if (stat->rdev != dev->devt)
return 0;
/* ours */
return 1;
}
static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
int deleted = 0;
int err;
dentry = kern_path_locked(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
struct kstat stat;
struct path p = {.mnt = parent.mnt, .dentry = dentry};
err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
AT_STATX_SYNC_AS_STAT);
if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
struct iattr newattrs;
/*
* before unlinking this node, reset permissions
* of possible references like hardlinks
*/
newattrs.ia_uid = GLOBAL_ROOT_UID;
newattrs.ia_gid = GLOBAL_ROOT_GID;
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
} else {
err = -ENOENT;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
}
/*
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
int __init devtmpfs_mount(void)
{
int err;
if (!mount_dev)
return 0;
if (!thread)
return 0;
err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
pr_info("error mounting %d\n", err);
else
pr_info("mounted\n");
return err;
}
static __initdata DECLARE_COMPLETION(setup_done);
static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
struct device *dev)
{
if (mode)
return handle_create(name, mode, uid, gid, dev);
else
return handle_remove(name, dev);
}
static void __noreturn devtmpfs_work_loop(void)
{
while (1) {
spin_lock(&req_lock);
while (requests) {
struct req *req = requests;
requests = NULL;
spin_unlock(&req_lock);
while (req) {
struct req *next = req->next;
req->err = handle(req->name, req->mode,
req->uid, req->gid, req->dev);
complete(&req->done);
req = next;
}
spin_lock(&req_lock);
}
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&req_lock);
schedule();
}
}
static noinline int __init devtmpfs_setup(void *p)
{
int err;
err = ksys_unshare(CLONE_NEWNS);
if (err)
goto out;
err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
goto out;
init_chdir("/.."); /* will traverse into overmounted root */
init_chroot(".");
out:
*(int *)p = err;
return err;
}
/*
* The __ref is because devtmpfs_setup needs to be __init for the routines it
* calls. That call is done while devtmpfs_init, which is marked __init,
* synchronously waits for it to complete.
*/
static int __ref devtmpfsd(void *p)
{
int err = devtmpfs_setup(p);
complete(&setup_done);
if (err)
return err;
devtmpfs_work_loop();
return 0;
}
/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
int __init devtmpfs_init(void)
{
char opts[] = "mode=0755";
int err;
mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
if (IS_ERR(mnt)) {
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
return err;
}
thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
if (!IS_ERR(thread)) {
wait_for_completion(&setup_done);
} else {
err = PTR_ERR(thread);
thread = NULL;
}
if (err) {
pr_err("unable to create devtmpfs %d\n", err);
unregister_filesystem(&dev_fs_type);
thread = NULL;
return err;
}
pr_info("initialized\n");
return 0;
}
| linux-master | drivers/base/devtmpfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* platform.c - platform 'pseudo' bus for legacy devices
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
*
* Please see Documentation/driver-api/driver-model/platform.rst for more
* information.
*/
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include "base.h"
#include "power/power.h"
/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);
struct device platform_bus = {
.init_name = "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
/**
* platform_get_resource - get a resource for a device
* @dev: platform device
* @type: resource type
* @num: resource index
*
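 * For example (sketch)::
 *
 *	struct resource *res;
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	if (!res)
 *		return -EINVAL;
 *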
* Return: a pointer to the resource or NULL on failure.
*/
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
if (type == resource_type(r) && num-- == 0)
return r;
}
return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);
struct resource *platform_get_mem_or_io(struct platform_device *dev,
unsigned int num)
{
u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
return r;
}
return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
#ifdef CONFIG_HAS_IOMEM
/**
* devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
* platform device and get resource
*
* @pdev: platform device to use both for memory resource lookup as well as
* resource management
* @index: resource index
* @res: optional output parameter to store a pointer to the obtained resource.
*
* Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure.
*/
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res)
{
struct resource *r;
r = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (res)
*res = r;
return devm_ioremap_resource(&pdev->dev, r);
}
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
*
* @pdev: platform device to use both for memory resource lookup as well as
* resource management
* @index: resource index
*
* Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure.
*/
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
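/*
 * Usage sketch (an assumption): the common probe-time pattern of remapping
 * the first MEM resource in one call. "foo" and the register offset are
 * hypothetical; writel() comes from <linux/io.h>.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	writel(0x1, base + 0x04);	/* hypothetical enable register */
	return 0;
}
#endif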
/**
* devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
* a platform device, retrieve the
* resource by name
*
* @pdev: platform device to use both for memory resource lookup and
* resource management
* @name: name of the resource
*
* Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure.
*/
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name)
{
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
/**
* platform_get_irq_optional - get an optional IRQ for a device
* @dev: platform device
* @num: IRQ number index
*
* Gets an IRQ for a platform device. Device drivers should check the return
* value for errors so as to not pass a negative integer value to the
* request_irq() APIs. This is the same as platform_get_irq(), except that it
* does not print an error message if an IRQ can not be obtained.
*
* For example::
*
* int irq = platform_get_irq_optional(pdev, 0);
* if (irq < 0)
* return irq;
*
* Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
int ret;
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
if (!dev || num >= dev->archdata.num_irqs)
goto out_not_found;
ret = dev->archdata.irqs[num];
goto out;
#else
struct resource *r;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
if (has_acpi_companion(&dev->dev)) {
if (r && r->flags & IORESOURCE_DISABLED) {
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
goto out;
}
}
/*
* The resources may pass trigger flags to the irqs that need
* to be set up. It so happens that the trigger flags for
* IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
* settings.
*/
if (r && r->flags & IORESOURCE_BITS) {
struct irq_data *irqd;
irqd = irq_get_irq_data(r->start);
if (!irqd)
goto out_not_found;
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
if (r) {
ret = r->start;
goto out;
}
/*
* For the index 0 interrupt, allow falling back to GpioInt
* resources. While a device could have both Interrupt and GpioInt
* resources, making this fallback ambiguous, in many common cases
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
if (num == 0 && has_acpi_companion(&dev->dev)) {
ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
goto out;
}
#endif
out_not_found:
ret = -ENXIO;
out:
if (WARN(!ret, "0 is an invalid IRQ number\n"))
return -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
* platform_get_irq - get an IRQ for a device
* @dev: platform device
* @num: IRQ number index
*
* Gets an IRQ for a platform device and prints an error message if finding the
* IRQ fails. Device drivers should check the return value for errors so as to
* not pass a negative integer value to the request_irq() APIs.
*
* For example::
*
* int irq = platform_get_irq(pdev, 0);
* if (irq < 0)
* return irq;
*
* Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
int ret;
ret = platform_get_irq_optional(dev, num);
if (ret < 0)
return dev_err_probe(&dev->dev, ret,
"IRQ index %u not found\n", num);
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
* platform_irq_count - Count the number of IRQs a platform device uses
* @dev: platform device
*
* Return: Number of IRQs a platform device uses, or -EPROBE_DEFER on probe deferral.
*/
int platform_irq_count(struct platform_device *dev)
{
int ret, nr = 0;
while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
nr++;
if (ret == -EPROBE_DEFER)
return ret;
return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);
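/*
 * Usage sketch (an assumption): counting IRQs before allocating per-vector
 * state, e.g. inside a hypothetical foo_probe(). The negative return must
 * be propagated so -EPROBE_DEFER keeps working.
 */
#if 0
int nr = platform_irq_count(pdev);

if (nr < 0)
	return nr;	/* may be -EPROBE_DEFER */
#endif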
struct irq_affinity_devres {
unsigned int count;
unsigned int irq[];
};
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
{
struct resource *r;
r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
if (r)
irqresource_disabled(r, 0);
}
static void devm_platform_get_irqs_affinity_release(struct device *dev,
void *res)
{
struct irq_affinity_devres *ptr = res;
int i;
for (i = 0; i < ptr->count; i++) {
irq_dispose_mapping(ptr->irq[i]);
if (has_acpi_companion(dev))
platform_disable_acpi_irq(to_platform_device(dev), i);
}
}
/**
* devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
* device using an interrupt affinity descriptor
* @dev: platform device pointer
* @affd: affinity descriptor
* @minvec: minimum count of interrupt vectors
* @maxvec: maximum count of interrupt vectors
* @irqs: pointer holder for IRQ numbers
*
* Gets a set of IRQs for a platform device, and updates the IRQ affinity
* according to the passed affinity descriptor.
*
* Return: Number of vectors on success, negative error number on failure.
*/
int devm_platform_get_irqs_affinity(struct platform_device *dev,
struct irq_affinity *affd,
unsigned int minvec,
unsigned int maxvec,
int **irqs)
{
struct irq_affinity_devres *ptr;
struct irq_affinity_desc *desc;
size_t size;
int i, ret, nvec;
if (!affd)
return -EPERM;
if (maxvec < minvec)
return -ERANGE;
nvec = platform_irq_count(dev);
if (nvec < 0)
return nvec;
if (nvec < minvec)
return -ENOSPC;
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
if (nvec < minvec)
return -ENOSPC;
if (nvec > maxvec)
nvec = maxvec;
size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ptr->count = nvec;
for (i = 0; i < nvec; i++) {
int irq = platform_get_irq(dev, i);
if (irq < 0) {
ret = irq;
goto err_free_devres;
}
ptr->irq[i] = irq;
}
desc = irq_create_affinity_masks(nvec, affd);
if (!desc) {
ret = -ENOMEM;
goto err_free_devres;
}
for (i = 0; i < nvec; i++) {
ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
if (ret) {
dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
ptr->irq[i], ret);
goto err_free_desc;
}
}
devres_add(&dev->dev, ptr);
kfree(desc);
*irqs = ptr->irq;
return nvec;
err_free_desc:
kfree(desc);
err_free_devres:
devres_free(ptr);
return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
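/*
 * Usage sketch (an assumption): spreading a platform device's vectors
 * across CPUs and then requesting them. foo_isr (an irq_handler_t) and
 * the vector bounds are hypothetical; devm_request_irq() comes from
 * <linux/interrupt.h>.
 */
#if 0
static int foo_setup_irqs(struct platform_device *pdev)
{
	struct irq_affinity affd = { .pre_vectors = 1 };
	int *irqs, nvec, i, ret;

	nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, 8, &irqs);
	if (nvec < 0)
		return nvec;
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, irqs[i], foo_isr, 0,
				       dev_name(&pdev->dev), pdev);
		if (ret)
			return ret;
	}
	return 0;
}
#endif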
/**
* platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
* @type: resource type
* @name: resource name
*/
struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
if (unlikely(!r->name))
continue;
if (type == resource_type(r) && !strcmp(r->name, name))
return r;
}
return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
static int __platform_get_irq_byname(struct platform_device *dev,
const char *name)
{
struct resource *r;
int ret;
ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
if (WARN(!r->start, "0 is an invalid IRQ number\n"))
return -EINVAL;
return r->start;
}
return -ENXIO;
}
/**
* platform_get_irq_byname - get an IRQ for a device by name
* @dev: platform device
* @name: IRQ name
*
* Get an IRQ like platform_get_irq(), but by name rather than by index.
*
* Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
int ret;
ret = __platform_get_irq_byname(dev, name);
if (ret < 0)
return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
name);
return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
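/*
 * Usage sketch (an assumption): fetching an IRQ by its resource name
 * inside a hypothetical probe routine; the "tx" name would come from
 * DT/ACPI/board code.
 */
#if 0
int irq = platform_get_irq_byname(pdev, "tx");

if (irq < 0)
	return irq;
#endif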
/**
* platform_get_irq_byname_optional - get an optional IRQ for a device by name
* @dev: platform device
* @name: IRQ name
*
* Get an optional IRQ by name like platform_get_irq_byname(), except that it
* does not print an error message if an IRQ cannot be obtained.
*
* Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name)
{
return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
/**
* platform_add_devices - add a number of platform devices
* @devs: array of platform devices to add
* @num: number of platform devices in array
*
* Return: 0 on success, negative error number on failure.
*/
int platform_add_devices(struct platform_device **devs, int num)
{
int i, ret = 0;
for (i = 0; i < num; i++) {
ret = platform_device_register(devs[i]);
if (ret) {
while (--i >= 0)
platform_device_unregister(devs[i]);
break;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
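/*
 * Usage sketch (an assumption): board code registering a static set of
 * devices in one call; the foo_device* objects are hypothetical.
 */
#if 0
static struct platform_device *foo_devices[] __initdata = {
	&foo_device0,
	&foo_device1,
};

static int __init foo_board_init(void)
{
	return platform_add_devices(foo_devices, ARRAY_SIZE(foo_devices));
}
#endif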
struct platform_object {
struct platform_device pdev;
char name[];
};
/*
* Set up default DMA mask for platform devices if they weren't
* previously set by the architecture / DT.
*/
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
pdev->dev.dma_parms = &pdev->dma_parms;
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!pdev->dev.dma_mask) {
pdev->platform_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->platform_dma_mask;
}
};
/**
* platform_device_put - destroy a platform device
* @pdev: platform device to free
*
* Free all memory associated with a platform device. This function must
* _only_ be externally called in error cases. All other usage is a bug.
*/
void platform_device_put(struct platform_device *pdev)
{
if (!IS_ERR_OR_NULL(pdev))
put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);
static void platform_device_release(struct device *dev)
{
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
of_node_put(pa->pdev.dev.of_node);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
kfree(pa->pdev.driver_override);
kfree(pa);
}
/**
* platform_device_alloc - create a platform device
* @name: base name of the device we're adding
* @id: instance id
*
* Create a platform device object which can have other objects attached
* to it, and which will have attached objects freed when it is released.
*/
struct platform_device *platform_device_alloc(const char *name, int id)
{
struct platform_object *pa;
pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
if (pa) {
strcpy(pa->name, name);
pa->pdev.name = pa->name;
pa->pdev.id = id;
device_initialize(&pa->pdev.dev);
pa->pdev.dev.release = platform_device_release;
setup_pdev_dma_masks(&pa->pdev);
}
return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);
/**
* platform_device_add_resources - add resources to a platform device
* @pdev: platform device allocated by platform_device_alloc to add resources to
* @res: set of resources that needs to be allocated for the device
* @num: number of resources
*
* Add a copy of the resources to the platform device. The memory
* associated with the resources will be freed when the platform device is
* released.
*/
int platform_device_add_resources(struct platform_device *pdev,
const struct resource *res, unsigned int num)
{
struct resource *r = NULL;
if (res) {
r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
if (!r)
return -ENOMEM;
}
kfree(pdev->resource);
pdev->resource = r;
pdev->num_resources = num;
return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);
/**
* platform_device_add_data - add platform-specific data to a platform device
* @pdev: platform device allocated by platform_device_alloc to add data to
* @data: platform specific data for this platform device
* @size: size of platform specific data
*
* Add a copy of platform specific data to the platform device's
* platform_data pointer. The memory associated with the platform data
* will be freed when the platform device is released.
*/
int platform_device_add_data(struct platform_device *pdev, const void *data,
size_t size)
{
void *d = NULL;
if (data) {
d = kmemdup(data, size, GFP_KERNEL);
if (!d)
return -ENOMEM;
}
kfree(pdev->dev.platform_data);
pdev->dev.platform_data = d;
return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
* platform_device_add - add a platform device to device hierarchy
* @pdev: platform device we're adding
*
* This is part 2 of platform_device_register(), though may be called
* separately _iff_ pdev was allocated by platform_device_alloc().
*/
int platform_device_add(struct platform_device *pdev)
{
u32 i;
int ret;
if (!pdev)
return -EINVAL;
if (!pdev->dev.parent)
pdev->dev.parent = &platform_bus;
pdev->dev.bus = &platform_bus_type;
switch (pdev->id) {
default:
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE:
dev_set_name(&pdev->dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO:
/*
* Automatically allocated device ID. We mark it as such so
* that we remember it must be freed, and we append a suffix
* to avoid namespace collision with explicit IDs.
*/
ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
if (ret < 0)
goto err_out;
pdev->id = ret;
pdev->id_auto = true;
dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
r->name = dev_name(&pdev->dev);
p = r->parent;
if (!p) {
if (resource_type(r) == IORESOURCE_MEM)
p = &iomem_resource;
else if (resource_type(r) == IORESOURCE_IO)
p = &ioport_resource;
}
if (p) {
ret = insert_resource(p, r);
if (ret) {
dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
goto failed;
}
}
}
pr_debug("Registering platform device '%s'. Parent at %s\n",
dev_name(&pdev->dev), dev_name(pdev->dev.parent));
ret = device_add(&pdev->dev);
if (ret == 0)
return ret;
failed:
if (pdev->id_auto) {
ida_free(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
}
err_out:
return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
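/*
 * Usage sketch (an assumption): the two-step alloc/add life cycle with the
 * mandated platform_device_put() on error; all "foo" names (including
 * foo_resources and foo_pdata) are hypothetical.
 */
#if 0
static int foo_create_device(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add_resources(pdev, foo_resources,
					    ARRAY_SIZE(foo_resources));
	if (ret)
		goto err_put;

	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
	if (ret)
		goto err_put;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;

	return 0;

err_put:
	platform_device_put(pdev);
	return ret;
}
#endif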
/**
* platform_device_del - remove a platform-level device
* @pdev: platform device we're removing
*
* Note that this function will also release all memory- and port-based
* resources owned by the device (@pdev->resource). This function must
* _only_ be externally called in error cases. All other usage is a bug.
*/
void platform_device_del(struct platform_device *pdev)
{
u32 i;
if (!IS_ERR_OR_NULL(pdev)) {
device_del(&pdev->dev);
if (pdev->id_auto) {
ida_free(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
}
}
}
EXPORT_SYMBOL_GPL(platform_device_del);
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
*
* NOTE: _Never_ directly free @pdev after calling this function, even if it
* returned an error! Always use platform_device_put() to give up the
* reference initialised in this function instead.
*/
int platform_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
setup_pdev_dma_masks(pdev);
return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
/**
* platform_device_unregister - unregister a platform-level device
* @pdev: platform device we're unregistering
*
* Unregistration is done in 2 steps. First we release all resources
* and remove it from the subsystem, then we drop the reference count by
* calling platform_device_put().
*/
void platform_device_unregister(struct platform_device *pdev)
{
platform_device_del(pdev);
platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
* platform_device_register_full - add a platform-level device with
* resources and platform-specific data
*
* @pdevinfo: data used to create device
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo)
{
int ret;
struct platform_device *pdev;
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
if (!pdev)
return ERR_PTR(-ENOMEM);
pdev->dev.parent = pdevinfo->parent;
pdev->dev.fwnode = pdevinfo->fwnode;
pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
pdev->platform_dma_mask = pdevinfo->dma_mask;
pdev->dev.dma_mask = &pdev->platform_dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
ret = platform_device_add_resources(pdev,
pdevinfo->res, pdevinfo->num_res);
if (ret)
goto err;
ret = platform_device_add_data(pdev,
pdevinfo->data, pdevinfo->size_data);
if (ret)
goto err;
if (pdevinfo->properties) {
ret = device_create_managed_software_node(&pdev->dev,
pdevinfo->properties, NULL);
if (ret)
goto err;
}
ret = platform_device_add(pdev);
if (ret) {
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
platform_device_put(pdev);
return ERR_PTR(ret);
}
return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
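/*
 * Usage sketch (an assumption): describing resources and data
 * declaratively and creating the device in one call; the "foo" names
 * are hypothetical.
 */
#if 0
static const struct platform_device_info foo_devinfo = {
	.name		= "foo",
	.id		= PLATFORM_DEVID_NONE,
	.res		= foo_resources,
	.num_res	= ARRAY_SIZE(foo_resources),
	.data		= &foo_pdata,
	.size_data	= sizeof(foo_pdata),
};

static int foo_register(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_full(&foo_devinfo);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	return 0;
}
#endif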
/**
* __platform_driver_register - register a driver for platform-level devices
* @drv: platform driver structure
* @owner: owning module/driver
*/
int __platform_driver_register(struct platform_driver *drv,
struct module *owner)
{
drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
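/*
 * Usage sketch (an assumption): the minimal module-level driver that
 * reaches this function through the platform_driver_register() wrapper
 * (which passes THIS_MODULE); "foo" names are hypothetical.
 */
#if 0
static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);
#endif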
/**
* platform_driver_unregister - unregister a driver for platform-level devices
* @drv: platform driver structure
*/
void platform_driver_unregister(struct platform_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
static int platform_probe_fail(struct platform_device *pdev)
{
return -ENXIO;
}
static int is_bound_to_driver(struct device *dev, void *driver)
{
if (dev->driver == driver)
return 1;
return 0;
}
/**
* __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
* @probe: the driver probe routine, probably from an __init section
* @module: module which will be the owner of the driver
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
* remove its run-once probe() infrastructure from memory after the driver
* has bound to the device.
*
* One typical use for this would be with drivers for controllers integrated
* into system-on-chip processors, where the controller devices have been
* configured as part of board setup.
*
* Note that this is incompatible with deferred probing.
*
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
*/
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
int (*probe)(struct platform_device *), struct module *module)
{
int retval;
if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
drv->driver.name, __func__);
return -EINVAL;
}
/*
* We have to run our probes synchronously because we check if
* we find any devices to bind to and exit with error if there
* are none.
*/
drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
/*
* Prevent driver from requesting probe deferral to avoid further
* futile probe attempts.
*/
drv->prevent_deferred_probe = true;
/* make sure driver won't have bind/unbind attributes */
drv->driver.suppress_bind_attrs = true;
/* temporary section violation during probe() */
drv->probe = probe;
retval = __platform_driver_register(drv, module);
if (retval)
return retval;
/* Force all new probes of this driver to fail */
drv->probe = platform_probe_fail;
/* Walk all platform devices and see if any actually bound to this driver.
* If not, return an error as the device should have done so by now.
*/
if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
retval = -ENODEV;
platform_driver_unregister(drv);
}
return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
/**
* __platform_create_bundle - register driver and create corresponding device
* @driver: platform driver structure
* @probe: the driver probe routine, probably from an __init section
* @res: set of resources that needs to be allocated for the device
* @n_res: number of resources
* @data: platform specific data for this platform device
* @size: size of platform specific data
* @module: module which will be the owner of the driver
*
* Use this in legacy-style modules that probe hardware directly and
* register a single platform device and corresponding platform driver.
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device * __init_or_module __platform_create_bundle(
struct platform_driver *driver,
int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
const void *data, size_t size, struct module *module)
{
struct platform_device *pdev;
int error;
pdev = platform_device_alloc(driver->driver.name, -1);
if (!pdev) {
error = -ENOMEM;
goto err_out;
}
error = platform_device_add_resources(pdev, res, n_res);
if (error)
goto err_pdev_put;
error = platform_device_add_data(pdev, data, size);
if (error)
goto err_pdev_put;
error = platform_device_add(pdev);
if (error)
goto err_pdev_put;
error = __platform_driver_probe(driver, probe, module);
if (error)
goto err_pdev_del;
return pdev;
err_pdev_del:
platform_device_del(pdev);
err_pdev_put:
platform_device_put(pdev);
err_out:
return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
/**
* __platform_register_drivers - register an array of platform drivers
* @drivers: an array of drivers to register
* @count: the number of drivers to register
* @owner: module owning the drivers
*
* Registers platform drivers specified by an array. On failure to register a
* driver, all previously registered drivers will be unregistered. Callers of
* this API should use platform_unregister_drivers() to unregister drivers in
* the reverse order.
*
* Returns: 0 on success or a negative error code on failure.
*/
int __platform_register_drivers(struct platform_driver * const *drivers,
unsigned int count, struct module *owner)
{
unsigned int i;
int err;
for (i = 0; i < count; i++) {
pr_debug("registering platform driver %ps\n", drivers[i]);
err = __platform_driver_register(drivers[i], owner);
if (err < 0) {
pr_err("failed to register platform driver %ps: %d\n",
drivers[i], err);
goto error;
}
}
return 0;
error:
while (i--) {
pr_debug("unregistering platform driver %ps\n", drivers[i]);
platform_driver_unregister(drivers[i]);
}
return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
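/*
 * Usage sketch (an assumption): registering related drivers as one unit
 * through the platform_register_drivers() wrapper; the driver objects
 * are hypothetical.
 */
#if 0
static struct platform_driver * const foo_drivers[] = {
	&foo_driver,
	&bar_driver,
};

static int __init foo_init(void)
{
	return platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
}
#endif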
/**
* platform_unregister_drivers - unregister an array of platform drivers
* @drivers: an array of drivers to unregister
* @count: the number of drivers to unregister
*
* Unregisters platform drivers specified by an array. This is typically used
* to complement an earlier call to platform_register_drivers(). Drivers are
* unregistered in the reverse order in which they were registered.
*/
void platform_unregister_drivers(struct platform_driver * const *drivers,
unsigned int count)
{
while (count--) {
pr_debug("unregistering platform driver %ps\n", drivers[count]);
platform_driver_unregister(drivers[count]);
}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
static const struct platform_device_id *platform_match_id(
const struct platform_device_id *id,
struct platform_device *pdev)
{
while (id->name[0]) {
if (strcmp(pdev->name, id->name) == 0) {
pdev->id_entry = id;
return id;
}
id++;
}
return NULL;
}
#ifdef CONFIG_PM_SLEEP
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
struct platform_driver *pdrv = to_platform_driver(dev->driver);
struct platform_device *pdev = to_platform_device(dev);
int ret = 0;
if (dev->driver && pdrv->suspend)
ret = pdrv->suspend(pdev, mesg);
return ret;
}
static int platform_legacy_resume(struct device *dev)
{
struct platform_driver *pdrv = to_platform_driver(dev->driver);
struct platform_device *pdev = to_platform_device(dev);
int ret = 0;
if (dev->driver && pdrv->resume)
ret = pdrv->resume(pdev);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
int platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->suspend)
ret = drv->pm->suspend(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
}
return ret;
}
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->resume)
ret = drv->pm->resume(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
int platform_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->freeze)
ret = drv->pm->freeze(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_FREEZE);
}
return ret;
}
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->thaw)
ret = drv->pm->thaw(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->poweroff)
ret = drv->pm->poweroff(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
}
return ret;
}
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->restore)
ret = drv->pm->restore(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
#endif /* CONFIG_HIBERNATE_CALLBACKS */
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
* fully running: "modprobe $MODALIAS"
* (b) sysfs attribute lets new-style coldplug recover from hotplug events
* mishandled before system is fully running: "modprobe $(cat modalias)"
*/
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
int len;
len = of_device_modalias(dev, buf, PAGE_SIZE);
if (len != -ENODEV)
return len;
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
int ret;
ret = driver_set_override(dev, &pdev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *platform_dev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_numa_node.attr,
&dev_attr_driver_override.attr,
NULL,
};
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
if (a == &dev_attr_numa_node.attr &&
dev_to_node(dev) == NUMA_NO_NODE)
return 0;
return a->mode;
}
static const struct attribute_group platform_dev_group = {
.attrs = platform_dev_attrs,
.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
/**
* platform_match - bind platform device to platform driver.
* @dev: device.
* @drv: driver.
*
* Platform device IDs are assumed to be encoded like this:
* "<name><instance>", where <name> is a short description of the type of
* device, like "pci" or "floppy", and <instance> is the enumerated
* instance of the device, like '0' or '42'. Driver IDs are simply
* "<name>". So, extract the <name> from the platform_device structure,
* and compare it against the name of the driver. Return whether they match
* or not.
*/
static int platform_match(struct device *dev, struct device_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
/* When driver_override is set, only bind to the matching driver */
if (pdev->driver_override)
return !strcmp(pdev->driver_override, drv->name);
/* Attempt an OF style match first */
if (of_driver_match_device(dev, drv))
return 1;
/* Then try ACPI style match */
if (acpi_driver_match_device(dev, drv))
return 1;
/* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
/* fall-back to driver name match */
return (strcmp(pdev->name, drv->name) == 0);
}
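/*
 * Usage sketch (an assumption): an id table that platform_match()
 * consults before falling back to the plain name match; the entries and
 * driver_data targets are hypothetical.
 */
#if 0
static const struct platform_device_id foo_id_table[] = {
	{ "foo-v1", (kernel_ulong_t)&foo_v1_config },
	{ "foo-v2", (kernel_ulong_t)&foo_v2_config },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(platform, foo_id_table);
#endif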
static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct platform_device *pdev = to_platform_device(dev);
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
rc = of_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
pdev->name);
return 0;
}
static int platform_probe(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
int ret;
/*
* A driver registered using platform_driver_probe() cannot be bound
* again later because the probe function usually lives in __init code
* and so is gone. For these drivers .probe is set to
* platform_probe_fail in __platform_driver_probe(). Don't even prepare
* clocks and PM domains for these to match the traditional behaviour.
*/
if (unlikely(drv->probe == platform_probe_fail))
return -ENXIO;
ret = of_clk_set_defaults(_dev->of_node, false);
if (ret < 0)
return ret;
ret = dev_pm_domain_attach(_dev, true);
if (ret)
goto out;
if (drv->probe) {
ret = drv->probe(dev);
if (ret)
dev_pm_domain_detach(_dev, true);
}
out:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
}
return ret;
}
static void platform_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
if (drv->remove_new) {
drv->remove_new(dev);
} else if (drv->remove) {
int ret = drv->remove(dev);
if (ret)
dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
}
dev_pm_domain_detach(_dev, true);
}
static void platform_shutdown(struct device *_dev)
{
struct platform_device *dev = to_platform_device(_dev);
struct platform_driver *drv;
if (!_dev->driver)
return;
drv = to_platform_driver(_dev->driver);
if (drv->shutdown)
drv->shutdown(dev);
}
static int platform_dma_configure(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
if (dev->of_node) {
ret = of_dma_configure(dev, dev->of_node, true);
} else if (has_acpi_companion(dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
ret = acpi_dma_configure(dev, attr);
}
if (!ret && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void platform_dma_cleanup(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
if (!drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
static const struct dev_pm_ops platform_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
USE_PLATFORM_PM_SLEEP_OPS
};
struct bus_type platform_bus_type = {
.name = "platform",
.dev_groups = platform_dev_groups,
.match = platform_match,
.uevent = platform_uevent,
.probe = platform_probe,
.remove = platform_remove,
.shutdown = platform_shutdown,
.dma_configure = platform_dma_configure,
.dma_cleanup = platform_dma_cleanup,
.pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
static inline int __platform_match(struct device *dev, const void *drv)
{
return platform_match(dev, (struct device_driver *)drv);
}
/**
* platform_find_device_by_driver - Find a platform device with a given
* driver.
* @start: The device to start the search from.
* @drv: The device driver to look for.
*/
struct device *platform_find_device_by_driver(struct device *start,
const struct device_driver *drv)
{
return bus_find_device(&platform_bus_type, start, drv,
__platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
void __weak __init early_platform_cleanup(void) { }
int __init platform_bus_init(void)
{
int error;
early_platform_cleanup();
error = device_register(&platform_bus);
if (error) {
put_device(&platform_bus);
return error;
}
error = bus_register(&platform_bus_type);
if (error)
device_unregister(&platform_bus);
return error;
}
| linux-master | drivers/base/platform.c |
// SPDX-License-Identifier: GPL-2.0
/*
* attribute_container.c - implementation of a simple container for classes
*
* Copyright (c) 2005 - James Bottomley <[email protected]>
*
* The basic idea here is to enable a device to be attached to an
* arbitrary number of classes without having to allocate storage for them.
* Instead, the contained classes select the devices they need to attach
* to via a matching function.
*/
#include <linux/attribute_container.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include "base.h"
/* This is a private structure used to tie the classdev and the
* container; it should never be visible outside this file */
struct internal_container {
struct klist_node node;
struct attribute_container *cont;
struct device classdev;
};
static void internal_container_klist_get(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
get_device(&ic->classdev);
}
static void internal_container_klist_put(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
put_device(&ic->classdev);
}
/**
* attribute_container_classdev_to_container - given a classdev, return the container
*
* @classdev: the class device created by attribute_container_add_device.
*
* Returns the container associated with this classdev.
*/
struct attribute_container *
attribute_container_classdev_to_container(struct device *classdev)
{
struct internal_container *ic =
container_of(classdev, struct internal_container, classdev);
return ic->cont;
}
EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
static LIST_HEAD(attribute_container_list);
static DEFINE_MUTEX(attribute_container_mutex);
/**
* attribute_container_register - register an attribute container
*
* @cont: The container to register. This must be allocated by the
* caller and should also be zeroed by it.
*/
int
attribute_container_register(struct attribute_container *cont)
{
INIT_LIST_HEAD(&cont->node);
klist_init(&cont->containers, internal_container_klist_get,
internal_container_klist_put);
mutex_lock(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list);
mutex_unlock(&attribute_container_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(attribute_container_register);
/**
* attribute_container_unregister - remove a container registration
*
* @cont: previously registered container to remove
*/
int
attribute_container_unregister(struct attribute_container *cont)
{
int retval = -EBUSY;
mutex_lock(&attribute_container_mutex);
spin_lock(&cont->containers.k_lock);
if (!list_empty(&cont->containers.k_list))
goto out;
retval = 0;
list_del(&cont->node);
out:
spin_unlock(&cont->containers.k_lock);
mutex_unlock(&attribute_container_mutex);
return retval;
}
EXPORT_SYMBOL_GPL(attribute_container_unregister);
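/*
 * Usage sketch (an assumption): a subsystem declaring a container whose
 * match function selects devices on a hypothetical bus; all "foo" names
 * are hypothetical.
 */
#if 0
static int foo_match(struct attribute_container *cont, struct device *dev)
{
	return dev->bus == &foo_bus_type;
}

static struct attribute_container foo_container = {
	.class	= &foo_class,
	.match	= foo_match,
};

static int __init foo_subsys_init(void)
{
	return attribute_container_register(&foo_container);
}
#endif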
/* private function used as class release */
static void attribute_container_release(struct device *classdev)
{
struct internal_container *ic
= container_of(classdev, struct internal_container, classdev);
struct device *dev = classdev->parent;
kfree(ic);
put_device(dev);
}
/**
* attribute_container_add_device - see if any container is interested in dev
*
* @dev: device to add attributes to
* @fn: function to trigger addition of class device.
*
* This function allocates storage for the class device(s) to be
* attached to dev (one for each matching attribute_container). If no
* fn is provided, the code will simply register the class device via
* device_add. If a function is provided, it is expected to add
* the class device at the appropriate time. One of the things that
* might be necessary is to allocate and initialise the classdev and
* then add it at a later time. To do this, call this routine for
* allocation and initialisation and then use
* attribute_container_device_trigger() to call device_add() on
* it. Note: after this, the class device contains a reference to dev
* which is not relinquished until the release of the classdev.
*/
void
attribute_container_add_device(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *))
{
struct attribute_container *cont;
mutex_lock(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
ic = kzalloc(sizeof(*ic), GFP_KERNEL);
if (!ic) {
dev_err(dev, "failed to allocate class container\n");
continue;
}
ic->cont = cont;
device_initialize(&ic->classdev);
ic->classdev.parent = get_device(dev);
ic->classdev.class = cont->class;
cont->class->dev_release = attribute_container_release;
dev_set_name(&ic->classdev, "%s", dev_name(dev));
if (fn)
fn(cont, dev, &ic->classdev);
else
attribute_container_add_class_device(&ic->classdev);
klist_add_tail(&ic->node, &cont->containers);
}
mutex_unlock(&attribute_container_mutex);
}
/* FIXME: can't break out of this unless klist_iter_exit is also
* called before doing the break
*/
#define klist_for_each_entry(pos, head, member, iter) \
for (klist_iter_init(head, iter); (pos = ({ \
struct klist_node *n = klist_next(iter); \
n ? container_of(n, typeof(*pos), member) : \
({ klist_iter_exit(iter) ; NULL; }); \
})) != NULL;)
/**
* attribute_container_remove_device - make device eligible for removal.
*
* @dev: The generic device
* @fn: A function to call to remove the device
*
* This routine triggers device removal. If fn is NULL, then it is
* simply done via device_unregister (note that if something
* still has a reference to the classdev, then the memory occupied
* will not be freed until the classdev is released). If you want a
* two phase release: remove from visibility and then delete the
* device, then you should use this routine with a fn that calls
* device_del() and then use attribute_container_device_trigger()
* to do the final put on the classdev.
*/
void
attribute_container_remove_device(struct device *dev,
void (*fn)(struct attribute_container *,
struct device *,
struct device *))
{
struct attribute_container *cont;
mutex_lock(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic;
struct klist_iter iter;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev != ic->classdev.parent)
continue;
klist_del(&ic->node);
if (fn)
fn(cont, dev, &ic->classdev);
else {
attribute_container_remove_attrs(&ic->classdev);
device_unregister(&ic->classdev);
}
}
}
mutex_unlock(&attribute_container_mutex);
}
static int
do_attribute_container_device_trigger_safe(struct device *dev,
struct attribute_container *cont,
int (*fn)(struct attribute_container *,
struct device *, struct device *),
int (*undo)(struct attribute_container *,
struct device *, struct device *))
{
int ret;
struct internal_container *ic, *failed;
struct klist_iter iter;
if (attribute_container_no_classdevs(cont))
return fn(cont, dev, NULL);
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev == ic->classdev.parent) {
ret = fn(cont, dev, &ic->classdev);
if (ret) {
failed = ic;
klist_iter_exit(&iter);
goto fail;
}
}
}
return 0;
fail:
if (!undo)
return ret;
/* Attempt to undo the work partially done. */
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (ic == failed) {
klist_iter_exit(&iter);
break;
}
if (dev == ic->classdev.parent)
undo(cont, dev, &ic->classdev);
}
return ret;
}
/**
* attribute_container_device_trigger_safe - execute a trigger for each
* matching classdev or fail all of them.
*
* @dev: The generic device to run the trigger for
* @fn: the function to execute for each classdev.
* @undo: A function to undo the work previously done in case of error
*
* This function is a safe version of
* attribute_container_device_trigger(). It stops on the first error and
* undoes the partial work that has been done on the previous classdevs. It
* is guaranteed that either all of them succeed, or none of them do.
*/
int
attribute_container_device_trigger_safe(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *),
int (*undo)(struct attribute_container *,
struct device *,
struct device *))
{
struct attribute_container *cont, *failed = NULL;
int ret = 0;
mutex_lock(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
if (!cont->match(cont, dev))
continue;
ret = do_attribute_container_device_trigger_safe(dev, cont,
fn, undo);
if (ret) {
failed = cont;
break;
}
}
if (ret && !WARN_ON(!undo)) {
list_for_each_entry(cont, &attribute_container_list, node) {
if (failed == cont)
break;
if (!cont->match(cont, dev))
continue;
do_attribute_container_device_trigger_safe(dev, cont,
undo, NULL);
}
}
mutex_unlock(&attribute_container_mutex);
return ret;
}
/**
* attribute_container_device_trigger - execute a trigger for each matching classdev
*
* @dev: The generic device to run the trigger for
* @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
* the container and the classdev. If you only care about the
* container, then use attribute_container_trigger() instead.
*/
void
attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *))
{
struct attribute_container *cont;
mutex_lock(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic;
struct klist_iter iter;
if (!cont->match(cont, dev))
continue;
if (attribute_container_no_classdevs(cont)) {
fn(cont, dev, NULL);
continue;
}
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev == ic->classdev.parent)
fn(cont, dev, &ic->classdev);
}
}
mutex_unlock(&attribute_container_mutex);
}
/**
* attribute_container_trigger - trigger a function for each matching container
*
* @dev: The generic device to activate the trigger for
* @fn: the function to trigger
*
* This routine triggers a function that only needs to know the
* matching containers (not the classdev) associated with a device.
* It is more lightweight than attribute_container_device_trigger, so
* should be used in preference unless the triggering function
* actually needs to know the classdev.
*/
void
attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *))
{
struct attribute_container *cont;
mutex_lock(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
if (cont->match(cont, dev))
fn(cont, dev);
}
mutex_unlock(&attribute_container_mutex);
}
/**
* attribute_container_add_attrs - add attributes
*
* @classdev: The class device
*
* This simply creates all the class device sysfs files from the
* attributes listed in the container
*/
int
attribute_container_add_attrs(struct device *classdev)
{
struct attribute_container *cont =
attribute_container_classdev_to_container(classdev);
struct device_attribute **attrs = cont->attrs;
int i, error;
BUG_ON(attrs && cont->grp);
if (!attrs && !cont->grp)
return 0;
if (cont->grp)
return sysfs_create_group(&classdev->kobj, cont->grp);
for (i = 0; attrs[i]; i++) {
sysfs_attr_init(&attrs[i]->attr);
error = device_create_file(classdev, attrs[i]);
if (error)
return error;
}
return 0;
}
/**
* attribute_container_add_class_device - same function as device_add
*
* @classdev: the class device to add
*
* This performs essentially the same function as device_add except for
* attribute containers, namely adding the classdev to the system and then
* creating the attribute files.
*/
int
attribute_container_add_class_device(struct device *classdev)
{
int error = device_add(classdev);
if (error)
return error;
return attribute_container_add_attrs(classdev);
}
/**
* attribute_container_add_class_device_adapter - simple adapter for triggers
*
* @cont: the container to register.
* @dev: the generic device to activate the trigger for
* @classdev: the class device to add
*
* This function is identical to attribute_container_add_class_device except
* that it is designed to be called from the triggers.
*/
int
attribute_container_add_class_device_adapter(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
return attribute_container_add_class_device(classdev);
}
/**
* attribute_container_remove_attrs - remove any attribute files
*
* @classdev: The class device to remove the files from
*
*/
void
attribute_container_remove_attrs(struct device *classdev)
{
struct attribute_container *cont =
attribute_container_classdev_to_container(classdev);
struct device_attribute **attrs = cont->attrs;
int i;
if (!attrs && !cont->grp)
return;
if (cont->grp) {
sysfs_remove_group(&classdev->kobj, cont->grp);
return;
}
for (i = 0; attrs[i]; i++)
device_remove_file(classdev, attrs[i]);
}
/**
* attribute_container_class_device_del - equivalent of class_device_del
*
* @classdev: the class device
*
* This function simply removes all the attribute files and then calls
* device_del.
*/
void
attribute_container_class_device_del(struct device *classdev)
{
attribute_container_remove_attrs(classdev);
device_del(classdev);
}
/**
* attribute_container_find_class_device - find the corresponding class_device
*
* @cont: the container
* @dev: the generic device
*
* Looks up the device in the container's list of class devices and returns
* the corresponding class_device.
*/
struct device *
attribute_container_find_class_device(struct attribute_container *cont,
struct device *dev)
{
struct device *cdev = NULL;
struct internal_container *ic;
struct klist_iter iter;
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (ic->classdev.parent == dev) {
cdev = &ic->classdev;
/* FIXME: must exit iterator then break */
klist_iter_exit(&iter);
break;
}
}
return cdev;
}
EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
| linux-master | drivers/base/attribute_container.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Software nodes for the firmware node framework.
*
* Copyright (C) 2018, Intel Corporation
* Author: Heikki Krogerus <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "base.h"
struct swnode {
struct kobject kobj;
struct fwnode_handle fwnode;
const struct software_node *node;
int id;
/* hierarchy */
struct ida child_ids;
struct list_head entry;
struct list_head children;
struct swnode *parent;
unsigned int allocated:1;
unsigned int managed:1;
};
static DEFINE_IDA(swnode_root_ids);
static struct kset *swnode_kset;
#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct swnode, kobj)
static const struct fwnode_operations software_node_ops;
bool is_software_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops;
}
EXPORT_SYMBOL_GPL(is_software_node);
#define to_swnode(__fwnode) \
({ \
typeof(__fwnode) __to_swnode_fwnode = __fwnode; \
\
is_software_node(__to_swnode_fwnode) ? \
container_of(__to_swnode_fwnode, \
struct swnode, fwnode) : NULL; \
})
static inline struct swnode *dev_to_swnode(struct device *dev)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
if (!fwnode)
return NULL;
if (!is_software_node(fwnode))
fwnode = fwnode->secondary;
return to_swnode(fwnode);
}
static struct swnode *
software_node_to_swnode(const struct software_node *node)
{
struct swnode *swnode = NULL;
struct kobject *k;
if (!node)
return NULL;
spin_lock(&swnode_kset->list_lock);
list_for_each_entry(k, &swnode_kset->list, entry) {
swnode = kobj_to_swnode(k);
if (swnode->node == node)
break;
swnode = NULL;
}
spin_unlock(&swnode_kset->list_lock);
return swnode;
}
const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
{
const struct swnode *swnode = to_swnode(fwnode);
return swnode ? swnode->node : NULL;
}
EXPORT_SYMBOL_GPL(to_software_node);
struct fwnode_handle *software_node_fwnode(const struct software_node *node)
{
struct swnode *swnode = software_node_to_swnode(node);
return swnode ? &swnode->fwnode : NULL;
}
EXPORT_SYMBOL_GPL(software_node_fwnode);
/* -------------------------------------------------------------------------- */
/* property_entry processing */
static const struct property_entry *
property_entry_get(const struct property_entry *prop, const char *name)
{
if (!prop)
return NULL;
for (; prop->name; prop++)
if (!strcmp(name, prop->name))
return prop;
return NULL;
}
static const void *property_get_pointer(const struct property_entry *prop)
{
if (!prop->length)
return NULL;
return prop->is_inline ? &prop->value : prop->pointer;
}
static const void *property_entry_find(const struct property_entry *props,
const char *propname, size_t length)
{
const struct property_entry *prop;
const void *pointer;
prop = property_entry_get(props, propname);
if (!prop)
return ERR_PTR(-EINVAL);
pointer = property_get_pointer(prop);
if (!pointer)
return ERR_PTR(-ENODATA);
if (length > prop->length)
return ERR_PTR(-EOVERFLOW);
return pointer;
}
static int
property_entry_count_elems_of_size(const struct property_entry *props,
const char *propname, size_t length)
{
const struct property_entry *prop;
prop = property_entry_get(props, propname);
if (!prop)
return -EINVAL;
return prop->length / length;
}
static int property_entry_read_int_array(const struct property_entry *props,
const char *name,
unsigned int elem_size, void *val,
size_t nval)
{
const void *pointer;
size_t length;
if (!val)
return property_entry_count_elems_of_size(props, name,
elem_size);
if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
return -ENXIO;
length = nval * elem_size;
pointer = property_entry_find(props, name, length);
if (IS_ERR(pointer))
return PTR_ERR(pointer);
memcpy(val, pointer, length);
return 0;
}
static int property_entry_read_string_array(const struct property_entry *props,
const char *propname,
const char **strings, size_t nval)
{
const void *pointer;
size_t length;
int array_len;
/* Find out the array length. */
array_len = property_entry_count_elems_of_size(props, propname,
sizeof(const char *));
if (array_len < 0)
return array_len;
/* Return how many there are if strings is NULL. */
if (!strings)
return array_len;
array_len = min_t(size_t, nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
if (IS_ERR(pointer))
return PTR_ERR(pointer);
memcpy(strings, pointer, length);
return array_len;
}
static void property_entry_free_data(const struct property_entry *p)
{
const char * const *src_str;
size_t i, nval;
if (p->type == DEV_PROP_STRING) {
src_str = property_get_pointer(p);
nval = p->length / sizeof(*src_str);
for (i = 0; i < nval; i++)
kfree(src_str[i]);
}
if (!p->is_inline)
kfree(p->pointer);
kfree(p->name);
}
static bool property_copy_string_array(const char **dst_ptr,
const char * const *src_ptr,
size_t nval)
{
int i;
for (i = 0; i < nval; i++) {
dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
if (!dst_ptr[i] && src_ptr[i]) {
while (--i >= 0)
kfree(dst_ptr[i]);
return false;
}
}
return true;
}
static int property_entry_copy_data(struct property_entry *dst,
const struct property_entry *src)
{
const void *pointer = property_get_pointer(src);
void *dst_ptr;
size_t nval;
/*
* Properties with no data should not be marked as stored
* out of line.
*/
if (!src->is_inline && !src->length)
return -ENODATA;
/*
* Reference properties are never stored inline as
* they are too big.
*/
if (src->type == DEV_PROP_REF && src->is_inline)
return -EINVAL;
if (src->length <= sizeof(dst->value)) {
dst_ptr = &dst->value;
dst->is_inline = true;
} else {
dst_ptr = kmalloc(src->length, GFP_KERNEL);
if (!dst_ptr)
return -ENOMEM;
dst->pointer = dst_ptr;
}
if (src->type == DEV_PROP_STRING) {
nval = src->length / sizeof(const char *);
if (!property_copy_string_array(dst_ptr, pointer, nval)) {
if (!dst->is_inline)
kfree(dst->pointer);
return -ENOMEM;
}
} else {
memcpy(dst_ptr, pointer, src->length);
}
dst->length = src->length;
dst->type = src->type;
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name) {
property_entry_free_data(dst);
return -ENOMEM;
}
return 0;
}
/**
* property_entries_dup - duplicate array of properties
* @properties: array of properties to copy
*
* This function creates a deep copy of the given NULL-terminated array
* of property entries.
*/
struct property_entry *
property_entries_dup(const struct property_entry *properties)
{
struct property_entry *p;
int i, n = 0;
int ret;
if (!properties)
return NULL;
while (properties[n].name)
n++;
p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
for (i = 0; i < n; i++) {
ret = property_entry_copy_data(&p[i], &properties[i]);
if (ret) {
while (--i >= 0)
property_entry_free_data(&p[i]);
kfree(p);
return ERR_PTR(ret);
}
}
return p;
}
EXPORT_SYMBOL_GPL(property_entries_dup);
/**
* property_entries_free - free previously allocated array of properties
* @properties: array of properties to destroy
*
* This function frees given NULL-terminated array of property entries,
* along with their data.
*/
void property_entries_free(const struct property_entry *properties)
{
const struct property_entry *p;
if (!properties)
return;
for (p = properties; p->name; p++)
property_entry_free_data(p);
kfree(properties);
}
EXPORT_SYMBOL_GPL(property_entries_free);
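/*
 * Usage sketch (an assumption): building a property array with the
 * PROPERTY_ENTRY_* helpers from <linux/property.h>, then deep-copying
 * and freeing it; the property names are hypothetical.
 */
#if 0
static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_STRING("label", "foo"),
	{ }	/* sentinel */
};

static int foo_clone_props(void)
{
	struct property_entry *copy;

	copy = property_entries_dup(foo_props);
	if (IS_ERR(copy))
		return PTR_ERR(copy);
	/* ... hand "copy" to a software node ... */
	property_entries_free(copy);
	return 0;
}
#endif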
/* -------------------------------------------------------------------------- */
/* fwnode operations */
static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
kobject_get(&swnode->kobj);
return &swnode->fwnode;
}
static void software_node_put(struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
kobject_put(&swnode->kobj);
}
static bool software_node_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
struct swnode *swnode = to_swnode(fwnode);
return !!property_entry_get(swnode->node->properties, propname);
}
static int software_node_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
struct swnode *swnode = to_swnode(fwnode);
return property_entry_read_int_array(swnode->node->properties, propname,
elem_size, val, nval);
}
static int software_node_read_string_array(const struct fwnode_handle *fwnode,
const char *propname,
const char **val, size_t nval)
{
struct swnode *swnode = to_swnode(fwnode);
return property_entry_read_string_array(swnode->node->properties,
propname, val, nval);
}
static const char *
software_node_get_name(const struct fwnode_handle *fwnode)
{
const struct swnode *swnode = to_swnode(fwnode);
return kobject_name(&swnode->kobj);
}
static const char *
software_node_get_name_prefix(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
const char *prefix;
parent = fwnode_get_parent(fwnode);
if (!parent)
return "";
/* Figure out the prefix from the parents. */
while (is_software_node(parent))
parent = fwnode_get_next_parent(parent);
prefix = fwnode_get_name_prefix(parent);
fwnode_handle_put(parent);
/* Guess something if prefix was NULL. */
return prefix ?: "/";
}
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
if (!swnode || !swnode->parent)
return NULL;
return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
software_node_get_next_child(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
struct swnode *p = to_swnode(fwnode);
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
(c && list_is_last(&c->entry, &p->children))) {
fwnode_handle_put(child);
return NULL;
}
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
fwnode_handle_put(child);
return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *
software_node_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct swnode *swnode = to_swnode(fwnode);
struct swnode *child;
if (!swnode || list_empty(&swnode->children))
return NULL;
list_for_each_entry(child, &swnode->children, entry) {
if (!strcmp(childname, kobject_name(&child->kobj))) {
kobject_get(&child->kobj);
return &child->fwnode;
}
}
return NULL;
}
static int
software_node_get_reference_args(const struct fwnode_handle *fwnode,
const char *propname, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
struct swnode *swnode = to_swnode(fwnode);
const struct software_node_ref_args *ref_array;
const struct software_node_ref_args *ref;
const struct property_entry *prop;
struct fwnode_handle *refnode;
u32 nargs_prop_val;
int error;
int i;
prop = property_entry_get(swnode->node->properties, propname);
if (!prop)
return -ENOENT;
if (prop->type != DEV_PROP_REF)
return -EINVAL;
/*
* We expect that references are never stored inline, even
* single ones, as they are too big.
*/
if (prop->is_inline)
return -EINVAL;
if (index * sizeof(*ref) >= prop->length)
return -ENOENT;
ref_array = prop->pointer;
ref = &ref_array[index];
refnode = software_node_fwnode(ref->node);
if (!refnode)
return -ENOENT;
if (nargs_prop) {
error = property_entry_read_int_array(ref->node->properties,
nargs_prop, sizeof(u32),
&nargs_prop_val, 1);
if (error)
return error;
nargs = nargs_prop_val;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
args->fwnode = software_node_get(refnode);
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = ref->args[i];
return 0;
}
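/*
 * Illustrative sketch (not part of the original file): a consumer
 * resolving a reference property through the generic fwnode API, which
 * lands in software_node_get_reference_args() above for software nodes.
 * The "example-phandles" property name and the argument count are
 * hypothetical.
 */
static int __maybe_unused example_get_ref(const struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int error;
	/* Ask for reference index 0, expecting two fixed arguments. */
	error = fwnode_property_get_reference_args(fwnode, "example-phandles",
						   NULL, 2, 0, &args);
	if (error)
		return error;
	/* args.fwnode holds a reference; drop it when done. */
	fwnode_handle_put(args.fwnode);
	return 0;
}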
static struct fwnode_handle *
swnode_graph_find_next_port(const struct fwnode_handle *parent,
struct fwnode_handle *port)
{
struct fwnode_handle *old = port;
while ((port = software_node_get_next_child(parent, old))) {
/*
* fwnode ports have naming style "port@", so we search for any
* children that follow that convention.
*/
if (!strncmp(to_swnode(port)->node->name, "port@",
strlen("port@")))
return port;
old = port;
}
return NULL;
}
static struct fwnode_handle *
software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *endpoint)
{
struct swnode *swnode = to_swnode(fwnode);
struct fwnode_handle *parent;
struct fwnode_handle *port;
if (!swnode)
return NULL;
if (endpoint) {
port = software_node_get_parent(endpoint);
parent = software_node_get_parent(port);
} else {
parent = software_node_get_named_child_node(fwnode, "ports");
if (!parent)
parent = software_node_get(&swnode->fwnode);
port = swnode_graph_find_next_port(parent, NULL);
}
for (; port; port = swnode_graph_find_next_port(parent, port)) {
endpoint = software_node_get_next_child(port, endpoint);
if (endpoint) {
fwnode_handle_put(port);
break;
}
}
fwnode_handle_put(parent);
return endpoint;
}
static struct fwnode_handle *
software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
const struct software_node_ref_args *ref;
const struct property_entry *prop;
if (!swnode)
return NULL;
prop = property_entry_get(swnode->node->properties, "remote-endpoint");
if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
return NULL;
ref = prop->pointer;
return software_node_get(software_node_fwnode(ref[0].node));
}
static struct fwnode_handle *
software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
swnode = swnode->parent;
if (swnode && !strcmp(swnode->node->name, "ports"))
swnode = swnode->parent;
return swnode ? software_node_get(&swnode->fwnode) : NULL;
}
static int
software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct swnode *swnode = to_swnode(fwnode);
const char *parent_name = swnode->parent->node->name;
int ret;
if (strlen("port@") >= strlen(parent_name) ||
strncmp(parent_name, "port@", strlen("port@")))
return -EINVAL;
	/* Ports have naming style "port@n"; we need to extract the n. */
ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
if (ret)
return ret;
endpoint->id = swnode->id;
endpoint->local_fwnode = fwnode;
return 0;
}
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
.get_name_prefix = software_node_get_name_prefix,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
.get_reference_args = software_node_get_reference_args,
.graph_get_next_endpoint = software_node_graph_get_next_endpoint,
.graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
.graph_get_port_parent = software_node_graph_get_port_parent,
.graph_parse_endpoint = software_node_graph_parse_endpoint,
};
/* -------------------------------------------------------------------------- */
/**
* software_node_find_by_name - Find software node by name
* @parent: Parent of the software node
* @name: Name of the software node
*
 * The function will find a node that is a child of @parent and that is named
* @name. If no node is found, the function returns NULL.
*
* NOTE: you will need to drop the reference with fwnode_handle_put() after use.
*/
const struct software_node *
software_node_find_by_name(const struct software_node *parent, const char *name)
{
struct swnode *swnode = NULL;
struct kobject *k;
if (!name)
return NULL;
spin_lock(&swnode_kset->list_lock);
list_for_each_entry(k, &swnode_kset->list, entry) {
swnode = kobj_to_swnode(k);
if (parent == swnode->node->parent && swnode->node->name &&
!strcmp(name, swnode->node->name)) {
kobject_get(&swnode->kobj);
break;
}
swnode = NULL;
}
spin_unlock(&swnode_kset->list_lock);
return swnode ? swnode->node : NULL;
}
EXPORT_SYMBOL_GPL(software_node_find_by_name);
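/*
 * Illustrative sketch (not part of the original file): looking up a root
 * software node by name and dropping the reference the lookup takes on
 * the backing kobject. The "example-node" name is hypothetical.
 */
static void __maybe_unused example_find_node(void)
{
	const struct software_node *node;
	node = software_node_find_by_name(NULL, "example-node");
	if (!node)
		return;
	/* ... use the node ... */
	fwnode_handle_put(software_node_fwnode(node));
}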
static struct software_node *software_node_alloc(const struct property_entry *properties)
{
struct property_entry *props;
struct software_node *node;
props = property_entries_dup(properties);
if (IS_ERR(props))
return ERR_CAST(props);
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
property_entries_free(props);
return ERR_PTR(-ENOMEM);
}
node->properties = props;
return node;
}
static void software_node_free(const struct software_node *node)
{
property_entries_free(node->properties);
kfree(node);
}
static void software_node_release(struct kobject *kobj)
{
struct swnode *swnode = kobj_to_swnode(kobj);
if (swnode->parent) {
ida_simple_remove(&swnode->parent->child_ids, swnode->id);
list_del(&swnode->entry);
} else {
ida_simple_remove(&swnode_root_ids, swnode->id);
}
if (swnode->allocated)
software_node_free(swnode->node);
ida_destroy(&swnode->child_ids);
kfree(swnode);
}
static const struct kobj_type software_node_type = {
.release = software_node_release,
.sysfs_ops = &kobj_sysfs_ops,
};
static struct fwnode_handle *
swnode_register(const struct software_node *node, struct swnode *parent,
unsigned int allocated)
{
struct swnode *swnode;
int ret;
swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
if (!swnode)
return ERR_PTR(-ENOMEM);
ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
0, 0, GFP_KERNEL);
if (ret < 0) {
kfree(swnode);
return ERR_PTR(ret);
}
swnode->id = ret;
swnode->node = node;
swnode->parent = parent;
swnode->kobj.kset = swnode_kset;
fwnode_init(&swnode->fwnode, &software_node_ops);
ida_init(&swnode->child_ids);
INIT_LIST_HEAD(&swnode->entry);
INIT_LIST_HEAD(&swnode->children);
if (node->name)
ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
parent ? &parent->kobj : NULL,
"%s", node->name);
else
ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
parent ? &parent->kobj : NULL,
"node%d", swnode->id);
if (ret) {
kobject_put(&swnode->kobj);
return ERR_PTR(ret);
}
/*
	 * Assign the flag only in the successful case, so that the
	 * kobject_put() in the error path above cannot end up freeing
	 * the node and its properties (on failure the caller owns them).
*/
swnode->allocated = allocated;
if (parent)
list_add_tail(&swnode->entry, &parent->children);
kobject_uevent(&swnode->kobj, KOBJ_ADD);
return &swnode->fwnode;
}
/**
* software_node_register_node_group - Register a group of software nodes
* @node_group: NULL terminated array of software node pointers to be registered
*
* Register multiple software nodes at once. If any node in the array
* has its .parent pointer set (which can only be to another software_node),
 * then its parent **must** have been registered before it is, either outside
 * of this function or by ordering the array such that the parent comes before
 * the child.
*/
int software_node_register_node_group(const struct software_node **node_group)
{
unsigned int i;
int ret;
if (!node_group)
return 0;
for (i = 0; node_group[i]; i++) {
ret = software_node_register(node_group[i]);
if (ret) {
software_node_unregister_node_group(node_group);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(software_node_register_node_group);
/**
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
* Unregister multiple software nodes at once. If parent pointers are set up
* in any of the software nodes then the array **must** be ordered such that
* parents come before their children.
*
* NOTE: If you are uncertain whether the array is ordered such that
 * children will be unregistered before their parents, it is wiser to
* remove the nodes individually, in the correct order (child before
* parent).
*/
void software_node_unregister_node_group(
const struct software_node **node_group)
{
unsigned int i = 0;
if (!node_group)
return;
while (node_group[i])
i++;
while (i--)
software_node_unregister(node_group[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
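/*
 * Illustrative sketch (not part of the original file): registering a
 * parent/child pair as a group, ordered parent first as the kernel-doc
 * above requires. All names are hypothetical.
 */
static const struct software_node example_parent = {
	.name = "example-parent",
};
static const struct software_node example_child = {
	.name = "example-child",
	.parent = &example_parent,
};
static const struct software_node *example_group[] = {
	&example_parent,	/* must precede its child */
	&example_child,
	NULL
};
static int __maybe_unused example_register_group(void)
{
	return software_node_register_node_group(example_group);
}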
/**
* software_node_register - Register static software node
* @node: The software node to be registered
*/
int software_node_register(const struct software_node *node)
{
struct swnode *parent = software_node_to_swnode(node->parent);
if (software_node_to_swnode(node))
return -EEXIST;
if (node->parent && !parent)
return -EINVAL;
return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
}
EXPORT_SYMBOL_GPL(software_node_register);
/**
* software_node_unregister - Unregister static software node
* @node: The software node to be unregistered
*/
void software_node_unregister(const struct software_node *node)
{
struct swnode *swnode;
swnode = software_node_to_swnode(node);
if (swnode)
fwnode_remove_software_node(&swnode->fwnode);
}
EXPORT_SYMBOL_GPL(software_node_unregister);
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent)
{
struct fwnode_handle *fwnode;
struct software_node *node;
struct swnode *p;
if (IS_ERR(parent))
return ERR_CAST(parent);
p = to_swnode(parent);
if (parent && !p)
return ERR_PTR(-EINVAL);
node = software_node_alloc(properties);
if (IS_ERR(node))
return ERR_CAST(node);
node->parent = p ? p->node : NULL;
fwnode = swnode_register(node, p, 1);
if (IS_ERR(fwnode))
software_node_free(node);
return fwnode;
}
EXPORT_SYMBOL_GPL(fwnode_create_software_node);
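/*
 * Illustrative sketch (not part of the original file): creating a
 * dynamically allocated root software node from a property array. The
 * property names are hypothetical. Because the properties are deep
 * copied, the stack array does not need to outlive the call.
 */
static struct fwnode_handle * __maybe_unused example_create_node(void)
{
	const struct property_entry props[] = {
		PROPERTY_ENTRY_U32("example-width", 1920),
		PROPERTY_ENTRY_U32("example-height", 1080),
		{ }
	};
	/* Drop the result with fwnode_remove_software_node() when done. */
	return fwnode_create_software_node(props, NULL);
}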
void fwnode_remove_software_node(struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
if (!swnode)
return;
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
/**
* device_add_software_node - Assign software node to a device
* @dev: The device the software node is meant for.
* @node: The software node.
*
* This function will make @node the secondary firmware node pointer of @dev. If
* @dev has no primary node, then @node will become the primary node. The
* function will register @node automatically if it wasn't already registered.
*/
int device_add_software_node(struct device *dev, const struct software_node *node)
{
struct swnode *swnode;
int ret;
/* Only one software node per device. */
if (dev_to_swnode(dev))
return -EBUSY;
swnode = software_node_to_swnode(node);
if (swnode) {
kobject_get(&swnode->kobj);
} else {
ret = software_node_register(node);
if (ret)
return ret;
swnode = software_node_to_swnode(node);
}
set_secondary_fwnode(dev, &swnode->fwnode);
/*
* If the device has been fully registered by the time this function is
* called, software_node_notify() must be called separately so that the
* symlinks get created and the reference count of the node is kept in
* balance.
*/
if (device_is_registered(dev))
software_node_notify(dev);
return 0;
}
EXPORT_SYMBOL_GPL(device_add_software_node);
/**
* device_remove_software_node - Remove device's software node
* @dev: The device with the software node.
*
* This function will unregister the software node of @dev.
*/
void device_remove_software_node(struct device *dev)
{
struct swnode *swnode;
swnode = dev_to_swnode(dev);
if (!swnode)
return;
if (device_is_registered(dev))
software_node_notify_remove(dev);
set_secondary_fwnode(dev, NULL);
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(device_remove_software_node);
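/*
 * Illustrative sketch (not part of the original file): attaching a static
 * software node to a device, e.g. from platform glue code, and detaching
 * it again on teardown. Node and property names are hypothetical.
 */
static const struct property_entry example_dev_props[] = {
	PROPERTY_ENTRY_BOOL("example-wakeup-source"),
	{ }
};
static const struct software_node example_dev_node = {
	.properties = example_dev_props,
};
static int __maybe_unused example_attach(struct device *dev)
{
	/* Registers the node if needed and makes it @dev's secondary fwnode. */
	return device_add_software_node(dev, &example_dev_node);
}
static void __maybe_unused example_detach(struct device *dev)
{
	device_remove_software_node(dev);
}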
/**
* device_create_managed_software_node - Create a software node for a device
* @dev: The device the software node is assigned to.
* @properties: Device properties for the software node.
* @parent: Parent of the software node.
*
* Creates a software node as a managed resource for @dev, which means the
* lifetime of the newly created software node is tied to the lifetime of @dev.
 * For that reason, software nodes created with this function should not be
 * reused or shared. The function takes a deep copy of @properties for the
 * software node.
 *
 * Since the new software node is assigned directly to @dev, and since it should
 * not be shared, it is not returned to the caller. The function returns 0 on
 * success, or a negative errno in case of an error.
*/
int device_create_managed_software_node(struct device *dev,
const struct property_entry *properties,
const struct software_node *parent)
{
struct fwnode_handle *p = software_node_fwnode(parent);
struct fwnode_handle *fwnode;
if (parent && !p)
return -EINVAL;
fwnode = fwnode_create_software_node(properties, p);
if (IS_ERR(fwnode))
return PTR_ERR(fwnode);
to_swnode(fwnode)->managed = true;
set_secondary_fwnode(dev, fwnode);
if (device_is_registered(dev))
software_node_notify(dev);
return 0;
}
EXPORT_SYMBOL_GPL(device_create_managed_software_node);
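/*
 * Illustrative sketch (not part of the original file): creating a managed
 * software node whose lifetime follows @dev. The property name is
 * hypothetical; the node is built from a deep copy of the array.
 */
static int __maybe_unused example_add_managed(struct device *dev)
{
	const struct property_entry props[] = {
		PROPERTY_ENTRY_STRING("example-mode", "host"),
		{ }
	};
	return device_create_managed_software_node(dev, props, NULL);
}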
void software_node_notify(struct device *dev)
{
struct swnode *swnode;
int ret;
swnode = dev_to_swnode(dev);
if (!swnode)
return;
ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
return;
ret = sysfs_create_link(&swnode->kobj, &dev->kobj, dev_name(dev));
if (ret) {
sysfs_remove_link(&dev->kobj, "software_node");
return;
}
kobject_get(&swnode->kobj);
}
void software_node_notify_remove(struct device *dev)
{
struct swnode *swnode;
swnode = dev_to_swnode(dev);
if (!swnode)
return;
sysfs_remove_link(&swnode->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "software_node");
kobject_put(&swnode->kobj);
if (swnode->managed) {
set_secondary_fwnode(dev, NULL);
kobject_put(&swnode->kobj);
}
}
static int __init software_node_init(void)
{
swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj);
if (!swnode_kset)
return -ENOMEM;
return 0;
}
postcore_initcall(software_node_init);
static void __exit software_node_exit(void)
{
ida_destroy(&swnode_root_ids);
kset_unregister(swnode_kset);
}
__exitcall(software_node_exit);
| linux-master | drivers/base/swnode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Device physical location support
*
* Author: Won Chung <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/sysfs.h>
#include "physical_location.h"
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
acpi_status status;
if (!has_acpi_companion(dev))
return false;
status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
if (ACPI_FAILURE(status))
return false;
dev->physical_location =
kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
if (!dev->physical_location) {
ACPI_FREE(pld);
return false;
}
dev->physical_location->panel = pld->panel;
dev->physical_location->vertical_position = pld->vertical_position;
dev->physical_location->horizontal_position = pld->horizontal_position;
dev->physical_location->dock = pld->dock;
dev->physical_location->lid = pld->lid;
ACPI_FREE(pld);
return true;
}
static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const char *panel;
switch (dev->physical_location->panel) {
case DEVICE_PANEL_TOP:
panel = "top";
break;
case DEVICE_PANEL_BOTTOM:
panel = "bottom";
break;
case DEVICE_PANEL_LEFT:
panel = "left";
break;
case DEVICE_PANEL_RIGHT:
panel = "right";
break;
case DEVICE_PANEL_FRONT:
panel = "front";
break;
case DEVICE_PANEL_BACK:
panel = "back";
break;
default:
panel = "unknown";
}
return sysfs_emit(buf, "%s\n", panel);
}
static DEVICE_ATTR_RO(panel);
static ssize_t vertical_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *vertical_position;
switch (dev->physical_location->vertical_position) {
case DEVICE_VERT_POS_UPPER:
vertical_position = "upper";
break;
case DEVICE_VERT_POS_CENTER:
vertical_position = "center";
break;
case DEVICE_VERT_POS_LOWER:
vertical_position = "lower";
break;
default:
vertical_position = "unknown";
}
return sysfs_emit(buf, "%s\n", vertical_position);
}
static DEVICE_ATTR_RO(vertical_position);
static ssize_t horizontal_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *horizontal_position;
switch (dev->physical_location->horizontal_position) {
case DEVICE_HORI_POS_LEFT:
horizontal_position = "left";
break;
case DEVICE_HORI_POS_CENTER:
horizontal_position = "center";
break;
case DEVICE_HORI_POS_RIGHT:
horizontal_position = "right";
break;
default:
horizontal_position = "unknown";
}
return sysfs_emit(buf, "%s\n", horizontal_position);
}
static DEVICE_ATTR_RO(horizontal_position);
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
dev->physical_location->dock ? "yes" : "no");
}
static DEVICE_ATTR_RO(dock);
static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
dev->physical_location->lid ? "yes" : "no");
}
static DEVICE_ATTR_RO(lid);
static struct attribute *dev_attr_physical_location[] = {
&dev_attr_panel.attr,
&dev_attr_vertical_position.attr,
&dev_attr_horizontal_position.attr,
&dev_attr_dock.attr,
&dev_attr_lid.attr,
NULL,
};
const struct attribute_group dev_attr_physical_location_group = {
.name = "physical_location",
.attrs = dev_attr_physical_location,
};
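/*
 * Resulting sysfs layout (illustrative; the parent path depends on the
 * device the group above is attached to):
 *
 *	.../physical_location/panel			e.g. "left"
 *	.../physical_location/vertical_position		e.g. "upper"
 *	.../physical_location/horizontal_position	e.g. "center"
 *	.../physical_location/dock			"yes" or "no"
 *	.../physical_location/lid			"yes" or "no"
 */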
| linux-master | drivers/base/physical_location.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cacheinfo support - processor cache information via sysfs
*
* Based on arch/x86/kernel/cpu/intel_cacheinfo.c
* Author: Sudeep Holla <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx) \
(per_cpu_cacheinfo(cpu) + (idx))
/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
return ci_cacheinfo(cpu);
}
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
/*
	 * For non-DT/ACPI systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels.
*/
if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
use_arch_info)
return (this_leaf->level != 1) && (sib_leaf->level != 1);
if ((sib_leaf->attributes & CACHE_ID) &&
(this_leaf->attributes & CACHE_ID))
return sib_leaf->id == this_leaf->id;
return sib_leaf->fw_token == this_leaf->fw_token;
}
bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
if (!cache_leaves(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
struct cacheinfo *llc_x, *llc_y;
if (!last_level_cache_is_valid(cpu_x) ||
!last_level_cache_is_valid(cpu_y))
return false;
llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
return cache_leaves_are_shared(llc_x, llc_y);
}
#ifdef CONFIG_OF
static bool of_check_cache_nodes(struct device_node *np);
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
const char *line_size_props[2];
const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
{
.size_prop = "cache-size",
.line_size_props = { "cache-line-size",
"cache-block-size", },
.nr_sets_prop = "cache-sets",
}, {
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size",
"i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
}, {
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
if (type == CACHE_TYPE_UNIFIED)
return 0;
return type;
}
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
struct device_node *np)
{
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
int ret;
u32 line_size;
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
ret = of_property_read_u32(np, propname, &line_size);
if (!ret) {
this_leaf->coherency_line_size = line_size;
break;
}
}
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
static void cache_associativity(struct cacheinfo *this_leaf)
{
unsigned int line_size = this_leaf->coherency_line_size;
unsigned int nr_sets = this_leaf->number_of_sets;
unsigned int size = this_leaf->size;
/*
* If the cache is fully associative, there is no need to
* check the other properties.
*/
	if (nr_sets > 1 && size > 0 && line_size > 0)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
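/*
 * Worked example for cache_associativity() above (illustrative numbers):
 * a 32 KiB cache (size = 32768) with 64 sets and a 64-byte line size
 * yields (32768 / 64) / 64 = 8 ways of associativity.
 */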
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
struct device_node *np)
{
return of_property_read_bool(np, "cache-unified");
}
static void cache_of_set_props(struct cacheinfo *this_leaf,
struct device_node *np)
{
/*
* init_cache_level must setup the cache level correctly
* overriding the architecturally specified levels, so
* if type is NONE at this stage, it should be unified
*/
if (this_leaf->type == CACHE_TYPE_NOCACHE &&
cache_node_is_unified(this_leaf, np))
this_leaf->type = CACHE_TYPE_UNIFIED;
cache_size(this_leaf, np);
cache_get_line_size(this_leaf, np);
cache_nr_sets(this_leaf, np);
cache_associativity(this_leaf);
}
static int cache_setup_of_node(unsigned int cpu)
{
struct device_node *np, *prev;
struct cacheinfo *this_leaf;
unsigned int index = 0;
np = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
if (!of_check_cache_nodes(np)) {
of_node_put(np);
return -ENOENT;
}
prev = np;
while (index < cache_leaves(cpu)) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1) {
np = of_find_next_cache_node(np);
of_node_put(prev);
prev = np;
if (!np)
break;
}
cache_of_set_props(this_leaf, np);
this_leaf->fw_token = np;
index++;
}
of_node_put(np);
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
return -ENOENT;
return 0;
}
static bool of_check_cache_nodes(struct device_node *np)
{
struct device_node *next;
if (of_property_present(np, "cache-size") ||
of_property_present(np, "i-cache-size") ||
of_property_present(np, "d-cache-size") ||
of_property_present(np, "cache-unified"))
return true;
next = of_find_next_cache_node(np);
if (next) {
of_node_put(next);
return true;
}
return false;
}
static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
if (of_property_read_bool(np, "cache-size"))
++leaves;
if (of_property_read_bool(np, "i-cache-size"))
++leaves;
if (of_property_read_bool(np, "d-cache-size"))
++leaves;
if (!leaves) {
		/*
		 * The '[i-|d-|]cache-size' property is required, but if
		 * absent, fall back on the 'cache-unified' property: a
		 * unified cache is a single leaf, otherwise assume split
		 * instruction/data caches (two leaves).
		 */
if (of_property_read_bool(np, "cache-unified"))
return 1;
else
return 2;
}
return leaves;
}
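/*
 * Illustrative device-tree fragment (hypothetical, not from this file)
 * for the counting above: the CPU node below contributes two leaves
 * (split L1 i/d caches), while the unified L2 node contributes one.
 *
 *	cpu@0 {
 *		i-cache-size = <0x8000>;
 *		d-cache-size = <0x8000>;
 *		next-level-cache = <&l2>;
 *	};
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-level = <2>;
 *		cache-unified;
 *		cache-size = <0x80000>;
 *	};
 */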
int init_of_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct device_node *np = of_cpu_device_node_get(cpu);
struct device_node *prev = NULL;
unsigned int levels = 0, leaves, level;
if (!of_check_cache_nodes(np)) {
of_node_put(np);
return -ENOENT;
}
leaves = of_count_cache_leaves(np);
if (leaves > 0)
levels = 1;
prev = np;
while ((np = of_find_next_cache_node(np))) {
of_node_put(prev);
prev = np;
if (!of_device_is_compatible(np, "cache"))
goto err_out;
if (of_property_read_u32(np, "cache-level", &level))
goto err_out;
if (level <= levels)
goto err_out;
leaves += of_count_cache_leaves(np);
levels = level;
}
of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
err_out:
of_node_put(np);
return -EINVAL;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
{
return -ENOTSUPP;
}
unsigned int coherency_max_size;
static int cache_setup_properties(unsigned int cpu)
{
int ret = 0;
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
ret = cache_setup_acpi(cpu);
	/* Assume no cache information is available in DT/ACPI from now on. */
if (ret && use_arch_cache_info())
use_arch_info = true;
return ret;
}
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index, sib_index;
int ret = 0;
if (this_cpu_ci->cpu_map_populated)
return 0;
/*
	 * Skip setting up cache properties if the LLC is valid; in that case
	 * we only need to update the shared cpu_map, which can happen when
	 * the cache attributes were populated early, before all the CPUs
	 * were brought online.
*/
if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
ret = cache_setup_properties(cpu);
if (ret)
return ret;
}
for (index = 0; index < cache_leaves(cpu); index++) {
unsigned int i;
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
/*
* Comparing cache IDs only makes sense if the leaves
* belong to the same cache level of same type. Skip
* the check if level and type do not match.
*/
if (sib_leaf->level != this_leaf->level ||
sib_leaf->type != this_leaf->type)
continue;
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
break;
}
}
}
/* record the maximum cache line size */
if (this_leaf->coherency_line_size > coherency_max_size)
coherency_max_size = this_leaf->coherency_line_size;
}
/* shared_cpu_map is now populated for the cpu */
this_cpu_ci->cpu_map_populated = true;
return 0;
}
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index, sib_index;
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
struct cpu_cacheinfo *sib_cpu_ci =
get_cpu_cacheinfo(sibling);
if (sibling == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
/*
* Comparing cache IDs only makes sense if the leaves
* belong to the same cache level of same type. Skip
* the check if level and type do not match.
*/
if (sib_leaf->level != this_leaf->level ||
sib_leaf->type != this_leaf->type)
continue;
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
break;
}
}
}
}
/* cpu is no longer populated in the shared map */
this_cpu_ci->cpu_map_populated = false;
}
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
return;
cache_shared_cpu_map_remove(cpu);
}
int __weak early_cache_level(unsigned int cpu)
{
return -ENOENT;
}
int __weak init_cache_level(unsigned int cpu)
{
return -ENOENT;
}
int __weak populate_cache_leaves(unsigned int cpu)
{
return -ENOENT;
}
static inline
int allocate_cache_info(int cpu)
{
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
}
return 0;
}
int fetch_cache_info(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int levels = 0, split_levels = 0;
int ret;
if (acpi_disabled) {
ret = init_of_cache_level(cpu);
} else {
ret = acpi_get_cache_info(cpu, &levels, &split_levels);
if (!ret) {
this_cpu_ci->num_levels = levels;
/*
* This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come in pairs
*/
this_cpu_ci->num_leaves = levels + split_levels;
}
}
if (ret || !cache_leaves(cpu)) {
ret = early_cache_level(cpu);
if (ret)
return ret;
if (!cache_leaves(cpu))
return -ENOENT;
this_cpu_ci->early_ci_levels = true;
}
return allocate_cache_info(cpu);
}
static inline int init_level_allocate_ci(unsigned int cpu)
{
unsigned int early_leaves = cache_leaves(cpu);
	/*
	 * Since early initialization/allocation of the cacheinfo is allowed
* via fetch_cache_info() and this also gets called as CPU hotplug
* callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
* as it will happen only once (the cacheinfo memory is never freed).
* Just populate the cacheinfo. However, if the cacheinfo has been
* allocated early through the arch-specific early_cache_level() call,
* there is a chance the info is wrong (this can happen on arm64). In
* that case, call init_cache_level() anyway to give the arch-specific
* code a chance to make things right.
*/
if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
return 0;
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
/*
* Now that we have properly initialized the cache level info, make
* sure we don't try to do that again the next time we are called
* (e.g. as CPU hotplug callbacks).
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
if (cache_leaves(cpu) <= early_leaves)
return 0;
kfree(per_cpu_cacheinfo(cpu));
return allocate_cache_info(cpu);
}
int detect_cache_attributes(unsigned int cpu)
{
int ret;
ret = init_level_allocate_ci(cpu);
if (ret)
return ret;
/*
* If LLC is valid the cache leaves were already populated so just go to
* update the cpu map.
*/
if (!last_level_cache_is_valid(cpu)) {
/*
* populate_cache_leaves() may completely setup the cache leaves and
* shared_cpu_map or it may leave it partially setup.
*/
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
}
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
* not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
return 0;
free_ci:
free_cache_attributes(cpu);
return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
static cpumask_t cache_dev_map;
/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
#define show_one(file_name, object) \
static ssize_t file_name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
return sysfs_emit(buf, "%u\n", this_leaf->object); \
}
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpu_map_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
const struct cpumask *mask = &this_leaf->shared_cpu_map;
return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}
static ssize_t shared_cpu_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
const struct cpumask *mask = &this_leaf->shared_cpu_map;
return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
const char *output;
switch (this_leaf->type) {
case CACHE_TYPE_DATA:
output = "Data";
break;
case CACHE_TYPE_INST:
output = "Instruction";
break;
case CACHE_TYPE_UNIFIED:
output = "Unified";
break;
default:
return -EINVAL;
}
return sysfs_emit(buf, "%s\n", output);
}
static ssize_t allocation_policy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
unsigned int ci_attr = this_leaf->attributes;
const char *output;
if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
output = "ReadWriteAllocate";
else if (ci_attr & CACHE_READ_ALLOCATE)
output = "ReadAllocate";
else if (ci_attr & CACHE_WRITE_ALLOCATE)
output = "WriteAllocate";
else
return 0;
return sysfs_emit(buf, "%s\n", output);
}
static ssize_t write_policy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
unsigned int ci_attr = this_leaf->attributes;
int n = 0;
if (ci_attr & CACHE_WRITE_THROUGH)
n = sysfs_emit(buf, "WriteThrough\n");
else if (ci_attr & CACHE_WRITE_BACK)
n = sysfs_emit(buf, "WriteBack\n");
return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
&dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
&dev_attr_shared_cpu_list.attr,
&dev_attr_coherency_line_size.attr,
&dev_attr_ways_of_associativity.attr,
&dev_attr_number_of_sets.attr,
&dev_attr_size.attr,
&dev_attr_allocation_policy.attr,
&dev_attr_write_policy.attr,
&dev_attr_physical_line_partition.attr,
NULL
};
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
return mode;
if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
return mode;
if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
return mode;
if ((attr == &dev_attr_coherency_line_size.attr) &&
this_leaf->coherency_line_size)
return mode;
if ((attr == &dev_attr_ways_of_associativity.attr) &&
this_leaf->size) /* allow 0 = full associativity */
return mode;
if ((attr == &dev_attr_number_of_sets.attr) &&
this_leaf->number_of_sets)
return mode;
if ((attr == &dev_attr_size.attr) && this_leaf->size)
return mode;
if ((attr == &dev_attr_write_policy.attr) &&
(this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
return mode;
if ((attr == &dev_attr_allocation_policy.attr) &&
(this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
return mode;
if ((attr == &dev_attr_physical_line_partition.attr) &&
this_leaf->physical_line_partition)
return mode;
return 0;
}
static const struct attribute_group cache_default_group = {
.attrs = cache_default_attrs,
.is_visible = cache_default_attrs_is_visible,
};
static const struct attribute_group *cache_default_groups[] = {
&cache_default_group,
NULL,
};
static const struct attribute_group *cache_private_groups[] = {
&cache_default_group,
NULL, /* Place holder for private group */
NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
return NULL;
}
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
const struct attribute_group *priv_group =
cache_get_priv_group(this_leaf);
if (!priv_group)
return cache_default_groups;
if (!cache_private_groups[1])
cache_private_groups[1] = priv_group;
return cache_private_groups;
}
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
int i;
struct device *ci_dev;
if (per_cpu_index_dev(cpu)) {
for (i = 0; i < cache_leaves(cpu); i++) {
ci_dev = per_cache_index_dev(cpu, i);
if (!ci_dev)
continue;
device_unregister(ci_dev);
}
kfree(per_cpu_index_dev(cpu));
per_cpu_index_dev(cpu) = NULL;
}
device_unregister(per_cpu_cache_dev(cpu));
per_cpu_cache_dev(cpu) = NULL;
}
static int cpu_cache_sysfs_init(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
if (per_cpu_cacheinfo(cpu) == NULL)
return -ENOENT;
per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
if (IS_ERR(per_cpu_cache_dev(cpu)))
return PTR_ERR(per_cpu_cache_dev(cpu));
/* Allocate all required memory */
per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
sizeof(struct device *), GFP_KERNEL);
if (unlikely(per_cpu_index_dev(cpu) == NULL))
goto err_out;
return 0;
err_out:
cpu_cache_sysfs_exit(cpu);
return -ENOMEM;
}
static int cache_add_dev(unsigned int cpu)
{
unsigned int i;
int rc;
struct device *ci_dev, *parent;
struct cacheinfo *this_leaf;
const struct attribute_group **cache_groups;
rc = cpu_cache_sysfs_init(cpu);
if (unlikely(rc < 0))
return rc;
parent = per_cpu_cache_dev(cpu);
for (i = 0; i < cache_leaves(cpu); i++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, i);
if (this_leaf->disable_sysfs)
continue;
if (this_leaf->type == CACHE_TYPE_NOCACHE)
break;
cache_groups = cache_get_attribute_groups(this_leaf);
ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
"index%1u", i);
if (IS_ERR(ci_dev)) {
rc = PTR_ERR(ci_dev);
goto err;
}
per_cache_index_dev(cpu, i) = ci_dev;
}
cpumask_set_cpu(cpu, &cache_dev_map);
return 0;
err:
cpu_cache_sysfs_exit(cpu);
return rc;
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
if (rc)
return rc;
rc = cache_add_dev(cpu);
if (rc)
free_cache_attributes(cpu);
return rc;
}
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
"base/cacheinfo:online",
cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
| linux-master | drivers/base/cacheinfo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/core.c - core driver model code (device registration, etc)
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2006 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2006 Novell, Inc.
*/
#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "physical_location.h"
#include "power/power.h"
/* Device links support. */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 * @flags: Optional fwnode link flags (e.g. FWLINK_FLAG_CYCLE).
*
* Create a fwnode link between fwnode handles @con and @sup. The fwnode link
* represents the detail that the firmware lists @sup fwnode as supplying a
* resource to @con.
*
* The driver core will use the fwnode link to create a device link between the
* two device objects corresponding to @con and @sup when they are created. The
* driver core will automatically delete the fwnode link between @con and @sup
* after doing that.
*
* Attempts to create duplicate links between the same pair of fwnode handles
* are ignored and there is no reference counting.
*/
static int __fwnode_link_add(struct fwnode_handle *con,
struct fwnode_handle *sup, u8 flags)
{
struct fwnode_link *link;
list_for_each_entry(link, &sup->consumers, s_hook)
if (link->consumer == con) {
link->flags |= flags;
return 0;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
link->supplier = sup;
INIT_LIST_HEAD(&link->s_hook);
link->consumer = con;
INIT_LIST_HEAD(&link->c_hook);
link->flags = flags;
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n",
con, sup);
return 0;
}
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
int ret;
mutex_lock(&fwnode_link_lock);
ret = __fwnode_link_add(con, sup, 0);
mutex_unlock(&fwnode_link_lock);
return ret;
}
/**
* __fwnode_link_del - Delete a link between two fwnode_handles.
* @link: the fwnode_link to be deleted
*
* The fwnode_link_lock needs to be held when this function is called.
*/
static void __fwnode_link_del(struct fwnode_link *link)
{
pr_debug("%pfwf Dropping the fwnode link to %pfwf\n",
link->consumer, link->supplier);
list_del(&link->s_hook);
list_del(&link->c_hook);
kfree(link);
}
/**
* __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
* @link: the fwnode_link to be marked
*
* The fwnode_link_lock needs to be held when this function is called.
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
pr_debug("%pfwf: Relaxing link with %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
* Deletes all supplier links connecting directly to @fwnode.
*/
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
/**
* fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
* @fwnode: fwnode whose consumer links need to be deleted
*
* Deletes all consumer links connecting directly to @fwnode.
*/
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
/**
* fwnode_links_purge - Delete all links connected to a fwnode_handle.
* @fwnode: fwnode whose links needs to be deleted
*
* Deletes all links connecting directly to a fwnode.
*/
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
fwnode_links_purge_suppliers(fwnode);
fwnode_links_purge_consumers(fwnode);
}
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
/* Don't purge consumer links of an added child */
if (fwnode->dev)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_links_purge_consumers(fwnode);
fwnode_for_each_available_child_node(fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
/**
* __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle
* @from: move consumers away from this fwnode
* @to: move consumers to this fwnode
*
* Move all consumer links from @from fwnode to @to fwnode.
*/
static void __fwnode_links_move_consumers(struct fwnode_handle *from,
struct fwnode_handle *to)
{
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
__fwnode_link_add(link->consumer, to, link->flags);
__fwnode_link_del(link);
}
}
/**
* __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
* @fwnode: fwnode from which to pick up dangling consumers
* @new_sup: fwnode of new supplier
*
* If the @fwnode has a corresponding struct device and the device supports
* probing (that is, added to a bus), then we want to let fw_devlink create
* MANAGED device links to this device, so leave @fwnode and its descendant's
* fwnode links alone.
*
* Otherwise, move its consumers to the new supplier @new_sup.
*/
static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
struct fwnode_handle *new_sup)
{
struct fwnode_handle *child;
if (fwnode->dev && fwnode->dev->bus)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
__fwnode_links_move_consumers(fwnode, new_sup);
fwnode_for_each_available_child_node(fwnode, child)
__fw_devlink_pickup_dangling_consumers(child, new_sup);
}
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
static inline void device_links_write_lock(void)
{
mutex_lock(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
mutex_unlock(&device_links_lock);
}
int device_links_read_lock(void) __acquires(&device_links_srcu)
{
return srcu_read_lock(&device_links_srcu);
}
void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
srcu_read_unlock(&device_links_srcu, idx);
}
int device_links_read_lock_held(void)
{
return srcu_read_lock_held(&device_links_srcu);
}
static void device_link_synchronize_removal(void)
{
synchronize_srcu(&device_links_srcu);
}
static void device_link_remove_from_lists(struct device_link *link)
{
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
}
static bool device_is_ancestor(struct device *dev, struct device *target)
{
while (target->parent) {
target = target->parent;
if (dev == target)
return true;
}
return false;
}
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
}
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
* @target: Device to check against.
*
* Check if @target depends on @dev or any device dependent on it (its child or
* its consumer etc). Return 1 if that is the case or 0 otherwise.
*/
int device_is_dependent(struct device *dev, void *target)
{
struct device_link *link;
int ret;
/*
* The "ancestors" check is needed to catch the case when the target
* device has not been completely initialized yet and it is still
* missing from the list of children of its parent device.
*/
if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
if (ret)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (device_link_flag_is_sync_state_only(link->flags))
continue;
if (link->consumer == target)
return 1;
ret = device_is_dependent(link->consumer, target);
if (ret)
break;
}
return ret;
}
static void device_link_init_status(struct device_link *link,
struct device *consumer,
struct device *supplier)
{
switch (supplier->links.status) {
case DL_DEV_PROBING:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* A consumer driver can create a link to a supplier
* that has not completed its probing yet as long as it
* knows that the supplier is already functional (for
* example, it has just acquired some resources from the
* supplier).
*/
link->status = DL_STATE_CONSUMER_PROBE;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
break;
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
/*
	 * Devices that have not been registered yet will be put at the ends
	 * of the lists during registration, so skip them here.
*/
if (device_is_registered(dev))
devices_kset_move_last(dev);
if (device_pm_initialized(dev))
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (device_link_flag_is_sync_state_only(link->flags))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
return 0;
}
/**
* device_pm_move_to_tail - Move set of devices to the end of device lists
* @dev: Device to move
*
* This is a device_reorder_to_tail() wrapper taking the requisite locks.
*
 * It moves @dev along with all of its children and all of its consumers
* to the ends of the device_kset and dpm_list, recursively.
*/
void device_pm_move_to_tail(struct device *dev)
{
int idx;
idx = device_links_read_lock();
device_pm_lock();
device_reorder_to_tail(dev, NULL);
device_pm_unlock();
device_links_read_unlock(idx);
}
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *output;
switch (to_devlink(dev)->status) {
case DL_STATE_NONE:
output = "not tracked";
break;
case DL_STATE_DORMANT:
output = "dormant";
break;
case DL_STATE_AVAILABLE:
output = "available";
break;
case DL_STATE_CONSUMER_PROBE:
output = "consumer probing";
break;
case DL_STATE_ACTIVE:
output = "active";
break;
case DL_STATE_SUPPLIER_UNBIND:
output = "supplier unbinding";
break;
default:
output = "unknown";
break;
}
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);
static ssize_t auto_remove_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
const char *output;
if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
output = "supplier unbind";
else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
output = "consumer unbind";
else
output = "never";
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);
static ssize_t runtime_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
static ssize_t sync_state_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
return sysfs_emit(buf, "%d\n",
!!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
static struct attribute *devlink_attrs[] = {
&dev_attr_status.attr,
&dev_attr_auto_remove_on.attr,
&dev_attr_runtime_pm.attr,
&dev_attr_sync_state_only.attr,
NULL,
};
ATTRIBUTE_GROUPS(devlink);
static void device_link_release_fn(struct work_struct *work)
{
struct device_link *link = container_of(work, struct device_link, rm_work);
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
pm_runtime_release_supplier(link);
/*
* If supplier_preactivated is set, the link has been dropped between
* the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
* in __driver_probe_device(). In that case, drop the supplier's
* PM-runtime usage counter to remove the reference taken by
* pm_runtime_get_suppliers().
*/
if (link->supplier_preactivated)
pm_runtime_put_noidle(link->supplier);
pm_request_idle(link->supplier);
put_device(link->consumer);
put_device(link->supplier);
kfree(link);
}
static void devlink_dev_release(struct device *dev)
{
struct device_link *link = to_devlink(dev);
INIT_WORK(&link->rm_work, device_link_release_fn);
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
* supplier devices get deleted when it runs, so put it into the "long"
* workqueue.
*/
queue_work(system_long_wq, &link->rm_work);
}
static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
};
static int devlink_add_symlinks(struct device *dev)
{
int ret;
size_t len;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
char *buf;
len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
strlen(dev_bus_name(con)) + strlen(dev_name(con)));
len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
goto out;
ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
if (ret)
goto err_con;
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
kfree(buf);
return ret;
}
static void devlink_remove_symlinks(struct device *dev)
{
struct device_link *link = to_devlink(dev);
size_t len;
struct device *sup = link->supplier;
struct device *con = link->consumer;
char *buf;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
strlen(dev_bus_name(con)) + strlen(dev_name(con)));
len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
WARN(1, "Unable to properly free device link symlinks!\n");
return;
}
if (device_is_registered(con)) {
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
}
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}
static struct class_interface devlink_class_intf = {
.class = &devlink_class,
.add_dev = devlink_add_symlinks,
.remove_dev = devlink_remove_symlinks,
};
static int __init devlink_class_init(void)
{
int ret;
ret = class_register(&devlink_class);
if (ret)
return ret;
ret = class_interface_register(&devlink_class_intf);
if (ret)
class_unregister(&devlink_class);
return ret;
}
postcore_initcall(devlink_class_init);
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY | \
DL_FLAG_INFERRED | \
DL_FLAG_CYCLE)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
* DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
* be forced into the active meta state and reference-counted upon the creation
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
* If DL_FLAG_STATELESS is set in @flags, the caller of this function is
* expected to release the link returned by it directly with the help of either
* device_link_del() or device_link_remove().
*
* If that flag is not set, however, the caller of this function is handing the
* management of the link over to the driver core entirely and its return value
* can only be used to check whether or not the link is present. In that case,
* the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
* flags can be used to indicate to the driver core when the link can be safely
* deleted. Namely, setting one of them in @flags indicates to the driver core
* that the link is not going to be used (by the given caller of this function)
* after unbinding the consumer or supplier driver, respectively, from its
* device, so the link can be deleted at that point. If none of them is set,
* the link will be maintained until one of the devices pointed to by it (either
* the consumer or the supplier) is unregistered.
*
* Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
* DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
* managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
* be used to request the driver core to automatically probe for a consumer
* driver after successfully binding a driver to the supplier device.
*
* The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
* DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
* the same time is invalid and will cause NULL to be returned upfront.
* However, if a device link between the given @consumer and @supplier pair
* exists already when this function is called for them, the existing link will
* be returned regardless of its current type and status (the link's flags may
* be modified then). The caller of this function is then expected to treat
* the link as though it has just been created, so (in particular) if
* DL_FLAG_STATELESS was passed in @flags, the link needs to be released
* explicitly when not needed any more (as stated above).
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
*
* The supplier device is required to be registered when this function is called
* and NULL will be returned if that is not the case. The consumer device need
* not be registered, however.
*/
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags)
{
struct device_link *link;
if (!consumer || !supplier || consumer == supplier ||
flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
return NULL;
if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
if (pm_runtime_get_sync(supplier) < 0) {
pm_runtime_put_noidle(supplier);
return NULL;
}
}
if (!(flags & DL_FLAG_STATELESS))
flags |= DL_FLAG_MANAGED;
if (flags & DL_FLAG_SYNC_STATE_ONLY &&
!device_link_flag_is_sync_state_only(flags))
return NULL;
device_links_write_lock();
device_pm_lock();
/*
* If the supplier has not been fully registered yet or there is a
* reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
* the supplier already in the graph, return NULL. If the link is a
* SYNC_STATE_ONLY link, we don't check for reverse dependencies
* because it only affects sync_state() callbacks.
*/
if (!device_pm_initialized(supplier)
|| (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
device_is_dependent(consumer, supplier))) {
link = NULL;
goto out;
}
/*
* SYNC_STATE_ONLY links are useless once a consumer device has probed.
* So, only create it if the consumer hasn't probed yet.
*/
if (flags & DL_FLAG_SYNC_STATE_ONLY &&
consumer->links.status != DL_DEV_NO_DRIVER &&
consumer->links.status != DL_DEV_PROBING) {
link = NULL;
goto out;
}
/*
* DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
* longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
* together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer != consumer)
continue;
if (link->flags & DL_FLAG_INFERRED &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
if (flags & DL_FLAG_RPM_ACTIVE)
refcount_inc(&link->rpm_active);
}
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
!(link->flags & DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
link->flags |= DL_FLAG_STATELESS;
goto out;
}
}
/*
* If the lifetime of the link following from the new flags is
* longer than indicated by the flags of the existing link,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
if (!(link->flags & DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
}
goto out;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
goto out;
refcount_set(&link->rpm_active, 1);
get_device(supplier);
link->supplier = supplier;
INIT_LIST_HEAD(&link->s_node);
get_device(consumer);
link->consumer = consumer;
INIT_LIST_HEAD(&link->c_node);
link->flags = flags;
kref_init(&link->kref);
link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
dev_set_name(&link->link_dev, "%s:%s--%s:%s",
dev_bus_name(supplier), dev_name(supplier),
dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(&link->link_dev);
link = NULL;
goto out;
}
if (flags & DL_FLAG_PM_RUNTIME) {
if (flags & DL_FLAG_RPM_ACTIVE)
refcount_inc(&link->rpm_active);
pm_runtime_new_link(consumer);
}
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS)
link->status = DL_STATE_NONE;
else
device_link_init_status(link, consumer, supplier);
/*
* Some callers expect the link creation during consumer driver probe to
* resume the supplier even without DL_FLAG_RPM_ACTIVE.
*/
if (link->status == DL_STATE_CONSUMER_PROBE &&
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
if (flags & DL_FLAG_SYNC_STATE_ONLY) {
dev_dbg(consumer,
"Linked as a sync state only consumer to %s\n",
dev_name(supplier));
goto out;
}
reorder:
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
*
* It is necessary to hold dpm_list locked throughout all that or else
* we may end up suspending with a wrong ordering of it.
*/
device_reorder_to_tail(consumer, NULL);
dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
out:
device_pm_unlock();
device_links_write_unlock();
if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
pm_runtime_put(supplier);
return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
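/*
* Illustrative sketch (not part of the driver core): a consumer driver's
* probe path would typically create a managed link roughly like this. The
* function name and the way the supplier is obtained are hypothetical.
*/
static int __maybe_unused example_consumer_probe(struct device *consumer,
struct device *supplier)
{
struct device_link *link;
/* Managed link: the driver core drops it when the consumer unbinds. */
link = device_link_add(consumer, supplier,
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE |
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!link)
return -EINVAL;
return 0;
}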
static void __device_link_del(struct kref *kref)
{
struct device_link *link = container_of(kref, struct device_link, kref);
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
pm_runtime_drop_link(link);
device_link_remove_from_lists(link);
device_unregister(&link->link_dev);
}
static void device_link_put_kref(struct device_link *link)
{
if (link->flags & DL_FLAG_STATELESS)
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
else
WARN(1, "Unable to drop a managed device link reference\n");
}
/**
* device_link_del - Delete a stateless link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
* PM. If the link was added multiple times, it needs to be deleted as often.
* Care is required for hotplugged devices: Their links are purged on removal
* and calling device_link_del() is then no longer allowed.
*/
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_link_put_kref(link);
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
/**
* device_link_remove - Delete a stateless link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
*
* The caller must ensure proper synchronization of this function with runtime
* PM.
*/
void device_link_remove(void *consumer, struct device *supplier)
{
struct device_link *link;
if (WARN_ON(consumer == supplier))
return;
device_links_write_lock();
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
device_link_put_kref(link);
break;
}
}
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
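/*
* Illustrative sketch: a stateless link is reference-counted by its creator
* and must be dropped explicitly. Both deletion helpers above drop one
* reference; the function name below is hypothetical.
*/
static void __maybe_unused example_stateless_link(struct device *con,
struct device *sup)
{
struct device_link *link;
link = device_link_add(con, sup, DL_FLAG_STATELESS);
if (!link)
return;
/* ... rely on the supplier here ... */
device_link_del(link);
/* Alternatively: device_link_remove(con, sup); */
}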
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->status != DL_STATE_CONSUMER_PROBE)
continue;
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
}
static bool dev_is_best_effort(struct device *dev)
{
return (fw_devlink_best_effort && dev->can_match) ||
(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}
static struct fwnode_handle *fwnode_links_check_suppliers(
struct fwnode_handle *fwnode)
{
struct fwnode_link *link;
if (!fwnode || fw_devlink_is_permissive())
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
if (!(link->flags & FWLINK_FLAG_CYCLE))
return link->supplier;
return NULL;
}
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
*
* Check links from this device to any suppliers. Walk the list of the device's
* links to suppliers and see if all of them are available. If not, simply
* return -EPROBE_DEFER.
*
* We need to guarantee that the supplier will not go away after the check has
* been positive here. It only can go away in __device_release_driver() and
* that function checks the device's links to consumers. This means we need to
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0, fwnode_ret = 0;
struct fwnode_handle *sup_fw;
/*
* A device that is waiting for one of its suppliers to become available is
* not allowed to probe.
*/
mutex_lock(&fwnode_link_lock);
sup_fw = fwnode_links_check_suppliers(dev->fwnode);
if (sup_fw) {
if (!dev_is_best_effort(dev)) {
fwnode_ret = -EPROBE_DEFER;
dev_err_probe(dev, -EPROBE_DEFER,
"wait for supplier %pfwf\n", sup_fw);
} else {
fwnode_ret = -EAGAIN;
}
}
mutex_unlock(&fwnode_link_lock);
if (fwnode_ret == -EPROBE_DEFER)
return fwnode_ret;
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
link->flags & DL_FLAG_INFERRED &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
}
device_links_missing_supplier(dev);
dev_err_probe(dev, -EPROBE_DEFER,
"supplier %s not ready\n",
dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
return ret ? ret : fwnode_ret;
}
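/*
* Illustrative sketch (hypothetical function name): a probe path consults
* device_links_check_suppliers() before running the driver's probe routine
* and propagates a nonzero result (typically -EPROBE_DEFER) to retry later.
*/
static int __maybe_unused example_probe_gate(struct device *dev)
{
int ret;
ret = device_links_check_suppliers(dev);
if (ret)
return ret;
/* ... all suppliers ready; safe to invoke ->probe() here ... */
return 0;
}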
/**
* __device_links_queue_sync_state - Queue a device for sync_state() callback
* @dev: Device to call sync_state() on
* @list: List head to queue the @dev on
*
* Queues a device so that its sync_state() callback will be invoked later,
* once the device links write lock has been released. This allows the
* sync_state() execution flow to use device links APIs. The caller must
* ensure this function is called with device_links_write_lock() held.
*
* This function does a get_device() to make sure the device is not freed while
* on this list.
*
* So the caller must also ensure that device_links_flush_sync_list() is called
* as soon as the caller releases device_links_write_lock(). This is necessary
* to make sure the sync_state() is called in a timely fashion and the
* put_device() is called on this device.
*/
static void __device_links_queue_sync_state(struct device *dev,
struct list_head *list)
{
struct device_link *link;
if (!dev_has_sync_state(dev))
return;
if (dev->state_synced)
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
}
/*
* Set the flag here to avoid adding the same device to a list more
* than once. This can happen if new consumers get added to the device
* and probed before the list is flushed.
*/
dev->state_synced = true;
if (WARN_ON(!list_empty(&dev->links.defer_sync)))
return;
get_device(dev);
list_add_tail(&dev->links.defer_sync, list);
}
/**
* device_links_flush_sync_list - Call sync_state() on a list of devices
* @list: List of devices to call sync_state() on
* @dont_lock_dev: Device for which lock is already held by the caller
*
* Calls sync_state() on all the devices that have been queued for it. This
* function is used in conjunction with __device_links_queue_sync_state(). The
* @dont_lock_dev parameter is useful when this function is called from a
* context where a device lock is already held.
*/
static void device_links_flush_sync_list(struct list_head *list,
struct device *dont_lock_dev)
{
struct device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
if (dev != dont_lock_dev)
device_lock(dev);
dev_sync_state(dev);
if (dev != dont_lock_dev)
device_unlock(dev);
put_device(dev);
}
}
void device_links_supplier_sync_state_pause(void)
{
device_links_write_lock();
defer_sync_state_count++;
device_links_write_unlock();
}
void device_links_supplier_sync_state_resume(void)
{
struct device *dev, *tmp;
LIST_HEAD(sync_list);
device_links_write_lock();
if (!defer_sync_state_count) {
WARN(true, "Unmatched sync_state pause/resume!");
goto out;
}
defer_sync_state_count--;
if (defer_sync_state_count)
goto out;
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
/*
* Delete from deferred_sync list before queuing it to
* sync_list because defer_sync is used for both lists.
*/
list_del_init(&dev->links.defer_sync);
__device_links_queue_sync_state(dev, &sync_list);
}
out:
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, NULL);
}
static int sync_state_resume_initcall(void)
{
device_links_supplier_sync_state_resume();
return 0;
}
late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
static void device_link_drop_managed(struct device_link *link)
{
link->flags &= ~DL_FLAG_MANAGED;
WRITE_ONCE(link->status, DL_STATE_NONE);
kref_put(&link->kref, __device_link_del);
}
static ssize_t waiting_for_supplier_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
bool val;
device_lock(dev);
mutex_lock(&fwnode_link_lock);
val = !!fwnode_links_check_suppliers(dev->fwnode);
mutex_unlock(&fwnode_link_lock);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);
/**
* device_links_force_bind - Prepares device to be force bound
* @dev: Consumer device.
*
* device_bind_driver() force binds a device to a driver without calling any
* driver probe functions. So the consumer really isn't going to wait for any
* supplier before it's bound to the driver. We still want the device link
* states to be sensible when this happens.
*
* In preparation for device_bind_driver(), this function goes through each
* of the device's supplier links and checks if the supplier is bound. If so,
* the device link status is set to CONSUMER_PROBE. Otherwise, the device link
* is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_force_bind(struct device *dev)
{
struct device_link *link, *ln;
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
device_link_drop_managed(link);
continue;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
}
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
* The probe has been successful, so update links from this device to any
* consumers by changing their status to "available".
*
* Also change the status of @dev's links to suppliers to "active".
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
struct device_link *link, *ln;
LIST_HEAD(sync_list);
/*
* If a device binds successfully, it's expected to have created all
* the device links it needs to or make new device links as it needs
* them. So, fw_devlink no longer needs to create device links to any
* of the device's suppliers.
*
* Also, if a child firmware node of this bound device is not added as a
* device by now, assume it is never going to be added. Make this bound
* device the fallback supplier to the dangling consumers of the child
* firmware node because this bound device is probably implementing the
* child firmware node functionality and we don't want the dangling
* consumers to defer probe indefinitely waiting for a device for the
* child firmware node.
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
mutex_lock(&fwnode_link_lock);
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
* Links created during consumer probe may be in the "consumer
* probe" state to start with if the supplier is still probing
* when they are created and they may become "active" if the
* consumer probe returns first. Skip them here.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
continue;
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
driver_deferred_probe_add(link->consumer);
}
if (defer_sync_state_count)
__device_links_supplier_defer_sync(dev);
else
__device_links_queue_sync_state(dev, &sync_list);
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
if (!(link->flags & DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
* safe to drop the managed link completely.
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
link->flags & DL_FLAG_INFERRED &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
* When dev_is_best_effort() is true, we ignore device
* links to suppliers that don't have a driver. If the
* consumer device still managed to probe, there's no
* point in maintaining a device link in a weird state
* (consumer probed before supplier). So delete it.
*/
device_link_drop_managed(link);
} else {
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
}
/*
* This needs to be done even for the deleted
* DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
* device link that was preventing the supplier from getting a
* sync_state() call.
*/
if (defer_sync_state_count)
__device_links_supplier_defer_sync(supplier);
else
__device_links_queue_sync_state(supplier, &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, dev);
}
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
*
* Delete all non-persistent links from this device to any suppliers.
*
* Persistent links stay around, but their status is changed to "available",
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
device_link_drop_managed(link);
continue;
}
if (link->status != DL_STATE_CONSUMER_PROBE &&
link->status != DL_STATE_ACTIVE)
continue;
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
dev->links.status = DL_DEV_NO_DRIVER;
}
/**
* device_links_no_driver - Update links after failing driver probe.
* @dev: Device whose driver has just failed to probe.
*
* Clean up leftover links to consumers for @dev and invoke
* %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_no_driver(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
* The probe has failed, so if the status of the link is
* "consumer probe" or "active", it must have been added by
* a probing consumer while this device was still probing.
* Change its state to "dormant", as it represents a valid
* relationship, but it is not functionally meaningful.
*/
if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_driver_cleanup - Update links after driver removal.
* @dev: Device whose driver has just gone away.
*
* Update links to consumers for @dev by changing their status to "dormant" and
* invoke %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
struct device_link *link, *ln;
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
* autoremove the links between this @dev and its consumer
* devices that are not active, i.e. where the link state
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
list_del_init(&dev->links.defer_sync);
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_busy - Check if there are any busy links to consumers.
* @dev: Device to check.
*
* Check each consumer of the device and return 'true' if its link's status
* is one of "consumer probe" or "active" (meaning that the given consumer is
* probing right now or its driver is present). Otherwise, change the link
* state to "supplier unbind" to prevent the consumer from being probed
* successfully going forward.
*
* Return 'false' if there are no probing or active consumers.
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
struct device_link *link;
bool ret = false;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
|| link->status == DL_STATE_ACTIVE) {
ret = true;
break;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
}
dev->links.status = DL_DEV_UNBINDING;
device_links_write_unlock();
return ret;
}
/**
* device_links_unbind_consumers - Force unbind consumers of the given device.
* @dev: Device to unbind the consumers of.
*
* Walk the list of links to consumers for @dev and if any of them is in the
* "consumer probe" state, wait for all device probes in progress to complete
* and start over.
*
* If that's not the case, change the status of the link to "supplier unbind"
* and check if the link was in the "active" state. If so, force the consumer
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
* Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
struct device_link *link;
start:
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
if (!(link->flags & DL_FLAG_MANAGED) ||
link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
status = link->status;
if (status == DL_STATE_CONSUMER_PROBE) {
device_links_write_unlock();
wait_for_device_probe();
goto start;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
if (status == DL_STATE_ACTIVE) {
struct device *consumer = link->consumer;
get_device(consumer);
device_links_write_unlock();
device_release_driver_internal(consumer, NULL,
consumer->parent);
put_device(consumer);
goto start;
}
}
device_links_write_unlock();
}
/**
* device_links_purge - Delete existing links to other devices.
* @dev: Target device.
*/
static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
if (dev->class == &devlink_class)
return;
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
*/
device_links_write_lock();
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
WARN_ON(link->status == DL_STATE_ACTIVE);
__device_link_del(&link->kref);
}
list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
WARN_ON(link->status != DL_STATE_DORMANT &&
link->status != DL_STATE_NONE);
__device_link_del(&link->kref);
}
device_links_write_unlock();
}
#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
DL_FLAG_PM_RUNTIME)
static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
return -EINVAL;
if (strcmp(arg, "off") == 0) {
fw_devlink_flags = 0;
} else if (strcmp(arg, "permissive") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
} else if (strcmp(arg, "on") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
} else if (strcmp(arg, "rpm") == 0) {
fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
}
return 0;
}
early_param("fw_devlink", fw_devlink_setup);
static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return kstrtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
#define FW_DEVLINK_SYNC_STATE_STRICT 0
#define FW_DEVLINK_SYNC_STATE_TIMEOUT 1
#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT
static int fw_devlink_sync_state;
#else
static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
#endif
static int __init fw_devlink_sync_state_setup(char *arg)
{
if (!arg)
return -EINVAL;
if (strcmp(arg, "strict") == 0) {
fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT;
return 0;
} else if (strcmp(arg, "timeout") == 0) {
fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
return 0;
}
return -EINVAL;
}
early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup);
static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
{
if (fwlink_flags & FWLINK_FLAG_CYCLE)
return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
return fw_devlink_flags;
}
static bool fw_devlink_is_permissive(void)
{
return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
bool fw_devlink_is_strict(void)
{
return fw_devlink_strict && !fw_devlink_is_permissive();
}
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
return;
fwnode_call_int_op(fwnode, add_links);
fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child = NULL;
fw_devlink_parse_fwnode(fwnode);
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
fw_devlink_parse_fwtree(child);
}
static void fw_devlink_relax_link(struct device_link *link)
{
if (!(link->flags & DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
return;
pm_runtime_drop_link(link);
link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
dev_dbg(link->consumer, "Relaxing link with %s\n",
dev_name(link->supplier));
}
static int fw_devlink_no_driver(struct device *dev, void *data)
{
struct device_link *link = to_devlink(dev);
if (!link->supplier->can_match)
fw_devlink_relax_link(link);
return 0;
}
void fw_devlink_drivers_done(void)
{
fw_devlink_drv_reg_done = true;
device_links_write_lock();
class_for_each_device(&devlink_class, NULL, NULL,
fw_devlink_no_driver);
device_links_write_unlock();
}
static int fw_devlink_dev_sync_state(struct device *dev, void *data)
{
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
if (!(link->flags & DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
dev_warn(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
if (!list_empty(&sup->links.defer_sync))
return 0;
dev_warn(sup, "Timed out. Forcing sync_state()\n");
sup->state_synced = true;
get_device(sup);
list_add_tail(&sup->links.defer_sync, data);
return 0;
}
void fw_devlink_probing_done(void)
{
LIST_HEAD(sync_list);
device_links_write_lock();
class_for_each_device(&devlink_class, NULL, &sync_list,
fw_devlink_dev_sync_state);
device_links_write_unlock();
device_links_flush_sync_list(&sync_list, NULL);
}
/**
* wait_for_init_devices_probe - Try to probe any device needed for init
*
* Some devices might need to be probed and bound successfully before the kernel
* boot sequence can finish and move on to init/userspace. For example, a
* network interface might need to be bound to be able to mount a NFS rootfs.
*
* With fw_devlink=on by default, some of these devices might be blocked from
* probing because they are waiting on an optional supplier that doesn't have
* a driver. While fw_devlink will eventually identify such devices and
* unblock their probing automatically, that might happen too late. For
* example, the IPv4 autoconfig might time out before fw_devlink unblocks
* probing of the network interface.
*
* This function is available to temporarily try and probe all devices that have
* a driver even if some of their suppliers haven't been added or don't have
* drivers.
*
* The drivers can then decide which of the suppliers are optional vs mandatory
* and probe the device if possible. By the time this function returns, all such
* "best effort" probes are guaranteed to be completed. If a device successfully
* probes in this mode, we delete all fw_devlink discovered dependencies of that
* device where the supplier hasn't yet probed successfully because they have to
* be optional dependencies.
*
* Any devices that didn't successfully probe go back to being treated as if
* this function was never called.
*
* This also means that some devices that aren't needed for init and could have
* waited for their optional supplier to probe (when the supplier's module is
* loaded later on) would end up probing prematurely with limited functionality.
* So call this function only when boot would fail without it.
*/
void __init wait_for_init_devices_probe(void)
{
if (!fw_devlink_flags || fw_devlink_is_permissive())
return;
/*
* Wait for all ongoing probes to finish so that the "best effort" is
* only applied to devices that can't probe otherwise.
*/
wait_for_device_probe();
pr_info("Trying to probe devices needed for running init ...\n");
fw_devlink_best_effort = true;
driver_deferred_probe_trigger();
/*
* Wait for all "best effort" probes to finish before going back to
* normal enforcement.
*/
wait_for_device_probe();
fw_devlink_best_effort = false;
}
static void fw_devlink_unblock_consumers(struct device *dev)
{
struct device_link *link;
if (!fw_devlink_flags || fw_devlink_is_permissive())
return;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node)
fw_devlink_relax_link(link);
device_links_write_unlock();
}
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
bool ret;
if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
return false;
dev = get_dev_from_fwnode(fwnode);
ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
put_device(dev);
return ret;
}
static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
fwnode_for_each_parent_node(fwnode, parent) {
if (fwnode_init_without_drv(parent)) {
fwnode_handle_put(parent);
return true;
}
}
return false;
}
/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
* @con: Potential consumer device.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_lock and device link lock held.
*
* Check if @sup_handle or any of its ancestors or suppliers directly or
* indirectly depend on @con. This function can detect multiple cycles
* between @sup_handle
* and @con. When such dependency cycles are found, convert all device links
* created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
* all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
* converted into a device link in the future, they are created as
* SYNC_STATE_ONLY device links. This is the equivalent of doing
* fw_devlink=permissive just between the devices in the cycle. We need to do
* this because, at this point, fw_devlink can't tell which of these
* dependencies is not a real dependency.
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
static bool __fw_devlink_relax_cycles(struct device *con,
struct fwnode_handle *sup_handle)
{
struct device *sup_dev = NULL, *par_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
if (!sup_handle)
return false;
/*
* We aren't trying to find all cycles. Just a cycle between con and
* sup_handle.
*/
if (sup_handle->flags & FWNODE_FLAG_VISITED)
return false;
sup_handle->flags |= FWNODE_FLAG_VISITED;
sup_dev = get_dev_from_fwnode(sup_handle);
/* Termination condition. */
if (sup_dev == con) {
ret = true;
goto out;
}
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
con->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
if (__fw_devlink_relax_cycles(con, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
}
/*
* Give priority to device parent over fwnode parent to account for any
* quirks in how fwnodes are converted to devices.
*/
if (sup_dev)
par_dev = get_device(sup_dev->parent);
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
ret = true;
if (!sup_dev)
goto out;
list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
/*
* Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
!(dev_link->flags & DL_FLAG_CYCLE))
continue;
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
}
}
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
put_device(par_dev);
return ret;
}
/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con: consumer device for the device link
* @sup_handle: fwnode handle of supplier
* @link: fwnode link that's being converted to a device link
*
* This function will try to create a device link between the consumer device
* @con and the supplier device represented by @sup_handle.
*
* The supplier has to be provided as a fwnode because incorrect cycles in
* fwnode links can sometimes cause the supplier device to never be created.
* This function detects such cases and returns an error if it cannot create a
* device link from the consumer to a missing supplier.
*
* Returns,
* 0 on successfully creating a device link
* -EINVAL if the device link cannot be created as expected
* -EAGAIN if the device link cannot be created right now, but it may be
* possible to do that in the future
*/
static int fw_devlink_create_devlink(struct device *con,
struct fwnode_handle *sup_handle,
struct fwnode_link *link)
{
struct device *sup_dev;
int ret = 0;
u32 flags;
if (con->fwnode == link->consumer)
flags = fw_devlink_get_flags(link->flags);
else
flags = FW_DEVLINK_FLAGS_PERMISSIVE;
/*
* In some cases, a device P might also be a supplier to its child node
* C. However, this would defer the probe of C until the probe of P
* completes successfully. This is perfectly fine in the device driver
* model. device_add() doesn't guarantee probe completion of the device
* by the time it returns.
*
* However, there are a few drivers that assume C will finish probing
* as soon as it's added and before P finishes probing. So, we provide
* a flag to let fw_devlink know not to delay the probe of C until the
* probe of P completes successfully.
*
* When such a flag is set, we can't create device links where P is the
* supplier of C as that would delay the probe of C.
*/
if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
/*
* SYNC_STATE_ONLY device links don't block probing and support cycles.
* So cycle detection isn't necessary and shouldn't be done.
*/
if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
device_links_write_unlock();
}
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
else
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
* If it's one of those drivers that don't actually bind to
* their device using driver core, then don't wait on this
* supplier device indefinitely.
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
dev_dbg(con,
"Not linking %pfwf - dev might never probe\n",
sup_handle);
ret = -EINVAL;
goto out;
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
dev_err(con, "Failed to create device link (0x%x) with %s\n",
flags, dev_name(sup_dev));
ret = -EINVAL;
}
goto out;
}
/*
* Supplier or supplier's ancestor already initialized without a struct
* device or being probed by a driver.
*/
if (fwnode_init_without_drv(sup_handle) ||
fwnode_ancestor_init_without_drv(sup_handle)) {
dev_dbg(con, "Not linking %pfwf - might never become dev\n",
sup_handle);
return -EINVAL;
}
ret = -EAGAIN;
out:
put_device(sup_dev);
return ret;
}
/**
* __fw_devlink_link_to_consumers - Create device links to consumers of a device
* @dev: Device that needs to be linked to its consumers
*
* This function looks at all the consumer fwnodes of @dev and creates device
* links between the consumer device and @dev (supplier).
*
* If the consumer device has not been added yet, then this function creates a
* SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
* of the consumer fwnode. This is necessary to make sure @dev doesn't get a
* sync_state() callback before the real consumer device gets to be added and
* then probed.
*
* Once device links are created from the real consumer to @dev (supplier), the
* fwnode links are deleted.
*/
static void __fw_devlink_link_to_consumers(struct device *dev)
{
struct fwnode_handle *fwnode = dev->fwnode;
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
struct device *con_dev;
bool own_link = true;
int ret;
con_dev = get_dev_from_fwnode(link->consumer);
/*
* If consumer device is not available yet, make a "proxy"
* SYNC_STATE_ONLY link from the consumer's parent device to
* the supplier device. This is necessary to make sure the
* supplier doesn't get a sync_state() callback before the real
* consumer can create a device link to the supplier.
*
* This proxy link step is needed to handle the case where the
* consumer's parent device is added before the supplier.
*/
if (!con_dev) {
con_dev = fwnode_get_next_parent_dev(link->consumer);
/*
* However, if the consumer's parent device is also the
* parent of the supplier, don't create a
* consumer-supplier link from the parent to its child
* device. Such a dependency is impossible.
*/
if (con_dev &&
fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
put_device(con_dev);
con_dev = NULL;
} else {
own_link = false;
}
}
if (!con_dev)
continue;
ret = fw_devlink_create_devlink(con_dev, fwnode, link);
put_device(con_dev);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
}
}
/**
* __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
* @dev: The consumer device that needs to be linked to its suppliers
* @fwnode: Root of the fwnode tree that is used to create device links
*
* This function looks at all the supplier fwnodes of fwnode tree rooted at
* @fwnode and creates device links between @dev (consumer) and all the
* supplier devices of the entire fwnode tree at @fwnode.
*
* The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
* and the real suppliers of @dev. Once these device links are created, the
* fwnode links are deleted.
*
* In addition, it also looks at all the suppliers of the entire fwnode tree
* because some of the child devices of @dev that have not been added yet
* (because @dev hasn't probed) might already have their suppliers added to
* driver core. So, this function creates SYNC_STATE_ONLY device links between
* @dev (consumer) and these suppliers to make sure they don't execute their
* sync_state() callbacks before these child devices have a chance to create
* their device links. The fwnode links that correspond to the child devices
* aren't deleted because they are needed later to create the device links
* between the real consumer and supplier devices.
*/
static void __fw_devlink_link_to_suppliers(struct device *dev,
struct fwnode_handle *fwnode)
{
bool own_link = (dev->fwnode == fwnode);
struct fwnode_link *link, *tmp;
struct fwnode_handle *child = NULL;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
struct fwnode_handle *sup = link->supplier;
ret = fw_devlink_create_devlink(dev, sup, link);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
}
/*
* Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
* all the descendants. This proxy link step is needed to handle the
* case where the supplier is added before the consumer's parent device
* (@dev).
*/
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
__fw_devlink_link_to_suppliers(dev, child);
}
static void fw_devlink_link_device(struct device *dev)
{
struct fwnode_handle *fwnode = dev->fwnode;
if (!fw_devlink_flags)
return;
fw_devlink_parse_fwtree(fwnode);
mutex_lock(&fwnode_link_lock);
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
mutex_unlock(&fwnode_link_lock);
}
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
/* /sys/dev/char */
static struct kobject *sysfs_dev_char_kobj;
/* /sys/dev/block */
static struct kobject *sysfs_dev_block_kobj;
static DEFINE_MUTEX(device_hotplug_lock);
void lock_device_hotplug(void)
{
mutex_lock(&device_hotplug_lock);
}
void unlock_device_hotplug(void)
{
mutex_unlock(&device_hotplug_lock);
}
int lock_device_hotplug_sysfs(void)
{
if (mutex_trylock(&device_hotplug_lock))
return 0;
/* Avoid busy looping (5 ms of sleep should do). */
msleep(5);
return restart_syscall();
}
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
return 1;
}
#endif
static void device_platform_notify(struct device *dev)
{
acpi_device_notify(dev);
software_node_notify(dev);
if (platform_notify)
platform_notify(dev);
}
static void device_platform_notify_remove(struct device *dev)
{
if (platform_notify_remove)
platform_notify_remove(dev);
software_node_notify_remove(dev);
acpi_device_notify_remove(dev);
}
/**
* dev_driver_string - Return a device's driver name, if at all possible
* @dev: struct device to get the name of
*
* Will return the name of the device's driver if one is bound to it. If
* the device is not bound to a driver, it will return the name of the bus
* it is attached to. If it is not attached to a bus either, an empty
* string will be returned.
*/
const char *dev_driver_string(const struct device *dev)
{
struct device_driver *drv;
/* dev->driver can change to NULL underneath us because of unbinding,
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
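/*
* Illustrative sketch (hypothetical function name): dev_driver_string() is
* convenient in messages that must work whether or not a driver is bound.
*/
static void __maybe_unused example_log_handler(struct device *dev)
{
/* Falls back to the bus name, or "" when there is no bus either. */
dev_info(dev, "handled by '%s'\n", dev_driver_string(dev));
}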
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct device_attribute *dev_attr = to_dev_attr(attr);
struct device *dev = kobj_to_dev(kobj);
ssize_t ret = -EIO;
if (dev_attr->show)
ret = dev_attr->show(dev, dev_attr, buf);
if (ret >= (ssize_t)PAGE_SIZE) {
printk("dev_attr_show: %pS returned bad count\n",
dev_attr->show);
}
return ret;
}
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct device_attribute *dev_attr = to_dev_attr(attr);
struct device *dev = kobj_to_dev(kobj);
ssize_t ret = -EIO;
if (dev_attr->store)
ret = dev_attr->store(dev, dev_attr, buf, count);
return ret;
}
static const struct sysfs_ops dev_sysfs_ops = {
.show = dev_attr_show,
.store = dev_attr_store,
};
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
ssize_t device_store_ulong(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
int ret;
unsigned long new;
ret = kstrtoul(buf, 0, &new);
if (ret)
return ret;
*(unsigned long *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);
ssize_t device_show_ulong(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
ssize_t device_store_int(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
int ret;
long new;
ret = kstrtol(buf, 0, &new);
if (ret)
return ret;
if (new > INT_MAX || new < INT_MIN)
return -EINVAL;
*(int *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
return size;
}
EXPORT_SYMBOL_GPL(device_store_int);
ssize_t device_show_int(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
if (kstrtobool(buf, ea->var) < 0)
return -EINVAL;
return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
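/*
* Illustrative sketch: these helpers are normally wired up through the
* DEVICE_ULONG_ATTR()/DEVICE_INT_ATTR()/DEVICE_BOOL_ATTR() macros from
* <linux/device.h>, which embed the variable pointer in a struct
* dev_ext_attribute. The variable and function names are hypothetical.
*/
static unsigned long example_timeout_ms;
static DEVICE_ULONG_ATTR(example_timeout_ms, 0644, example_timeout_ms);
static int __maybe_unused example_add_timeout_attr(struct device *dev)
{
return device_create_file(dev, &dev_attr_example_timeout_ms.attr);
}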
/**
* device_release - free device structure.
* @kobj: device's kobject.
*
* This is called once the reference count for the object
* reaches 0. We forward the call to the device's release
* method, which should handle actually freeing the structure.
*/
static void device_release(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
struct device_private *p = dev->p;
/*
* Some platform devices are driven without driver attached
* and managed resources may have been acquired. Make sure
* all resources are released.
*
* Drivers can still add resources to the device after it has been
* deleted but is still alive, so release devres here to avoid a
* possible memory leak.
*/
devres_release_all(dev);
kfree(dev->dma_range_map);
if (dev->release)
dev->release(dev);
else if (dev->type && dev->type->release)
dev->type->release(dev);
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
else
WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
dev_name(dev));
kfree(p);
}
static const void *device_namespace(const struct kobject *kobj)
{
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
if (dev->class && dev->class->ns_type)
ns = dev->class->namespace(dev);
return ns;
}
static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
const struct device *dev = kobj_to_dev(kobj);
if (dev->class && dev->class->get_ownership)
dev->class->get_ownership(dev, uid, gid);
}
static const struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
.get_ownership = device_get_ownership,
};
static int dev_uevent_filter(const struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
const struct device *dev = kobj_to_dev(kobj);
if (dev->bus)
return 1;
if (dev->class)
return 1;
}
return 0;
}
static const char *dev_uevent_name(const struct kobject *kobj)
{
const struct device *dev = kobj_to_dev(kobj);
if (dev->bus)
return dev->bus->name;
if (dev->class)
return dev->class->name;
return NULL;
}
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
int retval = 0;
/* add device node properties if present */
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
umode_t mode = 0;
kuid_t uid = GLOBAL_ROOT_UID;
kgid_t gid = GLOBAL_ROOT_GID;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
if (name) {
add_uevent_var(env, "DEVNAME=%s", name);
if (mode)
add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
if (!uid_eq(uid, GLOBAL_ROOT_UID))
add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
if (!gid_eq(gid, GLOBAL_ROOT_GID))
add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
kfree(tmp);
}
}
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
/* Add common DT information about the device */
of_device_uevent(dev, env);
/* have the bus specific function add its stuff */
if (dev->bus && dev->bus->uevent) {
retval = dev->bus->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: bus uevent() returned %d\n",
dev_name(dev), __func__, retval);
}
/* have the class specific function add its stuff */
if (dev->class && dev->class->dev_uevent) {
retval = dev->class->dev_uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: class uevent() "
"returned %d\n", dev_name(dev),
__func__, retval);
}
/* have the device type specific function add its stuff */
if (dev->type && dev->type->uevent) {
retval = dev->type->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: dev_type uevent() "
"returned %d\n", dev_name(dev),
__func__, retval);
}
return retval;
}
static const struct kset_uevent_ops device_uevent_ops = {
.filter = dev_uevent_filter,
.name = dev_uevent_name,
.uevent = dev_uevent,
};
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct kobject *top_kobj;
struct kset *kset;
struct kobj_uevent_env *env = NULL;
int i;
int len = 0;
int retval;
	/* search the kset the device belongs to */
top_kobj = &dev->kobj;
while (!top_kobj->kset && top_kobj->parent)
top_kobj = top_kobj->parent;
if (!top_kobj->kset)
goto out;
kset = top_kobj->kset;
if (!kset->uevent_ops || !kset->uevent_ops->uevent)
goto out;
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
/* copy keys to file */
for (i = 0; i < env->envp_idx; i++)
len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
kfree(env);
return len;
}
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
return rc;
}
return count;
}
static DEVICE_ATTR_RW(uevent);
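/*
 * Illustrative note (not in the original source): userspace can trigger a
 * synthetic uevent through this attribute, e.g. from a shell:
 *
 *	echo add > /sys/devices/.../uevent
 *
 * which lands in uevent_store() above and, via kobject_synth_uevent(),
 * re-emits a KOBJ_ADD event for the device.
 */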
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
bool val;
device_lock(dev);
val = !dev->offline;
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
ret = val ? device_online(dev) : device_offline(dev);
unlock_device_hotplug();
return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
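/*
 * Illustrative note (not in the original source): this "online" attribute is
 * the sysfs knob behind, e.g., CPU and memory hotplug; writing 0 or 1 ends up
 * in device_offline()/device_online() under device_hotplug_lock:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */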
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const char *loc;
switch (dev->removable) {
case DEVICE_REMOVABLE:
loc = "removable";
break;
case DEVICE_FIXED:
loc = "fixed";
break;
default:
loc = "unknown";
}
return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
union device_attr_group_devres {
const struct attribute_group *group;
const struct attribute_group **groups;
};
static void devm_attr_group_remove(struct device *dev, void *res)
{
union device_attr_group_devres *devres = res;
const struct attribute_group *group = devres->group;
dev_dbg(dev, "%s: removing group %p\n", __func__, group);
sysfs_remove_group(&dev->kobj, group);
}
static void devm_attr_groups_remove(struct device *dev, void *res)
{
union device_attr_group_devres *devres = res;
const struct attribute_group **groups = devres->groups;
dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
sysfs_remove_groups(&dev->kobj, groups);
}
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
* @grp: The attribute group to create
*
* This function creates a group for the first time. It will explicitly
* warn and error if any of the attribute files being created already exist.
*
* Returns 0 on success or error code on failure.
*/
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
union device_attr_group_devres *devres;
int error;
devres = devres_alloc(devm_attr_group_remove,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
error = sysfs_create_group(&dev->kobj, grp);
if (error) {
devres_free(devres);
return error;
}
devres->group = grp;
devres_add(dev, devres);
return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
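/*
 * Usage sketch (illustrative, not part of the original source; foo_probe()
 * and foo_attr_group are hypothetical). A probe function can rely on devres
 * to remove the group automatically when the device is unbound:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_device_add_group(&pdev->dev, &foo_attr_group);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */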
/**
* devm_device_add_groups - create a bunch of managed attribute groups
* @dev: The device to create the group for
* @groups: The attribute groups to create, NULL terminated
*
* This function creates a bunch of managed attribute groups. If an error
* occurs when creating a group, all previously created groups will be
* removed, unwinding everything back to the original state when this
* function was called. It will explicitly warn and error if any of the
* attribute files being created already exist.
*
* Returns 0 on success or error code from sysfs_create_group on failure.
*/
int devm_device_add_groups(struct device *dev,
const struct attribute_group **groups)
{
union device_attr_group_devres *devres;
int error;
devres = devres_alloc(devm_attr_groups_remove,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
error = sysfs_create_groups(&dev->kobj, groups);
if (error) {
devres_free(devres);
return error;
}
devres->groups = groups;
devres_add(dev, devres);
return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
if (class) {
error = device_add_groups(dev, class->dev_groups);
if (error)
return error;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
goto err_remove_class_groups;
}
error = device_add_groups(dev, dev->groups);
if (error)
goto err_remove_type_groups;
if (device_supports_offline(dev) && !dev->offline_disabled) {
error = device_create_file(dev, &dev_attr_online);
if (error)
goto err_remove_dev_groups;
}
if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
error = device_create_file(dev, &dev_attr_waiting_for_supplier);
if (error)
goto err_remove_dev_online;
}
if (dev_removable_is_valid(dev)) {
error = device_create_file(dev, &dev_attr_removable);
if (error)
goto err_remove_dev_waiting_for_supplier;
}
if (dev_add_physical_location(dev)) {
error = device_add_group(dev,
&dev_attr_physical_location_group);
if (error)
goto err_remove_dev_removable;
}
return 0;
err_remove_dev_removable:
device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
err_remove_class_groups:
if (class)
device_remove_groups(dev, class->dev_groups);
return error;
}
static void device_remove_attrs(struct device *dev)
{
const struct class *class = dev->class;
const struct device_type *type = dev->type;
if (dev->physical_location) {
device_remove_group(dev, &dev_attr_physical_location_group);
kfree(dev->physical_location);
}
device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
if (type)
device_remove_groups(dev, type->groups);
if (class)
device_remove_groups(dev, class->dev_groups);
}
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
/* /sys/devices/ */
struct kset *devices_kset;
/**
* devices_kset_move_before - Move device in the devices_kset's list.
* @deva: Device to move.
* @devb: Device @deva should come before.
*/
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s before %s\n",
dev_name(deva), dev_name(devb));
spin_lock(&devices_kset->list_lock);
list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
spin_unlock(&devices_kset->list_lock);
}
/**
* devices_kset_move_after - Move device in the devices_kset's list.
* @deva: Device to move
* @devb: Device @deva should come after.
*/
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s after %s\n",
dev_name(deva), dev_name(devb));
spin_lock(&devices_kset->list_lock);
list_move(&deva->kobj.entry, &devb->kobj.entry);
spin_unlock(&devices_kset->list_lock);
}
/**
* devices_kset_move_last - move the device to the end of devices_kset's list.
* @dev: device to move
*/
void devices_kset_move_last(struct device *dev)
{
if (!devices_kset)
return;
pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
spin_lock(&devices_kset->list_lock);
list_move_tail(&dev->kobj.entry, &devices_kset->list);
spin_unlock(&devices_kset->list_lock);
}
/**
* device_create_file - create sysfs attribute file for device.
* @dev: device.
* @attr: device attribute descriptor.
*/
int device_create_file(struct device *dev,
const struct device_attribute *attr)
{
int error = 0;
if (dev) {
WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
"Attribute %s: write permission without 'store'\n",
attr->attr.name);
WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
"Attribute %s: read permission without 'show'\n",
attr->attr.name);
error = sysfs_create_file(&dev->kobj, &attr->attr);
}
return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
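/*
 * Usage sketch (illustrative, not part of the original source; foo_show() and
 * dev_attr_foo are hypothetical). Attributes are normally declared with the
 * DEVICE_ATTR_*() helpers so the mode and the show/store callbacks stay
 * consistent, which is exactly what the WARN()s above enforce:
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", 42);
 *	}
 *	static DEVICE_ATTR_RO(foo);
 *
 *	err = device_create_file(dev, &dev_attr_foo);
 */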
/**
* device_remove_file - remove sysfs attribute file.
* @dev: device.
* @attr: device attribute descriptor.
*/
void device_remove_file(struct device *dev,
const struct device_attribute *attr)
{
if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
/**
* device_remove_file_self - remove sysfs attribute file from its own method.
* @dev: device.
* @attr: device attribute descriptor.
*
* See kernfs_remove_self() for details.
*/
bool device_remove_file_self(struct device *dev,
const struct device_attribute *attr)
{
if (dev)
return sysfs_remove_file_self(&dev->kobj, &attr->attr);
else
return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);
/**
* device_create_bin_file - create sysfs binary attribute file for device.
* @dev: device.
* @attr: device binary attribute descriptor.
*/
int device_create_bin_file(struct device *dev,
const struct bin_attribute *attr)
{
int error = -EINVAL;
if (dev)
error = sysfs_create_bin_file(&dev->kobj, attr);
return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);
/**
* device_remove_bin_file - remove sysfs binary attribute file
* @dev: device.
* @attr: device binary attribute descriptor.
*/
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr)
{
if (dev)
sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
static void klist_children_get(struct klist_node *n)
{
struct device_private *p = to_device_private_parent(n);
struct device *dev = p->device;
get_device(dev);
}
static void klist_children_put(struct klist_node *n)
{
struct device_private *p = to_device_private_parent(n);
struct device *dev = p->device;
put_device(dev);
}
/**
* device_initialize - init device structure.
* @dev: device.
*
* This prepares the device for use by other layers by initializing
* its fields.
 * It is the first half of device_register(); it may also be called
 * separately, after which @dev's fields may be used. In particular,
 * get_device()/put_device() may be used for reference counting of @dev
 * after calling this function.
*
* All fields in @dev must be initialized by the caller to 0, except
* for those explicitly set to some other value. The simplest
* approach is to use kzalloc() to allocate the structure containing
* @dev.
*
* NOTE: Use put_device() to give up your reference instead of freeing
* @dev directly once you have called this function.
*/
void device_initialize(struct device *dev)
{
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, NUMA_NO_NODE);
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
dev->dma_coherent = dma_default_coherent;
#endif
swiotlb_dev_init(dev);
}
EXPORT_SYMBOL_GPL(device_initialize);
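/*
 * Usage sketch (illustrative, not part of the original source; struct
 * foo_device and foo_release() are hypothetical). The canonical pattern is
 * kzalloc() + device_initialize(), with put_device(), never kfree(), on
 * every failure path afterwards:
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	device_initialize(&foo->dev);
 *	foo->dev.release = foo_release;
 *	err = dev_set_name(&foo->dev, "foo%d", id);
 *	if (!err)
 *		err = device_add(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 */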
struct kobject *virtual_device_parent(struct device *dev)
{
static struct kobject *virtual_dir = NULL;
if (!virtual_dir)
virtual_dir = kobject_create_and_add("virtual",
&devices_kset->kobj);
return virtual_dir;
}
struct class_dir {
struct kobject kobj;
const struct class *class;
};
#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
static void class_dir_release(struct kobject *kobj)
{
struct class_dir *dir = to_class_dir(kobj);
kfree(dir);
}
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj)
{
const struct class_dir *dir = to_class_dir(kobj);
return dir->class->ns_type;
}
static const struct kobj_type class_dir_ktype = {
.release = class_dir_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = class_dir_child_ns_type
};
static struct kobject *class_dir_create_and_add(struct subsys_private *sp,
struct kobject *parent_kobj)
{
struct class_dir *dir;
int retval;
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
return ERR_PTR(-ENOMEM);
dir->class = sp->class;
kobject_init(&dir->kobj, &class_dir_ktype);
dir->kobj.kset = &sp->glue_dirs;
retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
return ERR_PTR(retval);
}
return &dir->kobj;
}
static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
struct subsys_private *sp = class_to_subsys(dev->class);
struct kobject *kobj = NULL;
if (sp) {
struct kobject *parent_kobj;
struct kobject *k;
/*
* If we have no parent, we live in "virtual".
		 * Class devices with a non-class-device parent live in a
		 * "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
parent_kobj = virtual_device_parent(dev);
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
} else {
parent_kobj = &parent->kobj;
}
mutex_lock(&gdp_mutex);
/* find our class-directory at the parent and reference it */
spin_lock(&sp->glue_dirs.list_lock);
list_for_each_entry(k, &sp->glue_dirs.list, entry)
if (k->parent == parent_kobj) {
kobj = kobject_get(k);
break;
}
spin_unlock(&sp->glue_dirs.list_lock);
if (kobj) {
mutex_unlock(&gdp_mutex);
subsys_put(sp);
return kobj;
}
/* or create a new class-directory at the parent device */
k = class_dir_create_and_add(sp, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
subsys_put(sp);
return k;
}
/* subsystems can specify a default root directory for their devices */
if (!parent && dev->bus) {
struct device *dev_root = bus_get_dev_root(dev->bus);
if (dev_root) {
kobj = &dev_root->kobj;
put_device(dev_root);
return kobj;
}
}
if (parent)
return &parent->kobj;
return NULL;
}
static inline bool live_in_glue_dir(struct kobject *kobj,
struct device *dev)
{
struct subsys_private *sp;
bool retval;
if (!kobj || !dev->class)
return false;
sp = class_to_subsys(dev->class);
if (!sp)
return false;
if (kobj->kset == &sp->glue_dirs)
retval = true;
else
retval = false;
subsys_put(sp);
return retval;
}
static inline struct kobject *get_glue_dir(struct device *dev)
{
return dev->kobj.parent;
}
/**
* kobject_has_children - Returns whether a kobject has children.
* @kobj: the object to test
*
* This will return whether a kobject has other kobjects as children.
*
* It does NOT account for the presence of attribute files, only sub
* directories. It also assumes there is no concurrent addition or
* removal of such children, and thus relies on external locking.
*/
static inline bool kobject_has_children(struct kobject *kobj)
{
WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
return kobj->sd && kobj->sd->dir.subdirs;
}
/*
 * Make sure cleaning up the glue dir is the last step; the kobject's
 * .release handler needs to run while the global gdp_mutex is held.
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
unsigned int ref;
/* see if we live in a "glue" directory */
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
	/*
* There is a race condition between removing glue directory
* and adding a new device under the glue directory.
*
* CPU1: CPU2:
*
* device_add()
* get_device_parent()
* class_dir_create_and_add()
* kobject_add_internal()
* create_dir() // create glue_dir
*
* device_add()
* get_device_parent()
* kobject_get() // get glue_dir
*
* device_del()
* cleanup_glue_dir()
* kobject_del(glue_dir)
*
* kobject_add()
* kobject_add_internal()
* create_dir() // in glue_dir
* sysfs_create_dir_ns()
* kernfs_create_dir_ns(sd)
*
* sysfs_remove_dir() // glue_dir->sd=NULL
* sysfs_put() // free glue_dir->sd
*
* // sd is freed
* kernfs_new_node(sd)
* kernfs_get(glue_dir)
* kernfs_add_one()
* kernfs_put()
*
	 * Before CPU1 removes the last child device under the glue dir,
	 * if CPU2 adds a new device under the glue dir, the glue_dir
	 * kobject reference count is increased to 2 in kobject_get(k),
	 * and CPU2 has already called kernfs_create_dir_ns(). Meanwhile,
	 * CPU1 calls sysfs_remove_dir() and sysfs_put(), which frees
	 * glue_dir->sd.
	 *
	 * CPU2 then sees a stale, "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * To avoid this, make sure that the kernfs_node for glue_dir is
	 * released on CPU1 only when the refcount of the glue_dir kobject
	 * is 1.
*/
ref = kref_read(&glue_dir->kref);
if (!kobject_has_children(glue_dir) && !--ref)
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
static int device_add_class_symlinks(struct device *dev)
{
struct device_node *of_node = dev_of_node(dev);
struct subsys_private *sp;
int error;
if (of_node) {
error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
/* An error here doesn't warrant bringing down the device */
}
sp = class_to_subsys(dev->class);
if (!sp)
return 0;
error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
if (error)
goto out_devnode;
if (dev->parent && device_is_not_partition(dev)) {
error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
"device");
if (error)
goto out_subsys;
}
/* link in the class directory pointing to the device */
error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
if (error)
goto out_device;
goto exit;
out_device:
sysfs_remove_link(&dev->kobj, "device");
out_subsys:
sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
sysfs_remove_link(&dev->kobj, "of_node");
exit:
subsys_put(sp);
return error;
}
static void device_remove_class_symlinks(struct device *dev)
{
struct subsys_private *sp = class_to_subsys(dev->class);
if (dev_of_node(dev))
sysfs_remove_link(&dev->kobj, "of_node");
if (!sp)
return;
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(&dev->kobj, "subsystem");
sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
subsys_put(sp);
}
/**
* dev_set_name - set a device name
* @dev: device
* @fmt: format string for the device's name
*/
int dev_set_name(struct device *dev, const char *fmt, ...)
{
va_list vargs;
int err;
va_start(vargs, fmt);
err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
va_end(vargs);
return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
/* select a /sys/dev/ directory for the device */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
if (is_blockdev(dev))
return sysfs_dev_block_kobj;
else
return sysfs_dev_char_kobj;
}
static int device_create_sys_dev_entry(struct device *dev)
{
struct kobject *kobj = device_to_dev_kobj(dev);
int error = 0;
char devt_str[15];
if (kobj) {
format_dev_t(devt_str, dev->devt);
error = sysfs_create_link(kobj, &dev->kobj, devt_str);
}
return error;
}
static void device_remove_sys_dev_entry(struct device *dev)
{
struct kobject *kobj = device_to_dev_kobj(dev);
char devt_str[15];
if (kobj) {
format_dev_t(devt_str, dev->devt);
sysfs_remove_link(kobj, devt_str);
}
}
static int device_private_init(struct device *dev)
{
dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p)
return -ENOMEM;
dev->p->device = dev;
klist_init(&dev->p->klist_children, klist_children_get,
klist_children_put);
INIT_LIST_HEAD(&dev->p->deferred_probe);
return 0;
}
/**
* device_add - add device to device hierarchy.
* @dev: device.
*
 * This is part 2 of device_register(), though it may be called
* separately _iff_ device_initialize() has been called separately.
*
* This adds @dev to the kobject hierarchy via kobject_add(), adds it
* to the global and sibling lists for the device, then
* adds it to the other relevant subsystems of the driver model.
*
* Do not call this routine or device_register() more than once for
* any device structure. The driver model core is not designed to work
* with devices that get unregistered and then spring back to life.
* (Among other things, it's very hard to guarantee that all references
* to the previous incarnation of @dev have been dropped.) Allocate
* and register a fresh new struct device instead.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up your
* reference instead.
*
* Rule of thumb is: if device_add() succeeds, you should call
* device_del() when you want to get rid of it. If device_add() has
* *not* succeeded, use *only* put_device() to drop the reference
* count.
*/
int device_add(struct device *dev)
{
struct subsys_private *sp;
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
if (!dev)
goto done;
if (!dev->p) {
error = device_private_init(dev);
if (error)
goto done;
}
/*
* for statically allocated devices, which should all be converted
* some day, we need to initialize the name. We prevent reading back
* the name, and force the use of dev_name()
*/
if (dev->init_name) {
error = dev_set_name(dev, "%s", dev->init_name);
dev->init_name = NULL;
}
if (dev_name(dev))
error = 0;
/* subsystems can specify simple device enumeration */
else if (dev->bus && dev->bus->dev_name)
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
else
error = -EINVAL;
if (error)
goto name_error;
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
if (IS_ERR(kobj)) {
error = PTR_ERR(kobj);
goto parent_error;
}
if (kobj)
dev->kobj.parent = kobj;
/* use parent numa_node */
if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error) {
glue_dir = kobj;
goto Error;
}
/* notify platform of device entry */
device_platform_notify(dev);
error = device_create_file(dev, &dev_attr_uevent);
if (error)
goto attrError;
error = device_add_class_symlinks(dev);
if (error)
goto SymlinkError;
error = device_add_attrs(dev);
if (error)
goto AttrsError;
error = bus_add_device(dev);
if (error)
goto BusError;
error = dpm_sysfs_add(dev);
if (error)
goto DPMError;
device_pm_add(dev);
if (MAJOR(dev->devt)) {
error = device_create_file(dev, &dev_attr_dev);
if (error)
goto DevAttrError;
error = device_create_sys_dev_entry(dev);
if (error)
goto SysEntryError;
devtmpfs_create_node(dev);
}
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
bus_notify(dev, BUS_NOTIFY_ADD_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_ADD);
/*
* Check if any of the other devices (consumers) have been waiting for
* this device (supplier) to be added so that they can create a device
* link to it.
*
* This needs to happen after device_pm_add() because device_link_add()
* requires the supplier be registered before it's called.
*
* But this also needs to happen before bus_probe_device() to make sure
* waiting consumers can link to it before the driver is bound to the
* device and the driver sync_state callback is called for this device.
*/
if (dev->fwnode && !dev->fwnode->dev) {
dev->fwnode->dev = dev;
fw_devlink_link_device(dev);
}
bus_probe_device(dev);
/*
* If all driver registration is done and a newly added device doesn't
* match with any driver, don't block its consumers from probing in
* case the consumer device is able to operate without this supplier.
*/
if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
fw_devlink_unblock_consumers(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
&parent->p->klist_children);
sp = class_to_subsys(dev->class);
if (sp) {
mutex_lock(&sp->mutex);
/* tie the class to the device */
klist_add_tail(&dev->p->knode_class, &sp->klist_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->add_dev)
class_intf->add_dev(dev);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
done:
put_device(dev);
return error;
SysEntryError:
if (MAJOR(dev->devt))
device_remove_file(dev, &dev_attr_dev);
DevAttrError:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
dev->driver = NULL;
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
AttrsError:
device_remove_class_symlinks(dev);
SymlinkError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
device_platform_notify_remove(dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
parent_error:
put_device(parent);
name_error:
kfree(dev->p);
dev->p = NULL;
goto done;
}
EXPORT_SYMBOL_GPL(device_add);
/**
* device_register - register a device with the system.
* @dev: pointer to the device structure
*
* This happens in two clean steps - initialize the device
* and add it to the system. The two steps can be called
* separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if you have a
 * clearly defined need to use and refcount the device
* before it is added to the hierarchy.
*
* For more information, see the kerneldoc for device_initialize()
* and device_add().
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up the
* reference initialized in this function instead.
*/
int device_register(struct device *dev)
{
device_initialize(dev);
return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
/**
* get_device - increment reference count for device.
* @dev: device.
*
* This simply forwards the call to kobject_get(), though
* we do take care to provide for the case that we get a NULL
* pointer passed in.
*/
struct device *get_device(struct device *dev)
{
return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);
/**
* put_device - decrement reference count.
* @dev: device in question.
*/
void put_device(struct device *dev)
{
/* might_sleep(); */
if (dev)
kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);
bool kill_device(struct device *dev)
{
/*
* Require the device lock and set the "dead" flag to guarantee that
* the update behavior is consistent with the other bitfields near
* it and that we cannot have an asynchronous probe routine trying
* to run while we are tearing out the bus/class/sysfs from
* underneath the device.
*/
device_lock_assert(dev);
if (dev->p->dead)
return false;
dev->p->dead = true;
return true;
}
EXPORT_SYMBOL_GPL(kill_device);
/**
* device_del - delete device from system.
* @dev: device.
*
* This is the first part of the device unregistration
* sequence. This removes the device from the lists we control
* from here, has it removed from the other driver model
* subsystems it was added to in device_add(), and removes it
* from the kobject hierarchy.
*
* NOTE: this should be called manually _iff_ device_add() was
* also called manually.
*/
void device_del(struct device *dev)
{
struct subsys_private *sp;
struct device *parent = dev->parent;
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
unsigned int noio_flag;
device_lock(dev);
kill_device(dev);
device_unlock(dev);
if (dev->fwnode && dev->fwnode->dev == dev)
dev->fwnode->dev = NULL;
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
noio_flag = memalloc_noio_save();
bus_notify(dev, BUS_NOTIFY_DEL_DEVICE);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
if (MAJOR(dev->devt)) {
devtmpfs_delete_node(dev);
device_remove_sys_dev_entry(dev);
device_remove_file(dev, &dev_attr_dev);
}
sp = class_to_subsys(dev->class);
if (sp) {
device_remove_class_symlinks(dev);
mutex_lock(&sp->mutex);
/* notify any interfaces that the device is now gone */
list_for_each_entry(class_intf, &sp->interfaces, node)
if (class_intf->remove_dev)
class_intf->remove_dev(dev);
/* remove the device from the class list */
klist_del(&dev->p->knode_class);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
device_remove_file(dev, &dev_attr_uevent);
device_remove_attrs(dev);
bus_remove_device(dev);
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_platform_notify_remove(dev);
device_links_purge(dev);
/*
* If a device does not have a driver attached, we need to clean
* up any managed resources. We do this in device_release(), but
* it's never called (and we leak the device) if a managed
* resource holds a reference to the device. So release all
* managed resources here, like we do in driver_detach(). We
* still need to do so again in device_release() in case someone
* adds a new resource after this point, though.
*/
devres_release_all(dev);
bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
cleanup_glue_dir(dev, glue_dir);
memalloc_noio_restore(noio_flag);
put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
/**
* device_unregister - unregister device from system.
* @dev: device going away.
*
* We do this in two parts, like we do device_register(). First,
* we remove it from all the subsystems with device_del(), then
* we decrement the reference count via put_device(). If that
* is the final reference count, the device will be cleaned up
* via device_release() above. Otherwise, the structure will
* stick around until the final reference to the device is dropped.
*/
void device_unregister(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
device_del(dev);
put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
static struct device *prev_device(struct klist_iter *i)
{
struct klist_node *n = klist_prev(i);
struct device *dev = NULL;
struct device_private *p;
if (n) {
p = to_device_private_parent(n);
dev = p->device;
}
return dev;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct device *dev = NULL;
struct device_private *p;
if (n) {
p = to_device_private_parent(n);
dev = p->device;
}
return dev;
}
/**
* device_get_devnode - path of device node file
* @dev: device
* @mode: returned file access mode
* @uid: returned file owner
* @gid: returned file group
* @tmp: possibly allocated string
*
* Return the relative path of a possible device node.
 * Non-default names may need to allocate memory to compose a name.
 * That memory is returned in @tmp and must be freed by the caller.
*/
const char *device_get_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid,
const char **tmp)
{
char *s;
*tmp = NULL;
/* the device type may provide a specific name */
if (dev->type && dev->type->devnode)
*tmp = dev->type->devnode(dev, mode, uid, gid);
if (*tmp)
return *tmp;
/* the class may provide a specific name */
if (dev->class && dev->class->devnode)
*tmp = dev->class->devnode(dev, mode);
if (*tmp)
return *tmp;
/* return name without allocation, tmp == NULL */
if (strchr(dev_name(dev), '!') == NULL)
return dev_name(dev);
/* replace '!' in the name with '/' */
s = kstrdup_and_replace(dev_name(dev), '!', '/', GFP_KERNEL);
if (!s)
return NULL;
return *tmp = s;
}
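/*
 * Illustrative note (not in the original source): the '!' substitution
 * allows device nodes in subdirectories of /dev; e.g. a block device named
 * "cciss!c0d0" yields the node path "cciss/c0d0".
 */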
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
* @fn: function to be called for each device.
* @data: data for the callback.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
int (*fn)(struct device *dev, void *data))
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
while (!error && (child = next_device(&i)))
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);
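/*
 * Usage sketch (illustrative, not part of the original source;
 * foo_count_child() is hypothetical). A callback returning 0 visits every
 * child; any non-zero return stops the walk and is propagated:
 *
 *	static int foo_count_child(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	device_for_each_child(parent, &n, foo_count_child);
 */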
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
* @fn: function to be called for each device.
* @data: data for the callback.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
int (*fn)(struct device *dev, void *data))
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = prev_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @match: Callback function to check device
* @data: Data to pass to match function
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero and a reference to the
* current device can be obtained, this function will return to the caller
* and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_child(struct device *parent, void *data,
int (*match)(struct device *dev, void *data))
{
struct klist_iter i;
struct device *child;
if (!parent)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i)))
if (match(child, data) && get_device(child))
break;
klist_iter_exit(&i);
return child;
}
EXPORT_SYMBOL_GPL(device_find_child);
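/*
 * Usage sketch (illustrative, not part of the original source;
 * foo_match_id() is hypothetical). The matched child is returned with an
 * extra reference that the caller must drop:
 *
 *	static int foo_match_id(struct device *dev, void *data)
 *	{
 *		return dev->id == *(u32 *)data;
 *	}
 *
 *	child = device_find_child(parent, &id, foo_match_id);
 *	if (child) {
 *		...
 *		put_device(child);
 *	}
 */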
/**
* device_find_child_by_name - device iterator for locating a child device.
* @parent: parent struct device
* @name: name of the child device
*
* This is similar to the device_find_child() function above, but it
* returns a reference to a device that has the name @name.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_child_by_name(struct device *parent,
const char *name)
{
struct klist_iter i;
struct device *child;
if (!parent)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i)))
if (sysfs_streq(dev_name(child), name) && get_device(child))
break;
klist_iter_exit(&i);
return child;
}
EXPORT_SYMBOL_GPL(device_find_child_by_name);
static int match_any(struct device *dev, void *unused)
{
return 1;
}
/**
* device_find_any_child - device iterator for locating a child device, if any.
* @parent: parent struct device
*
* This is similar to the device_find_child() function above, but it
* returns a reference to a child device, if any.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_any_child(struct device *parent)
{
return device_find_child(parent, NULL, match_any);
}
EXPORT_SYMBOL_GPL(device_find_any_child);
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
if (!devices_kset)
return -ENOMEM;
dev_kobj = kobject_create_and_add("dev", NULL);
if (!dev_kobj)
goto dev_kobj_err;
sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
if (!sysfs_dev_block_kobj)
goto block_kobj_err;
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
return 0;
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
kobject_put(dev_kobj);
dev_kobj_err:
kset_unregister(devices_kset);
return -ENOMEM;
}
static int device_check_offline(struct device *dev, void *not_used)
{
int ret;
ret = device_for_each_child(dev, NULL, device_check_offline);
if (ret)
return ret;
return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
}
/**
* device_offline - Prepare the device for hot-removal.
* @dev: Device to be put offline.
*
* Execute the device bus type's .offline() callback, if present, to prepare
* the device for a subsequent hot-removal. If that succeeds, the device must
* not be used until either it is removed or its bus type's .online() callback
* is executed.
*
* Call under device_hotplug_lock.
*/
int device_offline(struct device *dev)
{
int ret;
if (dev->offline_disabled)
return -EPERM;
ret = device_for_each_child(dev, NULL, device_check_offline);
if (ret)
return ret;
device_lock(dev);
if (device_supports_offline(dev)) {
if (dev->offline) {
ret = 1;
} else {
ret = dev->bus->offline(dev);
if (!ret) {
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
dev->offline = true;
}
}
}
device_unlock(dev);
return ret;
}
/**
* device_online - Put the device back online after successful device_offline().
* @dev: Device to be put back online.
*
* If device_offline() has been successfully executed for @dev, but the device
* has not been removed subsequently, execute its bus type's .online() callback
* to indicate that the device can be used again.
*
* Call under device_hotplug_lock.
*/
int device_online(struct device *dev)
{
int ret = 0;
device_lock(dev);
if (device_supports_offline(dev)) {
if (dev->offline) {
ret = dev->bus->online(dev);
if (!ret) {
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
dev->offline = false;
}
} else {
ret = 1;
}
}
device_unlock(dev);
return ret;
}
struct root_device {
struct device dev;
struct module *owner;
};
static inline struct root_device *to_root_device(struct device *d)
{
return container_of(d, struct root_device, dev);
}
static void root_device_release(struct device *dev)
{
kfree(to_root_device(dev));
}
/**
* __root_device_register - allocate and register a root device
* @name: root device name
* @owner: owner module of the root device, usually THIS_MODULE
*
* This function allocates a root device and registers it
* using device_register(). In order to free the returned
* device, use root_device_unregister().
*
* Root devices are dummy devices which allow other devices
* to be grouped under /sys/devices. Use this function to
* allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}.
*
* The /sys/devices/{name} directory will also contain a
* 'module' symlink which points to the @owner directory
* in sysfs.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*
* Note: You probably want to use root_device_register().
*/
struct device *__root_device_register(const char *name, struct module *owner)
{
struct root_device *root;
int err = -ENOMEM;
root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
if (!root)
return ERR_PTR(err);
err = dev_set_name(&root->dev, "%s", name);
if (err) {
kfree(root);
return ERR_PTR(err);
}
root->dev.release = root_device_release;
err = device_register(&root->dev);
if (err) {
put_device(&root->dev);
return ERR_PTR(err);
}
#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
if (err) {
device_unregister(&root->dev);
return ERR_PTR(err);
}
root->owner = owner;
}
#endif
return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
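/*
 * Usage sketch (illustrative, not part of the original source): callers
 * normally use the root_device_register() wrapper, which supplies
 * THIS_MODULE as @owner:
 *
 *	root = root_device_register("foo");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	...
 *	root_device_unregister(root);
 */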
/**
* root_device_unregister - unregister and free a root device
* @dev: device going away
*
* This function unregisters and cleans up a device that was created by
* root_device_register().
*/
void root_device_unregister(struct device *dev)
{
struct root_device *root = to_root_device(dev);
if (root->owner)
sysfs_remove_link(&root->dev.kobj, "module");
device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);
static void device_create_release(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
kfree(dev);
}
static __printf(6, 0) struct device *
device_create_groups_vargs(const struct class *class, struct device *parent,
dev_t devt, void *drvdata,
const struct attribute_group **groups,
const char *fmt, va_list args)
{
struct device *dev = NULL;
int retval = -ENODEV;
if (IS_ERR_OR_NULL(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto error;
}
device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
if (retval)
goto error;
retval = device_add(dev);
if (retval)
goto error;
return dev;
error:
put_device(dev);
return ERR_PTR(retval);
}
/**
* device_create - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
* @devt: the dev_t for the char device to be added
* @drvdata: the data to be added to the device for callbacks
* @fmt: string for the device's name
*
* This function can be used by char device classes. A struct device
* will be created in sysfs, registered to the specified class.
*
* A "dev" file will be created, showing the dev_t for the device, if
* the dev_t is not 0,0.
* If a pointer to a parent struct device is passed in, the newly created
* struct device will be a child of that device in sysfs.
* The pointer to the struct device will be returned from the call.
* Any further sysfs files that might be required can be created using this
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*/
struct device *device_create(const struct class *class, struct device *parent,
dev_t devt, void *drvdata, const char *fmt, ...)
{
va_list vargs;
struct device *dev;
va_start(vargs, fmt);
dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
fmt, vargs);
va_end(vargs);
return dev;
}
EXPORT_SYMBOL_GPL(device_create);
/**
* device_create_with_groups - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
* @devt: the dev_t for the char device to be added
* @drvdata: the data to be added to the device for callbacks
* @groups: NULL-terminated list of attribute groups to be created
* @fmt: string for the device's name
*
* This function can be used by char device classes. A struct device
* will be created in sysfs, registered to the specified class.
* Additional attributes specified in the groups parameter will also
* be created automatically.
*
* A "dev" file will be created, showing the dev_t for the device, if
* the dev_t is not 0,0.
* If a pointer to a parent struct device is passed in, the newly created
* struct device will be a child of that device in sysfs.
* The pointer to the struct device will be returned from the call.
* Any further sysfs files that might be required can be created using this
* pointer.
*
* Returns &struct device pointer on success, or ERR_PTR() on error.
*/
struct device *device_create_with_groups(const struct class *class,
struct device *parent, dev_t devt,
void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...)
{
va_list vargs;
struct device *dev;
va_start(vargs, fmt);
dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
fmt, vargs);
va_end(vargs);
return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
/**
* device_destroy - removes a device that was created with device_create()
* @class: pointer to the struct class that this device was registered with
* @devt: the dev_t of the device that was previously registered
*
* This call unregisters and cleans up a device that was created with a
* call to device_create().
*/
void device_destroy(const struct class *class, dev_t devt)
{
struct device *dev;
dev = class_find_device_by_devt(class, devt);
if (dev) {
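		/*
		 * Drop the reference taken by class_find_device_by_devt();
		 * device_unregister() then drops the initial reference.
		 */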
put_device(dev);
device_unregister(dev);
}
}
EXPORT_SYMBOL_GPL(device_destroy);
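/*
 * Usage sketch (illustrative, not part of the original source; foo_class and
 * foo_major are hypothetical). device_create() and device_destroy()
 * typically bracket the lifetime of a char device's sysfs presence:
 *
 *	dev = device_create(foo_class, NULL, MKDEV(foo_major, 0), foo,
 *			    "foo%d", 0);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	device_destroy(foo_class, MKDEV(foo_major, 0));
 */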
/**
* device_rename - renames a device
* @dev: the pointer to the struct device to be renamed
* @new_name: the new name of the device
*
* It is the responsibility of the caller to provide mutual
* exclusion between two different calls of device_rename
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
*
* Note: given that some subsystems (networking and infiniband) use this
* function, with no immediate plans for this to change, we cannot assume or
* require that this function not be called at all.
*
* However, if you're writing new code, do not call this function. The following
* text from Kay Sievers offers some insight:
*
* Renaming devices is racy at many levels, symlinks and other stuff are not
* replaced atomically, and you get a "move" uevent, but it's not easy to
* connect the event to the old and new device. Device nodes are not renamed at
* all, there isn't even support for that in the kernel now.
*
* In the meantime, during renaming, your target name might be taken by another
* driver, creating conflicts. Or the old name is taken directly after you
* renamed it -- then you get events for the same DEVPATH, before you even see
* the "move" event. It's just a mess, and nothing new should ever rely on
* kernel device renaming. Besides that, it's not even implemented now for
* other things than (driver-core wise very simple) network devices.
*
* Make up a "real" name in the driver before you register anything, or add
* some other attributes for userspace to find the device, or use udev to add
* symlinks -- but never rename kernel devices later, it's a complete mess. We
* don't even want to get into that and try to implement the missing pieces in
* the core. We really have other pieces to fix in the driver core mess. :)
*/
int device_rename(struct device *dev, const char *new_name)
{
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
dev = get_device(dev);
if (!dev)
return -EINVAL;
dev_dbg(dev, "renaming to %s\n", new_name);
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
error = -ENOMEM;
goto out;
}
if (dev->class) {
struct subsys_private *sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
goto out;
}
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
subsys_put(sp);
if (error)
goto out;
}
error = kobject_rename(kobj, new_name);
out:
put_device(dev);
kfree(old_device_name);
return error;
}
EXPORT_SYMBOL_GPL(device_rename);
static int device_move_class_links(struct device *dev,
struct device *old_parent,
struct device *new_parent)
{
int error = 0;
if (old_parent)
sysfs_remove_link(&dev->kobj, "device");
if (new_parent)
error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
"device");
return error;
}
/**
* device_move - moves a device to a new parent
* @dev: the pointer to the struct device to be moved
* @new_parent: the new parent of the device (can be NULL)
* @dpm_order: how to reorder the dpm_list
*/
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order)
{
int error;
struct device *old_parent;
struct kobject *new_parent_kobj;
dev = get_device(dev);
if (!dev)
return -EINVAL;
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
if (IS_ERR(new_parent_kobj)) {
error = PTR_ERR(new_parent_kobj);
put_device(new_parent);
goto out;
}
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
error = kobject_move(&dev->kobj, new_parent_kobj);
if (error) {
cleanup_glue_dir(dev, new_parent_kobj);
put_device(new_parent);
goto out;
}
old_parent = dev->parent;
dev->parent = new_parent;
if (old_parent)
klist_remove(&dev->p->knode_parent);
if (new_parent) {
klist_add_tail(&dev->p->knode_parent,
&new_parent->p->klist_children);
set_dev_node(dev, dev_to_node(new_parent));
}
if (dev->class) {
error = device_move_class_links(dev, old_parent, new_parent);
if (error) {
/* We ignore errors on cleanup since we're hosed anyway... */
device_move_class_links(dev, new_parent, old_parent);
if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
if (new_parent)
klist_remove(&dev->p->knode_parent);
dev->parent = old_parent;
if (old_parent) {
klist_add_tail(&dev->p->knode_parent,
&old_parent->p->klist_children);
set_dev_node(dev, dev_to_node(old_parent));
}
}
cleanup_glue_dir(dev, new_parent_kobj);
put_device(new_parent);
goto out;
}
}
switch (dpm_order) {
case DPM_ORDER_NONE:
break;
case DPM_ORDER_DEV_AFTER_PARENT:
device_pm_move_after(dev, new_parent);
devices_kset_move_after(dev, new_parent);
break;
case DPM_ORDER_PARENT_BEFORE_DEV:
device_pm_move_before(new_parent, dev);
devices_kset_move_before(new_parent, dev);
break;
case DPM_ORDER_DEV_LAST:
device_pm_move_last(dev);
devices_kset_move_last(dev);
break;
}
put_device(old_parent);
out:
device_pm_unlock();
put_device(dev);
return error;
}
EXPORT_SYMBOL_GPL(device_move);
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
{
struct kobject *kobj = &dev->kobj;
const struct class *class = dev->class;
const struct device_type *type = dev->type;
int error;
if (class) {
/*
* Change the device groups of the device class for @dev to
* @kuid/@kgid.
*/
error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
kgid);
if (error)
return error;
}
if (type) {
/*
* Change the device groups of the device type for @dev to
* @kuid/@kgid.
*/
error = sysfs_groups_change_owner(kobj, type->groups, kuid,
kgid);
if (error)
return error;
}
/* Change the device groups of @dev to @kuid/@kgid. */
error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
if (error)
return error;
if (device_supports_offline(dev) && !dev->offline_disabled) {
/* Change online device attributes of @dev to @kuid/@kgid. */
error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
kuid, kgid);
if (error)
return error;
}
return 0;
}
/**
* device_change_owner - change the owner of an existing device.
* @dev: device.
* @kuid: new owner's kuid
* @kgid: new owner's kgid
*
* This changes the owner of @dev and its corresponding sysfs entries to
* @kuid/@kgid. This function closely mirrors how @dev was added via driver
* core.
*
* Returns 0 on success or error code on failure.
*/
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
int error;
struct kobject *kobj = &dev->kobj;
struct subsys_private *sp;
dev = get_device(dev);
if (!dev)
return -EINVAL;
/*
* Change the kobject and the default attributes and groups of the
* ktype associated with it to @kuid/@kgid.
*/
error = sysfs_change_owner(kobj, kuid, kgid);
if (error)
goto out;
/*
* Change the uevent file for @dev to the new owner. The uevent file
* was created in a separate step when @dev got added and we mirror
* that step here.
*/
error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
kgid);
if (error)
goto out;
/*
* Change the device groups, the device groups associated with the
* device class, and the groups associated with the device type of @dev
* to @kuid/@kgid.
*/
error = device_attrs_change_owner(dev, kuid, kgid);
if (error)
goto out;
error = dpm_sysfs_change_owner(dev, kuid, kgid);
if (error)
goto out;
/*
* Change the owner of the symlink located in the class directory of
* the device class associated with @dev which points to the actual
* directory entry for @dev to @kuid/@kgid. This ensures that the
* symlink shows the same permissions as its target.
*/
sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
goto out;
}
error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid);
subsys_put(sp);
out:
put_device(dev);
return error;
}
EXPORT_SYMBOL_GPL(device_change_owner);
/**
 * device_shutdown - call ->shutdown() on each device to shut it down.
*/
void device_shutdown(void)
{
struct device *dev, *parent;
wait_for_device_probe();
device_block_probing();
cpufreq_suspend();
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
* Beware that device unplug events may also start pulling
* devices offline, even as the system is shutting down.
*/
while (!list_empty(&devices_kset->list)) {
dev = list_entry(devices_kset->list.prev, struct device,
kobj.entry);
/*
		 * Hold a reference to the device's parent so that it
		 * cannot be freed while we hold its lock below.
*/
parent = get_device(dev->parent);
get_device(dev);
/*
* Make sure the device is off the kset list, in the
* event that dev->*->shutdown() doesn't remove it.
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
/* hold lock to avoid race with probe/release */
if (parent)
device_lock(parent);
device_lock(dev);
/* Don't allow any more runtime suspends */
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
if (dev->class && dev->class->shutdown_pre) {
if (initcall_debug)
dev_info(dev, "shutdown_pre\n");
dev->class->shutdown_pre(dev);
}
if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
} else if (dev->driver && dev->driver->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
device_unlock(dev);
if (parent)
device_unlock(parent);
put_device(dev);
put_device(parent);
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
}
/*
* Device logging functions
*/
#ifdef CONFIG_PRINTK
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
const char *subsys;
memset(dev_info, 0, sizeof(*dev_info));
if (dev->class)
subsys = dev->class->name;
else if (dev->bus)
subsys = dev->bus->name;
else
return;
strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
/*
* Add device identifier DEVICE=:
* b12:8 block dev_t
* c127:3 char dev_t
* n8 netdev ifindex
* +sound:card0 subsystem:devname
*/
if (MAJOR(dev->devt)) {
char c;
if (strcmp(subsys, "block") == 0)
c = 'b';
else
c = 'c';
snprintf(dev_info->device, sizeof(dev_info->device),
"%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
} else if (strcmp(subsys, "net") == 0) {
struct net_device *net = to_net_dev(dev);
snprintf(dev_info->device, sizeof(dev_info->device),
"n%u", net->ifindex);
} else {
snprintf(dev_info->device, sizeof(dev_info->device),
"+%s:%s", subsys, dev_name(dev));
}
}
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
{
struct dev_printk_info dev_info;
set_dev_info(dev, &dev_info);
return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = dev_vprintk_emit(level, dev, fmt, args);
va_end(args);
return r;
}
EXPORT_SYMBOL(dev_printk_emit);
static void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
if (dev)
dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
dev_driver_string(dev), dev_name(dev), vaf);
else
printk("%s(NULL device *): %pV", level, vaf);
}
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
__dev_printk(level, dev, &vaf);
va_end(args);
}
EXPORT_SYMBOL(_dev_printk);
#define define_dev_printk_level(func, kern_level) \
void func(const struct device *dev, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
__dev_printk(kern_level, dev, &vaf); \
\
va_end(args); \
} \
EXPORT_SYMBOL(func);
define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
* @err: error value to test
* @fmt: printf-style format string
* @...: arguments as specified in the format string
*
* This helper implements a common pattern present in probe functions for
* error checking: print a debug or error message depending on whether the
* error value is -EPROBE_DEFER, and propagate the error upwards.
* In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
* checked later by reading the devices_deferred debugfs attribute.
* It replaces code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
* else
* dev_dbg(dev, ...);
* return err;
*
* with::
*
* return dev_err_probe(dev, err, ...);
*
* Note that it is deemed acceptable to use this function for error
* prints during probe even if the @err is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
* of the error code and the fact that the error code is returned.
*
* Returns @err.
*
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (err != -EPROBE_DEFER) {
dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
} else {
device_set_deferred_probe_reason(dev, &vaf);
dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
}
va_end(args);
return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
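/*
 * Example usage (an illustrative sketch, not part of this file; the foo
 * driver and its clock lookup are hypothetical):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
 *
 *		if (IS_ERR(clk))
 *			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
 *					     "failed to get clock\n");
 *		return 0;
 *	}
 *
 * A -EPROBE_DEFER from devm_clk_get() is then logged at debug level and
 * recorded as the deferral reason; any other error is logged via dev_err().
 */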
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
return fwnode && !IS_ERR(fwnode->secondary);
}
/**
* set_primary_fwnode - Change the primary firmware node of a given device.
* @dev: Device to handle.
* @fwnode: New primary firmware node of the device.
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
*
* Valid fwnode cases are:
* - primary --> secondary --> -ENODEV
* - primary --> NULL
* - secondary --> -ENODEV
* - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
struct device *parent = dev->parent;
struct fwnode_handle *fn = dev->fwnode;
if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
if (fn) {
WARN_ON(fwnode->secondary);
fwnode->secondary = fn;
}
dev->fwnode = fwnode;
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
/* Skip nullifying fn->secondary if the primary is shared */
if (parent && fn == parent->fwnode)
return;
/* Set fn->secondary = NULL, so fn remains the primary fwnode */
fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
/**
* set_secondary_fwnode - Change the secondary firmware node of a given device.
* @dev: Device to handle.
* @fwnode: New secondary firmware node of the device.
*
* If a primary firmware node of the device is present, set its secondary
* pointer to @fwnode. Otherwise, set the device's firmware node pointer to
* @fwnode.
*/
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
if (fwnode)
fwnode->secondary = ERR_PTR(-ENODEV);
if (fwnode_is_primary(dev->fwnode))
dev->fwnode->secondary = fwnode;
else
dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
* device_set_of_node_from_dev - reuse device-tree node of another device
* @dev: device whose device-tree node is being set
* @dev2: device whose device-tree node is being reused
*
* Takes another reference to the new device-tree node after first dropping
* any reference held to the old node.
*/
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
of_node_put(dev->of_node);
dev->of_node = of_node_get(dev2->of_node);
dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
dev->fwnode = fwnode;
dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
int device_match_of_node(struct device *dev, const void *np)
{
return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
int device_match_devt(struct device *dev, const void *pdevt)
{
return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
return ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
int device_match_any(struct device *dev, const void *unused)
{
return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
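/*
 * Example usage (an illustrative sketch; the device name is hypothetical):
 * the device_match_*() helpers above are intended as @match callbacks for
 * bus_find_device() and its wrappers, e.g.:
 *
 *	struct device *dev;
 *
 *	dev = bus_find_device(&platform_bus_type, NULL, "deadbeef.foo",
 *			      device_match_name);
 *	if (dev)
 *		put_device(dev);
 *
 * The found device is returned with its reference count incremented, so
 * the caller drops it with put_device() once done with the device.
 */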
| linux-master | drivers/base/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bus.c - bus driver management
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2007 Novell Inc.
* Copyright (c) 2023 Greg Kroah-Hartman <[email protected]>
*/
#include <linux/async.h>
#include <linux/device/bus.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
/* /sys/devices/system */
static struct kset *system_kset;
/* /sys/bus */
static struct kset *bus_kset;
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
* sysfs bindings for drivers
*/
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct driver_attribute driver_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
/**
* bus_to_subsys - Turn a struct bus_type into a struct subsys_private
*
* @bus: pointer to the struct bus_type to look up
*
* The driver core internals need to work on the subsys_private structure, not
* the external struct bus_type pointer. This function walks the list of
* registered busses in the system, finds the matching one and returns the
* internal struct subsys_private that relates to that bus.
*
* Note, the reference count of the return value is INCREMENTED if it is not
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
if (!bus || !bus_kset)
return NULL;
spin_lock(&bus_kset->list_lock);
if (list_empty(&bus_kset->list))
goto done;
list_for_each_entry(kobj, &bus_kset->list, entry) {
struct kset *kset = container_of(kobj, struct kset, kobj);
sp = container_of_const(kset, struct subsys_private, subsys);
if (sp->bus == bus)
goto done;
}
sp = NULL;
done:
sp = subsys_get(sp);
spin_unlock(&bus_kset->list_lock);
return sp;
}
static const struct bus_type *bus_get(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (sp)
return bus;
return NULL;
}
static void bus_put(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
/* two puts are required as the call to bus_to_subsys incremented it again */
subsys_put(sp);
subsys_put(sp);
}
static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct driver_attribute *drv_attr = to_drv_attr(attr);
struct driver_private *drv_priv = to_driver(kobj);
ssize_t ret = -EIO;
if (drv_attr->show)
ret = drv_attr->show(drv_priv->driver, buf);
return ret;
}
static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct driver_attribute *drv_attr = to_drv_attr(attr);
struct driver_private *drv_priv = to_driver(kobj);
ssize_t ret = -EIO;
if (drv_attr->store)
ret = drv_attr->store(drv_priv->driver, buf, count);
return ret;
}
static const struct sysfs_ops driver_sysfs_ops = {
.show = drv_attr_show,
.store = drv_attr_store,
};
static void driver_release(struct kobject *kobj)
{
struct driver_private *drv_priv = to_driver(kobj);
pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__);
kfree(drv_priv);
}
static const struct kobj_type driver_ktype = {
.sysfs_ops = &driver_sysfs_ops,
.release = driver_release,
};
/*
* sysfs bindings for buses
*/
static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
return ret;
}
static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
return ret;
}
static const struct sysfs_ops bus_sysfs_ops = {
.show = bus_attr_show,
.store = bus_attr_store,
};
int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr)
{
struct subsys_private *sp = bus_to_subsys(bus);
int error;
if (!sp)
return -EINVAL;
error = sysfs_create_file(&sp->subsys.kobj, &attr->attr);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_create_file);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (!sp)
return;
sysfs_remove_file(&sp->subsys.kobj, &attr->attr);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
struct subsys_private *priv = to_subsys_private(kobj);
lockdep_unregister_key(&priv->lock_key);
kfree(priv);
}
static const struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
.release = bus_release,
};
static int bus_uevent_filter(const struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
return 0;
}
static const struct kset_uevent_ops bus_uevent_ops = {
.filter = bus_uevent_filter,
};
/* Manually detach a device from its associated driver. */
static ssize_t unbind_store(struct device_driver *drv, const char *buf,
size_t count)
{
const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == drv) {
device_driver_detach(dev);
err = count;
}
put_device(dev);
bus_put(bus);
return err;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
/*
* Manually attach a device to a driver.
* Note: the driver must want to bind to the device,
* it is not possible to override the driver's id table.
*/
static ssize_t bind_store(struct device_driver *drv, const char *buf,
size_t count)
{
const struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
int err = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && driver_match_device(drv, dev)) {
err = device_driver_attach(drv, dev);
if (!err) {
/* success */
err = count;
}
}
put_device(dev);
bus_put(bus);
return err;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf)
{
struct subsys_private *sp = bus_to_subsys(bus);
int ret;
if (!sp)
return -EINVAL;
ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe);
subsys_put(sp);
return ret;
}
static ssize_t drivers_autoprobe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct subsys_private *sp = bus_to_subsys(bus);
if (!sp)
return -EINVAL;
if (buf[0] == '0')
sp->drivers_autoprobe = 0;
else
sp->drivers_autoprobe = 1;
subsys_put(sp);
return count;
}
static ssize_t drivers_probe_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
int err = -EINVAL;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev)
return -ENODEV;
if (bus_rescan_devices_helper(dev, NULL) == 0)
err = count;
put_device(dev);
return err;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct device *dev = NULL;
struct device_private *dev_prv;
if (n) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
}
return dev;
}
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
* @start: device to start iterating from.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @bus's list of devices, and call @fn for each,
* passing it @data. If @start is not NULL, we use that device to
* begin iterating from.
*
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
* NOTE: The device for which @fn returns a non-zero value is not retained
* in any way, nor is its refcount incremented. If the caller needs
* to retain this data, it should do so, and increment the reference
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
int error = 0;
if (!sp)
return -EINVAL;
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_dev);
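/*
 * Example usage (an illustrative sketch; the counting callback is
 * hypothetical):
 *
 *	static int foo_count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	bus_for_each_dev(&platform_bus_type, NULL, &count, foo_count_dev);
 *
 * Returning 0 from the callback keeps the iteration going; a non-zero
 * return stops it and is propagated back to the caller.
 */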
/**
* bus_find_device - device iterator for locating a particular device.
* @bus: bus type
* @start: Device to begin with
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This is similar to the bus_for_each_dev() function above, but it
* returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data))
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
if (!sp)
return NULL;
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
klist_iter_exit(&i);
subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(bus_find_device);
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct driver_private *drv_priv;
if (n) {
drv_priv = container_of(n, struct driver_private, knode_bus);
return drv_priv->driver;
}
return NULL;
}
/**
* bus_for_each_drv - driver iterator
* @bus: bus we're dealing with.
* @start: driver to start iterating on.
* @data: data to pass to the callback.
* @fn: function to call for each driver.
*
* This is nearly identical to the device iterator above.
* We iterate over each driver that belongs to @bus, and call
* @fn for each. If @fn returns anything but 0, we break out
* and return it. If @start is not NULL, we use it as the head
* of the list.
*
* NOTE: we don't return the driver for which @fn returns a non-zero
* value, nor do we leave the reference count incremented for that
* driver. If the caller needs to know that info, it must set it
* in the callback. It must also be sure to increment the refcount
* so it doesn't disappear before returning to the caller.
*/
int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *))
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device_driver *drv;
int error = 0;
if (!sp)
return -EINVAL;
klist_iter_init_node(&sp->klist_drivers, &i,
start ? &start->p->knode_bus : NULL);
while ((drv = next_driver(&i)) && !error)
error = fn(drv, data);
klist_iter_exit(&i);
subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_drv);
/**
* bus_add_device - add device to bus
* @dev: device being added
*
* - Add device's bus attributes.
* - Create links to device's bus.
* - Add the device to its bus's list of devices.
*/
int bus_add_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
int error;
if (!sp) {
/*
* This is a normal operation for many devices that do not
* have a bus assigned to them, just say that all went
* well.
*/
return 0;
}
/*
* Reference in sp is now incremented and will be dropped when
* the device is removed from the bus
*/
pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev));
error = device_add_groups(dev, sp->bus->dev_groups);
if (error)
goto out_put;
error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev));
if (error)
goto out_groups;
error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
if (error)
goto out_subsys;
klist_add_tail(&dev->p->knode_bus, &sp->klist_devices);
return 0;
out_subsys:
sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
out_groups:
device_remove_groups(dev, sp->bus->dev_groups);
out_put:
subsys_put(sp);
return error;
}
/**
* bus_probe_device - probe drivers for a new device
* @dev: device to probe
*
* - Automatically probe for a driver if the bus allows it.
*/
void bus_probe_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
if (!sp)
return;
if (sp->drivers_autoprobe)
device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
if (sif->add_dev)
sif->add_dev(dev, sif);
mutex_unlock(&sp->mutex);
subsys_put(sp);
}
/**
* bus_remove_device - remove device from bus
* @dev: device to be removed
*
* - Remove device from all interfaces.
* - Remove symlink from bus' directory.
* - Delete device from bus's list.
* - Detach from its driver.
* - Drop reference taken in bus_add_device().
*/
void bus_remove_device(struct device *dev)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
if (!sp)
return;
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
if (sif->remove_dev)
sif->remove_dev(dev, sif);
mutex_unlock(&sp->mutex);
sysfs_remove_link(&dev->kobj, "subsystem");
sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
pr_debug("bus: '%s': remove device %s\n",
dev->bus->name, dev_name(dev));
device_release_driver(dev);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in bus_add_device()
*/
subsys_put(sp);
subsys_put(sp);
}
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
ret = driver_create_file(drv, &driver_attr_unbind);
if (ret == 0) {
ret = driver_create_file(drv, &driver_attr_bind);
if (ret)
driver_remove_file(drv, &driver_attr_unbind);
}
return ret;
}
static void remove_bind_files(struct device_driver *drv)
{
driver_remove_file(drv, &driver_attr_bind);
driver_remove_file(drv, &driver_attr_unbind);
}
static BUS_ATTR_WO(drivers_probe);
static BUS_ATTR_RW(drivers_autoprobe);
static int add_probe_files(const struct bus_type *bus)
{
int retval;
retval = bus_create_file(bus, &bus_attr_drivers_probe);
if (retval)
goto out;
retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
if (retval)
bus_remove_file(bus, &bus_attr_drivers_probe);
out:
return retval;
}
static void remove_probe_files(const struct bus_type *bus)
{
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
}
static ssize_t uevent_store(struct device_driver *drv, const char *buf,
size_t count)
{
int rc;
rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
return rc ? rc : count;
}
static DRIVER_ATTR_WO(uevent);
/**
* bus_add_driver - Add a driver to the bus.
* @drv: driver.
*/
int bus_add_driver(struct device_driver *drv)
{
struct subsys_private *sp = bus_to_subsys(drv->bus);
struct driver_private *priv;
int error = 0;
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the driver is removed from the bus
*/
pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto out_put_bus;
}
klist_init(&priv->klist_devices, NULL, NULL);
priv->driver = drv;
drv->p = priv;
priv->kobj.kset = sp->drivers_kset;
error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
"%s", drv->name);
if (error)
goto out_unregister;
klist_add_tail(&priv->knode_bus, &sp->klist_drivers);
if (sp->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_del_list;
}
module_add_driver(drv->owner, drv);
error = driver_create_file(drv, &driver_attr_uevent);
if (error) {
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
error = driver_add_groups(drv, sp->bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
__func__, drv->name);
}
if (!drv->suppress_bind_attrs) {
error = add_bind_files(drv);
if (error) {
/* Ditto */
printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
__func__, drv->name);
}
}
return 0;
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
subsys_put(sp);
return error;
}
/**
* bus_remove_driver - delete driver from bus's knowledge.
* @drv: driver.
*
* Detach the driver from the devices it controls, and remove
* it from its bus's list of drivers. Finally, we drop the reference
* to the bus we took in bus_add_driver().
*/
void bus_remove_driver(struct device_driver *drv)
{
struct subsys_private *sp = bus_to_subsys(drv->bus);
if (!sp)
return;
pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name);
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
driver_remove_groups(drv, sp->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
driver_detach(drv);
module_remove_driver(drv);
kobject_put(&drv->p->kobj);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in bus_add_driver()
*/
subsys_put(sp);
subsys_put(sp);
}
/* Helper for bus_rescan_devices's iter */
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data)
{
int ret = 0;
if (!dev->driver) {
if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
ret = device_attach(dev);
if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
}
return ret < 0 ? ret : 0;
}
/**
* bus_rescan_devices - rescan devices on the bus for possible drivers
* @bus: the bus to scan.
*
* This function will look for devices on the bus with no driver
* attached and rescan them against existing drivers to see if they match
* any, by calling device_attach() for the unbound devices.
*/
int bus_rescan_devices(const struct bus_type *bus)
{
return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
}
EXPORT_SYMBOL_GPL(bus_rescan_devices);
/**
* device_reprobe - remove driver for a device and probe for a new driver
* @dev: the device to reprobe
*
* This function detaches the attached driver (if any) for the given
* device and restarts the driver probing process. It is intended
* to be used if probing criteria changed during a device's lifetime and
* driver attachment should change accordingly.
*/
int device_reprobe(struct device *dev)
{
if (dev->driver)
device_driver_detach(dev);
return bus_rescan_devices_helper(dev, NULL);
}
EXPORT_SYMBOL_GPL(device_reprobe);
static void klist_devices_get(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
struct device *dev = dev_prv->device;
get_device(dev);
}
static void klist_devices_put(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
struct device *dev = dev_prv->device;
put_device(dev);
}
static ssize_t bus_uevent_store(const struct bus_type *bus,
const char *buf, size_t count)
{
struct subsys_private *sp = bus_to_subsys(bus);
int ret;
if (!sp)
return -EINVAL;
ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count);
subsys_put(sp);
if (ret)
return ret;
return count;
}
/*
* "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
* here, but cannot use it, as earlier in the file we have
* DRIVER_ATTR_WO(uevent), which would cause a clash with the store
* function name.
*/
static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL,
bus_uevent_store);
/**
* bus_register - register a driver-core subsystem
* @bus: bus to register
*
* Once we have that, we register the bus with the kobject
* infrastructure, then register the children subsystems it has:
* the devices and drivers that belong to the subsystem.
*/
int bus_register(const struct bus_type *bus)
{
int retval;
struct subsys_private *priv;
struct kobject *bus_kobj;
struct lock_class_key *key;
priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = bus;
BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
bus_kobj = &priv->subsys.kobj;
retval = kobject_set_name(bus_kobj, "%s", bus->name);
if (retval)
goto out;
bus_kobj->kset = bus_kset;
bus_kobj->ktype = &bus_ktype;
priv->drivers_autoprobe = 1;
retval = kset_register(&priv->subsys);
if (retval)
goto out;
retval = bus_create_file(bus, &bus_attr_uevent);
if (retval)
goto bus_uevent_fail;
priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj);
if (!priv->devices_kset) {
retval = -ENOMEM;
goto bus_devices_fail;
}
priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj);
if (!priv->drivers_kset) {
retval = -ENOMEM;
goto bus_drivers_fail;
}
INIT_LIST_HEAD(&priv->interfaces);
key = &priv->lock_key;
lockdep_register_key(key);
__mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
retval = add_probe_files(bus);
if (retval)
goto bus_probe_files_fail;
retval = sysfs_create_groups(bus_kobj, bus->bus_groups);
if (retval)
goto bus_groups_fail;
pr_debug("bus: '%s': registered\n", bus->name);
return 0;
bus_groups_fail:
remove_probe_files(bus);
bus_probe_files_fail:
kset_unregister(priv->drivers_kset);
bus_drivers_fail:
kset_unregister(priv->devices_kset);
bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
out:
kfree(priv);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register);
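/*
 * Example usage (an illustrative sketch; the foo bus is hypothetical):
 *
 *	static const struct bus_type foo_bus_type = {
 *		.name = "foo",
 *	};
 *
 *	static int __init foo_bus_init(void)
 *	{
 *		return bus_register(&foo_bus_type);
 *	}
 *
 * On success this creates /sys/bus/foo with empty devices/ and drivers/
 * directories plus the uevent, drivers_probe and drivers_autoprobe files.
 */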
/**
* bus_unregister - remove a bus from the system
* @bus: bus.
*
* Unregister the child subsystems and the bus itself.
* Finally, we call bus_put() to release the refcount
*/
void bus_unregister(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kobject *bus_kobj;
if (!sp)
return;
pr_debug("bus: '%s': unregistering\n", bus->name);
if (sp->dev_root)
device_unregister(sp->dev_root);
bus_kobj = &sp->subsys.kobj;
sysfs_remove_groups(bus_kobj, bus->bus_groups);
remove_probe_files(bus);
bus_remove_file(bus, &bus_attr_uevent);
kset_unregister(sp->drivers_kset);
kset_unregister(sp->devices_kset);
kset_unregister(&sp->subsys);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_unregister);
int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
struct subsys_private *sp = bus_to_subsys(bus);
int retval;
if (!sp)
return -EINVAL;
retval = blocking_notifier_chain_register(&sp->bus_notifier, nb);
subsys_put(sp);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register_notifier);
int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
struct subsys_private *sp = bus_to_subsys(bus);
int retval;
if (!sp)
return -EINVAL;
retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb);
subsys_put(sp);
return retval;
}
EXPORT_SYMBOL_GPL(bus_unregister_notifier);
void bus_notify(struct device *dev, enum bus_notifier_event value)
{
struct subsys_private *sp = bus_to_subsys(dev->bus);
if (!sp)
return;
blocking_notifier_call_chain(&sp->bus_notifier, value, dev);
subsys_put(sp);
}
struct kset *bus_get_kset(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kset *kset;
if (!sp)
return NULL;
kset = &sp->subsys;
subsys_put(sp);
return kset;
}
EXPORT_SYMBOL_GPL(bus_get_kset);
/*
* Yes, this forcibly breaks the klist abstraction temporarily. It
* just wants to sort the klist, not change reference counts and
* take/drop locks rapidly in the process. It does all this while
* holding the lock for the list, so objects can't otherwise be
* added/removed while we're swizzling.
*/
static void device_insertion_sort_klist(struct device *a, struct list_head *list,
int (*compare)(const struct device *a,
const struct device *b))
{
struct klist_node *n;
struct device_private *dev_prv;
struct device *b;
list_for_each_entry(n, list, n_node) {
dev_prv = to_device_private_bus(n);
b = dev_prv->device;
if (compare(a, b) <= 0) {
list_move_tail(&a->p->knode_bus.n_node,
&b->p->knode_bus.n_node);
return;
}
}
list_move_tail(&a->p->knode_bus.n_node, list);
}
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b))
{
struct subsys_private *sp = bus_to_subsys(bus);
LIST_HEAD(sorted_devices);
struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
if (!sp)
return;
device_klist = &sp->klist_devices;
spin_lock(&device_klist->k_lock);
list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
device_insertion_sort_klist(dev, &sorted_devices, compare);
}
list_splice(&sorted_devices, &device_klist->k_list);
spin_unlock(&device_klist->k_lock);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
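/*
 * Example usage (an illustrative sketch; the compare callback and bus are
 * hypothetical, though PCI uses this interface in a similar way to order
 * its devices breadth-first):
 *
 *	static int foo_cmp(const struct device *a, const struct device *b)
 *	{
 *		return strcmp(dev_name(a), dev_name(b));
 *	}
 *
 *	bus_sort_breadthfirst(&foo_bus_type, foo_cmp);
 *
 * The compare callback returns <= 0 when @a should sort before @b.
 */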
struct subsys_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
/**
* subsys_dev_iter_init - initialize subsys device iterator
* @iter: subsys iterator to initialize
* @sp: the subsys private (i.e. bus) we want to iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
* Initialize subsys iterator @iter such that it iterates over devices
* of @sp. If @start is set, the list iteration will start there,
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp,
struct device *start, const struct device_type *type)
{
struct klist_node *start_knode = NULL;
if (start)
start_knode = &start->p->knode_bus;
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
/**
* subsys_dev_iter_next - iterate to the next device
* @iter: subsys iterator to proceed
*
* Proceed @iter to the next device and return it. Returns NULL if
* iteration is complete.
*
* The returned device is referenced and won't be released until the
* iterator proceeds to the next device or is exited. The caller is
* free to do whatever it wants to do with the device including
* calling back into subsys code.
*/
static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
for (;;) {
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
dev = to_device_private_bus(knode)->device;
if (!iter->type || iter->type == dev->type)
return dev;
}
}
/**
* subsys_dev_iter_exit - finish iteration
* @iter: subsys iterator to finish
*
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
static void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
}
int subsys_interface_register(struct subsys_interface *sif)
{
struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return -ENODEV;
sp = bus_to_subsys(sif->subsys);
if (!sp)
return -EINVAL;
/*
* Reference in sp is now incremented and will be dropped when
* the interface is removed from the bus
*/
mutex_lock(&sp->mutex);
list_add_tail(&sif->node, &sp->interfaces);
if (sif->add_dev) {
subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->add_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(subsys_interface_register);
void subsys_interface_unregister(struct subsys_interface *sif)
{
struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return;
sp = bus_to_subsys(sif->subsys);
if (!sp)
return;
mutex_lock(&sp->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->remove_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
mutex_unlock(&sp->mutex);
/*
* Decrement the reference count twice, once for the bus_to_subsys()
* call in the start of this function, and the second one from the
* reference increment in subsys_interface_register()
*/
subsys_put(sp);
subsys_put(sp);
}
EXPORT_SYMBOL_GPL(subsys_interface_unregister);
static void system_root_device_release(struct device *dev)
{
kfree(dev);
}
static int subsys_register(struct bus_type *subsys,
const struct attribute_group **groups,
struct kobject *parent_of_root)
{
struct subsys_private *sp;
struct device *dev;
int err;
err = bus_register(subsys);
if (err < 0)
return err;
sp = bus_to_subsys(subsys);
if (!sp) {
err = -EINVAL;
goto err_sp;
}
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!dev) {
err = -ENOMEM;
goto err_dev;
}
err = dev_set_name(dev, "%s", subsys->name);
if (err < 0)
goto err_name;
dev->kobj.parent = parent_of_root;
dev->groups = groups;
dev->release = system_root_device_release;
err = device_register(dev);
if (err < 0)
goto err_dev_reg;
sp->dev_root = dev;
subsys_put(sp);
return 0;
err_dev_reg:
put_device(dev);
dev = NULL;
err_name:
kfree(dev);
err_dev:
subsys_put(sp);
err_sp:
bus_unregister(subsys);
return err;
}
/**
* subsys_system_register - register a subsystem at /sys/devices/system/
* @subsys: system subsystem
* @groups: default attributes for the root device
*
* All 'system' subsystems have a /sys/devices/system/<name> root device
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
* number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
* with bad ideas only. New subsystems should use plain subsystems, and
* subsystem-wide attributes should be added to the subsystem directory
* itself rather than to some fake root device placed in
* /sys/devices/system/<name>.
*/
int subsys_system_register(struct bus_type *subsys,
const struct attribute_group **groups)
{
return subsys_register(subsys, groups, &system_kset->kobj);
}
EXPORT_SYMBOL_GPL(subsys_system_register);
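/*
 * Example usage (an illustrative sketch; the foo subsystem is hypothetical,
 * the pattern mirrors existing users such as the cpu subsystem):
 *
 *	static struct bus_type foo_subsys = {
 *		.name = "foo",
 *		.dev_name = "foo",
 *	};
 *
 *	static int __init foo_subsys_init(void)
 *	{
 *		return subsys_system_register(&foo_subsys, NULL);
 *	}
 *
 * Devices registered on this bus then show up under
 * /sys/devices/system/foo/, named fooN after their ->id.
 */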
/**
* subsys_virtual_register - register a subsystem at /sys/devices/virtual/
* @subsys: virtual subsystem
* @groups: default attributes for the root device
*
* All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
* with the name of the subsystem. The root device can carry subsystem-wide
* attributes. All registered devices are below this single root device.
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
*/
int subsys_virtual_register(struct bus_type *subsys,
const struct attribute_group **groups)
{
struct kobject *virtual_dir;
virtual_dir = virtual_device_parent(NULL);
if (!virtual_dir)
return -ENOMEM;
return subsys_register(subsys, groups, virtual_dir);
}
EXPORT_SYMBOL_GPL(subsys_virtual_register);
/**
* driver_find - locate driver on a bus by its name.
* @name: name of the driver.
* @bus: bus to scan for the driver.
*
* Call kset_find_obj() to iterate over list of drivers on
* a bus to find driver by name. Return driver if found.
*
* This routine provides no locking to prevent the driver it returns
* from being unregistered or unloaded while the caller is using it.
* The caller is responsible for preventing this.
*/
struct device_driver *driver_find(const char *name, const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct kobject *k;
struct driver_private *priv;
if (!sp)
return NULL;
k = kset_find_obj(sp->drivers_kset, name);
subsys_put(sp);
if (!k)
return NULL;
priv = to_driver(k);
/* Drop reference added by kset_find_obj() */
kobject_put(k);
return priv->driver;
}
EXPORT_SYMBOL_GPL(driver_find);
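/*
 * Example usage (an illustrative sketch; the driver name is hypothetical):
 *
 *	struct device_driver *drv;
 *
 *	drv = driver_find("foo-driver", &platform_bus_type);
 *	if (drv)
 *		pr_info("driver %s is registered\n", drv->name);
 *
 * Note that, as documented above, the caller must ensure the driver cannot
 * be unregistered while the returned pointer is in use.
 */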
/*
* Warning: the returned value could become stale instantly after calling this function
* (the bus could be unregistered at any time), so be very careful when calling it...
*/
bool bus_is_registered(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
bool is_initialized = false;
if (sp) {
is_initialized = true;
subsys_put(sp);
}
return is_initialized;
}
/**
* bus_get_dev_root - return a pointer to the "device root" of a bus
* @bus: bus to return the device root of.
*
* If a bus has a "device root" structure, return it, WITH THE REFERENCE
* COUNT INCREMENTED.
*
* Note, when finished with the device, a call to put_device() is required.
*
* If the device root is not present (or bus is not a valid pointer), NULL
* will be returned.
*/
struct device *bus_get_dev_root(const struct bus_type *bus)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct device *dev_root;
if (!sp)
return NULL;
dev_root = get_device(sp->dev_root);
subsys_put(sp);
return dev_root;
}
EXPORT_SYMBOL_GPL(bus_get_dev_root);
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
if (!bus_kset)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
if (!system_kset)
return -ENOMEM;
return 0;
}
| linux-master | drivers/base/bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* transport_class.c - implementation of generic transport classes
* using attribute_containers
*
* Copyright (c) 2005 - James Bottomley <[email protected]>
*
* The basic idea here is to allow any "device controller" (which
* would most often be a Host Bus Adapter) to use the services of one
* or more transport classes for performing transport specific
* services. Transport specific services are things that the generic
* command layer doesn't want to know about (speed settings, line
* conditioning, etc), but which the user might be interested in.
* Thus, the HBA's use the routines exported by the transport classes
* to perform these functions. The transport classes export certain
* values to the user via sysfs using attribute containers.
*
* Note: because not every HBA will care about every transport
* attribute, there's a many-to-one relationship that goes like this:
*
* transport class<-----attribute container<----class device
*
* Usually the attribute container is per-HBA, but the design doesn't
* mandate that. Although most of the services will be specific to
* the actual external storage connection used by the HBA, the generic
* transport class is framed entirely in terms of generic devices to
* allow it to be used by any physical HBA in the system.
*/
#include <linux/export.h>
#include <linux/attribute_container.h>
#include <linux/transport_class.h>
static int transport_remove_classdev(struct attribute_container *cont,
struct device *dev,
struct device *classdev);
/**
* transport_class_register - register an initial transport class
*
* @tclass: a pointer to the transport class structure to be initialised
*
* The transport class contains an embedded class which is used to
* identify it. The caller should initialise this structure with
* zeros and then generic class must have been initialised with the
* actual transport class unique name. There's a macro
* DECLARE_TRANSPORT_CLASS() to do this (declared classes still must
* be registered).
*
* Returns 0 on success or error on failure.
*/
int transport_class_register(struct transport_class *tclass)
{
return class_register(&tclass->class);
}
EXPORT_SYMBOL_GPL(transport_class_register);
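/*
 * Example usage (an illustrative sketch; the foo names are hypothetical):
 *
 *	static DECLARE_TRANSPORT_CLASS(foo_transport_class, "foo_transport",
 *				       foo_setup, foo_remove, foo_configure);
 *
 *	static int __init foo_transport_init(void)
 *	{
 *		return transport_class_register(&foo_transport_class);
 *	}
 */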
/**
* transport_class_unregister - unregister a previously registered class
*
* @tclass: The transport class to unregister
*
* Must be called prior to deallocating the memory for the transport
* class.
*/
void transport_class_unregister(struct transport_class *tclass)
{
class_unregister(&tclass->class);
}
EXPORT_SYMBOL_GPL(transport_class_unregister);
static int anon_transport_dummy_function(struct transport_container *tc,
struct device *dev,
struct device *cdev)
{
/* do nothing */
return 0;
}
/**
* anon_transport_class_register - register an anonymous class
*
* @atc: The anon transport class to register
*
* The anonymous transport class contains both a transport class and a
* container. The idea of an anonymous class is that it never
* actually has any device attributes associated with it (and thus
* saves on container storage). So it can only be used for triggering
* events. Zero the structure first, then use DECLARE_ANON_TRANSPORT_CLASS()
* to initialise the anon transport class storage.
*/
int anon_transport_class_register(struct anon_transport_class *atc)
{
int error;
atc->container.class = &atc->tclass.class;
attribute_container_set_no_classdevs(&atc->container);
error = attribute_container_register(&atc->container);
if (error)
return error;
atc->tclass.setup = anon_transport_dummy_function;
atc->tclass.remove = anon_transport_dummy_function;
return 0;
}
EXPORT_SYMBOL_GPL(anon_transport_class_register);
/**
* anon_transport_class_unregister - unregister an anon class
*
* @atc: Pointer to the anon transport class to unregister
*
* Must be called prior to deallocating the memory for the anon
* transport class.
*/
void anon_transport_class_unregister(struct anon_transport_class *atc)
{
if (unlikely(attribute_container_unregister(&atc->container)))
BUG();
}
EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
static int transport_setup_classdev(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
struct transport_container *tcont = attribute_container_to_transport_container(cont);
if (tclass->setup)
tclass->setup(tcont, dev, classdev);
return 0;
}
/**
* transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
* @dev: the generic device representing the entity being added
*
* Usually, dev represents some component in the HBA system (either
* the HBA itself or a device remote across the HBA bus). This
* routine is simply a trigger point to see if any set of transport
* classes wishes to associate with the added device. This allocates
* storage for the class device and initialises it, but does not yet
* add it to the system or add attributes to it (you do this with
* transport_add_device). If you have no need for a separate setup
* and add operations, use transport_register_device (see
* transport_class.h).
*/
void transport_setup_device(struct device *dev)
{
attribute_container_add_device(dev, transport_setup_classdev);
}
EXPORT_SYMBOL_GPL(transport_setup_device);
static int transport_add_class_device(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
int error = attribute_container_add_class_device(classdev);
struct transport_container *tcont =
attribute_container_to_transport_container(cont);
if (error)
goto err_remove;
if (tcont->statistics) {
error = sysfs_create_group(&classdev->kobj, tcont->statistics);
if (error)
goto err_del;
}
return 0;
err_del:
attribute_container_class_device_del(classdev);
err_remove:
if (tclass->remove)
tclass->remove(tcont, dev, classdev);
return error;
}
/**
* transport_add_device - declare a new dev for transport class association
*
* @dev: the generic device representing the entity being added
*
* Usually, dev represents some component in the HBA system (either
* the HBA itself or a device remote across the HBA bus). This
* routine is simply a trigger point used to add the device to the
* system and register attributes for it.
*/
int transport_add_device(struct device *dev)
{
return attribute_container_device_trigger_safe(dev,
transport_add_class_device,
transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_add_device);
static int transport_configure(struct attribute_container *cont,
struct device *dev,
struct device *cdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
struct transport_container *tcont = attribute_container_to_transport_container(cont);
if (tclass->configure)
tclass->configure(tcont, dev, cdev);
return 0;
}
/**
* transport_configure_device - configure an already set up device
*
* @dev: generic device representing device to be configured
*
* The idea of configure is simply to provide a point within the setup
* process to allow the transport class to extract information from a
* device after it has been set up. This is used in SCSI because we
* have to have a set-up device to begin using the HBA, but after we
* send the initial inquiry, we use configure to extract the device
* parameters. The device need not have been added to be configured.
*/
void transport_configure_device(struct device *dev)
{
attribute_container_device_trigger(dev, transport_configure);
}
EXPORT_SYMBOL_GPL(transport_configure_device);
static int transport_remove_classdev(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
struct transport_container *tcont =
attribute_container_to_transport_container(cont);
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->remove)
tclass->remove(tcont, dev, classdev);
if (tclass->remove != anon_transport_dummy_function) {
if (tcont->statistics)
sysfs_remove_group(&classdev->kobj, tcont->statistics);
attribute_container_class_device_del(classdev);
}
return 0;
}
/**
* transport_remove_device - remove the visibility of a device
*
* @dev: generic device to remove
*
* This call removes the visibility of the device (to the user from
* sysfs), but does not destroy it. To eliminate a device entirely
* you must also call transport_destroy_device. If you don't need to
* do remove and destroy as separate operations, use
* transport_unregister_device() (see transport_class.h) which will
* perform both calls for you.
*/
void transport_remove_device(struct device *dev)
{
attribute_container_device_trigger(dev, transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_remove_device);
static void transport_destroy_classdev(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->remove != anon_transport_dummy_function)
put_device(classdev);
}
/**
* transport_destroy_device - destroy a removed device
*
* @dev: device to eliminate from the transport class.
*
* This call triggers the elimination of storage associated with the
* transport classdev. Note: all it really does is relinquish a
* reference to the classdev. The memory will not be freed until the
* last reference goes to zero. Note also that the classdev retains a
* reference count on dev, so dev too will remain for as long as the
* transport class device remains around.
*/
void transport_destroy_device(struct device *dev)
{
attribute_container_remove_device(dev, transport_destroy_classdev);
}
EXPORT_SYMBOL_GPL(transport_destroy_device);
| linux-master | drivers/base/transport_class.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/base/map.c
*
* (C) Copyright Al Viro 2002,2003
*
* NOTE: data structure needs to be changed. It works, but for large dev_t
* it will be too slow. It is isolated, though, so these changes will be
* local to that file.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kdev_t.h>
#include <linux/kobject.h>
#include <linux/kobj_map.h>
struct kobj_map {
struct probe {
struct probe *next;
dev_t dev;
unsigned long range;
struct module *owner;
kobj_probe_t *get;
int (*lock)(dev_t, void *);
void *data;
} *probes[255];
struct mutex *lock;
};
int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
struct module *module, kobj_probe_t *probe,
int (*lock)(dev_t, void *), void *data)
{
unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
unsigned int index = MAJOR(dev);
unsigned int i;
struct probe *p;
if (n > 255)
n = 255;
p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL);
if (p == NULL)
return -ENOMEM;
for (i = 0; i < n; i++, p++) {
p->owner = module;
p->get = probe;
p->lock = lock;
p->dev = dev;
p->range = range;
p->data = data;
}
mutex_lock(domain->lock);
for (i = 0, p -= n; i < n; i++, p++, index++) {
struct probe **s = &domain->probes[index % 255];
while (*s && (*s)->range < range)
s = &(*s)->next;
p->next = *s;
*s = p;
}
mutex_unlock(domain->lock);
return 0;
}
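/*
 * Example usage (an illustrative sketch mirroring how the char device core
 * in fs/char_dev.c claims a range; cdev_map, exact_match() and exact_lock()
 * are that caller's domain, probe and lock callbacks):
 *
 *	kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
 *
 * This claims @count minors starting at @dev, so that a later kobj_lookup()
 * in that range resolves through exact_match().
 */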
void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range)
{
unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
unsigned int index = MAJOR(dev);
unsigned int i;
struct probe *found = NULL;
if (n > 255)
n = 255;
mutex_lock(domain->lock);
for (i = 0; i < n; i++, index++) {
struct probe **s;
for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) {
struct probe *p = *s;
if (p->dev == dev && p->range == range) {
*s = p->next;
if (!found)
found = p;
break;
}
}
}
mutex_unlock(domain->lock);
kfree(found);
}
struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index)
{
struct kobject *kobj;
struct probe *p;
unsigned long best = ~0UL;
retry:
mutex_lock(domain->lock);
for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) {
struct kobject *(*probe)(dev_t, int *, void *);
struct module *owner;
void *data;
if (p->dev > dev || p->dev + p->range - 1 < dev)
continue;
if (p->range - 1 >= best)
break;
if (!try_module_get(p->owner))
continue;
owner = p->owner;
data = p->data;
probe = p->get;
best = p->range - 1;
*index = dev - p->dev;
if (p->lock && p->lock(dev, data) < 0) {
module_put(owner);
continue;
}
mutex_unlock(domain->lock);
kobj = probe(dev, index, data);
/* Currently ->owner protects _only_ ->probe() itself. */
module_put(owner);
if (kobj)
return kobj;
goto retry;
}
mutex_unlock(domain->lock);
return NULL;
}
struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock)
{
struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
int i;
if ((p == NULL) || (base == NULL)) {
kfree(p);
kfree(base);
return NULL;
}
base->dev = 1;
base->range = ~0;
base->get = base_probe;
for (i = 0; i < 255; i++)
p->probes[i] = base;
p->lock = lock;
return p;
}
| linux-master | drivers/base/map.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Author: Johannes Berg <[email protected]>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
static struct class devcd_class;
/* global disable flag, for security purposes */
static bool devcd_disabled;
/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT (HZ * 60 * 5)
struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
/*
* Here, mutex is required to serialize the calls to del_wk work between
* user/kernel space which happens when devcd is added with device_add()
* and that sends uevent to user space. User space reads the uevents,
* and calls to devcd_data_write() which try to modify the work which is
* not even initialized/queued from devcoredump.
*
*
*
* cpu0(X) cpu1(Y)
*
* dev_coredump() uevent sent to user space
* device_add() ======================> user space process Y reads the
* uevents writes to devcd fd
* which results into writes to
*
* devcd_data_write()
* mod_delayed_work()
* try_to_grab_pending()
* del_timer()
* debug_assert_init()
* INIT_DELAYED_WORK()
* schedule_delayed_work()
*
*
* Also, the mutex alone would not be enough to avoid scheduling of
* del_wk work after it gets flushed by a call to devcd_free(),
* as shown below.
*
* disabled_store()
* devcd_free()
* mutex_lock() devcd_data_write()
* flush_delayed_work()
* mutex_unlock()
* mutex_lock()
* mod_delayed_work()
* mutex_unlock()
* So, delete_work flag is required.
*/
struct mutex mutex;
bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
struct delayed_work del_wk;
struct device *failing_dev;
};
static struct devcd_entry *dev_to_devcd(struct device *dev)
{
return container_of(dev, struct devcd_entry, devcd_dev);
}
static void devcd_dev_release(struct device *dev)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
devcd->free(devcd->data);
module_put(devcd->owner);
/*
* this seems racy, but I don't see a notifier or such on
* a struct device to know when it goes away?
*/
if (devcd->failing_dev->kobj.sd)
sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
"devcoredump");
put_device(devcd->failing_dev);
kfree(devcd);
}
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
device_del(&devcd->devcd_dev);
put_device(&devcd->devcd_dev);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work) {
devcd->delete_work = true;
mod_delayed_work(system_wq, &devcd->del_wk, 0);
}
mutex_unlock(&devcd->mutex);
return count;
}
static struct bin_attribute devcd_attr_data = {
.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
.size = 0,
.read = devcd_data_read,
.write = devcd_data_write,
};
static struct bin_attribute *devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
static const struct attribute_group devcd_dev_group = {
.bin_attrs = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
&devcd_dev_group, NULL,
};
static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work)
devcd->delete_work = true;
flush_delayed_work(&devcd->del_wk);
mutex_unlock(&devcd->mutex);
return 0;
}
static ssize_t disabled_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", devcd_disabled);
}
/*
*
* disabled_store() worker()
* class_for_each_device(&devcd_class,
* NULL, NULL, devcd_free)
* ...
* ...
* while ((dev = class_dev_iter_next(&iter))
* devcd_del()
* device_del()
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
* mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() races with a concurrently
* running devcd_del() and could cause a memory abort while acquiring devcd->mutex,
* since that happens after the devcd memory is freed with kfree() once its last
* reference is dropped with put_device(). However, this does not happen, as
* fn(dev, data) runs with its own reference to the device via the klist_node,
* so that put_device() does not drop the last reference and the situation
* above cannot occur.
*/
static ssize_t disabled_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
long tmp = simple_strtol(buf, NULL, 10);
/*
* This essentially makes the attribute write-once, since you can't
* go back to not having it disabled. This is intentional, it serves
* as a system lockdown feature.
*/
if (tmp != 1)
return -EINVAL;
devcd_disabled = true;
class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
return count;
}
static CLASS_ATTR_RW(disabled);
static struct attribute *devcd_class_attrs[] = {
&class_attr_disabled.attr,
NULL,
};
ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
.class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen)
{
return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}
static void devcd_freev(void *data)
{
vfree(data);
}
/**
* dev_coredumpv - create device coredump with vmalloc data
* @dev: the struct device for the crashed device
* @data: vmalloc data containing the device coredump
* @datalen: length of the data
* @gfp: allocation flags
*
* This function takes ownership of the vmalloc'ed data and will free
* it when it is no longer used. See dev_coredumpm() for more information.
*/
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp)
{
dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
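/*
 * Illustrative caller-side sketch (not part of this file; "my_dev",
 * "crash_buf" and "dump_len" are hypothetical): a driver hands off a
 * vmalloc'ed snapshot and must not touch it afterwards, since ownership
 * passes to the devcoredump framework.
 *
 *	void *dump = vmalloc(dump_len);
 *
 *	if (dump) {
 *		memcpy(dump, crash_buf, dump_len);
 *		dev_coredumpv(my_dev, dump, dump_len, GFP_KERNEL);
 *	}
 */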
static int devcd_match_failing(struct device *dev, const void *failing)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
return devcd->failing_dev == failing;
}
/**
* devcd_free_sgtable - free all the memory of the given scatterlist table
* (i.e. both pages and scatterlist instances)
 * NOTE: if two tables are allocated with devcd_alloc_sgtable and then chained
 * using the sg_chain function, this function should be called only once
 * on the chained table
* @data: pointer to sg_table to free
*/
static void devcd_free_sgtable(void *data)
{
_devcd_free_sgtable(data);
}
/**
* devcd_read_from_sgtable - copy data from sg_table to a given buffer
* and return the number of bytes read
 * @buffer: the buffer to copy the data to
* @buf_len: the length of the buffer
* @data: the scatterlist table to copy from
 * @offset: start copying @offset bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
size_t data_len)
{
struct scatterlist *table = data;
if (offset > data_len)
return -EINVAL;
if (offset + buf_len > data_len)
buf_len = data_len - offset;
return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
offset);
}
/**
* dev_coredumpm - create device coredump with read/free methods
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
* @data: data cookie for the @read/@free functions
* @datalen: length of the data
* @gfp: allocation flags
* @read: function to read from the given buffer
* @free: function to free the given buffer
*
* Creates a new device coredump for the given device. If a previous one hasn't
* been read yet, the new coredump is discarded. The data lifetime is determined
* by the device coredump framework and when it is no longer needed the @free
* function will be called to free the data.
*/
void dev_coredumpm(struct device *dev, struct module *owner,
void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen),
void (*free)(void *data))
{
static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
struct device *existing;
if (devcd_disabled)
goto free;
existing = class_find_device(&devcd_class, NULL, dev,
devcd_match_failing);
if (existing) {
put_device(existing);
goto free;
}
if (!try_module_get(owner))
goto free;
devcd = kzalloc(sizeof(*devcd), gfp);
if (!devcd)
goto put_module;
devcd->owner = owner;
devcd->data = data;
devcd->datalen = datalen;
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
devcd->delete_work = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
if (device_add(&devcd->devcd_dev))
goto put_device;
/*
* These should normally not fail, but there is no problem
* continuing without the links, so just warn instead of
* failing.
*/
if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
"failing_device") ||
sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
"devcoredump"))
dev_warn(dev, "devcoredump create_link failed\n");
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:
free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
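/*
 * Hedged sketch of custom @read/@free callbacks for a plain kmalloc'ed
 * cookie ("my_read" and "my_free" are hypothetical driver-side helpers,
 * shown only to illustrate the expected signatures):
 *
 *	static ssize_t my_read(char *buffer, loff_t offset, size_t count,
 *			       void *data, size_t datalen)
 *	{
 *		return memory_read_from_buffer(buffer, count, &offset,
 *					       data, datalen);
 *	}
 *
 *	static void my_free(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	dev_coredumpm(dev, THIS_MODULE, buf, len, GFP_KERNEL,
 *		      my_read, my_free);
 */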
/**
* dev_coredumpsg - create device coredump that uses scatterlist as data
* parameter
* @dev: the struct device for the crashed device
* @table: the dump data
* @datalen: length of the data
* @gfp: allocation flags
*
* Creates a new device coredump for the given device. If a previous one hasn't
* been read yet, the new coredump is discarded. The data lifetime is determined
* by the device coredump framework and when it is no longer needed
* it will free the data.
*/
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp)
{
dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);
static int __init devcoredump_init(void)
{
return class_register(&devcd_class);
}
__initcall(devcoredump_init);
static void __exit devcoredump_exit(void)
{
class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);
| linux-master | drivers/base/devcoredump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/devres.c - device resource management
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <asm/sections.h>
#include "base.h"
#include "trace.h"
struct devres_node {
struct list_head entry;
dr_release_t release;
const char *name;
size_t size;
};
struct devres {
struct devres_node node;
/*
* Some archs want to perform DMA into kmalloc caches
* and need a guaranteed alignment larger than
* the alignment of a 64-bit integer.
* Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
* alignment for struct devres when allocated by kmalloc().
*/
u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
struct devres_group {
struct devres_node node[2];
void *id;
int color;
/* -- 8 pointers */
};
static void set_node_dbginfo(struct devres_node *node, const char *name,
size_t size)
{
node->name = name;
node->size = size;
}
#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
static void devres_dbg(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
static void devres_log(struct device *dev, struct devres_node *node,
const char *op)
{
trace_devres_log(dev, op, node, node->name, node->size);
devres_dbg(dev, node, op);
}
/*
* Release functions for devres group. These callbacks are used only
* for identification.
*/
static void group_open_release(struct device *dev, void *res)
{
/* noop */
}
static void group_close_release(struct device *dev, void *res)
{
/* noop */
}
static struct devres_group * node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
if (node->release == &group_close_release)
return container_of(node, struct devres_group, node[1]);
return NULL;
}
static bool check_dr_size(size_t size, size_t *tot_size)
{
/* We must catch any near-SIZE_MAX cases that could overflow. */
if (unlikely(check_add_overflow(sizeof(struct devres),
size, tot_size)))
return false;
/* Actually allocate the full kmalloc bucket size. */
*tot_size = kmalloc_size_roundup(*tot_size);
return true;
}
static __always_inline struct devres * alloc_dr(dr_release_t release,
size_t size, gfp_t gfp, int nid)
{
size_t tot_size;
struct devres *dr;
if (!check_dr_size(size, &tot_size))
return NULL;
dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
/* No need to clear memory twice */
if (!(gfp & __GFP_ZERO))
memset(dr, 0, offsetof(struct devres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
}
static void add_dr(struct device *dev, struct devres_node *node)
{
devres_log(dev, node, "ADD");
BUG_ON(!list_empty(&node->entry));
list_add_tail(&node->entry, &dev->devres_head);
}
static void replace_dr(struct device *dev,
struct devres_node *old, struct devres_node *new)
{
devres_log(dev, old, "REPLACE");
BUG_ON(!list_empty(&new->entry));
list_replace(&old->entry, &new->entry);
}
/**
* __devres_alloc_node - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
* @nid: NUMA node
* @name: Name of the resource
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
* other devres_*() functions.
*
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
const char *name)
{
struct devres *dr;
dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
/**
* devres_for_each_res - Resource iterator
* @dev: Device to iterate resource from
* @release: Look for resources associated with this release function
* @match: Match function (optional)
* @match_data: Data for the match function
* @fn: Function to be called for each matched resource.
* @data: Data for @fn, the 3rd parameter of @fn
*
* Call @fn for each devres of @dev which is associated with @release
* and for which @match returns 1.
*
* RETURNS:
* void
*/
void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
void (*fn)(struct device *, void *, void *),
void *data)
{
struct devres_node *node;
struct devres_node *tmp;
unsigned long flags;
if (!fn)
return;
spin_lock_irqsave(&dev->devres_lock, flags);
list_for_each_entry_safe_reverse(node, tmp,
&dev->devres_head, entry) {
struct devres *dr = container_of(node, struct devres, node);
if (node->release != release)
continue;
if (match && !match(dev, dr->data, match_data))
continue;
fn(dev, dr->data, data);
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
/**
* devres_free - Free device resource data
* @res: Pointer to devres data to free
*
* Free devres created with devres_alloc().
*/
void devres_free(void *res)
{
if (res) {
struct devres *dr = container_of(res, struct devres, data);
BUG_ON(!list_empty(&dr->node.entry));
kfree(dr);
}
}
EXPORT_SYMBOL_GPL(devres_free);
/**
* devres_add - Register device resource
* @dev: Device to add resource to
* @res: Resource to register
*
* Register devres @res to @dev. @res should have been allocated
* using devres_alloc(). On driver detach, the associated release
* function will be invoked and devres will be freed automatically.
*/
void devres_add(struct device *dev, void *res)
{
struct devres *dr = container_of(res, struct devres, data);
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &dr->node);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
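/*
 * The canonical alloc/add pattern, as a sketch under hypothetical names
 * ("foo_release", "base" and "size" are illustrative, not defined here):
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		iounmap(*(void __iomem **)res);
 *	}
 *
 *	void __iomem **ptr;
 *
 *	ptr = devres_alloc(foo_release, sizeof(*ptr), GFP_KERNEL);
 *	if (!ptr)
 *		return -ENOMEM;
 *	*ptr = ioremap(base, size);
 *	devres_add(dev, ptr);
 *
 * foo_release() then runs automatically on driver detach.
 */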
static struct devres *find_dr(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
struct devres_node *node;
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
struct devres *dr = container_of(node, struct devres, node);
if (node->release != release)
continue;
if (match && !match(dev, dr->data, match_data))
continue;
return dr;
}
return NULL;
}
/**
* devres_find - Find device resource
* @dev: Device to lookup resource from
* @release: Look for resources associated with this release function
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev which is associated with @release
* and for which @match returns 1. If @match is NULL, it's considered
* to match all.
*
* RETURNS:
* Pointer to found devres, NULL if not found.
*/
void * devres_find(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
dr = find_dr(dev, release, match, match_data);
spin_unlock_irqrestore(&dev->devres_lock, flags);
if (dr)
return dr->data;
return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);
/**
* devres_get - Find devres, if non-existent, add one atomically
* @dev: Device to lookup or add devres for
* @new_res: Pointer to new initialized devres to add if not found
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
* freed; otherwise, @new_res is added atomically.
*
* RETURNS:
* Pointer to found or added devres.
*/
void * devres_get(struct device *dev, void *new_res,
dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
dr = find_dr(dev, new_dr->node.release, match, match_data);
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
new_res = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
devres_free(new_res);
return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
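/*
 * Sketch of the find-or-add idiom ("my_release", "my_match" and "key" are
 * hypothetical): allocate a candidate first, then let devres_get() either
 * adopt it or free it when an equivalent resource already exists.
 *
 *	new = devres_alloc(my_release, sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	cur = devres_get(dev, new, my_match, key);
 *
 * "cur" now points at either the pre-existing resource or the freshly
 * added one; in the former case "new" has already been freed.
 */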
/**
* devres_remove - Find a device resource and remove it
* @dev: Device to find resource from
* @release: Look for resources associated with this release function
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev associated with @release and for
* which @match returns 1. If @match is NULL, it's considered to
* match all. If found, the resource is removed atomically and
* returned.
*
* RETURNS:
* Pointer to removed devres on success, NULL if not found.
*/
void * devres_remove(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
dr = find_dr(dev, release, match, match_data);
if (dr) {
list_del_init(&dr->node.entry);
devres_log(dev, &dr->node, "REM");
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
if (dr)
return dr->data;
return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);
/**
* devres_destroy - Find a device resource and destroy it
* @dev: Device to find resource from
* @release: Look for resources associated with this release function
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev associated with @release and for
* which @match returns 1. If @match is NULL, it's considered to
* match all. If found, the resource is removed atomically and freed.
*
* Note that the release function for the resource will not be called,
* only the devres-allocated data will be freed. The caller becomes
* responsible for freeing any other data.
*
* RETURNS:
* 0 if devres is found and freed, -ENOENT if not found.
*/
int devres_destroy(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
void *res;
res = devres_remove(dev, release, match, match_data);
if (unlikely(!res))
return -ENOENT;
devres_free(res);
return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);
/**
* devres_release - Find a device resource and destroy it, calling release
* @dev: Device to find resource from
* @release: Look for resources associated with this release function
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev associated with @release and for
* which @match returns 1. If @match is NULL, it's considered to
* match all. If found, the resource is removed atomically, the
* release function called and the resource freed.
*
* RETURNS:
* 0 if devres is found and freed, -ENOENT if not found.
*/
int devres_release(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
void *res;
res = devres_remove(dev, release, match, match_data);
if (unlikely(!res))
return -ENOENT;
(*release)(dev, res);
devres_free(res);
return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
static int remove_nodes(struct device *dev,
struct list_head *first, struct list_head *end,
struct list_head *todo)
{
struct devres_node *node, *n;
int cnt = 0, nr_groups = 0;
/* First pass - move normal devres entries to @todo and clear
* devres_group colors.
*/
node = list_entry(first, struct devres_node, entry);
list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
grp = node_to_group(node);
if (grp) {
/* clear color of group markers in the first pass */
grp->color = 0;
nr_groups++;
} else {
/* regular devres entry */
if (&node->entry == first)
first = first->next;
list_move_tail(&node->entry, todo);
cnt++;
}
}
if (!nr_groups)
return cnt;
/* Second pass - Scan groups and color them. A group gets
* color value of two iff the group is wholly contained in
* [current node, end). That is, for a closed group, both opening
* and closing markers should be in the range, while just the
* opening marker is enough for an open group.
*/
node = list_entry(first, struct devres_node, entry);
list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
grp = node_to_group(node);
BUG_ON(!grp || list_empty(&grp->node[0].entry));
grp->color++;
if (list_empty(&grp->node[1].entry))
grp->color++;
BUG_ON(grp->color <= 0 || grp->color > 2);
if (grp->color == 2) {
/* No need to update current node or end. The removed
* nodes are always before both.
*/
list_move_tail(&grp->node[0].entry, todo);
list_del_init(&grp->node[1].entry);
}
}
return cnt;
}
static void release_nodes(struct device *dev, struct list_head *todo)
{
struct devres *dr, *tmp;
/* Release. Note that both devres and devres_group are
* handled as devres in the following loop. This is safe.
*/
list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
devres_log(dev, &dr->node, "REL");
dr->node.release(dev, dr->data);
kfree(dr);
}
}
/**
* devres_release_all - Release all managed resources
* @dev: Device to release resources for
*
* Release all resources associated with @dev. This function is
* called on driver detach.
*/
int devres_release_all(struct device *dev)
{
unsigned long flags;
LIST_HEAD(todo);
int cnt;
/* Looks like an uninitialized device structure */
if (WARN_ON(dev->devres_head.next == NULL))
return -ENODEV;
/* Nothing to release if list is empty */
if (list_empty(&dev->devres_head))
return 0;
spin_lock_irqsave(&dev->devres_lock, flags);
cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
return cnt;
}
/**
* devres_open_group - Open a new devres group
* @dev: Device to open devres group for
* @id: Separator ID
* @gfp: Allocation flags
*
* Open a new devres group for @dev with @id. For @id, using a
* pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, an address-wise unique ID is created.
*
* RETURNS:
* ID of the new group, NULL on failure.
*/
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
grp = kmalloc(sizeof(*grp), gfp);
if (unlikely(!grp))
return NULL;
grp->node[0].release = &group_open_release;
grp->node[1].release = &group_close_release;
INIT_LIST_HEAD(&grp->node[0].entry);
INIT_LIST_HEAD(&grp->node[1].entry);
set_node_dbginfo(&grp->node[0], "grp<", 0);
set_node_dbginfo(&grp->node[1], "grp>", 0);
grp->id = grp;
if (id)
grp->id = id;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
spin_unlock_irqrestore(&dev->devres_lock, flags);
return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
struct devres_node *node;
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
struct devres_group *grp;
if (node->release != &group_open_release)
continue;
grp = container_of(node, struct devres_group, node[0]);
if (id) {
if (grp->id == id)
return grp;
} else if (list_empty(&grp->node[1].entry))
return grp;
}
return NULL;
}
/**
* devres_close_group - Close a devres group
* @dev: Device to close devres group for
* @id: ID of target group, can be NULL
*
* Close the group identified by @id. If @id is NULL, the latest open
* group is selected.
*/
void devres_close_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp)
add_dr(dev, &grp->node[1]);
else
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
/**
* devres_remove_group - Remove a devres group
* @dev: Device to remove group for
* @id: ID of target group, can be NULL
*
* Remove the group identified by @id. If @id is NULL, the latest
* open group is selected. Note that removing a group doesn't affect
* any other resources.
*/
void devres_remove_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp) {
list_del_init(&grp->node[0].entry);
list_del_init(&grp->node[1].entry);
devres_log(dev, &grp->node[0], "REM");
} else
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
/**
* devres_release_group - Release resources in a devres group
* @dev: Device to release group for
* @id: ID of target group, can be NULL
*
* Release all resources in the group identified by @id. If @id is
* NULL, the latest open group is selected. The selected group and
* groups properly nested inside the selected group are removed.
*
* RETURNS:
* The number of released non-group resources.
*/
int devres_release_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
LIST_HEAD(todo);
int cnt = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp) {
struct list_head *first = &grp->node[0].entry;
struct list_head *end = &dev->devres_head;
if (!list_empty(&grp->node[1].entry))
end = grp->node[1].entry.next;
cnt = remove_nodes(dev, first, end, &todo);
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
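/*
 * How the group API fits together, as a hedged sketch ("setup_part_one"
 * and "setup_part_two" are hypothetical probe steps):
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	rc = setup_part_one(dev);
 *	if (rc)
 *		goto err;
 *	rc = setup_part_two(dev);
 *	if (rc)
 *		goto err;
 *
 *	devres_close_group(dev, NULL);
 *	return 0;
 *
 * err:
 *	devres_release_group(dev, NULL);
 *	return rc;
 */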
/*
* Custom devres actions allow inserting a simple function call
* into the teardown sequence.
*/
struct action_devres {
void *data;
void (*action)(void *);
};
static int devm_action_match(struct device *dev, void *res, void *p)
{
struct action_devres *devres = res;
struct action_devres *target = p;
return devres->action == target->action &&
devres->data == target->data;
}
static void devm_action_release(struct device *dev, void *res)
{
struct action_devres *devres = res;
devres->action(devres->data);
}
/**
* __devm_add_action() - add a custom action to list of managed resources
* @dev: Device that owns the action
* @action: Function that should be called
* @data: Pointer to data passed to @action implementation
* @name: Name of the resource (for debugging purposes)
*
* This adds a custom action to the list of managed resources so that
* it gets executed as part of standard resource unwinding.
*/
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
struct action_devres *devres;
devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
GFP_KERNEL, NUMA_NO_NODE, name);
if (!devres)
return -ENOMEM;
devres->data = data;
devres->action = action;
devres_add(dev, devres);
return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
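/*
 * Drivers normally reach this through the devm_add_action() and
 * devm_add_action_or_reset() wrappers declared in <linux/device.h>. A
 * sketch, with "my_clk_disable" being a hypothetical callback:
 *
 *	static void my_clk_disable(void *data)
 *	{
 *		clk_disable_unprepare(data);
 *	}
 *
 *	ret = devm_add_action_or_reset(dev, my_clk_disable, clk);
 *	if (ret)
 *		return ret;
 */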
/**
* devm_remove_action() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
*/
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
&devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);
/**
* devm_release_action() - release previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Releases and removes instance of @action previously added by
* devm_add_action(). Both action and data should match one of the
* existing entries.
*/
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
&devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);
/*
* Managed kmalloc/kfree
*/
static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
* devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
* Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, the guaranteed alignment is that of unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
if (unlikely(!size))
return ZERO_SIZE_PTR;
/* use raw alloc_dr for kmalloc caller tracing */
dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
if (unlikely(!dr))
return NULL;
/*
 * This is named devm_kzalloc_release for historical reasons:
 * the initial implementation supported only kzalloc, not kmalloc.
*/
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
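/*
 * Typical probe-time usage, via the devm_kzalloc() wrapper from
 * <linux/device.h> ("struct my_priv" is a hypothetical driver type):
 *
 *	struct my_priv *priv;
 *
 *	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *	dev_set_drvdata(dev, priv);
 */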
/**
* devm_krealloc - Resource-managed krealloc()
* @dev: Device to re-allocate memory for
* @ptr: Pointer to the memory chunk to re-allocate
* @new_size: New allocation size
* @gfp: Allocation gfp flags
*
* Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
* Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
* it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
* previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
* change the order in which the release callback for the re-alloc'ed devres
* will be called (except when falling back to devm_kmalloc() or when freeing
* resources when new_size is zero). The contents of the memory are preserved
* up to the lesser of new and old sizes.
*/
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
size_t total_new_size, total_old_size;
struct devres *old_dr, *new_dr;
unsigned long flags;
if (unlikely(!new_size)) {
devm_kfree(dev, ptr);
return ZERO_SIZE_PTR;
}
if (unlikely(ZERO_OR_NULL_PTR(ptr)))
return devm_kmalloc(dev, new_size, gfp);
if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
/*
* We cannot reliably realloc a const string returned by
* devm_kstrdup_const().
*/
return NULL;
if (!check_dr_size(new_size, &total_new_size))
return NULL;
total_old_size = ksize(container_of(ptr, struct devres, data));
if (total_old_size == 0) {
WARN(1, "Pointer doesn't point to dynamically allocated memory.");
return NULL;
}
/*
* If new size is smaller or equal to the actual number of bytes
* allocated previously - just return the same pointer.
*/
if (total_new_size <= total_old_size)
return ptr;
/*
 * Otherwise: allocate a new, larger chunk. We need to allocate before
 * taking the lock, as the caller most probably uses GFP_KERNEL.
*/
new_dr = alloc_dr(devm_kmalloc_release,
total_new_size, gfp, dev_to_node(dev));
if (!new_dr)
return NULL;
/*
* The spinlock protects the linked list against concurrent
* modifications but not the resource itself.
*/
spin_lock_irqsave(&dev->devres_lock, flags);
old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
if (!old_dr) {
spin_unlock_irqrestore(&dev->devres_lock, flags);
kfree(new_dr);
WARN(1, "Memory chunk not managed or managed by a different device.");
return NULL;
}
replace_dr(dev, &old_dr->node, &new_dr->node);
spin_unlock_irqrestore(&dev->devres_lock, flags);
/*
* We can copy the memory contents after releasing the lock as we're
* no longer modifying the list links.
*/
memcpy(new_dr->data, old_dr->data,
total_old_size - offsetof(struct devres, data));
/*
* Same for releasing the old devres - it's now been removed from the
* list. This is also the reason why we must not use devm_kfree() - the
* links are no longer valid.
*/
kfree(old_dr);
return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
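/*
 * Sketch of growing a managed buffer ("buf" and "new_len" are
 * hypothetical). On failure the old allocation stays valid and managed,
 * mirroring regular krealloc() semantics:
 *
 *	tmp = devm_krealloc(dev, buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */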
/**
* devm_kstrdup - Allocate resource managed space and
 * copy an existing string into it.
* @dev: Device to allocate memory for
* @s: the string to duplicate
* @gfp: the GFP mask used in the devm_kmalloc() call when
* allocating memory
* RETURNS:
* Pointer to allocated string on success, NULL on failure.
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
size_t size;
char *buf;
if (!s)
return NULL;
size = strlen(s) + 1;
buf = devm_kmalloc(dev, size, gfp);
if (buf)
memcpy(buf, s, size);
return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
/**
* devm_kstrdup_const - resource managed conditional string duplication
* @dev: device for which to duplicate the string
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Strings allocated by devm_kstrdup_const will be automatically freed when
* the associated device is detached.
*
* RETURNS:
 * Source string if it is in the .rodata section; otherwise it falls back to
 * devm_kstrdup().
*/
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
if (is_kernel_rodata((unsigned long)s))
return s;
return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
/**
* devm_kvasprintf - Allocate resource managed space and format a string
 * into it.
* @dev: Device to allocate memory for
* @gfp: the GFP mask used in the devm_kmalloc() call when
* allocating memory
* @fmt: The printf()-style format string
* @ap: Arguments for the format string
* RETURNS:
* Pointer to allocated string on success, NULL on failure.
*/
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
va_list ap)
{
unsigned int len;
char *p;
va_list aq;
va_copy(aq, ap);
len = vsnprintf(NULL, 0, fmt, aq);
va_end(aq);
p = devm_kmalloc(dev, len+1, gfp);
if (!p)
return NULL;
vsnprintf(p, len+1, fmt, ap);
return p;
}
EXPORT_SYMBOL(devm_kvasprintf);
/**
* devm_kasprintf - Allocate resource managed space and format a string
 * into it.
* @dev: Device to allocate memory for
* @gfp: the GFP mask used in the devm_kmalloc() call when
* allocating memory
* @fmt: The printf()-style format string
* @...: Arguments for the format string
* RETURNS:
* Pointer to allocated string on success, NULL on failure.
*/
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
va_list ap;
char *p;
va_start(ap, fmt);
p = devm_kvasprintf(dev, gfp, fmt, ap);
va_end(ap);
return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
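/*
 * Sketch (the format string and names are illustrative): the returned
 * string lives until driver detach, so it can safely be handed to APIs
 * that expect a long-lived name.
 *
 *	name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d", dev_name(dev), i);
 *	if (!name)
 *		return -ENOMEM;
 */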
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
* Free memory allocated with devm_kmalloc().
*/
void devm_kfree(struct device *dev, const void *p)
{
int rc;
/*
* Special cases: pointer to a string in .rodata returned by
* devm_kstrdup_const() or NULL/ZERO ptr.
*/
if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
return;
rc = devres_destroy(dev, devm_kmalloc_release,
devm_kmalloc_match, (void *)p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
/**
* devm_kmemdup - Resource-managed kmemdup
* @dev: Device this memory belongs to
* @src: Memory region to duplicate
* @len: Memory region length
* @gfp: GFP mask to use
*
 * Duplicate a region of memory using resource-managed kmalloc()
*/
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
void *p;
p = devm_kmalloc(dev, len, gfp);
if (p)
memcpy(p, src, len);
return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
struct pages_devres {
unsigned long addr;
unsigned int order;
};
static int devm_pages_match(struct device *dev, void *res, void *p)
{
struct pages_devres *devres = res;
struct pages_devres *target = p;
return devres->addr == target->addr;
}
static void devm_pages_release(struct device *dev, void *res)
{
struct pages_devres *devres = res;
free_pages(devres->addr, devres->order);
}
/**
* devm_get_free_pages - Resource-managed __get_free_pages
* @dev: Device to allocate memory for
* @gfp_mask: Allocation gfp flags
* @order: Allocation size is (1 << order) pages
*
* Managed get_free_pages. Memory allocated with this function is
* automatically freed on driver detach.
*
* RETURNS:
* Address of allocated memory on success, 0 on failure.
*/
unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order)
{
struct pages_devres *devres;
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
if (unlikely(!addr))
return 0;
devres = devres_alloc(devm_pages_release,
sizeof(struct pages_devres), GFP_KERNEL);
if (unlikely(!devres)) {
free_pages(addr, order);
return 0;
}
devres->addr = addr;
devres->order = order;
devres_add(dev, devres);
return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
/**
* devm_free_pages - Resource-managed free_pages
* @dev: Device this memory belongs to
* @addr: Memory to free
*
* Free memory allocated with devm_get_free_pages(). Unlike free_pages,
* there is no need to supply the @order.
*/
void devm_free_pages(struct device *dev, unsigned long addr)
{
struct pages_devres devres = { .addr = addr };
WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
&devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
static void devm_percpu_release(struct device *dev, void *pdata)
{
void __percpu *p;
p = *(void __percpu **)pdata;
free_percpu(p);
}
static int devm_percpu_match(struct device *dev, void *data, void *p)
{
struct devres *devr = container_of(data, struct devres, data);
return *(void **)devr->data == p;
}
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
* @size: Size of per-cpu memory to allocate
* @align: Alignment of per-cpu memory to allocate
*
* Managed alloc_percpu. Per-cpu memory allocated with this function is
* automatically freed on driver detach.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
size_t align)
{
void *p;
void __percpu *pcpu;
pcpu = __alloc_percpu(size, align);
if (!pcpu)
return NULL;
p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
if (!p) {
free_percpu(pcpu);
return NULL;
}
*(void __percpu **)p = pcpu;
devres_add(dev, p);
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
/**
* devm_free_percpu - Resource-managed free_percpu
* @dev: Device this memory belongs to
* @pdata: Per-cpu memory to free
*
* Free memory allocated with devm_alloc_percpu().
*/
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
(__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
| linux-master | drivers/base/devres.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ISA bus.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/isa.h>
static struct device isa_bus = {
.init_name = "isa"
};
struct isa_dev {
struct device dev;
struct device *next;
unsigned int id;
};
#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
static int isa_bus_match(struct device *dev, struct device_driver *driver)
{
struct isa_driver *isa_driver = to_isa_driver(driver);
if (dev->platform_data == isa_driver) {
if (!isa_driver->match ||
isa_driver->match(dev, to_isa_dev(dev)->id))
return 1;
dev->platform_data = NULL;
}
return 0;
}
static int isa_bus_probe(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->probe)
return isa_driver->probe(dev, to_isa_dev(dev)->id);
return 0;
}
static void isa_bus_remove(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->remove)
isa_driver->remove(dev, to_isa_dev(dev)->id);
}
static void isa_bus_shutdown(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->shutdown)
isa_driver->shutdown(dev, to_isa_dev(dev)->id);
}
static int isa_bus_suspend(struct device *dev, pm_message_t state)
{
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->suspend)
return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
return 0;
}
static int isa_bus_resume(struct device *dev)
{
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->resume)
return isa_driver->resume(dev, to_isa_dev(dev)->id);
return 0;
}
static struct bus_type isa_bus_type = {
.name = "isa",
.match = isa_bus_match,
.probe = isa_bus_probe,
.remove = isa_bus_remove,
.shutdown = isa_bus_shutdown,
.suspend = isa_bus_suspend,
.resume = isa_bus_resume
};
static void isa_dev_release(struct device *dev)
{
kfree(to_isa_dev(dev));
}
void isa_unregister_driver(struct isa_driver *isa_driver)
{
struct device *dev = isa_driver->devices;
while (dev) {
struct device *tmp = to_isa_dev(dev)->next;
device_unregister(dev);
dev = tmp;
}
driver_unregister(&isa_driver->driver);
}
EXPORT_SYMBOL_GPL(isa_unregister_driver);
int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
{
int error;
unsigned int id;
isa_driver->driver.bus = &isa_bus_type;
isa_driver->devices = NULL;
error = driver_register(&isa_driver->driver);
if (error)
return error;
for (id = 0; id < ndev; id++) {
struct isa_dev *isa_dev;
isa_dev = kzalloc(sizeof *isa_dev, GFP_KERNEL);
if (!isa_dev) {
error = -ENOMEM;
break;
}
isa_dev->dev.parent = &isa_bus;
isa_dev->dev.bus = &isa_bus_type;
dev_set_name(&isa_dev->dev, "%s.%u",
isa_driver->driver.name, id);
isa_dev->dev.platform_data = isa_driver;
isa_dev->dev.release = isa_dev_release;
isa_dev->id = id;
isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24);
isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask;
error = device_register(&isa_dev->dev);
if (error) {
put_device(&isa_dev->dev);
break;
}
isa_dev->next = isa_driver->devices;
isa_driver->devices = &isa_dev->dev;
}
if (!error && !isa_driver->devices)
error = -ENODEV;
if (error)
isa_unregister_driver(isa_driver);
return error;
}
EXPORT_SYMBOL_GPL(isa_register_driver);
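/*
 * Hedged registration sketch ("my_*" names and MY_NDEV are illustrative):
 * up to @ndev devices are created and offered to the driver's match and
 * probe callbacks.
 *
 *	static struct isa_driver my_isa_driver = {
 *		.match	= my_match,
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *		.driver	= {
 *			.name	= "my_isa",
 *		},
 *	};
 *
 *	error = isa_register_driver(&my_isa_driver, MY_NDEV);
 */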
static int __init isa_bus_init(void)
{
int error;
error = bus_register(&isa_bus_type);
if (!error) {
error = device_register(&isa_bus);
if (error)
bus_unregister(&isa_bus_type);
}
return error;
}
postcore_initcall(isa_bus_init);
| linux-master | drivers/base/isa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver core interface to the pinctrl subsystem.
*
* Copyright (C) 2012 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <[email protected]>
*/
#include <linux/device.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pinctrl/consumer.h>
#include <linux/slab.h>
/**
* pinctrl_bind_pins() - called by the device core before probe
* @dev: the device that is just about to probe
*/
int pinctrl_bind_pins(struct device *dev)
{
int ret;
if (dev->of_node_reused)
return 0;
dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
if (!dev->pins)
return -ENOMEM;
dev->pins->p = devm_pinctrl_get(dev);
if (IS_ERR(dev->pins->p)) {
dev_dbg(dev, "no pinctrl handle\n");
ret = PTR_ERR(dev->pins->p);
goto cleanup_alloc;
}
dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_DEFAULT);
if (IS_ERR(dev->pins->default_state)) {
dev_dbg(dev, "no default pinctrl state\n");
ret = 0;
goto cleanup_get;
}
dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_INIT);
if (IS_ERR(dev->pins->init_state)) {
/* Not supplying this state is perfectly legal */
dev_dbg(dev, "no init pinctrl state\n");
ret = pinctrl_select_state(dev->pins->p,
dev->pins->default_state);
} else {
ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
}
if (ret) {
dev_dbg(dev, "failed to activate initial pinctrl state\n");
goto cleanup_get;
}
#ifdef CONFIG_PM
/*
* If power management is enabled, we also look for the optional
* sleep and idle pin states, with semantics as defined in
* <linux/pinctrl/pinctrl-state.h>
*/
dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_SLEEP);
if (IS_ERR(dev->pins->sleep_state))
/* Not supplying this state is perfectly legal */
dev_dbg(dev, "no sleep pinctrl state\n");
dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_IDLE);
if (IS_ERR(dev->pins->idle_state))
/* Not supplying this state is perfectly legal */
dev_dbg(dev, "no idle pinctrl state\n");
#endif
return 0;
/*
* If no pinctrl handle or default state was found for this device,
* let's explicitly free the pin container in the device, there is
* no point in keeping it around.
*/
cleanup_get:
devm_pinctrl_put(dev->pins->p);
cleanup_alloc:
devm_kfree(dev, dev->pins);
dev->pins = NULL;
/* Return deferrals */
if (ret == -EPROBE_DEFER)
return ret;
/* Return serious errors */
if (ret == -EINVAL)
return ret;
/* We ignore errors like -ENOENT meaning no pinctrl state */
return 0;
}
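/*
 * For reference, a hedged device-tree sketch of the states looked up
 * above (state names follow <linux/pinctrl/pinctrl-state.h>; the phandles
 * are illustrative):
 *
 *	pinctrl-names = "default", "init", "sleep", "idle";
 *	pinctrl-0 = <&foo_default>;
 *	pinctrl-1 = <&foo_init>;
 *	pinctrl-2 = <&foo_sleep>;
 *	pinctrl-3 = <&foo_idle>;
 */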
| linux-master | drivers/base/pinctrl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* hypervisor.c - /sys/hypervisor subsystem.
*
* Copyright (C) IBM Corp. 2006
* Copyright (C) 2007 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2007 Novell Inc.
*/
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/export.h>
#include "base.h"
struct kobject *hypervisor_kobj;
EXPORT_SYMBOL_GPL(hypervisor_kobj);
int __init hypervisor_init(void)
{
hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
if (!hypervisor_kobj)
return -ENOMEM;
return 0;
}
| linux-master | drivers/base/hypervisor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System bus type for containers.
*
* Copyright (C) 2013, Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
*/
#include <linux/container.h>
#include "base.h"
#define CONTAINER_BUS_NAME "container"
static int trivial_online(struct device *dev)
{
return 0;
}
static int container_offline(struct device *dev)
{
struct container_dev *cdev = to_container_dev(dev);
return cdev->offline ? cdev->offline(cdev) : 0;
}
struct bus_type container_subsys = {
.name = CONTAINER_BUS_NAME,
.dev_name = CONTAINER_BUS_NAME,
.online = trivial_online,
.offline = container_offline,
};
void __init container_dev_init(void)
{
int ret;
ret = subsys_system_register(&container_subsys, NULL);
if (ret)
pr_err("%s() failed: %d\n", __func__, ret);
}
| linux-master | drivers/base/container.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Lee Jones <[email protected]> for ST-Ericsson.
*/
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
#include <linux/glob.h>
static DEFINE_IDA(soc_ida);
/* Prototype to allow declarations of DEVICE_ATTR(<foo>) before soc_info_show */
static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
char *buf);
struct soc_device {
struct device dev;
struct soc_device_attribute *attr;
int soc_dev_num;
};
static struct bus_type soc_bus_type = {
.name = "soc",
};
static bool soc_bus_registered;
static DEVICE_ATTR(machine, 0444, soc_info_show, NULL);
static DEVICE_ATTR(family, 0444, soc_info_show, NULL);
static DEVICE_ATTR(serial_number, 0444, soc_info_show, NULL);
static DEVICE_ATTR(soc_id, 0444, soc_info_show, NULL);
static DEVICE_ATTR(revision, 0444, soc_info_show, NULL);
struct device *soc_device_to_device(struct soc_device *soc_dev)
{
return &soc_dev->dev;
}
static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
struct device *dev = kobj_to_dev(kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if ((attr == &dev_attr_machine.attr) && soc_dev->attr->machine)
return attr->mode;
if ((attr == &dev_attr_family.attr) && soc_dev->attr->family)
return attr->mode;
if ((attr == &dev_attr_revision.attr) && soc_dev->attr->revision)
return attr->mode;
if ((attr == &dev_attr_serial_number.attr) && soc_dev->attr->serial_number)
return attr->mode;
if ((attr == &dev_attr_soc_id.attr) && soc_dev->attr->soc_id)
return attr->mode;
/* Unknown or unfilled attribute */
return 0;
}
static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
const char *output;
if (attr == &dev_attr_machine)
output = soc_dev->attr->machine;
else if (attr == &dev_attr_family)
output = soc_dev->attr->family;
else if (attr == &dev_attr_revision)
output = soc_dev->attr->revision;
else if (attr == &dev_attr_serial_number)
output = soc_dev->attr->serial_number;
else if (attr == &dev_attr_soc_id)
output = soc_dev->attr->soc_id;
else
return -EINVAL;
return sysfs_emit(buf, "%s\n", output);
}
static struct attribute *soc_attr[] = {
&dev_attr_machine.attr,
&dev_attr_family.attr,
&dev_attr_serial_number.attr,
&dev_attr_soc_id.attr,
&dev_attr_revision.attr,
NULL,
};
static const struct attribute_group soc_attr_group = {
.attrs = soc_attr,
.is_visible = soc_attribute_mode,
};
static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
kfree(soc_dev->dev.groups);
kfree(soc_dev);
}
static void soc_device_get_machine(struct soc_device_attribute *soc_dev_attr)
{
struct device_node *np;
if (soc_dev_attr->machine)
return;
np = of_find_node_by_path("/");
of_property_read_string(np, "model", &soc_dev_attr->machine);
of_node_put(np);
}
static struct soc_device_attribute *early_soc_dev_attr;
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
const struct attribute_group **soc_attr_groups;
int ret;
soc_device_get_machine(soc_dev_attr);
if (!soc_bus_registered) {
if (early_soc_dev_attr)
return ERR_PTR(-EBUSY);
early_soc_dev_attr = soc_dev_attr;
return NULL;
}
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
if (!soc_dev) {
ret = -ENOMEM;
goto out1;
}
soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL);
if (!soc_attr_groups) {
ret = -ENOMEM;
goto out2;
}
soc_attr_groups[0] = &soc_attr_group;
soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
/* Fetch a unique (reclaimable) SOC ID. */
ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out3;
soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
soc_dev->dev.bus = &soc_bus_type;
soc_dev->dev.groups = soc_attr_groups;
soc_dev->dev.release = soc_release;
dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
ret = device_register(&soc_dev->dev);
if (ret) {
put_device(&soc_dev->dev);
return ERR_PTR(ret);
}
return soc_dev;
out3:
kfree(soc_attr_groups);
out2:
kfree(soc_dev);
out1:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed after calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
EXPORT_SYMBOL_GPL(soc_device_unregister);
static int __init soc_bus_register(void)
{
int ret;
ret = bus_register(&soc_bus_type);
if (ret)
return ret;
soc_bus_registered = true;
if (early_soc_dev_attr)
return PTR_ERR(soc_device_register(early_soc_dev_attr));
return 0;
}
core_initcall(soc_bus_register);
static int soc_device_match_attr(const struct soc_device_attribute *attr,
const struct soc_device_attribute *match)
{
if (match->machine &&
(!attr->machine || !glob_match(match->machine, attr->machine)))
return 0;
if (match->family &&
(!attr->family || !glob_match(match->family, attr->family)))
return 0;
if (match->revision &&
(!attr->revision || !glob_match(match->revision, attr->revision)))
return 0;
if (match->soc_id &&
(!attr->soc_id || !glob_match(match->soc_id, attr->soc_id)))
return 0;
return 1;
}
static int soc_device_match_one(struct device *dev, void *arg)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
return soc_device_match_attr(soc_dev->attr, arg);
}
/*
* soc_device_match - identify the SoC in the machine
* @matches: zero-terminated array of possible matches
*
* returns the first matching entry of the argument array, or NULL
* if none of them match.
*
* This function is meant as a helper in place of of_match_node()
* in cases where either no device tree is available or the information
* in a device node is insufficient to identify a particular variant
* by its compatible strings or other properties. For new devices,
* the DT binding should always provide unique compatible strings
* that allow the use of of_match_node() instead.
*
* The calling function can use the .data entry of the
* soc_device_attribute to pass a structure or function pointer for
* each entry.
*/
const struct soc_device_attribute *soc_device_match(
const struct soc_device_attribute *matches)
{
int ret;
if (!matches)
return NULL;
while (matches->machine || matches->family || matches->revision ||
matches->soc_id) {
ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
soc_device_match_one);
if (ret < 0 && early_soc_dev_attr)
ret = soc_device_match_attr(early_soc_dev_attr,
matches);
if (ret < 0)
return NULL;
if (ret)
return matches;
matches++;
}
return NULL;
}
EXPORT_SYMBOL_GPL(soc_device_match);
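/*
 * Sketch of a caller-side match table (values are illustrative; entries
 * may use glob patterns, and the array must end with an empty sentinel;
 * "apply_workaround" is a hypothetical helper):
 *
 *	static const struct soc_device_attribute my_quirks[] = {
 *		{ .family = "Example SoC", .revision = "1.*" },
 *		{ }
 *	};
 *
 *	if (soc_device_match(my_quirks))
 *		apply_workaround();
 */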
| linux-master | drivers/base/soc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Arch specific cpu topology information
*
* Copyright (C) 2016, ARM Ltd.
* Written by: Juri Lelli, ARM Ltd.
*/
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
return cpumask_subset(cpus, &scale_freq_counters_mask);
}
bool topology_scale_freq_invariant(void)
{
return cpufreq_supports_freq_invariance() ||
supports_scale_freq_counters(cpu_online_mask);
}
static void update_scale_freq_invariant(bool status)
{
if (scale_freq_invariant == status)
return;
/*
* Task scheduler behavior depends on frequency invariance support,
* either cpufreq or counter driven. If the support status changes as
* a result of counter initialisation and use, retrigger the build of
* scheduling domains to ensure the information is propagated properly.
*/
if (topology_scale_freq_invariant() == status) {
scale_freq_invariant = status;
rebuild_sched_domains_energy();
}
}
void topology_set_scale_freq_source(struct scale_freq_data *data,
const struct cpumask *cpus)
{
struct scale_freq_data *sfd;
int cpu;
/*
* Avoid calling rebuild_sched_domains() unnecessarily if FIE is
* supported by cpufreq.
*/
if (cpumask_empty(&scale_freq_counters_mask))
scale_freq_invariant = topology_scale_freq_invariant();
rcu_read_lock();
for_each_cpu(cpu, cpus) {
sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
/* Use ARCH provided counters whenever possible */
if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
rcu_assign_pointer(per_cpu(sft_data, cpu), data);
cpumask_set_cpu(cpu, &scale_freq_counters_mask);
}
}
rcu_read_unlock();
update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
void topology_clear_scale_freq_source(enum scale_freq_source source,
const struct cpumask *cpus)
{
struct scale_freq_data *sfd;
int cpu;
rcu_read_lock();
for_each_cpu(cpu, cpus) {
sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
if (sfd && sfd->source == source) {
rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
}
}
rcu_read_unlock();
/*
* Make sure all references to previous sft_data are dropped to avoid
* use-after-free races.
*/
synchronize_rcu();
update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
void topology_scale_freq_tick(void)
{
struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
if (sfd)
sfd->set_freq_scale();
}
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq)
{
unsigned long scale;
int i;
if (WARN_ON_ONCE(!cur_freq || !max_freq))
return;
/*
* If the use of counters for FIE is enabled, just return as we don't
* want to update the scale factor with information from CPUFREQ.
* Instead the scale factor will be updated from arch_scale_freq_tick.
*/
if (supports_scale_freq_counters(cpus))
return;
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
per_cpu(arch_freq_scale, i) = scale;
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
}
DEFINE_PER_CPU(unsigned long, thermal_pressure);
/**
* topology_update_thermal_pressure() - Update thermal pressure for CPUs
* @cpus : The related CPUs for which capacity has been reduced
* @capped_freq : The maximum allowed frequency that CPUs can run at
*
* Update the value of thermal pressure for all @cpus in the mask. The
* cpumask should include all (online+offline) affected CPUs, to avoid
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
 * thermal capping. It might also be a boost frequency value, which is bigger
 * than the internal 'freq_factor' max frequency. In such a case the pressure
* value should simply be removed, since this is an indication that there is
* no thermal throttling. The @capped_freq must be provided in kHz.
*/
void topology_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
{
unsigned long max_capacity, capacity, th_pressure;
u32 max_freq;
int cpu;
cpu = cpumask_first(cpus);
max_capacity = arch_scale_cpu_capacity(cpu);
max_freq = per_cpu(freq_factor, cpu);
/* Convert to MHz scale which is used in 'freq_factor' */
capped_freq /= 1000;
/*
* Handle properly the boost frequencies, which should simply clean
* the thermal pressure value.
*/
if (max_freq <= capped_freq)
capacity = max_capacity;
else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
th_pressure = max_capacity - capacity;
trace_thermal_pressure_update(cpu, th_pressure);
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
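/*
 * Worked example of the scaling above (numbers are illustrative): with
 * max_capacity = 1024, max_freq = 2000 (MHz, via 'freq_factor') and
 * capped_freq = 1500000 kHz -> 1500 MHz, capacity becomes
 * mult_frac(1024, 1500, 2000) = 768 and th_pressure = 1024 - 768 = 256.
 */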
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
static DEVICE_ATTR_RO(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
int i;
struct device *cpu;
for_each_possible_cpu(i) {
cpu = get_cpu_device(i);
if (!cpu) {
pr_err("%s: too early to get CPU%d device!\n",
__func__, i);
continue;
}
device_create_file(cpu, &dev_attr_cpu_capacity);
}
return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
static int update_topology;
int topology_update_cpu_topology(void)
{
return update_topology;
}
/*
* Updating the sched_domains can't be done directly from cpufreq callbacks
* due to locking, so queue the work for later.
*/
static void update_topology_flags_workfn(struct work_struct *work)
{
update_topology = 1;
rebuild_sched_domains();
pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
update_topology = 0;
}
static u32 *raw_capacity;
static int free_raw_capacity(void)
{
kfree(raw_capacity);
raw_capacity = NULL;
return 0;
}
void topology_normalize_cpu_scale(void)
{
u64 capacity;
u64 capacity_scale;
int cpu;
if (!raw_capacity)
return;
capacity_scale = 1;
for_each_possible_cpu(cpu) {
capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, topology_get_cpu_scale(cpu));
}
}
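/*
 * Worked example (illustrative only): two CPUs with raw capacities 1024
 * and 512 and freq_factors 2000 and 1500 yield raw products 2048000 and
 * 768000. With capacity_scale = 2048000, the normalized capacities are:
 *
 *	CPU0: (2048000 << 10) / 2048000 = 1024
 *	CPU1: ( 768000 << 10) / 2048000 = 384
 */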
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
struct clk *cpu_clk;
static bool cap_parsing_failed;
int ret;
u32 cpu_capacity;
if (cap_parsing_failed)
return false;
ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
raw_capacity = kcalloc(num_possible_cpus(),
sizeof(*raw_capacity),
GFP_KERNEL);
if (!raw_capacity) {
cap_parsing_failed = true;
return false;
}
}
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
cpu_node, raw_capacity[cpu]);
/*
* Update freq_factor for calculating early boot CPU capacities.
* For non-clk CPU DVFS mechanisms, there's no way to get the
* frequency value now, so assume the CPUs are running at the same
* frequency (by keeping the initial freq_factor value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
per_cpu(freq_factor, cpu) =
clk_get_rate(cpu_clk) / 1000;
clk_put(cpu_clk);
}
} else {
if (raw_capacity) {
pr_err("cpu_capacity: missing %pOF raw capacity\n",
cpu_node);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
free_raw_capacity();
}
return !ret;
}
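/*
 * For reference, a hypothetical device-tree fragment this parser would
 * consume (the compatible string and value are illustrative):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		capacity-dmips-mhz = <578>;
 *	};
 */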
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void)
{
struct cppc_perf_caps perf_caps;
int cpu;
if (likely(!acpi_cpc_valid()))
return;
raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
GFP_KERNEL);
if (!raw_capacity)
return;
for_each_possible_cpu(cpu) {
if (!cppc_get_perf_caps(cpu, &perf_caps) &&
(perf_caps.highest_perf >= perf_caps.nominal_perf) &&
(perf_caps.highest_perf >= perf_caps.lowest_perf)) {
raw_capacity[cpu] = perf_caps.highest_perf;
pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
cpu, raw_capacity[cpu]);
continue;
}
pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
goto exit;
}
topology_normalize_cpu_scale();
schedule_work(&update_topology_flags_work);
pr_debug("cpu_capacity: cpu_capacity initialization done\n");
exit:
free_raw_capacity();
}
#endif
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static int
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
{
struct cpufreq_policy *policy = data;
int cpu;
if (!raw_capacity)
return 0;
if (val != CPUFREQ_CREATE_POLICY)
return 0;
pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
cpumask_pr_args(policy->related_cpus),
cpumask_pr_args(cpus_to_visit));
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus)
per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
schedule_work(&update_topology_flags_work);
free_raw_capacity();
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
int ret;
/*
* On ACPI-based systems, skip registering the cpufreq notifier, as
* cpufreq information is not needed for CPU capacity initialization.
*/
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(cpus_to_visit, cpu_possible_mask);
ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
free_cpumask_var(cpus_to_visit);
return ret;
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
free_cpumask_var(cpus_to_visit);
}
#else
core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
* This function returns the logical CPU number of the node.
* There are basically three kinds of return values:
* (1) logical CPU number, which is >= 0.
* (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
* there is no possible logical CPU in the kernel to match. This happens
* when CONFIG_NR_CPUS is configured to be smaller than the number of
* CPU nodes in the DT. We need to just ignore this case.
* (3) -1 if the node does not exist in the device tree.
*/
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
int cpu;
cpu_node = of_parse_phandle(node, "cpu", 0);
if (!cpu_node)
return -1;
cpu = of_cpu_node_to_id(cpu_node);
if (cpu >= 0)
topology_parse_cpu_capacity(cpu_node, cpu);
else
pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
cpu_node, cpumask_pr_args(cpu_possible_mask));
of_node_put(cpu_node);
return cpu;
}
static int __init parse_core(struct device_node *core, int package_id,
int cluster_id, int core_id)
{
char name[20];
bool leaf = true;
int i = 0;
int cpu;
struct device_node *t;
do {
snprintf(name, sizeof(name), "thread%d", i);
t = of_get_child_by_name(core, name);
if (t) {
leaf = false;
cpu = get_cpu_for_node(t);
if (cpu >= 0) {
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else if (cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for thread\n", t);
of_node_put(t);
return -EINVAL;
}
of_node_put(t);
}
i++;
} while (t);
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
pr_err("%pOF: Core has both threads and CPU\n",
core);
return -EINVAL;
}
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
return 0;
}
static int __init parse_cluster(struct device_node *cluster, int package_id,
int cluster_id, int depth)
{
char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
int core_id = 0;
int i, ret;
/*
* First check for child clusters; we currently ignore any
* information about the nesting of clusters and present the
* scheduler with a flat list of them.
*/
i = 0;
do {
snprintf(name, sizeof(name), "cluster%d", i);
c = of_get_child_by_name(cluster, name);
if (c) {
leaf = false;
ret = parse_cluster(c, package_id, i, depth + 1);
if (depth > 0)
pr_warn("Topology for clusters of clusters not yet supported\n");
of_node_put(c);
if (ret != 0)
return ret;
}
i++;
} while (c);
/* Now check for cores */
i = 0;
do {
snprintf(name, sizeof(name), "core%d", i);
c = of_get_child_by_name(cluster, name);
if (c) {
has_cores = true;
if (depth == 0) {
pr_err("%pOF: cpu-map children should be clusters\n",
c);
of_node_put(c);
return -EINVAL;
}
if (leaf) {
ret = parse_core(c, package_id, cluster_id,
core_id++);
} else {
pr_err("%pOF: Non-leaf cluster with core %s\n",
cluster, name);
ret = -EINVAL;
}
of_node_put(c);
if (ret != 0)
return ret;
}
i++;
} while (c);
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
return 0;
}
static int __init parse_socket(struct device_node *socket)
{
char name[20];
struct device_node *c;
bool has_socket = false;
int package_id = 0, ret;
do {
snprintf(name, sizeof(name), "socket%d", package_id);
c = of_get_child_by_name(socket, name);
if (c) {
has_socket = true;
ret = parse_cluster(c, package_id, -1, 0);
of_node_put(c);
if (ret != 0)
return ret;
}
package_id++;
} while (c);
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
return ret;
}
static int __init parse_dt_topology(void)
{
struct device_node *cn, *map;
int ret = 0;
int cpu;
cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return 0;
}
/*
* When topology is provided, cpu-map is essentially a root
* cluster with restricted subnodes.
*/
map = of_get_child_by_name(cn, "cpu-map");
if (!map)
goto out;
ret = parse_socket(map);
if (ret != 0)
goto out_map;
topology_normalize_cpu_scale();
/*
* Check that all cores are in the topology; the SMP code will
* only mark cores described in the DT as possible.
*/
for_each_possible_cpu(cpu)
if (cpu_topology[cpu].package_id < 0) {
ret = -EINVAL;
break;
}
out_map:
of_node_put(map);
out:
of_node_put(cn);
return ret;
}
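/*
 * A hypothetical cpu-map fragment accepted by the parser above (labels
 * and phandles are illustrative):
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 { cpu = <&cpu0>; };
 *				core1 { cpu = <&cpu1>; };
 *			};
 *		};
 *	};
 */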
#endif
/*
* cpu topology table
*/
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
/* Find the smaller of NUMA, core or LLC siblings */
if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
/* not NUMA in package, let's use the package siblings */
core_mask = &cpu_topology[cpu].core_sibling;
}
if (last_level_cache_is_valid(cpu)) {
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
core_mask = &cpu_topology[cpu].llc_sibling;
}
/*
* For systems with no shared cpu-side LLC but with clusters defined,
* extend core_mask to cluster_siblings. The sched domain builder will
* then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
*/
if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
core_mask = &cpu_topology[cpu].cluster_sibling;
return core_mask;
}
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
/*
* Forbid cpu_clustergroup_mask() from spanning more CPUs than, or the
* same CPUs as, cpu_coregroup_mask().
*/
if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling))
return topology_sibling_cpumask(cpu);
return &cpu_topology[cpu].cluster_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
int cpu, ret;
ret = detect_cache_attributes(cpuid);
if (ret && ret != -ENOENT)
pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
if (last_level_cache_is_shared(cpu, cpuid)) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
continue;
if (cpuid_topo->cluster_id >= 0) {
cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
}
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
}
}
static void clear_cpu_topology(int cpu)
{
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpumask_clear(&cpu_topo->llc_sibling);
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
cpumask_clear(&cpu_topo->cluster_sibling);
cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
cpumask_clear(&cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}
void __init reset_cpu_topology(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu) {
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
clear_cpu_topology(cpu);
}
}
void remove_cpu_topology(unsigned int cpu)
{
int sibling;
for_each_cpu(sibling, topology_core_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
for_each_cpu(sibling, topology_cluster_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
for_each_cpu(sibling, topology_llc_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
clear_cpu_topology(cpu);
}
__weak int __init parse_acpi_topology(void)
{
return 0;
}
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
reset_cpu_topology();
ret = parse_acpi_topology();
if (!ret)
ret = of_have_populated_dt() && parse_dt_topology();
if (ret) {
/*
* Discard anything that was parsed if we hit an error so we
* don't use partial information. But do not return yet to give
* arch-specific early cache level detection a chance to run.
*/
reset_cpu_topology();
}
for_each_possible_cpu(cpu) {
ret = fetch_cache_info(cpu);
if (!ret)
continue;
else if (ret != -ENOENT)
pr_err("Early cacheinfo failed, ret = %d\n", ret);
return;
}
}
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
if (cpuid_topo->package_id != -1)
goto topology_populated;
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = cpuid;
cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: package %d core %d thread %d\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
cpuid_topo->thread_id);
topology_populated:
update_siblings_masks(cpuid);
}
#endif
| linux-master | drivers/base/arch_topology.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MSI framework for platform devices
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <[email protected]>
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#define DEV_ID_SHIFT 21
#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
/*
* Internal data structure containing a (made up, but unique) devid
* and the callback to write the MSI message.
*/
struct platform_msi_priv_data {
struct device *dev;
void *host_data;
msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
};
/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);
#ifdef GENERIC_MSI_DOMAIN_OPS
/*
* Convert an msi_desc to a globally unique identifier (per-device
* devid + msi_desc position in the msi_list).
*/
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
u32 devid = desc->dev->msi.data->platform_data->devid;
return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
}
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = platform_msi_calc_hwirq(desc);
}
static int platform_msi_init(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq, irq_hw_number_t hwirq,
msi_alloc_info_t *arg)
{
return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
info->chip, info->chip_data);
}
static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
{
arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
#else
#define platform_msi_set_desc NULL
#define platform_msi_init NULL
#define platform_msi_set_proxy_dev(x) do { } while (0)
#endif
static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
BUG_ON(!ops);
if (ops->msi_init == NULL)
ops->msi_init = platform_msi_init;
if (ops->set_desc == NULL)
ops->set_desc = platform_msi_set_desc;
}
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
desc->dev->msi.data->platform_data->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
BUG_ON(!chip);
if (!chip->irq_mask)
chip->irq_mask = irq_chip_mask_parent;
if (!chip->irq_unmask)
chip->irq_unmask = irq_chip_unmask_parent;
if (!chip->irq_eoi)
chip->irq_eoi = irq_chip_eoi_parent;
if (!chip->irq_set_affinity)
chip->irq_set_affinity = msi_domain_set_affinity;
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = platform_msi_write_msg;
if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
!(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Updates the domain and chip ops and creates a platform MSI
* interrupt domain.
*
* Returns:
* A domain pointer or NULL in case of failure.
*/
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
struct irq_domain *domain;
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
platform_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
return domain;
}
EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
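/*
 * Sketch of how an irqchip driver might stack a platform MSI domain on
 * its parent; the 'fwnode', 'info' and 'parent' setup is driver-specific
 * and only hinted at here:
 *
 *	info->flags |= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS;
 *	d = platform_msi_create_irq_domain(fwnode, info, parent);
 *	if (!d)
 *		return -ENOMEM;
 */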
static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
struct platform_msi_priv_data *datap;
int err;
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
return -EINVAL;
if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
return -EINVAL;
}
err = msi_setup_device_data(dev);
if (err)
return err;
/* Already initialized? */
if (dev->msi.data->platform_data)
return -EBUSY;
datap = kzalloc(sizeof(*datap), GFP_KERNEL);
if (!datap)
return -ENOMEM;
datap->devid = ida_simple_get(&platform_msi_devid_ida,
0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
if (datap->devid < 0) {
err = datap->devid;
kfree(datap);
return err;
}
datap->write_msg = write_msi_msg;
datap->dev = dev;
dev->msi.data->platform_data = datap;
return 0;
}
static void platform_msi_free_priv_data(struct device *dev)
{
struct platform_msi_priv_data *data = dev->msi.data->platform_data;
dev->msi.data->platform_data = NULL;
ida_simple_remove(&platform_msi_devid_ida, data->devid);
kfree(data);
}
/**
* platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
* @dev: The device for which to allocate interrupts
* @nvec: The number of interrupts to allocate
* @write_msi_msg: Callback to write an interrupt message for @dev
*
* Returns:
* Zero for success, or an error code in case of failure
*/
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
int err;
err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
return err;
err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
if (err)
platform_msi_free_priv_data(dev);
return err;
}
EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
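/*
 * Minimal usage sketch, assuming a driver-supplied callback; the names
 * my_write_msg, my_dev and doorbell_base are hypothetical:
 *
 *	static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		writel(msg->data, doorbell_base + desc->msi_index * 4);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(my_dev, 4, my_write_msg);
 *
 * A matching platform_msi_domain_free_irqs(my_dev) releases them again.
 */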
/**
* platform_msi_domain_free_irqs - Free MSI interrupts for @dev
* @dev: The device for which to free interrupts
*/
void platform_msi_domain_free_irqs(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
* a platform-msi domain
* @domain: The platform-msi domain
*
* Return: The private data provided when calling
* platform_msi_create_device_domain().
*/
void *platform_msi_get_host_data(struct irq_domain *domain)
{
struct platform_msi_priv_data *data = domain->host_data;
return data->host_data;
}
static struct lock_class_key platform_device_msi_lock_class;
/**
* __platform_msi_create_device_domain - Create a platform-msi device domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
* @is_tree: flag to indicate tree hierarchy
* @write_msi_msg: Callback to write an interrupt message for @dev
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
*
* Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
*
* This is for interrupt domains which stack on a platform-msi domain
* created by platform_msi_create_irq_domain(). @dev->msi.domain points to
* that platform-msi domain which is the parent for the new domain.
*/
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
bool is_tree,
irq_write_msi_msg_t write_msi_msg,
const struct irq_domain_ops *ops,
void *host_data)
{
struct platform_msi_priv_data *data;
struct irq_domain *domain;
int err;
err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
return NULL;
/*
* Use a separate lock class for the MSI descriptor mutex on
* platform MSI device domains because the descriptor mutex nests
* into the domain mutex. See alloc/free below.
*/
lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
data = dev->msi.data->platform_data;
data->host_data = host_data;
domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
goto free_priv;
platform_msi_set_proxy_dev(&data->arg);
err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
if (err)
goto free_domain;
return domain;
free_domain:
irq_domain_remove(domain);
free_priv:
platform_msi_free_priv_data(dev);
return NULL;
}
/**
* platform_msi_device_domain_free - Free interrupts associated with a platform-msi
* device domain
*
* @domain: The platform-msi device domain
* @virq: The base irq from which to perform the free operation
* @nr_irqs: How many interrupts to free from @virq
*/
void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
msi_lock_descs(data->dev);
msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
msi_unlock_descs(data->dev);
}
/**
* platform_msi_device_domain_alloc - Allocate interrupts associated with
* a platform-msi device domain
*
* @domain: The platform-msi device domain
* @virq: The base irq from which to perform the allocate operation
* @nr_irqs: How many interrupts to allocate from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
* top-level interrupt allocation).
*/
int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
struct device *dev = data->dev;
return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
| linux-master | drivers/base/platform-msi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Basic Node interface support
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
static struct bus_type node_subsys = {
.name = "node",
.dev_name = "node",
};
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct node *node_dev = to_node(dev);
cpumask_var_t mask;
ssize_t n;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return 0;
cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
free_cpumask_var(mask);
return n;
}
static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct node *node_dev = to_node(dev);
cpumask_var_t mask;
ssize_t n;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return 0;
cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
n = cpumap_print_list_to_buf(buf, mask, off, count);
free_cpumask_var(mask);
return n;
}
static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
* relationships to other nodes.
* @dev: Device for this memory access class
* @list_node: List element in the node's access list
* @access: The access class rank
* @hmem_attrs: Heterogeneous memory performance attributes
*/
struct node_access_nodes {
struct device dev;
struct list_head list_node;
unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
struct node_hmem_attrs hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
static struct attribute *node_init_access_node_attrs[] = {
NULL,
};
static struct attribute *node_targ_access_node_attrs[] = {
NULL,
};
static const struct attribute_group initiators = {
.name = "initiators",
.attrs = node_init_access_node_attrs,
};
static const struct attribute_group targets = {
.name = "targets",
.attrs = node_targ_access_node_attrs,
};
static const struct attribute_group *node_access_node_groups[] = {
&initiators,
&targets,
NULL,
};
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
list_del(&c->list_node);
device_unregister(&c->dev);
}
}
static void node_access_release(struct device *dev)
{
kfree(to_access_nodes(dev));
}
static struct node_access_nodes *node_init_node_access(struct node *node,
unsigned int access)
{
struct node_access_nodes *access_node;
struct device *dev;
list_for_each_entry(access_node, &node->access_list, list_node)
if (access_node->access == access)
return access_node;
access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
if (!access_node)
return NULL;
access_node->access = access;
dev = &access_node->dev;
dev->parent = &node->dev;
dev->release = node_access_release;
dev->groups = node_access_node_groups;
if (dev_set_name(dev, "access%u", access))
goto free;
if (device_register(dev))
goto free_name;
pm_runtime_no_callbacks(dev);
list_add_tail(&access_node->list_node, &node->access_list);
return access_node;
free_name:
kfree_const(dev->kobj.name);
free:
kfree(access_node);
return NULL;
}
#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(property) \
static ssize_t property##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sysfs_emit(buf, "%u\n", \
to_access_nodes(dev)->hmem_attrs.property); \
} \
static DEVICE_ATTR_RO(property)
ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);
static struct attribute *access_attrs[] = {
&dev_attr_read_bandwidth.attr,
&dev_attr_read_latency.attr,
&dev_attr_write_bandwidth.attr,
&dev_attr_write_latency.attr,
NULL,
};
/**
* node_set_perf_attrs - Set the performance values for given access class
* @nid: Node identifier to be set
* @hmem_attrs: Heterogeneous memory performance attributes
* @access: The access class for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
unsigned int access)
{
struct node_access_nodes *c;
struct node *node;
int i;
if (WARN_ON_ONCE(!node_online(nid)))
return;
node = node_devices[nid];
c = node_init_node_access(node, access);
if (!c)
return;
c->hmem_attrs = *hmem_attrs;
for (i = 0; access_attrs[i] != NULL; i++) {
if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
"initiators")) {
pr_info("failed to add performance attribute to node %d\n",
nid);
break;
}
}
}
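/*
 * The attributes registered above surface in sysfs roughly as (paths
 * shown for illustration only):
 *
 *	/sys/devices/system/node/nodeY/accessZ/initiators/read_bandwidth
 *	/sys/devices/system/node/nodeY/accessZ/initiators/write_latency
 */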
/**
* struct node_cache_info - Internal tracking for memory node caches
* @dev: Device representing the cache level
* @node: List element for tracking in the node
* @cache_attrs: Attributes for this cache level
*/
struct node_cache_info {
struct device dev;
struct list_head node;
struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
#define CACHE_ATTR(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sysfs_emit(buf, fmt "\n", \
to_cache_info(dev)->cache_attrs.name); \
} \
static DEVICE_ATTR_RO(name);
CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
static struct attribute *cache_attrs[] = {
&dev_attr_indexing.attr,
&dev_attr_size.attr,
&dev_attr_line_size.attr,
&dev_attr_write_policy.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache);
static void node_cache_release(struct device *dev)
{
kfree(dev);
}
static void node_cacheinfo_release(struct device *dev)
{
struct node_cache_info *info = to_cache_info(dev);
kfree(info);
}
static void node_init_cache_dev(struct node *node)
{
struct device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
device_initialize(dev);
dev->parent = &node->dev;
dev->release = node_cache_release;
if (dev_set_name(dev, "memory_side_cache"))
goto put_device;
if (device_add(dev))
goto put_device;
pm_runtime_no_callbacks(dev);
node->cache_dev = dev;
return;
put_device:
put_device(dev);
}
/**
* node_add_cache() - add cache attribute to a memory node
* @nid: Node identifier that has new cache attributes
* @cache_attrs: Attributes for the cache being added
*/
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
struct node_cache_info *info;
struct device *dev;
struct node *node;
if (!node_online(nid) || !node_devices[nid])
return;
node = node_devices[nid];
list_for_each_entry(info, &node->cache_attrs, node) {
if (info->cache_attrs.level == cache_attrs->level) {
dev_warn(&node->dev,
"attempt to add duplicate cache level:%d\n",
cache_attrs->level);
return;
}
}
if (!node->cache_dev)
node_init_cache_dev(node);
if (!node->cache_dev)
return;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return;
dev = &info->dev;
device_initialize(dev);
dev->parent = node->cache_dev;
dev->release = node_cacheinfo_release;
dev->groups = cache_groups;
if (dev_set_name(dev, "index%d", cache_attrs->level))
goto put_device;
info->cache_attrs = *cache_attrs;
if (device_add(dev)) {
dev_warn(&node->dev, "failed to add cache level:%d\n",
cache_attrs->level);
goto put_device;
}
pm_runtime_no_callbacks(dev);
list_add_tail(&info->node, &node->cache_attrs);
return;
put_device:
put_device(dev);
}
static void node_remove_caches(struct node *node)
{
struct node_cache_info *info, *next;
if (!node->cache_dev)
return;
list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
list_del(&info->node);
device_unregister(&info->dev);
}
device_unregister(node->cache_dev);
}
static void node_init_caches(unsigned int nid)
{
INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len = 0;
int nid = dev->id;
struct pglist_data *pgdat = NODE_DATA(nid);
struct sysinfo i;
unsigned long sreclaimable, sunreclaimable;
unsigned long swapcached = 0;
si_meminfo_node(&i, nid);
sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
len = sysfs_emit_at(buf, len,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
"Node %d SwapCached: %8lu kB\n"
"Node %d Active: %8lu kB\n"
"Node %d Inactive: %8lu kB\n"
"Node %d Active(anon): %8lu kB\n"
"Node %d Inactive(anon): %8lu kB\n"
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
"Node %d Unevictable: %8lu kB\n"
"Node %d Mlocked: %8lu kB\n",
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
nid, K(swapcached),
nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
node_page_state(pgdat, NR_ACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
node_page_state(pgdat, NR_INACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
#ifdef CONFIG_HIGHMEM
len += sysfs_emit_at(buf, len,
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
"Node %d LowTotal: %8lu kB\n"
"Node %d LowFree: %8lu kB\n",
nid, K(i.totalhigh),
nid, K(i.freehigh),
nid, K(i.totalram - i.totalhigh),
nid, K(i.freeram - i.freehigh));
#endif
len += sysfs_emit_at(buf, len,
"Node %d Dirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
"Node %d Shmem: %8lu kB\n"
"Node %d KernelStack: %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
"Node %d ShadowCallStack:%8lu kB\n"
#endif
"Node %d PageTables: %8lu kB\n"
"Node %d SecPageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
"Node %d KReclaimable: %8lu kB\n"
"Node %d Slab: %8lu kB\n"
"Node %d SReclaimable: %8lu kB\n"
"Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"Node %d AnonHugePages: %8lu kB\n"
"Node %d ShmemHugePages: %8lu kB\n"
"Node %d ShmemPmdMapped: %8lu kB\n"
"Node %d FileHugePages: %8lu kB\n"
"Node %d FilePmdMapped: %8lu kB\n"
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
"Node %d Unaccepted: %8lu kB\n"
#endif
,
nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
nid, K(node_page_state(pgdat, NR_WRITEBACK)),
nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
nid, K(sreclaimable),
nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
,
nid, K(node_page_state(pgdat, NR_ANON_THPS)),
nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
nid, K(node_page_state(pgdat, NR_FILE_THPS)),
nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
,
nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
#endif
);
len += hugetlb_report_node_meminfo(buf, len, nid);
return len;
}
#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
static ssize_t node_read_numastat(struct device *dev,
struct device_attribute *attr, char *buf)
{
fold_vm_numa_events();
return sysfs_emit(buf,
"numa_hit %lu\n"
"numa_miss %lu\n"
"numa_foreign %lu\n"
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
sum_zone_numa_event_state(dev->id, NUMA_HIT),
sum_zone_numa_event_state(dev->id, NUMA_MISS),
sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
static ssize_t node_read_vmstat(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nid = dev->id;
struct pglist_data *pgdat = NODE_DATA(nid);
int i;
int len = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
len += sysfs_emit_at(buf, len, "%s %lu\n",
zone_stat_name(i),
sum_zone_node_page_state(nid, i));
#ifdef CONFIG_NUMA
fold_vm_numa_events();
for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
len += sysfs_emit_at(buf, len, "%s %lu\n",
numa_stat_name(i),
sum_zone_numa_event_state(nid, i));
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
unsigned long pages = node_page_state_pages(pgdat, i);
if (vmstat_item_print_in_thp(i))
pages /= HPAGE_PMD_NR;
len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
pages);
}
return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);
static ssize_t node_read_distance(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nid = dev->id;
int len = 0;
int i;
/*
* buf is currently PAGE_SIZE in length and each node needs 4 chars
* at the most (distance + space or newline).
*/
BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
for_each_online_node(i) {
len += sysfs_emit_at(buf, len, "%s%d",
i ? " " : "", node_distance(nid, i));
}
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
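/*
 * Example of the emitted format on a hypothetical two-node machine
 * (local distance 10, remote distance 20):
 *
 *	$ cat /sys/devices/system/node/node0/distance
 *	10 20
 */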
static struct attribute *node_dev_attrs[] = {
&dev_attr_meminfo.attr,
&dev_attr_numastat.attr,
&dev_attr_distance.attr,
&dev_attr_vmstat.attr,
NULL
};
static struct bin_attribute *node_dev_bin_attrs[] = {
&bin_attr_cpumap,
&bin_attr_cpulist,
NULL
};
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
.bin_attrs = node_dev_bin_attrs
};
static const struct attribute_group *node_dev_groups[] = {
&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
&arch_node_dev_group,
#endif
#ifdef CONFIG_MEMORY_FAILURE
&memory_failure_attr_group,
#endif
NULL
};
static void node_device_release(struct device *dev)
{
kfree(to_node(dev));
}
/*
* register_node - Setup a sysfs device for a node.
* @num: Node number to use when creating the device.
*
* Initialize and register the node device.
*/
static int register_node(struct node *node, int num)
{
int error;
node->dev.id = num;
node->dev.bus = &node_subsys;
node->dev.release = node_device_release;
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
if (error) {
put_device(&node->dev);
} else {
hugetlb_register_node(node);
compaction_register_node(node);
}
return error;
}
/**
* unregister_node - unregister a node device
* @node: node going away
*
* Unregisters a node device @node. All the devices on the node must be
* unregistered before calling this function.
*/
void unregister_node(struct node *node)
{
hugetlb_unregister_node(node);
compaction_unregister_node(node);
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
}
struct node *node_devices[MAX_NUMNODES];
/*
* register cpu under node
*/
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
struct device *obj;
if (!node_online(nid))
return 0;
obj = get_cpu_device(cpu);
if (!obj)
return 0;
ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
&node_devices[nid]->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
}
/**
* register_memory_node_under_compute_node - link memory node to its compute
* node for a given access class.
* @mem_nid: Memory node number
* @cpu_nid: Cpu node number
* @access: Access class to register
*
* Description:
* For use with platforms that may have separate memory and compute nodes.
* This function exports the node relationships that describe which
* memory initiator nodes can access which memory target nodes, at a
* given ranked access class.
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
unsigned int access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;
int ret;
if (!node_online(cpu_nid) || !node_online(mem_nid))
return -ENODEV;
init_node = node_devices[cpu_nid];
targ_node = node_devices[mem_nid];
initiator = node_init_node_access(init_node, access);
target = node_init_node_access(targ_node, access);
if (!initiator || !target)
return -ENOMEM;
ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
&targ_node->dev.kobj,
dev_name(&targ_node->dev));
if (ret)
return ret;
ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
&init_node->dev.kobj,
dev_name(&init_node->dev));
if (ret)
goto err;
return 0;
err:
sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
dev_name(&targ_node->dev));
return ret;
}
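/*
 * Illustrative result: for mem_nid == 1, cpu_nid == 0 and access == 0,
 * the function ends up with symlinks roughly like:
 *
 *	node0/access0/targets/node1
 *	node1/access0/initiators/node0
 */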
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
struct device *obj;
if (!node_online(nid))
return 0;
obj = get_cpu_device(cpu);
if (!obj)
return 0;
sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
kobject_name(&node_devices[nid]->dev.kobj));
return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
if (system_state < SYSTEM_RUNNING)
return early_pfn_to_nid(pfn);
#endif
return pfn_to_nid(pfn);
}
static void do_register_memory_block_under_node(int nid,
struct memory_block *mem_blk,
enum meminit_context context)
{
int ret;
memory_block_add_nid(mem_blk, nid, context);
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
if (ret && ret != -EEXIST)
dev_err_ratelimited(&node_devices[nid]->dev,
"can't create link to %s in sysfs (%d)\n",
kobject_name(&mem_blk->dev.kobj), ret);
ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
&node_devices[nid]->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
if (ret && ret != -EEXIST)
dev_err_ratelimited(&mem_blk->dev,
"can't create link to %s in sysfs (%d)\n",
kobject_name(&node_devices[nid]->dev.kobj),
ret);
}
/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
void *arg)
{
unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
int nid = *(int *)arg;
unsigned long pfn;
for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
int page_nid;
/*
* A memory block could have several absent sections from the start.
* Skip the pfn range of an absent section.
*/
if (!pfn_in_present_section(pfn)) {
pfn = round_down(pfn + PAGES_PER_SECTION,
PAGES_PER_SECTION) - 1;
continue;
}
/*
* We need to check if the page belongs to 'nid' only in the boot
* case, because the nodes' ranges can be interleaved.
*/
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
return 0;
}
/* mem section does not span the specified node */
return 0;
}
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
*/
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
void *arg)
{
int nid = *(int *)arg;
do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
return 0;
}
/*
* Unregister a memory block device under the node it spans. Memory blocks
* with multiple nodes cannot be offlined and therefore can never be removed.
*/
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
if (mem_blk->nid == NUMA_NO_NODE)
return;
sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
unsigned long end_pfn,
enum meminit_context context)
{
walk_memory_blocks_func_t func;
if (context == MEMINIT_HOTPLUG)
func = register_mem_block_under_node_hotplug;
else
func = register_mem_block_under_node_early;
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
(void *)&nid, func);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
int __register_one_node(int nid)
{
int error;
int cpu;
node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
if (!node_devices[nid])
return -ENOMEM;
error = register_node(node_devices[nid], nid);
/* link cpu under this node */
for_each_present_cpu(cpu) {
if (cpu_to_node(cpu) == nid)
register_cpu_under_node(cpu, nid);
}
INIT_LIST_HEAD(&node_devices[nid]->access_list);
node_init_caches(nid);
return error;
}
void unregister_one_node(int nid)
{
if (!node_devices[nid])
return;
unregister_node(node_devices[nid]);
node_devices[nid] = NULL;
}
/*
* node states attributes
*/
struct node_attr {
struct device_attribute attr;
enum node_states state;
};
static ssize_t show_node_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
return sysfs_emit(buf, "%*pbl\n",
nodemask_pr_args(&node_states[na->state]));
}
#define _NODE_ATTR(name, state) \
{ __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
N_GENERIC_INITIATOR),
};
static struct attribute *node_state_attrs[] = {
&node_state_attr[N_POSSIBLE].attr.attr,
&node_state_attr[N_ONLINE].attr.attr,
&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
&node_state_attr[N_MEMORY].attr.attr,
&node_state_attr[N_CPU].attr.attr,
&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
NULL
};
static const struct attribute_group memory_root_attr_group = {
.attrs = node_state_attrs,
};
static const struct attribute_group *cpu_root_attr_groups[] = {
&memory_root_attr_group,
NULL,
};
void __init node_dev_init(void)
{
int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
* Create all node devices, which will properly link the node
* to applicable memory block devices and already created cpu devices.
*/
for_each_online_node(i) {
ret = register_one_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
}
| linux-master | drivers/base/node.c |
// SPDX-License-Identifier: GPL-2.0
/*
* driver.c - centralized device driver management
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2007 Novell Inc.
*/
#include <linux/device/driver.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include "base.h"
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
struct device *dev = NULL;
struct device_private *dev_prv;
if (n) {
dev_prv = to_device_private_driver(n);
dev = dev_prv->device;
}
return dev;
}
/**
* driver_set_override() - Helper to set or clear driver override.
* @dev: Device to change
* @override: Address of string to change (e.g. &device->driver_override);
* The contents will be freed and hold newly allocated override.
* @s: NUL-terminated string, new driver name to force a match, pass empty
* string to clear it ("" or "\n", where the latter is only for sysfs
* interface).
* @len: length of @s
*
* Helper to set or clear driver override in a device, intended for the cases
* when the driver_override field is allocated by driver/bus code.
*
* Returns: 0 on success or a negative error code on failure.
*/
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len)
{
const char *new, *old;
char *cp;
if (!override || !s)
return -EINVAL;
/*
* The stored value will be used in sysfs show callback (sysfs_emit()),
* which has a length limit of PAGE_SIZE and adds a trailing newline.
* Thus we can store one character less to avoid truncation during sysfs
* show.
*/
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
/*
* Compute the real length of the string in case userspace sends us a
* bunch of \0 characters like python likes to do.
*/
len = strlen(s);
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
old = *override;
*override = NULL;
device_unlock(dev);
kfree(old);
return 0;
}
cp = strnchr(s, len, '\n');
if (cp)
len = cp - s;
new = kstrndup(s, len, GFP_KERNEL);
if (!new)
return -ENOMEM;
device_lock(dev);
old = *override;
if (cp != s) {
*override = new;
} else {
/* "\n" passed - clear override */
kfree(new);
*override = NULL;
}
device_unlock(dev);
kfree(old);
return 0;
}
EXPORT_SYMBOL_GPL(driver_set_override);
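/*
 * Typical sysfs-driven usage (illustrative shell transcript; the bus and
 * device paths are hypothetical):
 *
 *	echo vfio-pci > /sys/bus/pci/devices/0000:01:00.0/driver_override
 *	echo > /sys/bus/pci/devices/0000:01:00.0/driver_override
 *
 * where the second write clears the override again.
 */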
/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.
* @start: Device to begin with
* @data: Data to pass to the callback.
* @fn: Function to call for each device.
*
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
struct klist_iter i;
struct device *dev;
int error = 0;
if (!drv)
return -EINVAL;
klist_iter_init_node(&drv->p->klist_devices, &i,
start ? &start->p->knode_driver : NULL);
while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(driver_for_each_device);
/**
* driver_find_device - device iterator for locating a particular device.
* @drv: The device's driver
* @start: Device to begin with
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This is similar to the driver_for_each_device() function above, but
* it returns a reference to a device that is 'found' for later use, as
* determined by the @match callback.
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
struct device *driver_find_device(struct device_driver *drv,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data))
{
struct klist_iter i;
struct device *dev;
if (!drv || !drv->p)
return NULL;
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
klist_iter_exit(&i);
return dev;
}
EXPORT_SYMBOL_GPL(driver_find_device);
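/*
 * Sketch of a match callback as consumed above; match_name and the
 * "foo.0" device name are hypothetical:
 *
 *	static int match_name(struct device *dev, const void *data)
 *	{
 *		return sysfs_streq(dev_name(dev), data);
 *	}
 *
 *	dev = driver_find_device(drv, NULL, "foo.0", match_name);
 *	...
 *	put_device(dev);	(drop the reference when done)
 */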
/**
* driver_create_file - create sysfs file for driver.
* @drv: driver.
* @attr: driver attribute descriptor.
*/
int driver_create_file(struct device_driver *drv,
const struct driver_attribute *attr)
{
int error;
if (drv)
error = sysfs_create_file(&drv->p->kobj, &attr->attr);
else
error = -EINVAL;
return error;
}
EXPORT_SYMBOL_GPL(driver_create_file);
/**
* driver_remove_file - remove sysfs file for driver.
* @drv: driver.
* @attr: driver attribute descriptor.
*/
void driver_remove_file(struct device_driver *drv,
const struct driver_attribute *attr)
{
if (drv)
sysfs_remove_file(&drv->p->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(driver_remove_file);
int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
return sysfs_create_groups(&drv->p->kobj, groups);
}
void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
sysfs_remove_groups(&drv->p->kobj, groups);
}
/**
* driver_register - register driver with bus
* @drv: driver to register
*
* We pass off most of the work to the bus_add_driver() call,
* since most of the things we have to do deal with the bus
* structures.
*/
int driver_register(struct device_driver *drv)
{
int ret;
struct device_driver *other;
if (!bus_is_registered(drv->bus)) {
pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n",
drv->name, drv->bus->name);
return -EINVAL;
}
if ((drv->bus->probe && drv->probe) ||
(drv->bus->remove && drv->remove) ||
(drv->bus->shutdown && drv->shutdown))
pr_warn("Driver '%s' needs updating - please use "
"bus_type methods\n", drv->name);
other = driver_find(drv->name, drv->bus);
if (other) {
pr_err("Error: Driver '%s' is already registered, "
"aborting...\n", drv->name);
return -EBUSY;
}
ret = bus_add_driver(drv);
if (ret)
return ret;
ret = driver_add_groups(drv, drv->groups);
if (ret) {
bus_remove_driver(drv);
return ret;
}
kobject_uevent(&drv->p->kobj, KOBJ_ADD);
deferred_probe_extend_timeout();
return ret;
}
EXPORT_SYMBOL_GPL(driver_register);
/**
* driver_unregister - remove driver from system.
* @drv: driver.
*
* Again, we pass off most of the work to the bus-level call.
*/
void driver_unregister(struct device_driver *drv)
{
if (!drv || !drv->p) {
WARN(1, "Unexpected driver unregister!\n");
return;
}
driver_remove_groups(drv, drv->groups);
bus_remove_driver(drv);
}
EXPORT_SYMBOL_GPL(driver_unregister);
| linux-master | drivers/base/driver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* NUMA support, based on the x86 implementation.
*
* Copyright (C) 2015 Cavium Inc.
* Author: Ganapatrao Kulkarni <[email protected]>
*/
#define pr_fmt(fmt) "NUMA: " fmt
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/sections.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
static int numa_distance_cnt;
static u8 *numa_distance;
bool numa_off;
static __init int numa_parse_early_param(char *opt)
{
if (!opt)
return -EINVAL;
if (str_has_prefix(opt, "off"))
numa_off = true;
return 0;
}
early_param("numa", numa_parse_early_param);
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
* Returns a pointer to the bitmask of CPUs on Node 'node'.
*/
const struct cpumask *cpumask_of_node(int node)
{
if (node == NUMA_NO_NODE)
return cpu_all_mask;
if (WARN_ON(node < 0 || node >= nr_node_ids))
return cpu_none_mask;
if (WARN_ON(node_to_cpumask_map[node] == NULL))
return cpu_online_mask;
return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
#endif
static void numa_update_cpu(unsigned int cpu, bool remove)
{
int nid = cpu_to_node(cpu);
if (nid == NUMA_NO_NODE)
return;
if (remove)
cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
else
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
void numa_add_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, false);
}
void numa_remove_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, true);
}
void numa_clear_node(unsigned int cpu)
{
numa_remove_cpu(cpu);
set_cpu_numa_node(cpu, NUMA_NO_NODE);
}
/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
*
* Note: cpumask_of_node() is not valid until after this is done.
* (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
*/
static void __init setup_node_to_cpumask_map(void)
{
int node;
/* setup nr_node_ids if not done yet */
if (nr_node_ids == MAX_NUMNODES)
setup_nr_node_ids();
/* allocate and clear the mapping */
for (node = 0; node < nr_node_ids; node++) {
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
cpumask_clear(node_to_cpumask_map[node]);
}
/* cpumask_of_node() will now work */
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
/*
* Set the cpu to node and mem mapping
*/
void numa_store_cpu_info(unsigned int cpu)
{
set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
}
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
{
/* fallback to node 0 */
if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
nid = 0;
cpu_to_node_map[cpu] = nid;
/*
* We should set the numa node of cpu0 as soon as possible, because it
* has already been set up online before. cpu_to_node(0) will soon be
* called.
*/
if (!cpu)
set_cpu_numa_node(cpu, nid);
}
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static int __init early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
/*
* Always reserve area for module percpu variables. That's
* what the legacy allocator did.
*/
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
early_cpu_to_node);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
#endif
}
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
#endif
if (rc < 0)
panic("Failed to initialize percpu areas (err=%d).", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
/**
* numa_add_memblk() - Set node id to memblk
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
* @end: End address of the new memblk
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
int ret;
ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
if (ret < 0) {
pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
start, (end - 1), nid);
return ret;
}
node_set(nid, numa_nodes_parsed);
return ret;
}
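/*
 * Example (an illustrative sketch with hypothetical addresses): the ACPI SRAT
 * or devicetree parsers invoked from arch_numa_init() below are expected to
 * call this once per discovered memory range, with an exclusive @end, e.g.
 * for 1 GiB on node 1:
 *
 *	ret = numa_add_memblk(1, 0x80000000ULL, 0xc0000000ULL);
 *	if (ret)
 *		pr_err("could not record memblk for node 1\n");
 */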
/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
u64 nd_pa;
void *nd;
int tnid;
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa)
panic("Cannot allocate %zu bytes for node %d data\n",
nd_size, nid);
nd = __va(nd_pa);
/* report and initialize */
pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
nd_pa, nd_pa + nd_size - 1);
tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
if (tnid != nid)
pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
node_data[nid] = nd;
memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
/*
* numa_free_distance
*
* The current table is freed.
*/
void __init numa_free_distance(void)
{
size_t size;
if (!numa_distance)
return;
size = numa_distance_cnt * numa_distance_cnt *
sizeof(numa_distance[0]);
memblock_free(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;
}
/*
* Create a new NUMA distance table.
*/
static int __init numa_alloc_distance(void)
{
size_t size;
int i, j;
size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
numa_distance = memblock_alloc(size, PAGE_SIZE);
if (WARN_ON(!numa_distance))
return -ENOMEM;
numa_distance_cnt = nr_node_ids;
/* fill with the default distances */
for (i = 0; i < numa_distance_cnt; i++)
for (j = 0; j < numa_distance_cnt; j++)
numa_distance[i * numa_distance_cnt + j] = i == j ?
LOCAL_DISTANCE : REMOTE_DISTANCE;
pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
return 0;
}
/**
* numa_set_distance() - Set inter node NUMA distance from node to node.
* @from: the 'from' node to set distance
* @to: the 'to' node to set distance
* @distance: NUMA distance
*
* Set the distance from node @from to @to to @distance.
* If distance table doesn't exist, a warning is printed.
*
* If @from or @to is higher than the highest known node or lower than zero
* or @distance doesn't make sense, the call is ignored.
*/
void __init numa_set_distance(int from, int to, int distance)
{
if (!numa_distance) {
pr_warn_once("Warning: distance table not allocated yet\n");
return;
}
if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
from < 0 || to < 0) {
pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
from, to, distance);
return;
}
if ((u8)distance != distance ||
(from == to && distance != LOCAL_DISTANCE)) {
pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
from, to, distance);
return;
}
numa_distance[from * numa_distance_cnt + to] = distance;
}
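/*
 * Example (an illustrative sketch): after numa_alloc_distance() has filled in
 * the defaults, a firmware table parser (ACPI SLIT or devicetree) refines
 * individual entries, e.g. for two hypothetical nodes 20 apart:
 *
 *	numa_set_distance(0, 0, LOCAL_DISTANCE);
 *	numa_set_distance(0, 1, 20);
 *	numa_set_distance(1, 0, 20);
 *	numa_set_distance(1, 1, LOCAL_DISTANCE);
 */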
/*
* Return NUMA distance @from to @to
*/
int __node_distance(int from, int to)
{
if (from >= numa_distance_cnt || to >= numa_distance_cnt)
return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
static int __init numa_register_nodes(void)
{
int nid;
struct memblock_region *mblk;
/* Check that a valid node id is set for all memblks */
for_each_mem_region(mblk) {
int mblk_nid = memblock_get_region_node(mblk);
phys_addr_t start = mblk->base;
phys_addr_t end = mblk->base + mblk->size - 1;
if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
mblk_nid, &start, &end);
return -EINVAL;
}
}
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
setup_node_data(nid, start_pfn, end_pfn);
node_set_online(nid);
}
/* Set node_possible_map to the nodes that were actually parsed */
node_possible_map = numa_nodes_parsed;
return 0;
}
static int __init numa_init(int (*init_func)(void))
{
int ret;
nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
ret = numa_alloc_distance();
if (ret < 0)
return ret;
ret = init_func();
if (ret < 0)
goto out_free_distance;
if (nodes_empty(numa_nodes_parsed)) {
pr_info("No NUMA configuration found\n");
ret = -EINVAL;
goto out_free_distance;
}
ret = numa_register_nodes();
if (ret < 0)
goto out_free_distance;
setup_node_to_cpumask_map();
return 0;
out_free_distance:
numa_free_distance();
return ret;
}
/**
* dummy_numa_init() - Fallback dummy NUMA init
*
* Used if there's no underlying NUMA architecture, NUMA initialization
* fails, or NUMA is disabled on the command line.
*
* Must online at least one node (node 0) and add memory blocks that cover all
* allowed memory. It is unlikely that this function fails.
*
* Return: 0 on success, -errno on failure.
*/
static int __init dummy_numa_init(void)
{
phys_addr_t start = memblock_start_of_DRAM();
phys_addr_t end = memblock_end_of_DRAM() - 1;
int ret;
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
ret = numa_add_memblk(0, start, end + 1);
if (ret) {
pr_err("NUMA init failed\n");
return ret;
}
numa_off = true;
return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int __init arch_acpi_numa_init(void)
{
int ret;
ret = acpi_numa_init();
if (ret) {
pr_info("Failed to initialise from firmware\n");
return ret;
}
return srat_disabled() ? -EINVAL : 0;
}
#else
static int __init arch_acpi_numa_init(void)
{
return -EOPNOTSUPP;
}
#endif
/**
* arch_numa_init() - Initialize NUMA
*
* Try each configured NUMA initialization method until one succeeds. The
* last fallback is dummy single node config encompassing whole memory.
*/
void __init arch_numa_init(void)
{
if (!numa_off) {
if (!acpi_disabled && !numa_init(arch_acpi_numa_init))
return;
if (acpi_disabled && !numa_init(of_numa_init))
return;
}
numa_init(dummy_numa_init);
}
| linux-master | drivers/base/arch_numa.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020 Intel Corporation
*
* Please see Documentation/driver-api/auxiliary_bus.rst for more information.
*/
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/auxiliary_bus.h>
#include "base.h"
/**
* DOC: PURPOSE
*
* In some subsystems, the functionality of the core device (PCI/ACPI/other) is
* too complex for a single device to be managed by a monolithic driver (e.g.
* Sound Open Firmware), multiple devices might implement a common intersection
* of functionality (e.g. NICs + RDMA), or a driver may want to export an
* interface for another subsystem to drive (e.g. SIOV Physical Function export
* Virtual Function management). A split of the functionality into child-
* devices representing sub-domains of functionality makes it possible to
* compartmentalize, layer, and distribute domain-specific concerns via a Linux
* device-driver model.
*
* An example for this kind of requirement is the audio subsystem where a
* single IP is handling multiple entities such as HDMI, Soundwire, local
* devices such as mics/speakers etc. The split for the core's functionality
* can be arbitrary or be defined by the DSP firmware topology and include
* hooks for test/debug. This allows for the audio core device to be minimal
* and focused on hardware-specific control and communication.
*
* Each auxiliary_device represents a part of its parent functionality. The
* generic behavior can be extended and specialized as needed by encapsulating
* an auxiliary_device within other domain-specific structures and the use of
* .ops callbacks. Devices on the auxiliary bus do not share any structures and
* the use of a communication channel with the parent is domain-specific.
*
 * Note that ops are intended as a way to augment instance behavior within a
 * class of auxiliary devices; they are not the mechanism for exporting common
 * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
* infrastructure from the parent module to the auxiliary module(s).
*/
/**
* DOC: USAGE
*
* The auxiliary bus is to be used when a driver and one or more kernel
 * modules, which share a common header file with the driver, need a mechanism to
* connect and provide access to a shared object allocated by the
* auxiliary_device's registering driver. The registering driver for the
* auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
* can be from the same subsystem, or from multiple subsystems.
*
* The emphasis here is on a common generic interface that keeps subsystem
* customization out of the bus infrastructure.
*
* One example is a PCI network device that is RDMA-capable and exports a child
* device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
* driver allocates and registers an auxiliary_device for each physical
* function on the NIC. The RDMA driver registers an auxiliary_driver that
* claims each of these auxiliary_devices. This conveys data/ops published by
* the parent PCI device/driver to the RDMA auxiliary_driver.
*
 * Another use case is for the PCI device to be split out into multiple sub
 * functions. For each sub function an auxiliary_device is created. A PCI sub
 * function driver binds to such devices and creates one or more class devices
 * of its own. A PCI sub function auxiliary device is likely to be contained in a
* struct with additional attributes such as user defined sub function number
* and optional attributes such as resources and a link to the parent device.
* These attributes could be used by systemd/udev; and hence should be
* initialized before a driver binds to an auxiliary_device.
*
* A key requirement for utilizing the auxiliary bus is that there is no
* dependency on a physical bus, device, register accesses or regmap support.
* These individual devices split from the core cannot live on the platform bus
* as they are not physical devices that are controlled by DT/ACPI. The same
* argument applies for not using MFD in this scenario as MFD relies on
* individual function devices being physical devices.
*/
/**
* DOC: EXAMPLE
*
* Auxiliary devices are created and registered by a subsystem-level core
* device that needs to break up its functionality into smaller fragments. One
* way to extend the scope of an auxiliary_device is to encapsulate it within a
 * domain-specific structure defined by the parent device. This structure
* contains the auxiliary_device and any associated shared data/callbacks
* needed to establish the connection with the parent.
*
* An example is:
*
* .. code-block:: c
*
* struct foo {
* struct auxiliary_device auxdev;
* void (*connect)(struct auxiliary_device *auxdev);
* void (*disconnect)(struct auxiliary_device *auxdev);
* void *data;
* };
*
* The parent device then registers the auxiliary_device by calling
* auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
* to the auxdev member of the above structure. The parent provides a name for
* the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
 * creates a match_name that is used for matching and binding with a driver.
*
* Whenever an auxiliary_driver is registered, based on the match_name, the
* auxiliary_driver's probe() is invoked for the matching devices. The
* auxiliary_driver can also be encapsulated inside custom drivers that make
* the core device's functionality extensible by adding additional
* domain-specific ops as follows:
*
* .. code-block:: c
*
* struct my_ops {
* void (*send)(struct auxiliary_device *auxdev);
* void (*receive)(struct auxiliary_device *auxdev);
* };
*
*
* struct my_driver {
* struct auxiliary_driver auxiliary_drv;
* const struct my_ops ops;
* };
*
* An example of this type of usage is:
*
* .. code-block:: c
*
* const struct auxiliary_device_id my_auxiliary_id_table[] = {
* { .name = "foo_mod.foo_dev" },
* { },
* };
*
* const struct my_ops my_custom_ops = {
* .send = my_tx,
* .receive = my_rx,
* };
*
* const struct my_driver my_drv = {
* .auxiliary_drv = {
* .name = "myauxiliarydrv",
* .id_table = my_auxiliary_id_table,
* .probe = my_probe,
* .remove = my_remove,
* .shutdown = my_shutdown,
* },
* .ops = my_custom_ops,
* };
*/
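/*
 * A sketch (not part of the original example) of how the driver above would
 * typically be registered from module init code, via the
 * auxiliary_driver_register() convenience macro. Note that in practice my_drv
 * must be writable, since the driver core initializes fields of the embedded
 * auxiliary_driver during registration:
 *
 *	static int __init my_drv_init(void)
 *	{
 *		return auxiliary_driver_register(&my_drv.auxiliary_drv);
 *	}
 *
 *	static void __exit my_drv_exit(void)
 *	{
 *		auxiliary_driver_unregister(&my_drv.auxiliary_drv);
 *	}
 */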
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
for (; id->name[0]; id++) {
const char *p = strrchr(dev_name(&auxdev->dev), '.');
int match_size;
if (!p)
continue;
match_size = p - dev_name(&auxdev->dev);
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
!strncmp(dev_name(&auxdev->dev), id->name, match_size))
return id;
}
return NULL;
}
static int auxiliary_match(struct device *dev, struct device_driver *drv)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
static int auxiliary_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const char *name, *p;
name = dev_name(dev);
p = strrchr(name, '.');
return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX,
(int)(p - name), name);
}
static const struct dev_pm_ops auxiliary_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
};
static int auxiliary_bus_probe(struct device *dev)
{
struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
ret = dev_pm_domain_attach(dev, true);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
if (ret)
dev_pm_domain_detach(dev, true);
return ret;
}
static void auxiliary_bus_remove(struct device *dev)
{
struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
if (auxdrv->remove)
auxdrv->remove(auxdev);
dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
{
struct auxiliary_driver *auxdrv = NULL;
struct auxiliary_device *auxdev;
if (dev->driver) {
auxdrv = to_auxiliary_drv(dev->driver);
auxdev = to_auxiliary_dev(dev);
}
if (auxdrv && auxdrv->shutdown)
auxdrv->shutdown(auxdev);
}
static struct bus_type auxiliary_bus_type = {
.name = "auxiliary",
.probe = auxiliary_bus_probe,
.remove = auxiliary_bus_remove,
.shutdown = auxiliary_bus_shutdown,
.match = auxiliary_match,
.uevent = auxiliary_uevent,
.pm = &auxiliary_dev_pm_ops,
};
/**
* auxiliary_device_init - check auxiliary_device and initialize
* @auxdev: auxiliary device struct
*
* This is the second step in the three-step process to register an
* auxiliary_device.
*
 * When this function returns an error code, device_initialize() will *not*
 * have been performed, and the caller is responsible for directly freeing any
 * memory allocated for the auxiliary_device in the error path.
*
* It returns 0 on success. On success, the device_initialize has been
* performed. After this point any error unwinding will need to include a call
* to auxiliary_device_uninit(). In this post-initialize error scenario, a call
* to the device's .release callback will be triggered, and all memory clean-up
* is expected to be handled there.
*/
int auxiliary_device_init(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
if (!dev->parent) {
pr_err("auxiliary_device has a NULL dev->parent\n");
return -EINVAL;
}
if (!auxdev->name) {
pr_err("auxiliary_device has a NULL name\n");
return -EINVAL;
}
dev->bus = &auxiliary_bus_type;
device_initialize(&auxdev->dev);
return 0;
}
EXPORT_SYMBOL_GPL(auxiliary_device_init);
/**
* __auxiliary_device_add - add an auxiliary bus device
* @auxdev: auxiliary bus device to add to the bus
* @modname: name of the parent device's driver module
*
* This is the third step in the three-step process to register an
* auxiliary_device.
*
* This function must be called after a successful call to
* auxiliary_device_init(), which will perform the device_initialize. This
* means that if this returns an error code, then a call to
* auxiliary_device_uninit() must be performed so that the .release callback
* will be triggered to free the memory associated with the auxiliary_device.
*
* The expectation is that users will call the "auxiliary_device_add" macro so
* that the caller's KBUILD_MODNAME is automatically inserted for the modname
* parameter. Only if a user requires a custom name would this version be
* called directly.
*/
int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
{
struct device *dev = &auxdev->dev;
int ret;
if (!modname) {
dev_err(dev, "auxiliary device modname is NULL\n");
return -EINVAL;
}
ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
if (ret) {
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret);
return ret;
}
ret = device_add(dev);
if (ret)
dev_err(dev, "adding auxiliary device failed!: %d\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_device_add);
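/*
 * A condensed sketch of the full three-step sequence (struct foo, parent and
 * foo_release() are illustrative assumptions, not APIs from this file). Note
 * how the two error paths differ: before auxiliary_device_init() succeeds the
 * memory is freed directly, afterwards auxiliary_device_uninit() is used so
 * that the ->release() callback does the freeing:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	f->auxdev.name = "foo_dev";
 *	f->auxdev.dev.parent = parent;
 *	f->auxdev.dev.release = foo_release;
 *
 *	ret = auxiliary_device_init(&f->auxdev);
 *	if (ret) {
 *		kfree(f);
 *		return ret;
 *	}
 *	ret = auxiliary_device_add(&f->auxdev);
 *	if (ret) {
 *		auxiliary_device_uninit(&f->auxdev);
 *		return ret;
 *	}
 */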
/**
* auxiliary_find_device - auxiliary device iterator for locating a particular device.
* @start: Device to begin with
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This function returns a reference to a device that is 'found'
* for later use, as determined by the @match callback.
*
* The reference returned should be released with put_device().
*
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
int (*match)(struct device *dev, const void *data))
{
struct device *dev;
dev = bus_find_device(&auxiliary_bus_type, start, data, match);
if (!dev)
return NULL;
return to_auxiliary_dev(dev);
}
EXPORT_SYMBOL_GPL(auxiliary_find_device);
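/*
 * Example lookup (a sketch; device_match_name() is the generic driver-core
 * match helper, and the match_name shown is hypothetical). The reference must
 * be dropped with put_device() when done:
 *
 *	struct auxiliary_device *auxdev;
 *
 *	auxdev = auxiliary_find_device(NULL, "foo_mod.foo_dev.0",
 *				       device_match_name);
 *	if (auxdev) {
 *		...use the device...
 *		put_device(&auxdev->dev);
 *	}
 */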
/**
* __auxiliary_driver_register - register a driver for auxiliary bus devices
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
* @modname: KBUILD_MODNAME for parent driver
*
* The expectation is that users will call the "auxiliary_driver_register"
* macro so that the caller's KBUILD_MODNAME is automatically inserted for the
* modname parameter. Only if a user requires a custom name would this version
* be called directly.
*/
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
{
int ret;
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
if (auxdrv->name)
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname,
auxdrv->name);
else
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname);
if (!auxdrv->driver.name)
return -ENOMEM;
auxdrv->driver.owner = owner;
auxdrv->driver.bus = &auxiliary_bus_type;
auxdrv->driver.mod_name = modname;
ret = driver_register(&auxdrv->driver);
if (ret)
kfree(auxdrv->driver.name);
return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
/**
* auxiliary_driver_unregister - unregister a driver
* @auxdrv: auxiliary_driver structure
*/
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
{
driver_unregister(&auxdrv->driver);
kfree(auxdrv->driver.name);
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
}
| linux-master | drivers/base/auxiliary.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <[email protected]>, Renesas Electronics Corp.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
#include "power.h"
/**
* dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
* @dev: Device to handle.
*
* If power.subsys_data is NULL, point it to a new object, otherwise increment
* its reference counter. Return 0 if new object has been created or refcount
* increased, otherwise negative error code.
*/
int dev_pm_get_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
psd = kzalloc(sizeof(*psd), GFP_KERNEL);
if (!psd)
return -ENOMEM;
spin_lock_irq(&dev->power.lock);
if (dev->power.subsys_data) {
dev->power.subsys_data->refcount++;
} else {
spin_lock_init(&psd->lock);
psd->refcount = 1;
dev->power.subsys_data = psd;
pm_clk_init(dev);
psd = NULL;
}
spin_unlock_irq(&dev->power.lock);
	/* kfree() is a no-op when its argument is NULL. */
kfree(psd);
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
/**
* dev_pm_put_subsys_data - Drop reference to power.subsys_data.
* @dev: Device to handle.
*
* If the reference counter of power.subsys_data is zero after dropping the
* reference, power.subsys_data is removed.
*/
void dev_pm_put_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
spin_lock_irq(&dev->power.lock);
psd = dev_to_psd(dev);
if (!psd)
goto out;
if (--psd->refcount == 0)
dev->power.subsys_data = NULL;
else
psd = NULL;
out:
spin_unlock_irq(&dev->power.lock);
kfree(psd);
}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
* @power_on: Used to indicate whether we should power on the device.
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
* As attachment succeeds, the ->detach() callback in the struct dev_pm_domain
* should be assigned by the corresponding attach function.
*
 * This function should typically be invoked from subsystem level code during
 * the probe phase, especially by subsystems that hold devices which require
 * power management through PM domains.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
int dev_pm_domain_attach(struct device *dev, bool power_on)
{
int ret;
if (dev->pm_domain)
return 0;
ret = acpi_dev_pm_attach(dev, power_on);
if (!ret)
ret = genpd_dev_pm_attach(dev);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
/**
* dev_pm_domain_attach_by_id - Associate a device with one of its PM domains.
* @dev: The device used to lookup the PM domain.
* @index: The index of the PM domain.
*
* As @dev may only be attached to a single PM domain, the backend PM domain
* provider creates a virtual device to attach instead. If attachment succeeds,
 * the ->detach() callback in the struct dev_pm_domain is assigned by the
 * corresponding backend attach function, so as to deal with detaching of the
* created virtual device.
*
* This function should typically be invoked by a driver during the probe phase,
 * when its device requires power management through multiple PM domains. The
* driver may benefit from using the received device, to configure device-links
* towards its original device. Depending on the use-case and if needed, the
* links may be dynamically changed by the driver, which allows it to control
* the power to the PM domains independently from each other.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*
 * Returns the newly created virtual device when successfully attached to its
 * PM domain, NULL in case @dev doesn't need a PM domain, else an ERR_PTR().
* Note that, to detach the returned virtual device, the driver shall call
* dev_pm_domain_detach() on it, typically during the remove phase.
*/
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
return genpd_dev_pm_attach_by_id(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
/**
* dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
* @dev: The device used to lookup the PM domain.
* @name: The name of the PM domain.
*
* For a detailed function description, see dev_pm_domain_attach_by_id().
*/
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
return genpd_dev_pm_attach_by_name(dev, name);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
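/*
 * Sketch of a multi-domain attach from a driver's probe path (the "perf"
 * domain name is hypothetical; device_link_add() and the DL_FLAG_* flags are
 * the regular driver-core device-link API mentioned above):
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	if (pd_dev && !device_link_add(dev, pd_dev, DL_FLAG_STATELESS |
 *				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE))
 *		return -ENODEV;
 */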
/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
*
 * This function reverses the actions from dev_pm_domain_attach(),
* dev_pm_domain_attach_by_id() and dev_pm_domain_attach_by_name(), thus it
* detaches @dev from its PM domain. Typically it should be invoked during the
* remove phase, either from subsystem level code or from drivers.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*/
void dev_pm_domain_detach(struct device *dev, bool power_off)
{
if (dev->pm_domain && dev->pm_domain->detach)
dev->pm_domain->detach(dev, power_off);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
* dev_pm_domain_start - Start the device through its PM domain.
* @dev: Device to start.
*
* This function should typically be called during probe by a subsystem/driver,
* when it needs to start its device from the PM domain's perspective. Note
 * that it's assumed that the PM domain is already powered on when this
* function is called.
*
* Returns 0 on success and negative error values on failures.
*/
int dev_pm_domain_start(struct device *dev)
{
if (dev->pm_domain && dev->pm_domain->start)
return dev->pm_domain->start(dev);
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_start);
/**
* dev_pm_domain_set - Set PM domain of a device.
* @dev: Device whose PM domain is to be set.
* @pd: PM domain to be set, or NULL.
*
* Sets the PM domain the device belongs to. The PM domain of a device needs
 * to be set before its probe finishes (i.e. before it is bound to a driver).
*
* This function must be called with the device lock held.
*/
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
{
if (dev->pm_domain == pd)
return;
WARN(pd && device_is_bound(dev),
"PM domains can only be changed for unbound devices\n");
dev->pm_domain = pd;
device_pm_check_callbacks(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_set);
| linux-master | drivers/base/power/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/trace.c
*
* Copyright (C) 2006 Linus Torvalds
*
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include "power.h"
/*
* Horrid, horrid, horrid.
*
* It turns out that the _only_ piece of hardware that actually
* keeps its value across a hard boot (and, more importantly, the
* POST init sequence) is literally the realtime clock.
*
* Never mind that an RTC chip has 114 bytes (and often a whole
* other bank of an additional 128 bytes) of nice SRAM that is
* _designed_ to keep data - the POST will clear it. So we literally
* can just use the few bytes of actual time data, which means that
* we're really limited.
*
* It means, for example, that we can't use the seconds at all
* (since the time between the hang and the boot might be more
* than a minute), and we'd better not depend on the low bits of
* the minutes either.
*
* There are the wday fields etc, but I wouldn't guarantee those
* are dependable either. And if the date isn't valid, either the
* hw or POST will do strange things.
*
* So we're left with:
* - year: 0-99
* - month: 0-11
* - day-of-month: 1-28
* - hour: 0-23
 * - min: (0-19)*3
*
* Giving us a total range of 0-16128000 (0xf61800), ie less
* than 24 bits of actual data we can save across reboots.
*
* And if your box can't boot in less than three minutes,
* you're screwed.
*
* Now, almost 24 bits of data is pitifully small, so we need
* to be pretty dense if we want to use it for anything nice.
* What we do is that instead of saving off nice readable info,
* we save off _hashes_ of information that we can hopefully
* regenerate after the reboot.
*
* In particular, this means that we might be unlucky, and hit
* a case where we have a hash collision, and we end up not
* being able to tell for certain exactly which case happened.
* But that's hopefully unlikely.
*
* What we do is to take the bits we can fit, and split them
* into three parts (16*997*1009 = 16095568), and use the values
* for:
* - 0-15: user-settable
* - 0-996: file + line number
* - 0-1008: device
*/
#define USERHASH (16)
#define FILEHASH (997)
#define DEVHASH (1009)
#define DEVSEED (7919)
bool pm_trace_rtc_abused __read_mostly;
EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
{
unsigned int n = user + USERHASH*(file + FILEHASH*device);
// June 7th, 2006
static struct rtc_time time = {
.tm_sec = 0,
.tm_min = 0,
.tm_hour = 0,
.tm_mday = 7,
.tm_mon = 5, // June - counting from zero
.tm_year = 106,
.tm_wday = 3,
.tm_yday = 160,
.tm_isdst = 1
};
time.tm_year = (n % 100);
n /= 100;
time.tm_mon = (n % 12);
n /= 12;
time.tm_mday = (n % 28) + 1;
n /= 28;
time.tm_hour = (n % 24);
n /= 24;
time.tm_min = (n % 20) * 3;
n /= 20;
mc146818_set_time(&time);
pm_trace_rtc_abused = true;
return n ? -1 : 0;
}
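/*
 * Worked example of the packing above (hypothetical hash values): with
 * user = 3, file = 500 and device = 700,
 *
 *	n = 3 + 16 * (500 + 997 * 700) = 11174403
 *
 * which set_magic_time() above encodes as year 3, month 0, mday 17, hour 20
 * and minute 39 (interval 13 * 3). read_magic_time() and late_resume_init()
 * below invert the arithmetic to recover 3:500:700.
 */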
static unsigned int read_magic_time(void)
{
struct rtc_time time;
unsigned int val;
if (mc146818_get_time(&time) < 0) {
pr_err("Unable to read current time from RTC\n");
return 0;
}
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
val -= 100;
val += time.tm_mon * 100; /* 12 months */
val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */
val += time.tm_hour * 100 * 12 * 28; /* 24 hours */
val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */
return val;
}
/*
* This is just the sdbm hash function with a user-supplied
* seed and final size parameter.
*/
static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
{
unsigned char c;
while ((c = *data++) != 0) {
seed = (seed << 16) + (seed << 6) - seed + c;
}
return seed % mod;
}
void set_trace_device(struct device *dev)
{
dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
}
EXPORT_SYMBOL(set_trace_device);
/*
* We could just take the "tracedata" index into the .tracedata
* section instead. Generating a hash of the data gives us a
* chance to work across kernel versions, and perhaps more
* importantly it also gives us valid/invalid check (ie we will
* likely not give totally bogus reports - if the hash matches,
* it's not any guarantee, but it's a high _likelihood_ that
* the match is valid).
*/
void generate_pm_trace(const void *tracedata, unsigned int user)
{
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
if (!x86_platform.legacy.rtc)
return;
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
}
EXPORT_SYMBOL(generate_pm_trace);
extern char __tracedata_start[], __tracedata_end[];
static int show_file_hash(unsigned int value)
{
int match;
char *tracedata;
match = 0;
for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
tracedata += 2 + sizeof(unsigned long)) {
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
unsigned int hash = hash_string(lineno, file, FILEHASH);
if (hash != value)
continue;
pr_info(" hash matches %s:%u\n", file, lineno);
match++;
}
return match;
}
static int show_dev_hash(unsigned int value)
{
int match = 0;
struct list_head *entry;
device_pm_lock();
entry = dpm_list.prev;
while (entry != &dpm_list) {
		struct device *dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
if (hash == value) {
dev_info(dev, "hash matches\n");
match++;
}
entry = entry->prev;
}
device_pm_unlock();
return match;
}
static unsigned int hash_value_early_read;
int show_trace_dev_match(char *buf, size_t size)
{
unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
int ret = 0;
struct list_head *entry;
/*
* It's possible that multiple devices will match the hash and we can't
* tell which is the culprit, so it's best to output them all.
*/
device_pm_lock();
entry = dpm_list.prev;
while (size && entry != &dpm_list) {
struct device *dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
int len = snprintf(buf, size, "%s\n",
dev_driver_string(dev));
if (len > size)
len = size;
buf += len;
ret += len;
size -= len;
}
entry = entry->prev;
}
device_pm_unlock();
return ret;
}
static int
pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
{
switch (mode) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
if (pm_trace_rtc_abused) {
pm_trace_rtc_abused = false;
pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
}
break;
default:
break;
}
return 0;
}
static struct notifier_block pm_trace_nb = {
.notifier_call = pm_trace_notify,
};
static int __init early_resume_init(void)
{
if (!x86_platform.legacy.rtc)
return 0;
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
}
static int __init late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
if (!x86_platform.legacy.rtc)
return 0;
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
val = val / FILEHASH;
dev = val /* % DEVHASH */;
pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
show_file_hash(file);
show_dev_hash(dev);
return 0;
}
core_initcall(early_resume_init);
late_initcall(late_resume_init);
| linux-master | drivers/base/power/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
* Copyright (C) 2010 Alan Stern <[email protected]>
*/
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
pm_callback_t cb;
const struct dev_pm_ops *ops;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
else if (dev->type && dev->type->pm)
ops = dev->type->pm;
else if (dev->class && dev->class->pm)
ops = dev->class->pm;
else if (dev->bus && dev->bus->pm)
ops = dev->bus->pm;
else
ops = NULL;
if (ops)
cb = *(pm_callback_t *)((void *)ops + cb_offset);
else
cb = NULL;
if (!cb && dev->driver && dev->driver->pm)
cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
return cb;
}
#define RPM_GET_CALLBACK(dev, callback) \
__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
/**
* update_pm_runtime_accounting - Update the time accounting of power states
* @dev: Device to update the accounting for
*
* In order to be able to have time accounting of the various power states
* (as used by programs such as PowerTOP to show the effectiveness of runtime
* PM), we need to track the time spent in each state.
* update_pm_runtime_accounting must be called each time before the
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
static void update_pm_runtime_accounting(struct device *dev)
{
u64 now, last, delta;
if (dev->power.disable_depth > 0)
return;
last = dev->power.accounting_timestamp;
now = ktime_get_mono_fast_ns();
dev->power.accounting_timestamp = now;
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
*/
if (now < last)
return;
delta = now - last;
if (dev->power.runtime_status == RPM_SUSPENDED)
dev->power.suspended_time += delta;
else
dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
dev->power.runtime_status = status;
}
static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
u64 time;
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
update_pm_runtime_accounting(dev);
time = suspended ? dev->power.suspended_time : dev->power.active_time;
spin_unlock_irqrestore(&dev->power.lock, flags);
return time;
}
u64 pm_runtime_active_time(struct device *dev)
{
return rpm_get_accounted_time(dev, false);
}
u64 pm_runtime_suspended_time(struct device *dev)
{
return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
*/
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
/**
* pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
* @dev: Device to handle.
*/
static void pm_runtime_cancel_pending(struct device *dev)
{
pm_runtime_deactivate_timer(dev);
/*
* In case there's a request pending, make sure its work function will
* return without doing anything.
*/
dev->power.request = RPM_REQ_NONE;
}
/*
* pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
* @dev: Device to handle.
*
* Compute the autosuspend-delay expiration time based on the device's
* power.last_busy time. If the delay has already expired or is disabled
* (negative) or the power.use_autosuspend flag isn't set, return 0.
* Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
*
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
*/
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
u64 expires;
if (!dev->power.use_autosuspend)
return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
return 0;
expires = READ_ONCE(dev->power.last_busy);
expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
if (expires > ktime_get_mono_fast_ns())
return expires; /* Expires in the future */
return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
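/*
 * Typical driver-side pattern that feeds this expiration logic (a sketch
 * using the standard runtime PM helpers), e.g. at the end of an I/O request:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * pm_runtime_mark_last_busy() updates power.last_busy, and the put then lets
 * rpm_suspend() below schedule a timer for last_busy + autosuspend_delay.
 */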
static int dev_memalloc_noio(struct device *dev, void *data)
{
return dev->power.memalloc_noio;
}
/*
* pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
* @dev: Device to handle.
* @enable: True for setting the flag and False for clearing the flag.
*
* Set the flag for all devices in the path from the device to the
* root device in the device tree if @enable is true, otherwise clear
* the flag for devices in the path whose siblings don't set the flag.
*
* The function should only be called by block device, or network
* device driver for solving the deadlock problem during runtime
* resume/suspend:
*
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation, since it might not complete until the block
 * device becomes active and the involved page I/O finishes. The
 * situation was first pointed out by Alan Stern. Network devices
 * are involved in iSCSI kinds of situations.
*
 * dev_hotplug_mutex is held in this function to handle the hotplug
 * race, because pm_runtime_set_memalloc_noio() may be called
 * from an async probe().
*
* The function should be called between device_add() and device_del()
* on the affected device(block/network device).
*/
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
static DEFINE_MUTEX(dev_hotplug_mutex);
mutex_lock(&dev_hotplug_mutex);
for (;;) {
bool enabled;
/* hold power lock since bitfield is not SMP-safe. */
spin_lock_irq(&dev->power.lock);
enabled = dev->power.memalloc_noio;
dev->power.memalloc_noio = enable;
spin_unlock_irq(&dev->power.lock);
/*
		 * No need to enable ancestors any more if the device
		 * was already enabled.
*/
if (enabled && enable)
break;
dev = dev->parent;
/*
		 * Clear the flag of the parent device only if none of
		 * its children has the flag set, because an ancestor's
		 * flag may have been set by any one of its descendants.
*/
if (!dev || (!enable &&
device_for_each_child(dev, NULL, dev_memalloc_noio)))
break;
}
mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
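/*
 * Expected call pattern from an affected block or network device driver (an
 * illustrative sketch, error handling omitted):
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *
 *	...device lifetime...
 *
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */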
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
*/
static int rpm_check_suspend_allowed(struct device *dev)
{
int retval = 0;
if (dev->power.runtime_error)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
else if (atomic_read(&dev->power.usage_count))
retval = -EAGAIN;
else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
else if ((dev->power.deferred_resume &&
dev->power.runtime_status == RPM_SUSPENDING) ||
(dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
return retval;
}
static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
int retval;
if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
/* Ignore suppliers with disabled runtime PM. */
if (retval < 0 && retval != -EACCES) {
pm_runtime_put_noidle(link->supplier);
return retval;
}
refcount_inc(&link->rpm_active);
}
return 0;
}
/**
* pm_runtime_release_supplier - Drop references to device link's supplier.
* @link: Target device link.
*
* Drop all runtime PM references associated with @link to its supplier device.
*/
void pm_runtime_release_supplier(struct device_link *link)
{
struct device *supplier = link->supplier;
/*
* The additional power.usage_count check is a safety net in case
* the rpm_active refcount becomes saturated, in which case
* refcount_dec_not_one() would return true forever, but it is not
* strictly necessary.
*/
while (refcount_dec_not_one(&link->rpm_active) &&
atomic_read(&supplier->power.usage_count) > 0)
pm_runtime_put_noidle(supplier);
}
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
pm_runtime_release_supplier(link);
if (try_to_suspend)
pm_request_idle(link->supplier);
}
}
static void rpm_put_suppliers(struct device *dev)
{
__rpm_put_suppliers(dev, true);
}
static void rpm_suspend_suppliers(struct device *dev)
{
struct device_link *link;
int idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
pm_request_idle(link->supplier);
device_links_read_unlock(idx);
}
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
* @dev: Device to run the callback for.
*/
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval = 0, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
} else {
spin_unlock_irq(&dev->power.lock);
/*
* Resume suppliers if necessary.
*
* The device's runtime PM status cannot change until this
* routine returns, so it is safe to read the status outside of
* the lock.
*/
if (use_links && dev->power.runtime_status == RPM_RESUMING) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
if (retval) {
rpm_put_suppliers(dev);
goto fail;
}
device_links_read_unlock(idx);
}
}
if (cb)
retval = cb(dev);
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
} else {
/*
* If the device is suspending and the callback has returned
* success, drop the usage counters of the suppliers that have
* been reference counted on its resume.
*
* Do that if resume fails too.
*/
if (use_links &&
((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
(dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
__rpm_put_suppliers(dev, false);
fail:
device_links_read_unlock(idx);
}
spin_lock_irq(&dev->power.lock);
}
return retval;
}
/**
* rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
int retval;
if (dev->power.memalloc_noio) {
unsigned int noio_flag;
/*
* Deadlock might be caused if memory allocation with
* GFP_KERNEL happens inside runtime_suspend and
* runtime_resume callbacks of one block device's
* ancestor or the block device itself. Network
* device might be thought as part of iSCSI block
* device, so network device and its ancestor should
* be marked as memalloc_noio too.
*/
noio_flag = memalloc_noio_save();
retval = __rpm_callback(cb, dev);
memalloc_noio_restore(noio_flag);
} else {
retval = __rpm_callback(cb, dev);
}
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
}
/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
* run the ->runtime_idle() callback directly. If the ->runtime_idle callback
* doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_idle(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
int retval;
trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
/*
* Any pending request other than an idle notification takes
* precedence over us, except that the timer may be running.
*/
else if (dev->power.request_pending &&
dev->power.request > RPM_REQ_IDLE)
retval = -EAGAIN;
/* Act as though RPM_NOWAIT is always set. */
else if (dev->power.idle_notification)
retval = -EINPROGRESS;
if (retval)
goto out;
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
callback = RPM_GET_CALLBACK(dev, runtime_idle);
/* If no callback assume success. */
if (!callback || dev->power.no_callbacks)
goto out;
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = RPM_REQ_IDLE;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
dev->power.idle_notification = true;
if (dev->power.irq_safe)
spin_unlock(&dev->power.lock);
else
spin_unlock_irq(&dev->power.lock);
retval = callback(dev);
if (dev->power.irq_safe)
spin_lock(&dev->power.lock);
else
spin_lock_irq(&dev->power.lock);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be suspended.
* Cancel a pending idle notification, autosuspend or suspend. If
* another suspend has been started earlier, either return immediately
* or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
* flags. If the RPM_ASYNC flag is set then queue a suspend request;
* otherwise run the ->runtime_suspend() callback directly. When
* ->runtime_suspend succeeded, if a deferred resume was requested while
* the callback was running then carry it out, otherwise send an idle
* notification for its parent (if the suspend succeeded and both
* ignore_children of parent->power and irq_safe of dev->power are not set).
* If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
* flag is set and the next autosuspend-delay expiration time is in the
* future, schedule another autosuspend attempt.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_suspend(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int (*callback)(struct device *);
struct device *parent = NULL;
int retval;
trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
goto out; /* Conditions are wrong. */
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
if (retval)
goto out;
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
/*
* Optimization: If the timer is already running and is
* set to expire at or before the autosuspend delay,
* avoid the overhead of resetting it. Just let it
* expire; pm_suspend_timer_fn() will take care of the
* rest.
*/
if (!(dev->power.timer_expires &&
dev->power.timer_expires <= expires)) {
/*
* We add a slack of 25% to gather wakeups
* without sacrificing the granularity.
*/
u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
(NSEC_PER_MSEC >> 2);
dev->power.timer_expires = expires;
hrtimer_start_range_ns(&dev->power.suspend_timer,
ns_to_ktime(expires),
slack,
HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
}
}
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
if (dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
retval = -EINPROGRESS;
goto out;
}
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
cpu_relax();
spin_lock(&dev->power.lock);
goto repeat;
}
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
goto repeat;
}
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
/* Carry out an asynchronous or a synchronous suspend. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = (rpmflags & RPM_AUTO) ?
RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
goto out;
}
__update_runtime_status(dev, RPM_SUSPENDING);
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
dev_pm_enable_wake_irq_complete(dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
atomic_add_unless(&parent->power.child_count, -1, 0);
}
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
dev->power.deferred_resume = false;
rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
}
if (dev->power.irq_safe)
goto out;
/* Maybe the parent is now able to suspend. */
if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
rpm_idle(parent, RPM_ASYNC);
spin_unlock(&parent->power.lock);
spin_lock(&dev->power.lock);
}
/* Maybe the suppliers are now able to suspend. */
if (dev->power.links_count > 0) {
spin_unlock_irq(&dev->power.lock);
rpm_suspend_suppliers(dev);
spin_lock_irq(&dev->power.lock);
}
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
fail:
dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
if (retval == -EAGAIN || retval == -EBUSY) {
dev->power.runtime_error = 0;
/*
* If the callback routine failed an autosuspend, and
* if the last_busy time has been updated so that there
* is a new autosuspend expiration time, automatically
* reschedule another autosuspend.
*/
if ((rpmflags & RPM_AUTO) &&
pm_runtime_autosuspend_expiration(dev) != 0)
goto repeat;
} else {
pm_runtime_cancel_pending(dev);
}
goto out;
}
/**
* rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
* Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
* earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
* parallel with this function, either tell the other process to resume after
* suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
* flag is set then queue a resume request; otherwise run the
* ->runtime_resume() callback directly. Queue an idle notification for the
* device if the resume succeeded.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static int rpm_resume(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int (*callback)(struct device *);
struct device *parent = NULL;
int retval = 0;
trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
retval = -EINVAL;
} else if (dev->power.disable_depth > 0) {
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
else
retval = -EACCES;
}
if (retval)
goto out;
/*
* Other scheduled or pending requests need to be canceled. Small
* optimization: If an autosuspend timer is running, leave it running
* rather than cancelling it now only to restart it again in the near
* future.
*/
dev->power.request = RPM_REQ_NONE;
if (!dev->power.timer_autosuspends)
pm_runtime_deactivate_timer(dev);
if (dev->power.runtime_status == RPM_ACTIVE) {
retval = 1;
goto out;
}
if (dev->power.runtime_status == RPM_RESUMING ||
dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
if (rpmflags & RPM_NOWAIT)
retval = -EINPROGRESS;
} else {
retval = -EINPROGRESS;
}
goto out;
}
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
cpu_relax();
spin_lock(&dev->power.lock);
goto repeat;
}
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_RESUMING &&
dev->power.runtime_status != RPM_SUSPENDING)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
goto repeat;
}
/*
* See if we can skip waking up the parent. This is safe only if
* power.no_callbacks is set, because otherwise we don't know whether
* the resume will actually succeed.
*/
if (dev->power.no_callbacks && !parent && dev->parent) {
spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
if (dev->parent->power.disable_depth > 0 ||
dev->parent->power.ignore_children ||
dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
retval = 1;
goto no_callback; /* Assume success. */
}
spin_unlock(&dev->parent->power.lock);
}
/* Carry out an asynchronous or a synchronous resume. */
if (rpmflags & RPM_ASYNC) {
dev->power.request = RPM_REQ_RESUME;
if (!dev->power.request_pending) {
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
retval = 0;
goto out;
}
if (!parent && dev->parent) {
/*
* Increment the parent's usage counter and resume it if
* necessary. Not needed if dev is irq-safe; then the
* parent is permanently resumed.
*/
parent = dev->parent;
if (dev->power.irq_safe)
goto skip_parent;
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
spin_lock(&parent->power.lock);
/*
* Resume the parent if it has runtime PM enabled and has not been
* set to ignore its children.
*/
if (!parent->power.disable_depth &&
!parent->power.ignore_children) {
rpm_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
}
spin_unlock(&parent->power.lock);
spin_lock(&dev->power.lock);
if (retval)
goto out;
goto repeat;
}
skip_parent:
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
__update_runtime_status(dev, RPM_RESUMING);
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
dev_pm_enable_wake_irq_check(dev, false);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
pm_runtime_mark_last_busy(dev);
if (parent)
atomic_inc(&parent->power.child_count);
}
wake_up_all(&dev->power.wait_queue);
if (retval >= 0)
rpm_idle(dev, RPM_ASYNC);
out:
if (parent && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);
pm_runtime_put(parent);
spin_lock_irq(&dev->power.lock);
}
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
/**
* pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
* is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
struct device *dev = container_of(work, struct device, power.work);
enum rpm_request req;
spin_lock_irq(&dev->power.lock);
if (!dev->power.request_pending)
goto out;
req = dev->power.request;
dev->power.request = RPM_REQ_NONE;
dev->power.request_pending = false;
switch (req) {
case RPM_REQ_NONE:
break;
case RPM_REQ_IDLE:
rpm_idle(dev, RPM_NOWAIT);
break;
case RPM_REQ_SUSPEND:
rpm_suspend(dev, RPM_NOWAIT);
break;
case RPM_REQ_AUTOSUSPEND:
rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
break;
case RPM_REQ_RESUME:
rpm_resume(dev, RPM_NOWAIT);
break;
}
out:
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
* @timer: hrtimer used by pm_schedule_suspend().
*
* Check if the time is right and queue a suspend request.
*/
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
struct device *dev = container_of(timer, struct device, power.suspend_timer);
unsigned long flags;
u64 expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
/*
* If 'expires' is in the future, we've been called too early and
* should not request the suspend yet.
*/
if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
return HRTIMER_NORESTART;
}
/**
* pm_schedule_suspend - Set up a timer to submit a suspend request in future.
* @dev: Device to suspend.
* @delay: Time to wait before submitting a suspend request, in milliseconds.
*/
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
if (!delay) {
retval = rpm_suspend(dev, RPM_ASYNC);
goto out;
}
retval = rpm_check_suspend_allowed(dev);
if (retval)
goto out;
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
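/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that wants its device suspended after staying idle could schedule a
 * delayed suspend request from its idle path; foo_device_idle() and
 * struct foo are hypothetical names.
 *
 *    static void foo_device_idle(struct foo *foo)
 *    {
 *        int ret = pm_schedule_suspend(foo->dev, 500);
 *
 *        if (ret < 0)
 *            dev_warn(foo->dev, "suspend not scheduled: %d\n", ret);
 *    }
 */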
static int rpm_drop_usage_count(struct device *dev)
{
int ret;
ret = atomic_sub_return(1, &dev->power.usage_count);
if (ret >= 0)
return ret;
/*
* Because rpm_resume() does not check the usage counter, it will resume
* the device even if the usage counter is 0 or negative, so it is
* sufficient to increment the usage counter here to reverse the change
* made above.
*/
atomic_inc(&dev->power.usage_count);
dev_warn(dev, "Runtime PM usage count underflow!\n");
return -EINVAL;
}
/**
* __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
* return immediately if it is larger than zero (if it becomes negative, log a
* warning, increment it, and return an error). Then carry out an idle
* notification, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
if (rpmflags & RPM_GET_PUT) {
retval = rpm_drop_usage_count(dev);
if (retval < 0) {
return retval;
} else if (retval > 0) {
trace_rpm_usage(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
* __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
* return immediately if it is larger than zero (if it becomes negative, log a
* warning, increment it, and return an error). Then carry out a suspend,
* either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
if (rpmflags & RPM_GET_PUT) {
retval = rpm_drop_usage_count(dev);
if (retval < 0) {
return retval;
} else if (retval > 0) {
trace_rpm_usage(dev, rpmflags);
return 0;
}
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
* __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
* or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_resume(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
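/*
 * Example (illustrative, not part of the original file): drivers normally
 * reach the three entry points above through the static inline wrappers in
 * include/linux/pm_runtime.h rather than calling them directly, e.g.:
 *
 *    ret = pm_runtime_resume_and_get(dev);
 *    if (ret < 0)
 *        return ret;
 *    ... access the hardware ...
 *    pm_runtime_put(dev);
 *
 * pm_runtime_resume_and_get() resumes the device and takes a usage-count
 * reference; pm_runtime_put() drops it and queues an asynchronous idle
 * notification.
 */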
/**
* pm_runtime_get_if_active - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
* Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
* @ign_usage_count is %true or the runtime PM usage counter of @dev is not
* zero, increment the usage counter of @dev and return 1. Otherwise, return 0
* without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
*
* If @ign_usage_count is %false, this function can be used to prevent
* suspending the device when both its runtime PM status is %RPM_ACTIVE and its
* runtime PM usage counter is not zero.
*
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
if (dev->power.disable_depth > 0) {
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
} else if (ign_usage_count) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
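/*
 * Example (illustrative, not part of the original file): a poller that
 * only touches the hardware when the device is already powered up, rather
 * than resuming it; foo_read_counters() is a hypothetical helper.
 *
 *    if (pm_runtime_get_if_active(dev, false) > 0) {
 *        foo_read_counters(dev);
 *        pm_runtime_put(dev);
 *    }
 */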
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
*
* If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
* parent's power.ignore_children flag is unset, the device's status cannot be
* set to RPM_ACTIVE, so -EBUSY is returned in that case.
*
* If successful, __pm_runtime_set_status() clears the power.runtime_error field
* and the device parent's counter of unsuspended children is modified to
* reflect the new status. If the new status is RPM_SUSPENDED, an idle
* notification request for the parent is submitted.
*
* If @dev has any suppliers (as reflected by device links to them), and @status
* is RPM_ACTIVE, they will be activated upfront and if the activation of one
* of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
* of the @status value) and the suppliers will be deactivated on exit. The
* error returned by the failing supplier activation will be returned in that
* case.
*/
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
bool notify_parent = false;
unsigned long flags;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
return -EINVAL;
spin_lock_irqsave(&dev->power.lock, flags);
/*
* Prevent PM-runtime from being enabled for the device or return an
* error if it is enabled already and working.
*/
if (dev->power.runtime_error || dev->power.disable_depth)
dev->power.disable_depth++;
else
error = -EAGAIN;
spin_unlock_irqrestore(&dev->power.lock, flags);
if (error)
return error;
/*
* If the new status is RPM_ACTIVE, the suppliers can be activated
* upfront regardless of the current status, because next time
* rpm_put_suppliers() runs, the rpm_active refcounts of the links
* involved will be dropped down to one anyway.
*/
if (status == RPM_ACTIVE) {
int idx = device_links_read_lock();
error = rpm_get_suppliers(dev);
if (error)
status = RPM_SUSPENDED;
device_links_read_unlock(idx);
}
spin_lock_irqsave(&dev->power.lock, flags);
if (dev->power.runtime_status == status || !parent)
goto out_set;
if (status == RPM_SUSPENDED) {
atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children;
} else {
spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
* It is invalid to put an active child under a parent that is
* not active, has runtime PM enabled, and has the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth &&
!parent->power.ignore_children &&
parent->power.runtime_status != RPM_ACTIVE) {
dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
dev_name(dev),
dev_name(parent));
error = -EBUSY;
} else if (dev->power.runtime_status == RPM_SUSPENDED) {
atomic_inc(&parent->power.child_count);
}
spin_unlock(&parent->power.lock);
if (error) {
status = RPM_SUSPENDED;
goto out;
}
}
out_set:
__update_runtime_status(dev, status);
if (!error)
dev->power.runtime_error = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
if (notify_parent)
pm_request_idle(parent);
if (status == RPM_SUSPENDED) {
int idx = device_links_read_lock();
rpm_put_suppliers(dev);
device_links_read_unlock(idx);
}
pm_runtime_enable(dev);
return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
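/*
 * Example (illustrative, not part of the original file): a probe path that
 * finds the device already powered up typically reports that through the
 * pm_runtime_set_active() wrapper around this function before enabling
 * runtime PM:
 *
 *    ret = pm_runtime_set_active(dev);
 *    if (ret)
 *        return ret;
 *    pm_runtime_enable(dev);
 */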
/**
* __pm_runtime_barrier - Cancel pending requests and wait for completions.
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
* runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
static void __pm_runtime_barrier(struct device *dev)
{
pm_runtime_deactivate_timer(dev);
if (dev->power.request_pending) {
dev->power.request = RPM_REQ_NONE;
spin_unlock_irq(&dev->power.lock);
cancel_work_sync(&dev->power.work);
spin_lock_irq(&dev->power.lock);
dev->power.request_pending = false;
}
if (dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
dev->power.idle_notification) {
DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING
&& dev->power.runtime_status != RPM_RESUMING
&& !dev->power.idle_notification)
break;
spin_unlock_irq(&dev->power.lock);
schedule();
spin_lock_irq(&dev->power.lock);
}
finish_wait(&dev->power.wait_queue, &wait);
}
}
/**
* pm_runtime_barrier - Flush pending requests and wait for completions.
* @dev: Device to handle.
*
* Prevent the device from being suspended by incrementing its usage counter
* and, if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
* 1, if there was a resume request pending and the device had to be woken up,
* 0, otherwise
*/
int pm_runtime_barrier(struct device *dev)
{
int retval = 0;
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME) {
rpm_resume(dev, 0);
retval = 1;
}
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
* __pm_runtime_disable - Disable runtime PM of a device.
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
* Increment power.disable_depth for the device and if it was zero previously,
* cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
* suspended after its runtime PM has been disabled.
*
* If @check_resume is set and there's a resume request pending when
* __pm_runtime_disable() is called and power.disable_depth is zero, the
* function will wake up the device before disabling its runtime PM.
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
if (dev->power.disable_depth > 0) {
dev->power.disable_depth++;
goto out;
}
/*
* Wake up the device if there's a resume request pending, because that
* means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending &&
dev->power.request == RPM_REQ_RESUME) {
/*
* Prevent suspends and idle notifications from being carried
* out after we have woken up the device.
*/
pm_runtime_get_noresume(dev);
rpm_resume(dev, 0);
pm_runtime_put_noidle(dev);
}
/* Update time accounting before disabling PM-runtime. */
update_pm_runtime_accounting(dev);
if (!dev->power.disable_depth++) {
__pm_runtime_barrier(dev);
dev->power.last_status = dev->power.runtime_status;
}
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
* pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
if (!dev->power.disable_depth) {
dev_warn(dev, "Unbalanced %s!\n", __func__);
goto out;
}
if (--dev->power.disable_depth > 0)
goto out;
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
if (dev->power.runtime_status == RPM_SUSPENDED &&
!dev->power.ignore_children &&
atomic_read(&dev->power.child_count) > 0)
dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
* @dev: Device to handle.
*
* NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
* you at driver exit time if needed.
*/
int devm_pm_runtime_enable(struct device *dev)
{
pm_runtime_enable(dev);
return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
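/*
 * Example (illustrative, not part of the original file): with the devres
 * variant, a probe function needs no matching pm_runtime_disable() in its
 * error paths or in ->remove(); foo_probe() is a hypothetical name.
 *
 *    static int foo_probe(struct platform_device *pdev)
 *    {
 *        int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *        if (ret)
 *            return ret;
 *        ...
 *    }
 */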
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
* so that it cannot be suspended at run time until pm_runtime_allow() is called
* for it.
*/
void pm_runtime_forbid(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
if (!dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = false;
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
*/
void pm_runtime_allow(struct device *dev)
{
int ret;
spin_lock_irq(&dev->power.lock);
if (dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = true;
ret = rpm_drop_usage_count(dev);
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
* pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
* device is power-managed through its parent and has no runtime PM
* callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_callbacks = 1;
spin_unlock_irq(&dev->power.lock);
if (device_is_registered(dev))
rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
* pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
* @dev: Device to handle
*
* Set the power.irq_safe flag, which tells the PM core that the
* ->runtime_suspend() and ->runtime_resume() callbacks for this device should
* always be invoked with the spinlock held and interrupts disabled. It also
* causes the parent's usage counter to be permanently incremented, preventing
* the parent from runtime suspending -- otherwise an irq-safe child might have
* to wait for a non-irq-safe parent.
*/
void pm_runtime_irq_safe(struct device *dev)
{
if (dev->parent)
pm_runtime_get_sync(dev->parent);
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 1;
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
/**
* update_autosuspend - Handle a change to a device's autosuspend settings.
* @dev: Device to handle.
* @old_delay: The former autosuspend_delay value.
* @old_use: The former use_autosuspend value.
*
* Prevent runtime suspend if the new delay is negative and use_autosuspend is
* set; otherwise allow it. Send an idle notification if suspends are allowed.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
int delay = dev->power.autosuspend_delay;
/* Should runtime suspend be prevented now? */
if (dev->power.use_autosuspend && delay < 0) {
/* If it used to be allowed then prevent it. */
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
trace_rpm_usage(dev, 0);
}
}
/* Runtime suspend should be allowed now. */
else {
/* If it used to be prevented then allow it. */
if (old_use && old_delay < 0)
atomic_dec(&dev->power.usage_count);
/* Maybe we can autosuspend now. */
rpm_idle(dev, RPM_AUTO);
}
}
/**
* pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
* @dev: Device to handle.
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
* and the power.use_autosuspend flag is set, prevent runtime suspends. If it
* changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
int old_delay, old_use;
spin_lock_irq(&dev->power.lock);
old_delay = dev->power.autosuspend_delay;
old_use = dev->power.use_autosuspend;
dev->power.autosuspend_delay = delay;
update_autosuspend(dev, old_delay, old_use);
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
/**
* __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
* Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
int old_delay, old_use;
spin_lock_irq(&dev->power.lock);
old_delay = dev->power.autosuspend_delay;
old_use = dev->power.use_autosuspend;
dev->power.use_autosuspend = use;
update_autosuspend(dev, old_delay, old_use);
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
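/*
 * Example (illustrative, not part of the original file): the usual driver
 * pattern combining the two helpers above, via their pm_runtime.h wrappers.
 * At probe time:
 *
 *    pm_runtime_set_autosuspend_delay(dev, 2000);
 *    pm_runtime_use_autosuspend(dev);
 *    pm_runtime_enable(dev);
 *
 * and in the I/O path:
 *
 *    ret = pm_runtime_resume_and_get(dev);
 *    if (ret < 0)
 *        return ret;
 *    ... perform the transfer ...
 *    pm_runtime_mark_last_busy(dev);
 *    pm_runtime_put_autosuspend(dev);
 */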
/**
* pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
dev->power.last_status = RPM_INVALID;
dev->power.idle_notification = false;
dev->power.disable_depth = 1;
atomic_set(&dev->power.usage_count, 0);
dev->power.runtime_error = 0;
atomic_set(&dev->power.child_count, 0);
pm_suspend_ignore_children(dev, false);
dev->power.runtime_auto = true;
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
dev->power.suspend_timer.function = pm_suspend_timer_fn;
init_waitqueue_head(&dev->power.wait_queue);
}
/**
* pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
* @dev: Device object to re-initialize.
*/
void pm_runtime_reinit(struct device *dev)
{
if (!pm_runtime_enabled(dev)) {
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe) {
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 0;
spin_unlock_irq(&dev->power.lock);
if (dev->parent)
pm_runtime_put(dev->parent);
}
}
}
/**
* pm_runtime_remove - Prepare for removing a device from device hierarchy.
* @dev: Device object being removed from device hierarchy.
*/
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
pm_runtime_reinit(dev);
}
/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_get_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
device_links_read_unlock(idx);
}
/**
* pm_runtime_put_suppliers - Drop references to supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
pm_runtime_put(link->supplier);
}
device_links_read_unlock(idx);
}
void pm_runtime_new_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.links_count++;
spin_unlock_irq(&dev->power.lock);
}
static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_runtime_drop_link - Prepare for device link removal.
* @link: Device link going away.
*
* Drop the link count of the consumer end of @link and decrement the supplier
* device's runtime PM usage counter as many times as needed to drop all of the
* PM-runtime references to it held by the consumer.
*/
void pm_runtime_drop_link(struct device_link *link)
{
if (!(link->flags & DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
pm_runtime_release_supplier(link);
pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
dev->power.ignore_children);
}
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so that we can safely check the device's runtime PM
* status and, if it is active, invoke its ->runtime_suspend callback to
* suspend it and change its runtime PM status field to RPM_SUSPENDED. Also,
* if the device's usage and children counters don't indicate that the device
* was in use before the system-wide transition under way, decrement its
* parent's children counter (if there is a parent). Keep runtime PM disabled
* to preserve the state unless errors are encountered.
*
* Typically this function may be invoked from a system suspend callback to make
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
*
* Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
* state where this function has called the ->runtime_suspend callback but the
* PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
return 0;
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
if (ret)
goto err;
dev_pm_enable_wake_irq_complete(dev);
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
} else {
__update_runtime_status(dev, RPM_SUSPENDED);
dev->power.needs_force_resume = 1;
}
return 0;
err:
dev_pm_disable_wake_irq_check(dev, true);
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
* Prior to invoking this function, the caller is expected to have brought the
* device into a low-power state by a call to pm_runtime_force_suspend(). Here
* we reverse those actions and bring the device back to full power if it is
* expected to be used on system resume. Otherwise, the resume is deferred and
* left to be managed via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
* The value of the parent's children counter is correct already, so
* just update the status of the device.
*/
__update_runtime_status(dev, RPM_ACTIVE);
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
dev_pm_enable_wake_irq_check(dev, false);
goto out;
}
pm_runtime_mark_last_busy(dev);
out:
dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
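/*
 * Example (illustrative, not part of the original file): drivers with no
 * system-sleep-specific behavior can reuse their runtime PM callbacks for
 * system-wide transitions by wiring the two helpers above into their
 * dev_pm_ops; foo_runtime_suspend/resume are hypothetical callbacks.
 *
 *    static const struct dev_pm_ops foo_pm_ops = {
 *        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                pm_runtime_force_resume)
 *        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *    };
 */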
| linux-master | drivers/base/power/runtime.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>
#include "power.h"
#define list_for_each_entry_rcu_locked(pos, head, member) \
list_for_each_entry_rcu(pos, head, member, \
srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
*/
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
* They need to be modified together atomically, so it's better to use one
* atomic variable to hold them both.
*/
static atomic_t combined_event_count = ATOMIC_INIT(0);
#define IN_PROGRESS_BITS (sizeof(int) * 4)
#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
unsigned int comb = atomic_read(&combined_event_count);
*cnt = (comb >> IN_PROGRESS_BITS);
*inpr = comb & MAX_IN_PROGRESS;
}
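/*
 * Worked example of the encoding above (illustrative, not part of the
 * original file): with a 32-bit int, IN_PROGRESS_BITS is 16, so the low
 * 16 bits of combined_event_count hold the number of events in progress
 * and the high 16 bits hold the number of registered events, e.g.:
 *
 *    comb = 0x00030002  ->  cnt = 3, inpr = 2
 *
 * Incrementing the whole counter (see wakeup_source_activate()) bumps
 * "inpr", while adding MAX_IN_PROGRESS (see wakeup_source_deactivate())
 * moves one event from "in progress" to "registered" in a single atomic
 * operation:
 *
 *    0x00030002 + 0x0000ffff = 0x00040001  ->  cnt = 4, inpr = 1
 */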
/* A preserved old value of the events counter. */
static unsigned int saved_count;
static DEFINE_RAW_SPINLOCK(events_lock);
static void pm_wakeup_timer_fn(struct timer_list *t);
static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
DEFINE_STATIC_SRCU(wakeup_srcu);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
static DEFINE_IDA(wakeup_ida);
/**
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
int id;
ws = kzalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
goto err_ws;
ws_name = kstrdup_const(name, GFP_KERNEL);
if (!ws_name)
goto err_name;
ws->name = ws_name;
id = ida_alloc(&wakeup_ida, GFP_KERNEL);
if (id < 0)
goto err_id;
ws->id = id;
return ws;
err_id:
kfree_const(ws->name);
err_name:
kfree(ws);
err_ws:
return NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Fold the statistics of a wakeup source being deleted into the dummy
* "deleted" wakeup source.
*/
static void wakeup_source_record(struct wakeup_source *ws)
{
unsigned long flags;
spin_lock_irqsave(&deleted_ws.lock, flags);
if (ws->event_count) {
deleted_ws.total_time =
ktime_add(deleted_ws.total_time, ws->total_time);
deleted_ws.prevent_sleep_time =
ktime_add(deleted_ws.prevent_sleep_time,
ws->prevent_sleep_time);
deleted_ws.max_time =
ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
deleted_ws.max_time : ws->max_time;
deleted_ws.event_count += ws->event_count;
deleted_ws.active_count += ws->active_count;
deleted_ws.relax_count += ws->relax_count;
deleted_ws.expire_count += ws->expire_count;
deleted_ws.wakeup_count += ws->wakeup_count;
}
spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
static void wakeup_source_free(struct wakeup_source *ws)
{
ida_free(&wakeup_ida, ws->id);
kfree_const(ws->name);
kfree(ws);
}
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
__pm_relax(ws);
wakeup_source_record(ws);
wakeup_source_free(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
if (WARN_ON(!ws))
return;
spin_lock_init(&ws->lock);
timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
ws->active = false;
raw_spin_lock_irqsave(&events_lock, flags);
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
if (WARN_ON(!ws))
return;
raw_spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
del_timer_sync(&ws->timer);
/*
* Clear timer.function to make wakeup_source_not_registered() treat
* this wakeup source as not registered.
*/
ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
* @dev: Device this wakeup source is associated with (or NULL if virtual).
* @name: Name of the wakeup source to register.
*/
struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name)
{
struct wakeup_source *ws;
int ret;
ws = wakeup_source_create(name);
if (ws) {
if (!dev || device_is_registered(dev)) {
ret = wakeup_source_sysfs_add(dev, ws);
if (ret) {
wakeup_source_free(ws);
return NULL;
}
}
wakeup_source_add(ws);
}
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
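/*
 * Example (illustrative, not part of the original file): registering a
 * virtual wakeup source not tied to any device and using it to bracket
 * event processing; "foo_events" is a hypothetical name.
 *
 *    struct wakeup_source *ws = wakeup_source_register(NULL, "foo_events");
 *
 *    if (ws) {
 *        __pm_stay_awake(ws);
 *        ... handle the event ...
 *        __pm_relax(ws);
 *        wakeup_source_unregister(ws);
 *    }
 */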
/**
* wakeup_source_unregister - Remove a wakeup source from the list and destroy it.
* @ws: Wakeup source object to unregister.
*/
void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
if (ws->dev)
wakeup_source_sysfs_remove(ws);
wakeup_source_destroy(ws);
}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
/**
* wakeup_sources_read_lock - Lock wakeup source list for read.
*
* Returns an index of srcu lock for struct wakeup_srcu.
* This index must be passed to the matching wakeup_sources_read_unlock().
*/
int wakeup_sources_read_lock(void)
{
return srcu_read_lock(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
/**
* wakeup_sources_read_unlock - Unlock wakeup source list.
* @idx: return value from corresponding wakeup_sources_read_lock()
*/
void wakeup_sources_read_unlock(int idx)
{
srcu_read_unlock(&wakeup_srcu, idx);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
/**
* wakeup_sources_walk_start - Begin a walk on wakeup source list
*
* Returns first object of the list of wakeup sources.
*
* Note that to be safe, the wakeup sources list needs to be locked by calling
* wakeup_sources_read_lock() for this.
*/
struct wakeup_source *wakeup_sources_walk_start(void)
{
struct list_head *ws_head = &wakeup_sources;
return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
/**
* wakeup_sources_walk_next - Get next wakeup source from the list
* @ws: Previous wakeup source object
*
* Note that to be safe, the wakeup sources list needs to be locked by calling
* wakeup_sources_read_lock() for this.
*/
struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
{
struct list_head *ws_head = &wakeup_sources;
return list_next_or_null_rcu(ws_head, &ws->entry,
struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
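/*
 * Example (illustrative, not part of the original file): walking the list
 * under the SRCU read lock; include/linux/pm_wakeup.h wraps this pattern
 * in the for_each_wakeup_source() macro.
 *
 *    struct wakeup_source *ws;
 *    int idx = wakeup_sources_read_lock();
 *
 *    for (ws = wakeup_sources_walk_start(); ws;
 *         ws = wakeup_sources_walk_next(ws))
 *        pr_info("wakeup source: %s\n", ws->name);
 *
 *    wakeup_sources_read_unlock(idx);
 */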
/**
* device_wakeup_attach - Attach a wakeup source object to a device object.
* @dev: Device to handle.
* @ws: Wakeup source object to attach to @dev.
*
* This causes @dev to be treated as a wakeup device.
*/
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
spin_unlock_irq(&dev->power.lock);
return -EEXIST;
}
dev->power.wakeup = ws;
if (dev->power.wakeirq)
device_wakeup_attach_irq(dev, dev->power.wakeirq);
spin_unlock_irq(&dev->power.lock);
return 0;
}
/**
* device_wakeup_enable - Enable given device to be a wakeup source.
* @dev: Device to handle.
*
* Create a wakeup source object, register it and attach it to @dev.
*/
int device_wakeup_enable(struct device *dev)
{
struct wakeup_source *ws;
int ret;
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
if (pm_suspend_target_state != PM_SUSPEND_ON)
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
if (!ws)
return -ENOMEM;
ret = device_wakeup_attach(dev, ws);
if (ret)
wakeup_source_unregister(ws);
return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);
/**
* device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
* @dev: Device to handle
* @wakeirq: Device specific wakeirq entry
*
* Attach a device wakeirq to the wakeup source so the device
* wake IRQ can be configured automatically for suspend and
* resume.
*
* Call under the device's power.lock lock.
*/
void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
if (!ws)
return;
if (ws->wakeirq)
dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
}
/**
* device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
* @dev: Device to handle
*
* Removes a device wakeirq from the wakeup source.
*
* Call under the device's power.lock lock.
*/
void device_wakeup_detach_irq(struct device *dev)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
if (ws)
ws->wakeirq = NULL;
}
/**
* device_wakeup_arm_wake_irqs - Arm all registered device wake IRQs.
*
* Iterates over the list of device wakeirqs to arm them.
*/
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
* device_wakeup_disarm_wake_irqs - Disarm all registered device wake IRQs.
*
* Iterates over the list of device wakeirqs to disarm them.
*/
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
* device_wakeup_detach - Detach a device's wakeup source object from it.
* @dev: Device to detach the wakeup source object from.
*
* After it returns, @dev will not be treated as a wakeup device any more.
*/
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
struct wakeup_source *ws;
spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
dev->power.wakeup = NULL;
spin_unlock_irq(&dev->power.lock);
return ws;
}
/**
* device_wakeup_disable - Do not regard a device as a wakeup source any more.
* @dev: Device to handle.
*
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
int device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
/**
* device_set_wakeup_capable - Set/reset device wakeup capability flag.
* @dev: Device to handle.
* @capable: Whether or not @dev is capable of waking up the system from sleep.
*
* If @capable is set, set the @dev's power.can_wakeup flag and add its
* wakeup-related attributes to sysfs. Otherwise, unset the @dev's
* power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
*
* This function may sleep and it can't be called from any context where
* sleeping is not allowed.
*/
void device_set_wakeup_capable(struct device *dev, bool capable)
{
if (!!dev->power.can_wakeup == !!capable)
return;
dev->power.can_wakeup = capable;
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
int ret = wakeup_sysfs_add(dev);
if (ret)
dev_info(dev, "Wakeup sysfs attributes not added\n");
} else {
wakeup_sysfs_remove(dev);
}
}
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
/**
* device_set_wakeup_enable - Enable or disable a device to wake up the system.
* @dev: Device to handle.
* @enable: enable/disable flag
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
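/*
 * Example (illustrative, not part of the original file): most drivers use
 * the device_init_wakeup() helper from include/linux/pm_wakeup.h, which
 * combines device_set_wakeup_capable() and device_wakeup_enable():
 *
 *    ret = device_init_wakeup(&pdev->dev, true);
 */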
/**
* wakeup_source_not_registered - Validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
/*
* Use the timer struct to check whether the given source has been
* initialized by wakeup_source_add().
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
/*
* The functions below use the observation that each wakeup event starts a
* period in which the system should not be suspended. The moment this period
* will end depends on how the wakeup event is going to be processed after being
* detected and all of the possible cases can be divided into two distinct
* groups.
*
* First, a wakeup event may be detected by the same functional unit that will
* carry out the entire processing of it and possibly will pass it to user space
* for further processing. In that case the functional unit that has detected
* the event may later "close" the "no suspend" period associated with it
* directly as soon as it has been dealt with. The pair of pm_stay_awake() and
* pm_relax(), balanced with each other, is supposed to be used in such
* situations.
*
* Second, a wakeup event may be detected by one functional unit and processed
* by another one. In that case the unit that has detected it cannot really
* "close" the "no suspend" period associated with it, unless it knows in
* advance what's going to happen to the event during processing. This
* knowledge, however, may not be available to it, so it can simply specify time
* to wait before the system can be suspended and pass it as the second
* argument of pm_wakeup_event().
*
* It is valid to call pm_relax() after pm_wakeup_event(), in which case the
* "no suspend" period will be ended either by the pm_relax(), or by the timer
* function executed when the timer expires, whichever comes first.
*/
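/*
 * Example of the first pattern above (illustrative, not part of the
 * original file): a driver that fully processes its own wakeup events
 * brackets the processing with pm_stay_awake()/pm_relax().
 *
 *    pm_stay_awake(dev);
 *    ... detect and handle the event ...
 *    pm_relax(dev);
 */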
/**
* wakeup_source_activate - Mark given wakeup source as active.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
* core of the event by incrementing the counter of the wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
if (WARN_ONCE(wakeup_source_not_registered(ws),
"unregistered wakeup source\n"))
return;
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
if (ws->autosleep_enabled)
ws->start_prevent_time = ws->last_time;
/* Increment the counter of events in progress. */
cec = atomic_inc_return(&combined_event_count);
trace_wakeup_source_activate(ws->name, cec);
}
/**
* wakeup_source_report_event - Report wakeup event using the given source.
* @ws: Wakeup source to report the event for.
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*/
static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
ws->event_count++;
/* This is racy, but the counter is approximate anyway. */
if (events_check_enabled)
ws->wakeup_count++;
if (!ws->active)
wakeup_source_activate(ws);
if (hard)
pm_system_wakeup();
}
/**
* __pm_stay_awake - Notify the PM core of a wakeup event.
* @ws: Wakeup source object associated with the source of the event.
*
* It is safe to call this function from interrupt context.
*/
void __pm_stay_awake(struct wakeup_source *ws)
{
unsigned long flags;
if (!ws)
return;
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, false);
del_timer(&ws->timer);
ws->timer_expires = 0;
spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);
/**
* pm_stay_awake - Notify the PM core that a wakeup event is being processed.
* @dev: Device the wakeup event is related to.
*
* Notify the PM core of a wakeup event (signaled by @dev) by calling
* __pm_stay_awake for the @dev's wakeup source object.
*
* Call this function after detecting a wakeup event if pm_relax() is going
* to be called directly after processing the event (and possibly passing it to
* user space for further processing).
*/
void pm_stay_awake(struct device *dev)
{
unsigned long flags;
if (!dev)
return;
spin_lock_irqsave(&dev->power.lock, flags);
__pm_stay_awake(dev->power.wakeup);
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);
#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
ktime_t delta = ktime_sub(now, ws->start_prevent_time);
ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
ktime_t now) {}
#endif
/**
* wakeup_source_deactivate - Mark given wakeup source as inactive.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and notify the PM core that the wakeup source has
* become inactive by decrementing the counter of wakeup events being processed
* and incrementing the counter of registered wakeup events.
*/
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
unsigned int cnt, inpr, cec;
ktime_t duration;
ktime_t now;
ws->relax_count++;
/*
* __pm_relax() may be called directly or from a timer function.
* If it is called directly right after the timer function has been
* started, but before the timer function calls __pm_relax(), it is
* possible that __pm_stay_awake() will be called in the meantime and
* will set ws->active. Then, ws->active may be cleared immediately
* by the __pm_relax() called from the timer function, but in such a
* case ws->relax_count will be different from ws->active_count.
*/
if (ws->relax_count != ws->active_count) {
ws->relax_count--;
return;
}
ws->active = false;
now = ktime_get();
duration = ktime_sub(now, ws->last_time);
ws->total_time = ktime_add(ws->total_time, duration);
if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
ws->max_time = duration;
ws->last_time = now;
del_timer(&ws->timer);
ws->timer_expires = 0;
if (ws->autosleep_enabled)
update_prevent_sleep_time(ws, now);
/*
* Increment the counter of registered wakeup events and decrement the
* counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
split_counters(&cnt, &inpr);
if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
wake_up(&wakeup_count_wait_queue);
}
/**
* __pm_relax - Notify the PM core that processing of a wakeup event has ended.
* @ws: Wakeup source object associated with the source of the event.
*
* Call this function for wakeup events whose processing started with calling
* __pm_stay_awake().
*
* It is safe to call it from interrupt context.
*/
void __pm_relax(struct wakeup_source *ws)
{
unsigned long flags;
if (!ws)
return;
spin_lock_irqsave(&ws->lock, flags);
if (ws->active)
wakeup_source_deactivate(ws);
spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);
/**
* pm_relax - Notify the PM core that processing of a wakeup event has ended.
* @dev: Device that signaled the event.
*
* Execute __pm_relax() for the @dev's wakeup source object.
*/
void pm_relax(struct device *dev)
{
unsigned long flags;
if (!dev)
return;
spin_lock_irqsave(&dev->power.lock, flags);
__pm_relax(dev->power.wakeup);
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);
/**
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
* @t: Timer embedded in the wakeup source to handle.
*
* Call wakeup_source_deactivate() for the wakeup source associated with @t if
* it is currently active, its timer has not been canceled, and the expiration
* time of the timer is not in the future.
*/
static void pm_wakeup_timer_fn(struct timer_list *t)
{
struct wakeup_source *ws = from_timer(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
if (ws->active && ws->timer_expires
&& time_after_eq(jiffies, ws->timer_expires)) {
wakeup_source_deactivate(ws);
ws->expire_count++;
}
spin_unlock_irqrestore(&ws->lock, flags);
}
/**
* pm_wakeup_ws_event - Notify the PM core of a wakeup event.
* @ws: Wakeup source object associated with the event source.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Notify the PM core of a wakeup event whose source is @ws that will take
* approximately @msec milliseconds to be processed by the kernel. If @ws is
* not active, activate it. If @msec is nonzero, set up the @ws' timer to
* execute pm_wakeup_timer_fn() in the future.
*
* It is safe to call this function from interrupt context.
*/
void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
unsigned long flags;
unsigned long expires;
if (!ws)
return;
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, hard);
if (!msec) {
wakeup_source_deactivate(ws);
goto unlock;
}
expires = jiffies + msecs_to_jiffies(msec);
if (!expires)
expires = 1;
if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
mod_timer(&ws->timer, expires);
ws->timer_expires = expires;
}
unlock:
spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
* pm_wakeup_dev_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Call pm_wakeup_ws_event() for the @dev's wakeup source object.
*/
void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
unsigned long flags;
if (!dev)
return;
spin_lock_irqsave(&dev->power.lock, flags);
pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
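/*
 * Example of the second pattern described earlier (illustrative, not part
 * of the original file): an interrupt handler that hands the event off to
 * another context can use the pm_wakeup_event() wrapper to keep the system
 * awake for an estimated processing time; struct foo is hypothetical.
 *
 *    static irqreturn_t foo_irq(int irq, void *data)
 *    {
 *        struct foo *foo = data;
 *
 *        pm_wakeup_event(foo->dev, 100);
 *        schedule_work(&foo->work);
 *        return IRQ_HANDLED;
 *    }
 */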
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (ws->active) {
pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
ktime_to_ns(ws->last_time) >
ktime_to_ns(last_activity_ws->last_time))) {
last_activity_ws = ws;
}
}
if (!active && last_activity_ws)
pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
* Compare the current number of registered wakeup events with its preserved
* value from the past and return true if new wakeup events have been registered
* since the old value was stored. Also return true if the current number of
* wakeup events being processed is different from zero.
*/
bool pm_wakeup_pending(void)
{
unsigned long flags;
bool ret = false;
raw_spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
unsigned int cnt, inpr;
split_counters(&cnt, &inpr);
ret = (cnt != saved_count || inpr > 0);
events_check_enabled = !ret;
}
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
return ret || atomic_read(&pm_abort_suspend) > 0;
}
EXPORT_SYMBOL_GPL(pm_wakeup_pending);
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_system_cancel_wakeup(void)
{
atomic_dec_if_positive(&pm_abort_suspend);
}
void pm_wakeup_clear(unsigned int irq_number)
{
raw_spin_lock_irq(&wakeup_irq_lock);
if (irq_number && wakeup_irq[0] == irq_number)
wakeup_irq[0] = wakeup_irq[1];
else
wakeup_irq[0] = 0;
wakeup_irq[1] = 0;
raw_spin_unlock_irq(&wakeup_irq_lock);
if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
unsigned long flags;
raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
if (wakeup_irq[0] == 0)
wakeup_irq[0] = irq_number;
else if (wakeup_irq[1] == 0)
wakeup_irq[1] = irq_number;
else
irq_number = 0;
pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
if (irq_number)
pm_system_wakeup();
}
unsigned int pm_wakeup_irq(void)
{
return wakeup_irq[0];
}
/**
* pm_get_wakeup_count - Read the number of registered wakeup events.
* @count: Address to store the value at.
* @block: Whether or not to block.
*
* Store the number of registered wakeup events at the address in @count. If
* @block is set, block until the current number of wakeup events being
* processed is zero.
*
* Return 'false' if the current number of wakeup events being processed is
* nonzero. Otherwise return 'true'.
*/
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
unsigned int cnt, inpr;
if (block) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(&wakeup_count_wait_queue, &wait,
TASK_INTERRUPTIBLE);
split_counters(&cnt, &inpr);
if (inpr == 0 || signal_pending(current))
break;
pm_print_active_wakeup_sources();
schedule();
}
finish_wait(&wakeup_count_wait_queue, &wait);
}
split_counters(&cnt, &inpr);
*count = cnt;
return !inpr;
}
/**
* pm_save_wakeup_count - Save the current number of registered wakeup events.
* @count: Value to compare with the current number of registered wakeup events.
*
* If @count is equal to the current number of registered wakeup events and the
* current number of wakeup events being processed is zero, store @count as the
* old number of registered wakeup events for pm_check_wakeup_events(), enable
* wakeup events detection and return 'true'. Otherwise disable wakeup events
* detection and return 'false'.
*/
bool pm_save_wakeup_count(unsigned int count)
{
unsigned int cnt, inpr;
unsigned long flags;
events_check_enabled = false;
raw_spin_lock_irqsave(&events_lock, flags);
split_counters(&cnt, &inpr);
if (cnt == count && inpr == 0) {
saved_count = count;
events_check_enabled = true;
}
raw_spin_unlock_irqrestore(&events_lock, flags);
return events_check_enabled;
}
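/*
 * Example usage (illustrative): these two helpers implement the
 * /sys/power/wakeup_count handshake. Reading blocks until in-progress
 * events drain, and writing the value back arms the wakeup check:
 *
 *	unsigned int cnt;
 *
 *	if (pm_get_wakeup_count(&cnt, true) && pm_save_wakeup_count(cnt))
 *		return example_enter_suspend();	 (hypothetical helper)
 */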
#ifdef CONFIG_PM_AUTOSLEEP
/**
* pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
* @set: Whether to set or to clear the autosleep_enabled flags.
*/
void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
ws->autosleep_enabled = set;
if (ws->active) {
if (set)
ws->start_prevent_time = now;
else
update_prevent_sleep_time(ws, now);
}
}
spin_unlock_irq(&ws->lock);
}
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
/**
* print_wakeup_source_stats - Print wakeup source statistics information.
* @m: seq_file to print the statistics into.
* @ws: Wakeup source object to print the statistics for.
*/
static int print_wakeup_source_stats(struct seq_file *m,
struct wakeup_source *ws)
{
unsigned long flags;
ktime_t total_time;
ktime_t max_time;
unsigned long active_count;
ktime_t active_time;
ktime_t prevent_sleep_time;
spin_lock_irqsave(&ws->lock, flags);
total_time = ws->total_time;
max_time = ws->max_time;
prevent_sleep_time = ws->prevent_sleep_time;
active_count = ws->active_count;
if (ws->active) {
ktime_t now = ktime_get();
active_time = ktime_sub(now, ws->last_time);
total_time = ktime_add(total_time, active_time);
if (active_time > max_time)
max_time = active_time;
if (ws->autosleep_enabled)
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(now, ws->start_prevent_time));
} else {
active_time = 0;
}
seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
ws->name, active_count, ws->event_count,
ws->wakeup_count, ws->expire_count,
ktime_to_ms(active_time), ktime_to_ms(total_time),
ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
ktime_to_ms(prevent_sleep_time));
spin_unlock_irqrestore(&ws->lock, flags);
return 0;
}
static void *wakeup_sources_stats_seq_start(struct seq_file *m,
loff_t *pos)
{
struct wakeup_source *ws;
loff_t n = *pos;
int *srcuidx = m->private;
if (n == 0) {
seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
}
*srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (n-- <= 0)
return ws;
}
return NULL;
}
static void *wakeup_sources_stats_seq_next(struct seq_file *m,
void *v, loff_t *pos)
{
struct wakeup_source *ws = v;
struct wakeup_source *next_ws = NULL;
++(*pos);
list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
next_ws = ws;
break;
}
if (!next_ws)
print_wakeup_source_stats(m, &deleted_ws);
return next_ws;
}
static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
{
int *srcuidx = m->private;
srcu_read_unlock(&wakeup_srcu, *srcuidx);
}
/**
* wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
* @m: seq_file to print the statistics into.
* @v: wakeup_source of each iteration
*/
static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
{
struct wakeup_source *ws = v;
print_wakeup_source_stats(m, ws);
return 0;
}
static const struct seq_operations wakeup_sources_stats_seq_ops = {
.start = wakeup_sources_stats_seq_start,
.next = wakeup_sources_stats_seq_next,
.stop = wakeup_sources_stats_seq_stop,
.show = wakeup_sources_stats_seq_show,
};
static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
}
static const struct file_operations wakeup_sources_stats_fops = {
.owner = THIS_MODULE,
.open = wakeup_sources_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static int __init wakeup_sources_debugfs_init(void)
{
debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
&wakeup_sources_stats_fops);
return 0;
}
postcore_initcall(wakeup_sources_debugfs_init);
| linux-master | drivers/base/power/wakeup.c |
// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
/*
* control - Report/change current runtime PM setting of the device
*
* Runtime power management of a device can be blocked with the help of
* this attribute. All devices have one of the following two values for
* the power/control file:
*
* + "auto\n" to allow the device to be power managed at run time;
* + "on\n" to prevent the device from being power managed at run time;
*
* The default for all devices is "auto", which means that devices may be
* subject to automatic power management, depending on their drivers.
* Changing this attribute to "on" prevents the driver from power managing
* the device at run time. Doing that while the device is suspended causes
* it to be woken up.
*
* wakeup - Report/change current wakeup option for device
*
* Some devices support "wakeup" events, which are hardware signals
* used to activate devices from suspended or low power states. Such
* devices have one of three values for the sysfs power/wakeup file:
*
* + "enabled\n" to issue the events;
* + "disabled\n" not to do so; or
* + "\n" for temporary or permanent inability to issue wakeup.
*
* (For example, unconfigured USB devices can't issue wakeups.)
*
* Familiar examples of devices that can issue wakeup events include
* keyboards and mice (both PS2 and USB styles), power buttons, modems,
* "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
* will wake the entire system from a suspend state; others may just
* wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special
 * out-of-band signaling.
*
* It is the responsibility of device drivers to enable (or disable)
* wakeup signaling as part of changing device power states, respecting
* the policy choices provided through the driver model.
*
* Devices may not be able to generate wakeup events from all power
* states. Also, the events may be ignored in some configurations;
* for example, they might need help from other devices that aren't
* active, or which may have wakeup disabled. Some drivers rely on
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
*
* async - Report/change current async suspend setting for the device
*
* Asynchronous suspend and resume of the device during system-wide power
* state transitions can be enabled by writing "enabled" to this file.
* Analogously, if "disabled" is written to this file, the device will be
* suspended and resumed synchronously.
*
* All devices have one of the following two values for power/async:
*
* + "enabled\n" to permit the asynchronous suspend/resume of the device;
* + "disabled\n" to forbid it;
*
* NOTE: It generally is unsafe to permit the asynchronous suspend/resume
* of a device unless it is certain that all of the PM dependencies of the
* device are known to the PM core. However, for some devices this
* attribute is set to "enabled" by bus type code or device drivers and in
 * that case it should be safe to leave the default value.
*
* autosuspend_delay_ms - Report/change a device's autosuspend_delay value
*
* Some drivers don't want to carry out a runtime suspend as soon as a
* device becomes idle; they want it always to remain idle for some period
* of time before suspending it. This period is the autosuspend_delay
* value (expressed in milliseconds) and it can be controlled by the user.
* If the value is negative then the device will never be runtime
* suspended.
*
* NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
* value are used only if the driver calls pm_runtime_use_autosuspend().
*
* wakeup_count - Report the number of wakeup events related to the device
*/
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);
static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}
static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t n)
{
device_lock(dev);
if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
device_unlock(dev);
return n;
}
static DEVICE_ATTR_RW(control);
static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 tmp = pm_runtime_active_time(dev);
do_div(tmp, NSEC_PER_MSEC);
return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_active_time);
static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 tmp = pm_runtime_suspended_time(dev);
do_div(tmp, NSEC_PER_MSEC);
return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_suspended_time);
static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *output;
if (dev->power.runtime_error) {
output = "error";
} else if (dev->power.disable_depth) {
output = "unsupported";
} else {
switch (dev->power.runtime_status) {
case RPM_SUSPENDED:
output = "suspended";
break;
case RPM_SUSPENDING:
output = "suspending";
break;
case RPM_RESUMING:
output = "resuming";
break;
case RPM_ACTIVE:
output = "active";
break;
default:
return -EIO;
}
}
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!dev->power.use_autosuspend)
return -EIO;
return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}
static ssize_t autosuspend_delay_ms_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
long delay;
if (!dev->power.use_autosuspend)
return -EIO;
if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
device_lock(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
device_unlock(dev);
return n;
}
static DEVICE_ATTR_RW(autosuspend_delay_ms);
static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sysfs_emit(buf, "n/a\n");
if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
s32 value;
int ret;
if (!kstrtos32(buf, 0, &value)) {
/*
* Prevent users from writing negative or "no constraint" values
* directly.
*/
if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
return -EINVAL;
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
} else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
}
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sysfs_emit(buf, "%s\n", "auto");
if (value == PM_QOS_LATENCY_ANY)
return sysfs_emit(buf, "%s\n", "any");
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
s32 value;
int ret;
if (kstrtos32(buf, 0, &value) == 0) {
/* Users can't write negative values directly */
if (value < 0)
return -EINVAL;
} else {
if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
& PM_QOS_FLAG_NO_POWER_OFF));
}
static ssize_t pm_qos_no_power_off_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
int ret;
if (kstrtoint(buf, 0, &ret))
return -EINVAL;
if (ret != 0 && ret != 1)
return -EINVAL;
ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
if (!device_can_wakeup(dev))
return -EINVAL;
if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->wakeup_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->active_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->wakeup_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
count = dev->power.wakeup->expire_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned int active;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
active = dev->power.wakeup->active;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%u\n", active);
}
static DEVICE_ATTR_RO(wakeup_active);
static ssize_t wakeup_total_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->total_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_total_time_ms);
static ssize_t wakeup_max_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->max_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_max_time_ms);
static ssize_t wakeup_last_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->last_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_last_time_ms);
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
						kgid_t kgid)
{
	if (dev->power.wakeup && dev->power.wakeup->dev)
		return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
	return 0;
}
#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
if (!enabled)
return sysfs_emit(buf, "\n");
return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
{
return 0;
}
#endif
#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);
static ssize_t runtime_active_kids_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);
static ssize_t runtime_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *output;
if (dev->power.disable_depth && !dev->power.runtime_auto)
output = "disabled & forbidden";
else if (dev->power.disable_depth)
output = "disabled";
else if (!dev->power.runtime_auto)
output = "forbidden";
else
output = "enabled";
return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
device_async_suspend_enabled(dev) ?
_enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
&dev_attr_async.attr,
#endif
&dev_attr_runtime_status.attr,
&dev_attr_runtime_usage.attr,
&dev_attr_runtime_active_kids.attr,
&dev_attr_runtime_enabled.attr,
#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
.name = power_group_name,
.attrs = power_attrs,
};
static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
&dev_attr_wakeup.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_wakeup_active_count.attr,
&dev_attr_wakeup_abort_count.attr,
&dev_attr_wakeup_expire_count.attr,
&dev_attr_wakeup_active.attr,
&dev_attr_wakeup_total_time_ms.attr,
&dev_attr_wakeup_max_time_ms.attr,
&dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
&dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
.name = power_group_name,
.attrs = wakeup_attrs,
};
static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
.name = power_group_name,
.attrs = runtime_attrs,
};
static struct attribute *pm_qos_resume_latency_attrs[] = {
&dev_attr_pm_qos_resume_latency_us.attr,
NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
.name = power_group_name,
.attrs = pm_qos_resume_latency_attrs,
};
static struct attribute *pm_qos_latency_tolerance_attrs[] = {
&dev_attr_pm_qos_latency_tolerance_us.attr,
NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
.name = power_group_name,
.attrs = pm_qos_latency_tolerance_attrs,
};
static struct attribute *pm_qos_flags_attrs[] = {
&dev_attr_pm_qos_no_power_off.attr,
NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
.name = power_group_name,
.attrs = pm_qos_flags_attrs,
};
int dpm_sysfs_add(struct device *dev)
{
int rc;
/* No need to create PM sysfs if explicitly disabled. */
if (device_pm_not_required(dev))
return 0;
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
goto err_out;
}
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
if (rc)
goto err_runtime;
}
if (dev->power.set_latency_tolerance) {
rc = sysfs_merge_group(&dev->kobj,
&pm_qos_latency_tolerance_attr_group);
if (rc)
goto err_wakeup;
}
rc = pm_wakeup_source_sysfs_add(dev);
if (rc)
goto err_latency;
return 0;
err_latency:
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
}
int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
int rc;
if (device_pm_not_required(dev))
return 0;
rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
if (rc)
return rc;
if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_group_change_owner(
&dev->kobj, &pm_runtime_attr_group, kuid, kgid);
if (rc)
return rc;
}
if (device_can_wakeup(dev)) {
rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
kuid, kgid);
if (rc)
return rc;
rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
if (rc)
return rc;
}
if (dev->power.set_latency_tolerance) {
rc = sysfs_group_change_owner(
&dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
kgid);
if (rc)
return rc;
}
return 0;
}
int wakeup_sysfs_add(struct device *dev)
{
int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
return ret;
}
void wakeup_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
int pm_qos_sysfs_add_flags(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
void pm_qos_sysfs_remove_flags(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
return sysfs_merge_group(&dev->kobj,
&pm_qos_latency_tolerance_attr_group);
}
void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}
void dpm_sysfs_remove(struct device *dev)
{
if (device_pm_not_required(dev))
return;
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
}
| linux-master | drivers/base/power/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP
*/
#include <kunit/test.h>
#include <linux/pm_qos.h>
/* Basic test for aggregating two "min" requests */
static void freq_qos_test_min(struct kunit *test)
{
struct freq_constraints qos;
struct freq_qos_request req1, req2;
int ret;
freq_constraints_init(&qos);
memset(&req1, 0, sizeof(req1));
memset(&req2, 0, sizeof(req2));
ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
KUNIT_EXPECT_EQ(test, ret, 1);
ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
ret = freq_qos_remove_request(&req2);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
ret = freq_qos_remove_request(&req1);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
FREQ_QOS_MIN_DEFAULT_VALUE);
}
/* Test that requests for MAX_DEFAULT_VALUE have no effect */
static void freq_qos_test_maxdef(struct kunit *test)
{
struct freq_constraints qos;
struct freq_qos_request req1, req2;
int ret;
freq_constraints_init(&qos);
memset(&req1, 0, sizeof(req1));
memset(&req2, 0, sizeof(req2));
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
FREQ_QOS_MAX_DEFAULT_VALUE);
ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
KUNIT_EXPECT_EQ(test, ret, 0);
ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
KUNIT_EXPECT_EQ(test, ret, 0);
/* Add max 1000 */
ret = freq_qos_update_request(&req1, 1000);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
/* Add max 2000, no impact */
ret = freq_qos_update_request(&req2, 2000);
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
/* Remove max 1000, new max 2000 */
ret = freq_qos_remove_request(&req1);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
}
/*
* Test that a freq_qos_request can be added again after removal
*
* This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
* QoS requests after removal")
*/
static void freq_qos_test_readd(struct kunit *test)
{
struct freq_constraints qos;
struct freq_qos_request req;
int ret;
freq_constraints_init(&qos);
memset(&req, 0, sizeof(req));
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
FREQ_QOS_MIN_DEFAULT_VALUE);
/* Add */
ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
/* Remove */
ret = freq_qos_remove_request(&req);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
FREQ_QOS_MIN_DEFAULT_VALUE);
/* Add again */
ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
KUNIT_EXPECT_EQ(test, ret, 1);
KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
}
static struct kunit_case pm_qos_test_cases[] = {
KUNIT_CASE(freq_qos_test_min),
KUNIT_CASE(freq_qos_test_maxdef),
KUNIT_CASE(freq_qos_test_readd),
{},
};
static struct kunit_suite pm_qos_test_module = {
.name = "qos-kunit-test",
.test_cases = pm_qos_test_cases,
};
kunit_test_suites(&pm_qos_test_module);
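/*
 * Note (illustrative): these cases are built when CONFIG_PM_QOS_KUNIT_TEST
 * is enabled and can be run with the usual KUnit tooling, e.g. via
 * tools/testing/kunit/kunit.py with a kunitconfig enabling that option.
 */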
| linux-master | drivers/base/power/qos-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <[email protected]>, Renesas Electronics Corp.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/of_clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM_CLK
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
struct pm_clock_entry {
struct list_head node;
char *con_id;
struct clk *clk;
enum pce_status status;
bool enabled_when_prepared;
};
/**
* pm_clk_list_lock - ensure exclusive access for modifying the PM clock
* entry list.
* @psd: pm_subsys_data instance corresponding to the PM clock entry list
* and clk_op_might_sleep count to be modified.
*
* Get exclusive access before modifying the PM clock entry list and the
* clock_op_might_sleep count to guard against concurrent modifications.
* This also protects against a concurrent clock_op_might_sleep and PM clock
* entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
* happen in atomic context, hence both the mutex and the spinlock must be
* taken here.
*/
static void pm_clk_list_lock(struct pm_subsys_data *psd)
__acquires(&psd->lock)
{
mutex_lock(&psd->clock_mutex);
spin_lock_irq(&psd->lock);
}
/**
* pm_clk_list_unlock - counterpart to pm_clk_list_lock().
* @psd: the same pm_subsys_data instance previously passed to
* pm_clk_list_lock().
*/
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
__releases(&psd->lock)
{
spin_unlock_irq(&psd->lock);
mutex_unlock(&psd->clock_mutex);
}
/**
* pm_clk_op_lock - ensure exclusive access for performing clock operations.
* @psd: pm_subsys_data instance corresponding to the PM clock entry list
 * and clock_op_might_sleep count being used.
* @flags: stored irq flags.
* @fn: string for the caller function's name.
*
* This is used by pm_clk_suspend() and pm_clk_resume() to guard
* against concurrent modifications to the clock entry list and the
* clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
* only the mutex can be locked and those functions can only be used in
* non atomic context. If clock_op_might_sleep == 0 then these functions
* may be used in any context and only the spinlock can be locked.
 * Returns -EPERM if called in atomic context when clock ops might sleep.
*/
static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
const char *fn)
/* sparse annotations don't work here as exit state isn't static */
{
bool atomic_context = in_atomic() || irqs_disabled();
try_again:
spin_lock_irqsave(&psd->lock, *flags);
if (!psd->clock_op_might_sleep) {
/* the __release is there to work around sparse limitations */
__release(&psd->lock);
return 0;
}
/* bail out if in atomic context */
if (atomic_context) {
pr_err("%s: atomic context with clock_ops_might_sleep = %d",
fn, psd->clock_op_might_sleep);
spin_unlock_irqrestore(&psd->lock, *flags);
might_sleep();
return -EPERM;
}
/* we must switch to the mutex */
spin_unlock_irqrestore(&psd->lock, *flags);
mutex_lock(&psd->clock_mutex);
/*
* There was a possibility for psd->clock_op_might_sleep
* to become 0 above. Keep the mutex only if not the case.
*/
if (likely(psd->clock_op_might_sleep))
return 0;
mutex_unlock(&psd->clock_mutex);
goto try_again;
}
/**
* pm_clk_op_unlock - counterpart to pm_clk_op_lock().
* @psd: the same pm_subsys_data instance previously passed to
* pm_clk_op_lock().
* @flags: irq flags provided by pm_clk_op_lock().
*/
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
/* sparse annotations don't work here as entry state isn't static */
{
if (psd->clock_op_might_sleep) {
mutex_unlock(&psd->clock_mutex);
} else {
/* the __acquire is there to work around sparse limitations */
__acquire(&psd->lock);
spin_unlock_irqrestore(&psd->lock, *flags);
}
}
/**
* __pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
*/
static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
{
int ret;
switch (ce->status) {
case PCE_STATUS_ACQUIRED:
ret = clk_prepare_enable(ce->clk);
break;
case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
break;
default:
return;
}
if (!ret)
ce->status = PCE_STATUS_ENABLED;
else
dev_err(dev, "%s: failed to enable clk %p, error %d\n",
__func__, ce->clk, ret);
}
/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: PM clock entry corresponding to the clock.
*/
static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
{
if (!ce->clk)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
return;
} else if (clk_is_enabled_when_prepared(ce->clk)) {
/* we defer preparing the clock in that case */
ce->status = PCE_STATUS_ACQUIRED;
ce->enabled_when_prepared = true;
} else if (clk_prepare(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
dev_err(dev, "clk_prepare() failed\n");
return;
} else {
ce->status = PCE_STATUS_PREPARED;
}
dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
ce->clk, ce->con_id);
}
static int __pm_clk_add(struct device *dev, const char *con_id,
struct clk *clk)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
if (!psd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce)
return -ENOMEM;
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
if (!ce->con_id) {
kfree(ce);
return -ENOMEM;
}
} else {
if (IS_ERR(clk)) {
kfree(ce);
return -ENOENT;
}
ce->clk = clk;
}
pm_clk_acquire(dev, ce);
pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
if (ce->enabled_when_prepared)
psd->clock_op_might_sleep++;
pm_clk_list_unlock(psd);
return 0;
}
/**
* pm_clk_add - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
*
* Add the clock represented by @con_id to the list of clocks used for
* the power management of @dev.
*/
int pm_clk_add(struct device *dev, const char *con_id)
{
return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
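/*
 * Example usage (illustrative; the "fck" con_id is hypothetical): a driver
 * probe path would typically pair this with pm_clk_create():
 *
 *	ret = pm_clk_create(dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = pm_clk_add(dev, "fck");
 *	if (ret)
 *		pm_clk_destroy(dev);
 */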
/**
* pm_clk_add_clk - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
* @clk: Clock pointer
*
* Add the clock to the list of clocks used for the power management of @dev.
* The power-management code will take control of the clock reference, so
 * callers should not call clk_put() on @clk after this function successfully
* returned.
*/
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
/**
* of_pm_clk_add_clk - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
* @name: Name of clock that is going to be used for power management.
*
* Add the clock described in the 'clocks' device-tree node that matches
* with the 'name' provided, to the list of clocks used for the power
* management of @dev. On success, returns 0. Returns a negative error
* code if the clock is not found or cannot be added.
*/
int of_pm_clk_add_clk(struct device *dev, const char *name)
{
struct clk *clk;
int ret;
if (!dev || !dev->of_node || !name)
return -EINVAL;
clk = of_clk_get_by_name(dev->of_node, name);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = pm_clk_add_clk(dev, clk);
if (ret) {
clk_put(clk);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
*
* Add a series of clocks described in the 'clocks' device-tree node for
* a device to the list of clocks used for the power management of @dev.
* On success, returns the number of clocks added. Returns a negative
* error code if there are no clocks in the device node for the device
* or if adding a clock fails.
*/
int of_pm_clk_add_clks(struct device *dev)
{
struct clk **clks;
int i, count;
int ret;
if (!dev || !dev->of_node)
return -EINVAL;
count = of_clk_get_parent_count(dev->of_node);
if (count <= 0)
return -ENODEV;
clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
if (!clks)
return -ENOMEM;
for (i = 0; i < count; i++) {
clks[i] = of_clk_get(dev->of_node, i);
if (IS_ERR(clks[i])) {
ret = PTR_ERR(clks[i]);
goto error;
}
ret = pm_clk_add_clk(dev, clks[i]);
if (ret) {
clk_put(clks[i]);
goto error;
}
}
kfree(clks);
return i;
error:
while (i--)
pm_clk_remove_clk(dev, clks[i]);
kfree(clks);
return ret;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
*/
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
switch (ce->status) {
case PCE_STATUS_ENABLED:
clk_disable(ce->clk);
fallthrough;
case PCE_STATUS_PREPARED:
clk_unprepare(ce->clk);
fallthrough;
case PCE_STATUS_ACQUIRED:
case PCE_STATUS_ERROR:
if (!IS_ERR(ce->clk))
clk_put(ce->clk);
break;
default:
break;
}
kfree(ce->con_id);
kfree(ce);
}
/**
* pm_clk_remove - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @con_id: Connection ID of the clock.
*
* Remove the clock represented by @con_id from the list of clocks used for
* the power management of @dev.
*/
void pm_clk_remove(struct device *dev, const char *con_id)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
if (!psd)
return;
pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
goto remove;
else if (!con_id || !ce->con_id)
continue;
else if (!strcmp(con_id, ce->con_id))
goto remove;
}
pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
if (ce->enabled_when_prepared)
psd->clock_op_might_sleep--;
pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove);
/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
*
* Remove the clock pointed to by @clk from the list of clocks used for
* the power management of @dev.
*/
void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
if (!psd || !clk)
return;
pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
if (ce->enabled_when_prepared)
psd->clock_op_might_sleep--;
pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
/**
* pm_clk_init - Initialize a device's list of power management clocks.
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
* object, set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
mutex_init(&psd->clock_mutex);
psd->clock_op_might_sleep = 0;
}
}
EXPORT_SYMBOL_GPL(pm_clk_init);
/**
* pm_clk_create - Create and initialize a device's list of PM clocks.
* @dev: Device to create and initialize the list of PM clocks for.
*
* Allocate a struct pm_subsys_data object, initialize its lock and clock_list
* members and make the @dev's power.subsys_data field point to it.
*/
int pm_clk_create(struct device *dev)
{
return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
/**
* pm_clk_destroy - Destroy a device's list of power management clocks.
* @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
* from the struct pm_subsys_data object pointed to by it before and free
* that object.
*/
void pm_clk_destroy(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce, *c;
struct list_head list;
if (!psd)
return;
INIT_LIST_HEAD(&list);
pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
psd->clock_op_might_sleep = 0;
pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
list_for_each_entry_safe_reverse(ce, c, &list, node) {
list_del(&ce->node);
__pm_clk_remove(ce);
}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
static void pm_clk_destroy_action(void *data)
{
pm_clk_destroy(data);
}
int devm_pm_clk_create(struct device *dev)
{
int ret;
ret = pm_clk_create(dev);
if (ret)
return ret;
return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_clk_create);
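/*
 * Example usage (illustrative): the managed variant removes the need for
 * an explicit pm_clk_destroy() on error and remove paths:
 *
 *	ret = devm_pm_clk_create(dev);
 *	if (ret)
 *		return ret;
 *
 *	return pm_clk_add(dev, NULL);
 */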
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
int pm_clk_suspend(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
ret = pm_clk_op_lock(psd, &flags, __func__);
if (ret)
return ret;
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
if (ce->status == PCE_STATUS_ENABLED) {
if (ce->enabled_when_prepared) {
clk_disable_unprepare(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
} else {
clk_disable(ce->clk);
ce->status = PCE_STATUS_PREPARED;
}
}
}
pm_clk_op_unlock(psd, &flags);
return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_suspend);
/**
* pm_clk_resume - Enable clocks in a device's PM clock list.
* @dev: Device to enable the clocks for.
*/
int pm_clk_resume(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
ret = pm_clk_op_lock(psd, &flags, __func__);
if (ret)
return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
pm_clk_op_unlock(psd, &flags);
return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_resume);
/**
* pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
* Specifically, the pm_domain member of that object is copied to the device's
* pm_domain field and its con_ids member is used to populate the device's list
* of PM clocks, depending on @action.
*
* If the device's pm_domain field is already populated with a value different
* from the one stored in the struct pm_clk_notifier_block object, the function
* does nothing.
*/
static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
char **con_id;
int error;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (dev->pm_domain)
break;
error = pm_clk_create(dev);
if (error)
break;
dev_pm_domain_set(dev, clknb->pm_domain);
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
pm_clk_add(dev, *con_id);
} else {
pm_clk_add(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
if (dev->pm_domain != clknb->pm_domain)
break;
dev_pm_domain_set(dev, NULL);
pm_clk_destroy(dev);
break;
}
return 0;
}
int pm_clk_runtime_suspend(struct device *dev)
{
int ret;
dev_dbg(dev, "%s\n", __func__);
ret = pm_generic_runtime_suspend(dev);
if (ret) {
dev_err(dev, "failed to suspend device\n");
return ret;
}
ret = pm_clk_suspend(dev);
if (ret) {
dev_err(dev, "failed to suspend clock\n");
pm_generic_runtime_resume(dev);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
int pm_clk_runtime_resume(struct device *dev)
{
int ret;
dev_dbg(dev, "%s\n", __func__);
ret = pm_clk_resume(dev);
if (ret) {
dev_err(dev, "failed to resume clock\n");
return ret;
}
return pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
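/*
 * Example usage (illustrative): buses or PM domains can plug these two
 * helpers directly into their runtime PM callbacks:
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		.runtime_suspend = pm_clk_runtime_suspend,
 *		.runtime_resume = pm_clk_runtime_resume,
 *	};
 */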
#else /* !CONFIG_PM_CLK */
/**
* enable_clock - Enable a device clock.
* @dev: Device whose clock is to be enabled.
* @con_id: Connection ID of the clock.
*/
static void enable_clock(struct device *dev, const char *con_id)
{
struct clk *clk;
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
clk_prepare_enable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
}
/**
* disable_clock - Disable a device clock.
* @dev: Device whose clock is to be disabled.
* @con_id: Connection ID of the clock.
*/
static void disable_clock(struct device *dev, const char *con_id)
{
struct clk *clk;
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
clk_disable_unprepare(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
}
/**
* pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
* Specifically, the con_ids member of that object is used to enable or disable
* the device's clocks, depending on @action.
*/
static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
char **con_id;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
} else {
enable_clock(dev, NULL);
}
break;
case BUS_NOTIFY_DRIVER_NOT_BOUND:
case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);
} else {
disable_clock(dev, NULL);
}
break;
}
return 0;
}
#endif /* !CONFIG_PM_CLK */
/**
* pm_clk_add_notifier - Add bus type notifier for power management clocks.
* @bus: Bus type to add the notifier to.
* @clknb: Notifier to be added to the given bus type.
*
* The nb member of @clknb is not expected to be initialized and its
* notifier_call member will be replaced with pm_clk_notify(). However,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
return;
clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
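/*
 * Example usage (illustrative; the con_ids are hypothetical): bus setup
 * code can register a notifier so that every device added to the bus has
 * its clocks managed automatically:
 *
 *	static struct pm_clk_notifier_block example_clknb = {
 *		.con_ids = { "fck", "ick", NULL },
 *	};
 *
 *	pm_clk_add_notifier(&platform_bus_type, &example_clknb);
 */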
| linux-master | drivers/base/power/clock_ops.c |
// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include "power.h"
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
* @wirq: Wake irq specific data
*
* Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
*/
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
unsigned long flags;
if (!dev || !wirq)
return -EINVAL;
spin_lock_irqsave(&dev->power.lock, flags);
if (dev_WARN_ONCE(dev, dev->power.wakeirq,
"wake irq already initialized\n")) {
spin_unlock_irqrestore(&dev->power.lock, flags);
return -EEXIST;
}
dev->power.wakeirq = wirq;
device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
return 0;
}
/**
* dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
* @dev: Device entry
* @irq: Device IO interrupt
*
* Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
* automatically configured for wake-up from suspend based
* on the device specific sysfs wakeup entry. Typically called
* during driver probe after calling device_init_wakeup().
*/
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
struct wake_irq *wirq;
int err;
if (irq < 0)
return -EINVAL;
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
wirq->dev = dev;
wirq->irq = irq;
err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
kfree(wirq);
return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
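/*
 * Example usage (illustrative): a driver probe path marks the device as
 * wakeup capable and then reuses its IO interrupt as the wake IRQ:
 *
 *	device_init_wakeup(dev, true);
 *	ret = dev_pm_set_wake_irq(dev, irq);
 */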
/**
* dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
* @dev: Device entry
*
* Detach a device wake IRQ and free resources.
*
* Note that it's OK for drivers to call this without calling
* dev_pm_set_wake_irq() as all the driver instances may not have
 * a wake IRQ configured. This avoids adding wake IRQ specific
* checks into the drivers.
*/
void dev_pm_clear_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
unsigned long flags;
if (!wirq)
return;
spin_lock_irqsave(&dev->power.lock, flags);
device_wakeup_detach_irq(dev);
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq);
wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
}
kfree(wirq->name);
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
* @_wirq: Wake IRQ data
*
* Some devices have a separate wake-up interrupt in addition to the
* device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
* specific pm_runtime functions to wake the device, and then it's
* up to the device to do whatever it needs to. Note that as the
* device may need to restore context and start up regulators, we
* use a threaded IRQ.
*
* Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
* device, and then device's pm_runtime_resume() can deal with the
* situation.
*/
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
struct wake_irq *wirq = _wirq;
int res;
/* Maybe abort suspend? */
if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
pm_wakeup_event(wirq->dev, 0);
return IRQ_HANDLED;
}
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
dev_warn(wirq->dev,
"wake IRQ with no resume: %i\n", res);
return IRQ_HANDLED;
}
static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
if (irq < 0)
return -EINVAL;
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
if (!wirq->name) {
err = -ENOMEM;
goto err_free;
}
wirq->dev = dev;
wirq->irq = irq;
/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
/*
* Consumer device may need to power up and restore state
* so we use a threaded irq.
*/
err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
wirq->name, wirq);
if (err)
goto err_free_name;
err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
goto err_free_irq;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
err_free_irq:
free_irq(irq, wirq);
err_free_name:
kfree(wirq->name);
err_free:
kfree(wirq);
return err;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
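/*
 * Example usage (illustrative): a device with a separate wake-up line in
 * addition to its IO interrupt, with both taken from firmware data:
 *
 *	wakeirq = platform_get_irq_optional(pdev, 1);
 *	if (wakeirq > 0)
 *		ret = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
 */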
/**
* dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
* with reverse enable ordering
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has a dedicated
* wake-up interrupt in addition to the device IO interrupt. It sets
* the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
* to enable dedicated wake-up interrupt after running the runtime suspend
* callback for @dev.
*/
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
/**
* dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
* @dev: Device
* @can_change_status: Can change wake-up interrupt status
*
 * Enables wakeirq conditionally. We need to enable the wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
* starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
* Should be only called from rpm_suspend() and rpm_resume() path.
* Caller must hold &dev->power.lock to change wirq->status
*/
void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
goto enable;
} else if (can_change_status) {
wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
goto enable;
}
return;
enable:
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
* @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
}
}
/**
* dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
* @dev: Device using the wake IRQ
*
 * Enable wake IRQ conditionally based on status; mainly used if we want to
* enable wake IRQ after running ->runtime_suspend() which depends on
* WAKE_IRQ_DEDICATED_REVERSE.
*
* Should be only called from rpm_suspend() path.
*/
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
enable_irq(wirq->irq);
}
/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
*/
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
if (!wirq)
return;
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
}
}
/**
* dev_pm_disarm_wake_irq - Disarm device wake-up
* @wirq: Device wake-up interrupt
*
 * Clears the wake-up event conditionally based on
 * device_may_wakeup().
*/
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
if (!wirq)
return;
if (device_may_wakeup(wirq->dev)) {
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}
| linux-master | drivers/base/power/wakeirq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Wakeup statistics in sysfs
*
* Copyright (c) 2019 Linux Foundation
* Copyright (c) 2019 Greg Kroah-Hartman <[email protected]>
* Copyright (c) 2019 Google Inc.
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include "power.h"
static struct class *wakeup_class;
#define wakeup_attr(_name) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct wakeup_source *ws = dev_get_drvdata(dev); \
\
return sysfs_emit(buf, "%lu\n", ws->_name); \
} \
static DEVICE_ATTR_RO(_name)
wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
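/*
 * For reference, wakeup_attr(active_count) above expands to roughly:
 *
 *	static ssize_t active_count_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct wakeup_source *ws = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%lu\n", ws->active_count);
 *	}
 *	static DEVICE_ATTR_RO(active_count);
 */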
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
ktime_t active_time =
ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time));
}
static DEVICE_ATTR_RO(active_time_ms);
static ssize_t total_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
ktime_t active_time;
ktime_t total_time = ws->total_time;
if (ws->active) {
active_time = ktime_sub(ktime_get(), ws->last_time);
total_time = ktime_add(total_time, active_time);
}
return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time));
}
static DEVICE_ATTR_RO(total_time_ms);
static ssize_t max_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
ktime_t active_time;
ktime_t max_time = ws->max_time;
if (ws->active) {
active_time = ktime_sub(ktime_get(), ws->last_time);
if (active_time > max_time)
max_time = active_time;
}
return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time));
}
static DEVICE_ATTR_RO(max_time_ms);
static ssize_t last_change_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time));
}
static DEVICE_ATTR_RO(last_change_ms);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ws->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t prevent_suspend_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wakeup_source *ws = dev_get_drvdata(dev);
ktime_t prevent_sleep_time = ws->prevent_sleep_time;
if (ws->active && ws->autosleep_enabled) {
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(ktime_get(), ws->start_prevent_time));
}
return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
}
static DEVICE_ATTR_RO(prevent_suspend_time_ms);
static struct attribute *wakeup_source_attrs[] = {
&dev_attr_name.attr,
&dev_attr_active_count.attr,
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
&dev_attr_last_change_ms.attr,
&dev_attr_prevent_suspend_time_ms.attr,
NULL,
};
ATTRIBUTE_GROUPS(wakeup_source);
static void device_create_release(struct device *dev)
{
kfree(dev);
}
static struct device *wakeup_source_device_create(struct device *parent,
struct wakeup_source *ws)
{
struct device *dev = NULL;
int retval;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto error;
}
device_initialize(dev);
dev->devt = MKDEV(0, 0);
dev->class = wakeup_class;
dev->parent = parent;
dev->groups = wakeup_source_groups;
dev->release = device_create_release;
dev_set_drvdata(dev, ws);
device_set_pm_not_required(dev);
retval = dev_set_name(dev, "wakeup%d", ws->id);
if (retval)
goto error;
retval = device_add(dev);
if (retval)
goto error;
return dev;
error:
put_device(dev);
return ERR_PTR(retval);
}
/**
* wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
* @parent: Device given wakeup source is associated with (or NULL if virtual).
* @ws: Wakeup source to be added in sysfs.
*/
int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
{
struct device *dev;
dev = wakeup_source_device_create(parent, ws);
if (IS_ERR(dev))
return PTR_ERR(dev);
ws->dev = dev;
return 0;
}
/**
* pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
* for a device if they're missing.
* @parent: Device given wakeup source is associated with
*/
int pm_wakeup_source_sysfs_add(struct device *parent)
{
if (!parent->power.wakeup || parent->power.wakeup->dev)
return 0;
return wakeup_source_sysfs_add(parent, parent->power.wakeup);
}
/**
* wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
* @ws: Wakeup source to be removed from sysfs.
*/
void wakeup_source_sysfs_remove(struct wakeup_source *ws)
{
device_unregister(ws->dev);
}
static int __init wakeup_sources_sysfs_init(void)
{
wakeup_class = class_create("wakeup");
return PTR_ERR_OR_ZERO(wakeup_class);
}
postcore_initcall(wakeup_sources_sysfs_init);
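/*
 * Userspace view (sketch): each wakeup source shows up as
 * /sys/class/wakeup/wakeup<N>/ carrying the attributes defined above.
 * A minimal standalone reader, assuming wakeup0 exists:
 */
#include <stdio.h>

static unsigned long read_ulong(const char *path)
{
	unsigned long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%lu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("events: %lu, total active: %lu ms\n",
	       read_ulong("/sys/class/wakeup/wakeup0/event_count"),
	       read_ulong("/sys/class/wakeup/wakeup0/total_time_ms"));
	return 0;
}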
| linux-master | drivers/base/power/wakeup_stats.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
* Watchers can register a per-device notification callback using the
* dev_pm_qos_*_notifier API. The notification chain data is stored in the
* per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
* . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
* . The data is later free'd when the device is removed from the system.
* . A global mutex protects the constraints users from the data being
* allocated and free'd.
*/
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
/**
* __dev_pm_qos_flags - Check PM QoS flags for a given device.
* @dev: Device to check the PM QoS flags for.
* @mask: Flags to check against.
*
* This routine must be called with dev->power.lock held.
*/
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
struct dev_pm_qos *qos = dev->power.qos;
struct pm_qos_flags *pqf;
s32 val;
lockdep_assert_held(&dev->power.lock);
if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags;
if (list_empty(&pqf->list))
return PM_QOS_FLAGS_UNDEFINED;
val = pqf->effective_flags & mask;
if (val)
return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
return PM_QOS_FLAGS_NONE;
}
/**
* dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
* @dev: Device to check the PM QoS flags for.
* @mask: Flags to check against.
*/
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
unsigned long irqflags;
enum pm_qos_flags_status ret;
spin_lock_irqsave(&dev->power.lock, irqflags);
ret = __dev_pm_qos_flags(dev, mask);
spin_unlock_irqrestore(&dev->power.lock, irqflags);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
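/*
 * Usage sketch (hypothetical caller, "foo" is illustrative):
 * PM_QOS_FLAG_NO_POWER_OFF vetoes removing power whenever at least one
 * request has it set, mirroring how firmware glue code typically tests
 * these flags.
 */
static bool foo_may_power_off(struct device *dev)
{
	return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) <=
			PM_QOS_FLAGS_NONE;
}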
/**
* __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
* This routine must be called with dev->power.lock held.
*/
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
lockdep_assert_held(&dev->power.lock);
return dev_pm_qos_raw_resume_latency(dev);
}
/**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
* @type: QoS request type.
*/
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
struct dev_pm_qos *qos = dev->power.qos;
unsigned long flags;
s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
break;
default:
WARN_ON(1);
ret = 0;
}
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
/**
* apply_constraint - Add/modify/remove device PM QoS request.
* @req: Constraint request to apply
* @action: Action to perform (add/update/remove).
* @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
* code and if needed call the per-device callbacks.
*/
static int apply_constraint(struct dev_pm_qos_request *req,
enum pm_qos_req_action action, s32 value)
{
struct dev_pm_qos *qos = req->dev->power.qos;
int ret;
	switch (req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
value = 0;
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
ret = pm_qos_update_target(&qos->latency_tolerance,
&req->data.pnode, action, value);
if (ret) {
value = pm_qos_read_value(&qos->latency_tolerance);
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
case DEV_PM_QOS_MIN_FREQUENCY:
case DEV_PM_QOS_MAX_FREQUENCY:
ret = freq_qos_apply(&req->data.freq, action, value);
break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
break;
default:
ret = -EINVAL;
}
return ret;
}
/*
* dev_pm_qos_constraints_allocate
* @dev: device to allocate data for
*
 * Called at the first call to add_request() to allocate the constraint data.
 * Must be called with the dev_pm_qos_mtx mutex held.
*/
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
struct dev_pm_qos *qos;
struct pm_qos_constraints *c;
struct blocking_notifier_head *n;
qos = kzalloc(sizeof(*qos), GFP_KERNEL);
if (!qos)
return -ENOMEM;
n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
}
c = &qos->resume_latency;
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->latency_tolerance;
plist_head_init(&c->list);
c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
freq_constraints_init(&qos->freq);
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
dev->power.qos = qos;
spin_unlock_irq(&dev->power.lock);
return 0;
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
* @dev: target device
*
* Called from the device PM subsystem on device removal under device_pm_lock().
*/
void dev_pm_qos_constraints_destroy(struct device *dev)
{
struct dev_pm_qos *qos;
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
mutex_lock(&dev_pm_qos_sysfs_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
qos = dev->power.qos;
if (!qos)
goto out;
/* Flush the constraints lists for the device. */
c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
* callbacks if needed
*/
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->freq.min_freq;
plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->freq.max_freq;
plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
spin_lock_irq(&dev->power.lock);
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(qos->resume_latency.notifiers);
kfree(qos);
out:
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
static bool dev_pm_qos_invalid_req_type(struct device *dev,
enum dev_pm_qos_req_type type)
{
return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
!dev->power.set_latency_tolerance;
}
static int __dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
int ret = 0;
if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
return -EINVAL;
if (WARN(dev_pm_qos_request_active(req),
"%s() called for already added request\n", __func__))
return -EINVAL;
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
if (ret)
return ret;
req->dev = dev;
req->type = type;
if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
ret = freq_qos_add_request(&dev->power.qos->freq,
&req->data.freq,
FREQ_QOS_MIN, value);
else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
ret = freq_qos_add_request(&dev->power.qos->freq,
&req->data.freq,
FREQ_QOS_MAX, value);
else
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
return ret;
}
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
* @req: pointer to a preallocated handle
* @type: type of the request
* @value: defines the qos request
*
* This function inserts a new entry in the device constraints list of
* requested qos performance characteristics. It recomputes the aggregate
* QoS expectations of parameters and initializes the dev_pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
* to allocate for data structures, -ENODEV if the device has just been removed
* from the system.
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
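/*
 * Lifecycle sketch ("foo" is an illustrative driver-private struct):
 * the request handle must stay allocated for as long as the request is
 * active, so it usually lives in driver data rather than on the stack.
 */
struct foo {
	struct device *dev;
	struct dev_pm_qos_request lat_req;
};

static int foo_limit_resume_latency(struct foo *foo)
{
	/* Ask that resuming foo->dev never take longer than 100 us. */
	return dev_pm_qos_add_request(foo->dev, &foo->lat_req,
				      DEV_PM_QOS_RESUME_LATENCY, 100);
}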
/**
* __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
* @new_value: New value to request.
*/
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 new_value)
{
s32 curr_value;
int ret = 0;
	if (!req) /* guard against callers passing in NULL */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
	switch (req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_MIN_FREQUENCY:
case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.freq.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
break;
default:
return -EINVAL;
}
trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
new_value);
if (curr_value != new_value)
ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
return ret;
}
/**
* dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to the list element holding a dev_pm_qos request to use
* @new_value: defines the qos request
*
* Updates an existing dev PM qos request along with updating the
* target value.
*
* Attempts are made to make this code callable on hot code paths.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;
	if (!req) /* guard against callers passing in NULL */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
PM_QOS_DEFAULT_VALUE);
ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
}
/**
 * dev_pm_qos_remove_request - removes an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value. Call this on slow code paths.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
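/*
 * Continuing the sketch above dev_pm_qos_add_request(): tighten the
 * limit at runtime, then drop the request once the driver no longer
 * cares (the handle may be reused afterwards).
 */
static void foo_tighten_and_drop(struct foo *foo)
{
	dev_pm_qos_update_request(&foo->lat_req, 20);	/* now 20 us */
	dev_pm_qos_remove_request(&foo->lat_req);
}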
/**
* dev_pm_qos_add_notifier - sets notification entry for changes to target value
* of per-device PM QoS constraints
*
* @dev: target device for the constraint
* @notifier: notifier block managed by caller.
* @type: request type.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
*
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or an error code will be returned if that
 * fails).
*/
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{
int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
if (ret)
goto unlock;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = freq_qos_add_notifier(&dev->power.qos->freq,
FREQ_QOS_MIN, notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = freq_qos_add_notifier(&dev->power.qos->freq,
FREQ_QOS_MAX, notifier);
break;
default:
WARN_ON(1);
ret = -EINVAL;
}
unlock:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
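/*
 * Notifier sketch ("foo" names are illustrative): the callback receives
 * the new aggregate value for the constraint class it registered on.
 */
static int foo_lat_notify(struct notifier_block *nb, unsigned long value,
			  void *unused)
{
	pr_debug("resume latency target is now %lu us\n", value);
	return NOTIFY_OK;
}

static struct notifier_block foo_lat_nb = {
	.notifier_call = foo_lat_notify,
};

/* Registered with:
 * dev_pm_qos_add_notifier(dev, &foo_lat_nb, DEV_PM_QOS_RESUME_LATENCY);
 */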
/**
* dev_pm_qos_remove_notifier - deletes notification for changes to target value
* of per-device PM QoS constraints
*
* @dev: target device for the constraint
* @notifier: notifier block to be removed.
* @type: request type.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value.
*/
int dev_pm_qos_remove_notifier(struct device *dev,
struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{
int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
if (IS_ERR_OR_NULL(dev->power.qos))
goto unlock;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = freq_qos_remove_notifier(&dev->power.qos->freq,
FREQ_QOS_MIN, notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = freq_qos_remove_notifier(&dev->power.qos->freq,
FREQ_QOS_MAX, notifier);
break;
default:
WARN_ON(1);
ret = -EINVAL;
}
unlock:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
* @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
while (ancestor && !ancestor->power.set_latency_tolerance)
ancestor = ancestor->parent;
break;
default:
ancestor = NULL;
}
if (ancestor)
ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
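/*
 * Sketch: a device whose parent has power.ignore_children set (e.g. an
 * on-SoC controller) constrains the resume latency of the nearest
 * ancestor that actually gates its power ("foo" is illustrative).
 */
static int foo_constrain_ancestor(struct device *dev,
				  struct dev_pm_qos_request *req)
{
	return dev_pm_qos_add_ancestor_request(dev, req,
					       DEV_PM_QOS_RESUME_LATENCY, 50);
}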
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
struct dev_pm_qos_request *req = NULL;
	switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
req = dev->power.qos->resume_latency_req;
dev->power.qos->resume_latency_req = NULL;
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
req = dev->power.qos->latency_tolerance_req;
dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
default:
WARN_ON(1);
return;
}
__dev_pm_qos_remove_request(req);
kfree(req);
}
static void dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_drop_user_request(dev, type);
mutex_unlock(&dev_pm_qos_mtx);
}
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
* @value: Initial value of the latency limit.
*/
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
struct dev_pm_qos_request *req;
int ret;
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
}
mutex_lock(&dev_pm_qos_sysfs_mtx);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
/**
* dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
* @dev: Device whose PM QoS flags are to be exposed to user space.
* @val: Initial values of the flags.
*/
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
struct dev_pm_qos_request *req;
int ret;
if (!device_is_registered(dev))
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
if (ret < 0) {
kfree(req);
return ret;
}
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_sysfs_mtx);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->flags_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
dev->power.qos->flags_req = req;
mutex_unlock(&dev_pm_qos_mtx);
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
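/*
 * Sketch ("foo" is illustrative): a subsystem exposing the flags to user
 * space with "power off allowed" as the initial state (0 means no flags
 * set); an initial value of PM_QOS_FLAG_NO_POWER_OFF would instead start
 * with power-off vetoed.
 */
static void foo_expose_pm_flags(struct device *dev)
{
	if (dev_pm_qos_expose_flags(dev, 0))
		dev_dbg(dev, "failed to expose PM QoS flags\n");
}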
static void __dev_pm_qos_hide_flags(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}
/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
/**
* dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
* @dev: Device to update the PM QoS flags request for.
* @mask: Flags to set/clear.
* @set: Whether to set or clear the flags (true means set).
*/
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
s32 value;
int ret;
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
ret = -EINVAL;
goto out;
}
value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
else
value &= ~mask;
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
/**
* dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
* @dev: Device to obtain the user space latency tolerance for.
*/
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
s32 ret;
mutex_lock(&dev_pm_qos_mtx);
ret = IS_ERR_OR_NULL(dev->power.qos)
|| !dev->power.qos->latency_tolerance_req ?
PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
dev->power.qos->latency_tolerance_req->data.pnode.prio;
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
/**
* dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
* @dev: Device to update the user space latency tolerance for.
* @val: New user space latency tolerance for @dev (negative values disable).
*/
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos)
|| !dev->power.qos->latency_tolerance_req) {
struct dev_pm_qos_request *req;
if (val < 0) {
if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
ret = 0;
else
ret = -EINVAL;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto out;
}
ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
if (ret < 0) {
kfree(req);
goto out;
}
dev->power.qos->latency_tolerance_req = req;
} else {
if (val < 0) {
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
ret = 0;
} else {
ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
}
}
out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
* @dev: Device whose latency tolerance to expose
*/
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
int ret;
if (!dev->power.set_latency_tolerance)
return -EINVAL;
mutex_lock(&dev_pm_qos_sysfs_mtx);
ret = pm_qos_sysfs_add_latency_tolerance(dev);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
/**
* dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
* @dev: Device whose latency tolerance to hide
*/
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_latency_tolerance(dev);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
/* Remove the request from user space now */
pm_runtime_get_sync(dev);
dev_pm_qos_update_user_latency_tolerance(dev,
PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
| linux-master | drivers/base/power/qos.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
* controlling device power management will also be added.
*
* A separate list is used for keeping track of power info, because the power
* domain dependencies may differ from the ancestral dependencies that the
* subsystem list maintains.
*/
#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
#define list_for_each_entry_rcu_locked(pos, head, member) \
list_for_each_entry_rcu(pos, head, member, \
device_links_read_lock_held())
/*
 * The entries in dpm_list are in depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
*
* Since device_pm_add() may be called with a device lock held,
* we must never try to acquire a device lock while holding
* dpm_list_mutex.
*/
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
static int async_error;
static const char *pm_verb(int event)
{
switch (event) {
case PM_EVENT_SUSPEND:
return "suspend";
case PM_EVENT_RESUME:
return "resume";
case PM_EVENT_FREEZE:
return "freeze";
case PM_EVENT_QUIESCE:
return "quiesce";
case PM_EVENT_HIBERNATE:
return "hibernate";
case PM_EVENT_THAW:
return "thaw";
case PM_EVENT_RESTORE:
return "restore";
case PM_EVENT_RECOVER:
return "recover";
default:
return "(unknown PM event)";
}
}
/**
* device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
*/
void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
dev->power.is_noirq_suspended = false;
dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
INIT_LIST_HEAD(&dev->power.entry);
}
/**
* device_pm_lock - Lock the list of active devices used by the PM core.
*/
void device_pm_lock(void)
{
mutex_lock(&dpm_list_mtx);
}
/**
* device_pm_unlock - Unlock the list of active devices used by the PM core.
*/
void device_pm_unlock(void)
{
mutex_unlock(&dpm_list_mtx);
}
/**
* device_pm_add - Add a device to the PM core's list of active devices.
* @dev: Device to add to the list.
*/
void device_pm_add(struct device *dev)
{
/* Skip PM setup/initialization. */
if (device_pm_not_required(dev))
return;
pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
/**
* device_pm_remove - Remove a device from the PM core's list of active devices.
* @dev: Device to be removed from the list.
*/
void device_pm_remove(struct device *dev)
{
if (device_pm_not_required(dev))
return;
pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
device_pm_check_callbacks(dev);
}
/**
* device_pm_move_before - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come before.
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
* device_pm_move_after - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come after.
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
list_move(&deva->power.entry, &devb->power.entry);
}
/**
* device_pm_move_last - Move device to end of the PM core's list of devices.
* @dev: Device to move in dpm_list.
*/
void device_pm_move_last(struct device *dev)
{
pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
if (!pm_print_times_enabled)
return 0;
dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
void *cb, int error)
{
ktime_t rettime;
if (!pm_print_times_enabled)
return;
rettime = ktime_get();
dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
* dpm_wait - Wait for a PM operation to complete.
* @dev: Device to wait for.
* @async: If unset, wait only if the device's power.async_suspend flag is set.
*/
static void dpm_wait(struct device *dev, bool async)
{
if (!dev)
return;
if (async || (pm_async_enabled && dev->power.async_suspend))
wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
dpm_wait(dev, *((bool *)async_ptr));
return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* If the supplier goes away right after we've checked the link to it,
* we'll wait for its completion to change the state, but that's fine,
* because the only things that will block as a result are the SRCU
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
struct device *parent;
/*
* If the device is resumed asynchronously and the parent's callback
* deletes both the device and the parent itself, the parent object may
* be freed while this function is running, so avoid that by reference
* counting the parent once more unless the device has been deleted
* already (in which case return right away).
*/
mutex_lock(&dpm_list_mtx);
if (!device_pm_initialized(dev)) {
mutex_unlock(&dpm_list_mtx);
return false;
}
parent = get_device(dev->parent);
mutex_unlock(&dpm_list_mtx);
dpm_wait(parent, async);
put_device(parent);
dpm_wait_for_suppliers(dev, async);
/*
* If the parent's callback has deleted the device, attempting to resume
* it would be invalid, so avoid doing that then.
*/
return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* The status of a device link can only be changed from "dormant" by a
* probe, but that cannot happen during system suspend/resume. In
* theory it can change to "dormant" at that time, but then it is
 * reasonable to wait for the target device anyway (e.g. if it goes
* away, it's better to wait for it to go away completely and then
* continue instead of trying to continue in parallel with its
* unregistration).
*/
list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
dpm_wait_for_children(dev, async);
dpm_wait_for_consumers(dev, async);
}
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend;
case PM_EVENT_RESUME:
return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw;
case PM_EVENT_RESTORE:
return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
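/*
 * For reference, pm_op() resolves into a driver-supplied table such as
 * the (illustrative) one below: PM_EVENT_SUSPEND selects .suspend,
 * PM_EVENT_RESUME selects .resume, and the hibernation events select
 * the freeze/thaw/poweroff/restore entries.
 */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
	.freeze = foo_suspend,
	.thaw = foo_resume,
};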
/**
* pm_late_early_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend_late;
case PM_EVENT_RESUME:
return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw_early;
case PM_EVENT_RESTORE:
return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
/**
* pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend_noirq;
case PM_EVENT_RESUME:
return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw_noirq;
case PM_EVENT_RESTORE:
return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
", may wakeup" : "", dev->power.driver_flags);
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
u64 usecs64;
int usecs;
calltime = ktime_get();
usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
do_div(usecs64, NSEC_PER_USEC);
usecs = usecs64;
if (usecs == 0)
usecs = 1;
pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
error ? "aborted" : "complete",
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
{
ktime_t calltime;
int error;
if (!cb)
return 0;
calltime = initcall_debug_start(dev, cb);
pm_dev_dbg(dev, state, info);
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL, KERN_EMERG);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
/**
* dpm_watchdog_set - Enable pm watchdog for given device.
* @wd: Watchdog. Must be allocated on the stack.
* @dev: Device to handle.
*/
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
struct timer_list *timer = &wd->timer;
wd->dev = dev;
wd->tsk = current;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
add_timer(timer);
}
/**
* dpm_watchdog_clear - Disable suspend/resume watchdog.
* @wd: Watchdog to disable.
*/
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
del_timer_sync(timer);
destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
/*------------------------- Resume routines -------------------------*/
/**
* dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
* Return:
* - %false if the transition under way is RESTORE.
* - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
* - The logical negation of %power.must_resume otherwise (that is, when the
* transition under way is RESUME).
*/
bool dev_pm_skip_resume(struct device *dev)
{
if (pm_transition.event == PM_EVENT_RESTORE)
return false;
if (pm_transition.event == PM_EVENT_THAW)
return dev_pm_skip_suspend(dev);
return !dev->power.must_resume;
}
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
if (!dev->power.is_noirq_suspended)
goto Out;
if (!dpm_wait_for_superior(dev, async))
goto Out;
skip_resume = dev_pm_skip_resume(dev);
/*
* If the driver callback is skipped below or by the middle layer
* callback and device_resume_early() also skips the driver callback for
* this device later, it needs to appear as "suspended" to PM-runtime,
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
* status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
* to avoid confusing drivers that don't use it.
*/
if (skip_resume)
pm_runtime_set_suspended(dev);
else if (dev_pm_skip_suspend(dev))
pm_runtime_set_active(dev);
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (skip_resume)
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
Skip:
dev->power.is_noirq_suspended = false;
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
async_schedule_dev(func, dev);
return true;
}
return false;
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = device_resume_noirq(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
put_device(dev);
}
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
	/*
	 * Start the async threads upfront, in case their startup is
	 * delayed by non-async resuming devices.
	 */
list_for_each_entry(dev, &dpm_noirq_list, power.entry)
dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
int error;
error = device_resume_noirq(dev, state, false);
if (error) {
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " noirq", error);
}
}
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
* allow device drivers' interrupt handlers to be called.
*/
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
}
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
if (!dpm_wait_for_superior(dev, async))
goto Out;
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "early type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "early class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "early bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_resume(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
Skip:
dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
complete_all(&dev->power.completion);
return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = device_resume_early(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
put_device(dev);
}
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
void dpm_resume_early(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
	/*
	 * Start the async threads upfront, in case their startup is
	 * delayed by non-async resuming devices.
	 */
list_for_each_entry(dev, &dpm_late_early_list, power.entry)
dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
int error;
error = device_resume_early(dev, state, false);
if (error) {
suspend_stats.failed_resume_early++;
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}
}
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
* dpm_resume_start - Execute "noirq" and "early" device callbacks.
* @state: PM transition of the system being carried out.
*/
void dpm_resume_start(pm_message_t state)
{
dpm_resume_noirq(state);
dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->power.syscore)
goto Complete;
if (dev->power.direct_complete) {
/* Match the pm_runtime_disable() in __device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
if (!dpm_wait_for_superior(dev, async))
goto Complete;
dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
* This is a fib. But we'll allow new children to be added below
* a resumed device, even if the device hasn't been completed yet.
*/
dev->power.is_prepared = false;
if (!dev->power.is_suspended)
goto Unlock;
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
goto Driver;
}
if (dev->type && dev->type->pm) {
info = "type ";
callback = pm_op(dev->type->pm, state);
goto Driver;
}
if (dev->class && dev->class->pm) {
info = "class ";
callback = pm_op(dev->class->pm, state);
goto Driver;
}
if (dev->bus) {
if (dev->bus->pm) {
info = "bus ";
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
info = "legacy bus ";
callback = dev->bus->resume;
goto End;
}
}
Driver:
if (!callback && dev->driver && dev->driver->pm) {
info = "driver ";
callback = pm_op(dev->driver->pm, state);
}
End:
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = device_resume(dev, pm_transition, true);
if (error)
pm_dev_err(dev, pm_transition, " async", error);
put_device(dev);
}
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the appropriate "resume" callback for all devices whose status
* indicates that they are suspended.
*/
void dpm_resume(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
list_for_each_entry(dev, &dpm_suspended_list, power.entry)
dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
get_device(dev);
if (!is_async(dev)) {
int error;
mutex_unlock(&dpm_list_mtx);
error = device_resume(dev, state, false);
if (error) {
suspend_stats.failed_resume++;
dpm_save_failed_step(SUSPEND_RESUME);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, "", error);
}
mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
devfreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
* device_complete - Complete a PM transition for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*/
static void device_complete(struct device *dev, pm_message_t state)
{
void (*callback)(struct device *) = NULL;
const char *info = NULL;
if (dev->power.syscore)
goto out;
device_lock(dev);
if (dev->pm_domain) {
info = "completing power domain ";
callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
info = "completing type ";
callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
info = "completing class ";
callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
info = "completing bus ";
callback = dev->bus->pm->complete;
}
if (!callback && dev->driver && dev->driver->pm) {
info = "completing driver ";
callback = dev->driver->pm->complete;
}
if (callback) {
pm_dev_dbg(dev, state, info);
callback(dev);
}
device_unlock(dev);
out:
pm_runtime_put(dev);
}
/**
* dpm_complete - Complete a PM transition for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the ->complete() callbacks for all devices whose PM status is not
* DPM_ON (this allows new devices to be registered).
*/
void dpm_complete(pm_message_t state)
{
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
/* Allow device probing and trigger re-probing of deferred devices */
device_unblock_probing();
trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
* dpm_resume_end - Execute "resume" callbacks and complete system transition.
* @state: PM transition of the system being carried out.
*
* Execute "resume" callbacks for all devices and complete the PM transition of
* the system.
*/
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
*
* Return a PM message representing the resume event corresponding to given
* sleep state.
*/
static pm_message_t resume_event(pm_message_t sleep_state)
{
switch (sleep_state.event) {
case PM_EVENT_SUSPEND:
return PMSG_RESUME;
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return PMSG_RECOVER;
case PM_EVENT_HIBERNATE:
return PMSG_RESTORE;
}
return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
struct device_link *link;
int idx;
if (dev->parent)
dev->parent->power.must_resume = true;
idx = device_links_read_lock();
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
goto Complete;
}
Skip:
dev->power.is_noirq_suspended = true;
	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal. Also resume them if the resume is not allowed
	 * to be skipped.
	 */
if (atomic_read(&dev->power.usage_count) > 1 ||
!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume))
dev->power.must_resume = true;
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend_noirq(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
return __device_suspend_noirq(dev, pm_transition, false);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
dpm_save_failed_dev(dev_name(dev));
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_noirq_list);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
}
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers' interrupt handlers from being called and invoke
* "noirq" suspend callbacks for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
int ret;
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
struct device *parent = dev->parent;
if (!parent)
return;
spin_lock_irq(&parent->power.lock);
if (device_wakeup_path(dev) && !parent->power.ignore_children)
parent->power.wakeup_path = true;
spin_unlock_irq(&parent->power.lock);
}
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
__pm_runtime_disable(dev, false);
dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
goto Complete;
}
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "late type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "late class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "late bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (callback)
goto Run;
if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
Skip:
dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend_late(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend_late))
return 0;
return __device_suspend_late(dev, pm_transition, false);
}
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
wake_up_all_idle_cpus();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
if (error) {
pm_dev_err(dev, state, " late", error);
dpm_save_failed_dev(dev_name(dev));
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
/**
* dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend_end(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error;
error = dpm_suspend_late(state);
if (error)
goto out;
error = dpm_suspend_noirq(state);
if (error)
dpm_resume_early(resume_event(state));
out:
dpm_show_time(starttime, state, error, "end");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
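/*
 * Example (illustrative sketch, not part of this file): a system sleep
 * path is expected to pair the suspend-side entry points with their
 * resume-side counterparts, even on failure, so that every device that
 * was prepared also gets completed. The control flow below is heavily
 * simplified; the real callers live in kernel/power/.
 *
 *	static int example_enter_sleep(void)
 *	{
 *		int error = dpm_suspend_start(PMSG_SUSPEND);
 *
 *		if (!error) {
 *			error = dpm_suspend_end(PMSG_SUSPEND);
 *			if (!error) {
 *				... program the platform and enter the
 *				    sleep state here ...
 *				dpm_resume_start(PMSG_RESUME);
 *			}
 *		}
 *		dpm_resume_end(PMSG_RESUME);
 *		return error;
 *	}
 */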
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
* @dev: Device to suspend.
* @state: PM transition of the system being carried out.
* @cb: Suspend callback to execute.
* @info: string description of caller.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
int (*cb)(struct device *dev, pm_message_t state),
const char *info)
{
int error;
ktime_t calltime;
calltime = initcall_debug_start(dev, cb);
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
if (dev->parent) {
spin_lock_irq(&dev->parent->power.lock);
dev->parent->power.direct_complete = false;
spin_unlock_irq(&dev->parent->power.lock);
}
idx = device_links_read_lock();
list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
}
device_links_read_unlock(idx);
}
/**
* __device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_subordinate(dev, async);
if (async_error) {
dev->power.direct_complete = false;
goto Complete;
}
/*
* Wait for possible runtime PM transitions of the device in progress
* to complete and if there's a runtime resume request pending for it,
* resume it before proceeding with invoking the system-wide suspend
* callbacks for it.
*
* If the system-wide suspend callbacks below change the configuration
* of the device, they must disable runtime PM for it or otherwise
* ensure that its runtime-resume callbacks will not be confused by that
* change in case they are invoked going forward.
*/
pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
if (dev->power.syscore)
goto Complete;
/* Avoid direct_complete to let wakeup_path propagate. */
if (device_may_wakeup(dev) || device_wakeup_path(dev))
dev->power.direct_complete = false;
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
goto Complete;
}
pm_runtime_enable(dev);
}
dev->power.direct_complete = false;
}
dev->power.may_skip_resume = true;
dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
goto Run;
}
if (dev->type && dev->type->pm) {
info = "type ";
callback = pm_op(dev->type->pm, state);
goto Run;
}
if (dev->class && dev->class->pm) {
info = "class ";
callback = pm_op(dev->class->pm, state);
goto Run;
}
if (dev->bus) {
if (dev->bus->pm) {
info = "bus ";
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend,
"legacy bus ");
goto End;
}
}
Run:
if (!callback && dev->driver && dev->driver->pm) {
info = "driver ";
callback = pm_op(dev->driver->pm, state);
}
error = dpm_run_callback(callback, dev, state, info);
End:
if (!error) {
dev->power.is_suspended = true;
if (device_may_wakeup(dev))
dev->power.wakeup_path = true;
dpm_propagate_wakeup_to_parent(dev);
dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
if (error)
async_error = error;
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
put_device(dev);
}
static int device_suspend(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend))
return 0;
return __device_suspend(dev, pm_transition, false);
}
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
devfreq_suspend();
cpufreq_suspend();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, "", error);
dpm_save_failed_dev(dev_name(dev));
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_suspended_list);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
}
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback(s) for given device. No new children of the
* device may be registered after this function has returned.
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
int ret = 0;
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
* block runtime suspend here, during the prepare phase, and allow
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
if (dev->power.syscore)
return 0;
device_lock(dev);
dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks)
goto unlock;
if (dev->pm_domain)
callback = dev->pm_domain->ops.prepare;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->prepare;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->prepare;
else if (dev->bus && dev->bus->pm)
callback = dev->bus->pm->prepare;
if (!callback && dev->driver && dev->driver->pm)
callback = dev->driver->pm->prepare;
if (callback)
ret = callback(dev);
unlock:
device_unlock(dev);
if (ret < 0) {
suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
* runtime-suspended, you can leave it in that state provided that you
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
spin_unlock_irq(&dev->power.lock);
return 0;
}
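/*
 * Example (illustrative sketch): a driver opts in to the direct_complete
 * optimization described above by returning a positive value from its
 * ->prepare() callback when its device is runtime-suspended and can stay
 * that way across the whole system transition. The driver name is
 * hypothetical.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 */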
/**
* dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback(s) for all devices.
*/
int dpm_prepare(pm_message_t state)
{
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
might_sleep();
	/*
	 * Give the known devices a chance to complete their probes before
	 * probing is disabled below. This sync point is important at least
	 * at boot time and on hibernation restore.
	 */
wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing
	 * here and defer the probes instead. The normal behavior will be
	 * restored in dpm_complete().
	 */
device_block_probing();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
error = device_prepare(dev, state);
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
if (!error) {
dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
} else if (error == -EAGAIN) {
error = 0;
} else {
dev_info(dev, "not prepared for power transition: code %d\n",
error);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
return error;
}
/**
* dpm_suspend_start - Prepare devices for PM transition and suspend them.
* @state: PM transition of the system being carried out.
*
* Prepare all non-sysdev devices for system PM transition and execute "suspend"
* callbacks for them.
*/
int dpm_suspend_start(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
if (error) {
suspend_stats.failed_prepare++;
dpm_save_failed_step(SUSPEND_PREPARE);
} else
error = dpm_suspend(state);
dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
* @subordinate: Device that needs to wait for @dev.
* @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
dpm_wait(dev, subordinate->power.async_suspend);
return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
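/*
 * Example (illustrative sketch): a driver whose device must not carry on
 * resuming before another, functionally related device can serialize on
 * that device from its own resume callback. All "foo" names are
 * hypothetical.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, priv->companion);
 *		return foo_hw_reinit(priv);
 *	}
 */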
/**
* dpm_for_each_dev - device iterator.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over devices in dpm_list, and call @fn for each device,
* passing it @data.
*/
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
struct device *dev;
if (!fn)
return;
device_pm_lock();
list_for_each_entry(dev, &dpm_list, power.entry)
fn(dev, data);
device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
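/*
 * Example (illustrative sketch): count the devices currently on dpm_list.
 * dpm_for_each_dev() takes care of the locking; the helpers below are
 * hypothetical.
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	static unsigned int count_pm_devices(void)
 *	{
 *		unsigned int n = 0;
 *
 *		dpm_for_each_dev(&n, count_one);
 *		return n;
 *	}
 */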
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
if (!ops)
return true;
return !ops->prepare &&
!ops->suspend &&
!ops->suspend_late &&
!ops->suspend_noirq &&
!ops->resume_noirq &&
!ops->resume_early &&
!ops->resume &&
!ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
{
return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
pm_runtime_status_suspended(dev);
}
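/*
 * Example (illustrative sketch): a bus ->suspend_late() callback can use
 * dev_pm_skip_suspend() to leave a device that set DPM_FLAG_SMART_SUSPEND
 * in runtime suspend instead of suspending it again. The bus name is
 * hypothetical.
 *
 *	static int foo_bus_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_skip_suspend(dev))
 *			return 0;
 *		return pm_generic_suspend_late(dev);
 *	}
 */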
| linux-master | drivers/base/power/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
*
* Copyright (c) 2010 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_suspend(), execute it and return its error code. Otherwise,
* return 0.
*/
int pm_generic_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
/**
* pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
* @dev: Device to resume.
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_resume(), execute it and return its error code. Otherwise,
* return 0.
*/
int pm_generic_runtime_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
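/*
 * Example (illustrative sketch): a simple subsystem can forward runtime PM
 * to the driver by plugging the generic callbacks above into its
 * dev_pm_ops. The bus name is hypothetical.
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.runtime_suspend = pm_generic_runtime_suspend,
 *		.runtime_resume = pm_generic_runtime_resume,
 *	};
 */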
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
* pm_generic_prepare - Generic routine preparing a device for power transition.
* @dev: Device to prepare.
*
* Prepare a device for a system-wide power transition.
*/
int pm_generic_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm && drv->pm->prepare)
ret = drv->pm->prepare(dev);
return ret;
}
/**
* pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
/**
* pm_generic_suspend_late - Generic suspend_late callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
/**
* pm_generic_suspend - Generic suspend callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
/**
* pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
* pm_generic_freeze_late - Generic freeze_late callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
/**
* pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
/**
* pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
/**
* pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
* pm_generic_thaw_early - Generic thaw_early callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
/**
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
/**
* pm_generic_resume_early - Generic resume_early callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
/**
* pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
/**
* pm_generic_restore_early - Generic restore_early callback for subsystems.
 * @dev: Device to restore.
*/
int pm_generic_restore_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
/**
* pm_generic_complete - Generic routine completing a device power transition.
* @dev: Device to handle.
*
* Complete a device power transition during a system-wide power transition.
*/
void pm_generic_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
#endif /* CONFIG_PM_SLEEP */
| linux-master | drivers/base/power/generic_ops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain_governor.c - Governors for device PM domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <[email protected]>, Renesas Electronics Corp.
*/
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
s64 constraint_ns;
if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
struct gpd_timing_data *td = dev_gpd_data(dev)->td;
/*
* Only take suspend-time QoS constraints of devices into
* account, because constraints updated after the device has
* been suspended are not guaranteed to be taken into account
* anyway. In order for them to take effect, the device has to
* be resumed and suspended again.
*/
constraint_ns = td ? td->effective_constraint_ns :
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
} else {
/*
* The child is not in a domain and there's no info on its
* suspend/resume latencies, so assume them to be negligible and
* take its current PM QoS constraint (that's the only thing
* known at this point anyway).
*/
constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
constraint_ns *= NSEC_PER_USEC;
}
if (constraint_ns < *constraint_ns_p)
*constraint_ns_p = constraint_ns;
return 0;
}
/**
* default_suspend_ok - Default PM domain governor routine to suspend devices.
* @dev: Device to check.
*/
static bool default_suspend_ok(struct device *dev)
{
struct gpd_timing_data *td = dev_gpd_data(dev)->td;
unsigned long flags;
s64 constraint_ns;
dev_dbg(dev, "%s()\n", __func__);
spin_lock_irqsave(&dev->power.lock, flags);
if (!td->constraint_changed) {
bool ret = td->cached_suspend_ok;
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
td->constraint_changed = false;
td->cached_suspend_ok = false;
td->effective_constraint_ns = 0;
constraint_ns = __dev_pm_qos_resume_latency(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
if (constraint_ns == 0)
return false;
constraint_ns *= NSEC_PER_USEC;
/*
* We can walk the children without any additional locking, because
* they all have been suspended at this point and their
* effective_constraint_ns fields won't be modified in parallel with us.
*/
if (!dev->power.ignore_children)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
/* "No restriction", so the device is allowed to suspend. */
td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
td->cached_suspend_ok = true;
} else if (constraint_ns == 0) {
		/*
		 * This triggers if one of the children that are not in a
		 * domain has a zero PM QoS constraint, in which case it is
		 * better not to suspend. effective_constraint_ns is zero
		 * already and cached_suspend_ok is false, so bail out.
		 */
return false;
} else {
constraint_ns -= td->suspend_latency_ns +
td->resume_latency_ns;
/*
* effective_constraint_ns is zero already and cached_suspend_ok
* is false, so if the computed value is not positive, return
* right away.
*/
if (constraint_ns <= 0)
return false;
td->effective_constraint_ns = constraint_ns;
td->cached_suspend_ok = true;
}
/*
* The children have been suspended already, so we don't need to take
* their suspend latencies into account here.
*/
return td->cached_suspend_ok;
}
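/*
 * Worked example for the computation above: with an effective resume
 * latency constraint of 100 us (100000 ns), a measured suspend latency of
 * 30000 ns and a measured resume latency of 40000 ns, suspending is OK and
 * effective_constraint_ns becomes 100000 - (30000 + 40000) = 30000 ns,
 * i.e. the slack left over for the latencies of the domain itself.
 */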
static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
{
ktime_t domain_wakeup = KTIME_MAX;
ktime_t next_wakeup;
struct pm_domain_data *pdd;
struct gpd_link *link;
if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
return;
	/*
	 * Devices that have a predictable wakeup pattern may specify their
	 * next wakeup. Let's find the next wakeup from all the devices
	 * attached to this domain and from all the sub-domains. It is
	 * possible that a component's next wakeup has become stale by the
	 * time we read it here; stale values are ignored so that the domain
	 * can still enter its optimal idle state.
	 */
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
next_wakeup = to_gpd_data(pdd)->td->next_wakeup;
if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
if (ktime_before(next_wakeup, domain_wakeup))
domain_wakeup = next_wakeup;
}
list_for_each_entry(link, &genpd->parent_links, parent_node) {
struct genpd_governor_data *cgd = link->child->gd;
next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX;
if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
if (ktime_before(next_wakeup, domain_wakeup))
domain_wakeup = next_wakeup;
}
genpd->gd->next_wakeup = domain_wakeup;
}
static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
unsigned int state, ktime_t now)
{
ktime_t domain_wakeup = genpd->gd->next_wakeup;
s64 idle_time_ns, min_sleep_ns;
min_sleep_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].residency_ns;
idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
return idle_time_ns >= min_sleep_ns;
}
static bool __default_power_down_ok(struct dev_pm_domain *pd,
unsigned int state)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
struct pm_domain_data *pdd;
s64 min_off_time_ns;
s64 off_on_time_ns;
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;
min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
*
* All subdomains have been powered off already at this point.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
struct genpd_governor_data *cgd = link->child->gd;
s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1;
if (sd_max_off_ns < 0)
continue;
/*
* Check if the subdomain is allowed to be off long enough for
* the current domain to turn off and on (that's how much time
* it will have to wait worst case).
*/
if (sd_max_off_ns <= off_on_time_ns)
return false;
if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
min_off_time_ns = sd_max_off_ns;
}
/*
* Check if the devices in the domain can be off enough time.
*/
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
struct gpd_timing_data *td;
s64 constraint_ns;
/*
* Check if the device is allowed to be off long enough for the
* domain to turn off and on (that's how much time it will
* have to wait worst case).
*/
td = to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
/*
* Zero means "no suspend at all" and this runs only when all
* devices in the domain are suspended, so it must be positive.
*/
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
continue;
if (constraint_ns <= off_on_time_ns)
return false;
if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
min_off_time_ns = constraint_ns;
}
/*
* If the computed minimum device off time is negative, there are no
* latency constraints, so the domain can spend arbitrary time in the
* "off" state.
*/
if (min_off_time_ns < 0)
return true;
/*
* The difference between the computed minimum subdomain or device off
* time and the time needed to turn the domain on is the maximum
* theoretical time this domain can spend in the "off" state.
*/
genpd->gd->max_off_time_ns = min_off_time_ns -
genpd->states[state].power_on_latency_ns;
return true;
}
/**
* _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
* @now: current ktime.
*
* This routine must be executed under the PM domain's lock.
*/
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct genpd_governor_data *gd = genpd->gd;
int state_idx = genpd->state_count - 1;
struct gpd_link *link;
	/*
	 * Find the next wakeup from the devices that can determine their own
	 * wakeup, doing so for every device down the hierarchy, to learn when
	 * the domain would wake up. It is not worthwhile to sleep if a
	 * state's residency cannot be met.
	 */
update_domain_next_wakeup(genpd, now);
if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) {
		/* Let's find the deepest domain idle state the devices prefer. */
while (state_idx >= 0) {
if (next_wakeup_allows_state(genpd, state_idx, now)) {
gd->max_off_time_changed = true;
break;
}
state_idx--;
}
if (state_idx < 0) {
state_idx = 0;
gd->cached_power_down_ok = false;
goto done;
}
}
if (!gd->max_off_time_changed) {
genpd->state_idx = gd->cached_power_down_state_idx;
return gd->cached_power_down_ok;
}
/*
* We have to invalidate the cached results for the parents, so
* use the observation that default_power_down_ok() is not
* going to be called for any parent until this instance
* returns.
*/
list_for_each_entry(link, &genpd->child_links, child_node) {
struct genpd_governor_data *pgd = link->parent->gd;
if (pgd)
pgd->max_off_time_changed = true;
}
gd->max_off_time_ns = -1;
gd->max_off_time_changed = false;
gd->cached_power_down_ok = true;
/*
* Find a state to power down to, starting from the state
* determined by the next wakeup.
*/
while (!__default_power_down_ok(pd, state_idx)) {
if (state_idx == 0) {
gd->cached_power_down_ok = false;
break;
}
state_idx--;
}
done:
genpd->state_idx = state_idx;
gd->cached_power_down_state_idx = genpd->state_idx;
return gd->cached_power_down_ok;
}
static bool default_power_down_ok(struct dev_pm_domain *pd)
{
return _default_power_down_ok(pd, ktime_get());
}
#ifdef CONFIG_CPU_IDLE
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
ktime_t now = ktime_get();
s64 idle_duration_ns;
int cpu, i;
/* Validate dev PM QoS constraints. */
if (!_default_power_down_ok(pd, now))
return false;
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
return true;
/*
* Find the next wakeup for any of the online CPUs within the PM domain
* and its subdomains. Note, we only need the genpd->cpus, as it already
* contains a mask of all CPUs from subdomains.
*/
domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
dev = per_cpu(cpuidle_devices, cpu);
if (dev) {
next_hrtimer = READ_ONCE(dev->next_hrtimer);
if (ktime_before(next_hrtimer, domain_wakeup))
domain_wakeup = next_hrtimer;
}
}
	/* The minimum idle duration is from now until the next wakeup. */
idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
return false;
/* Store the next domain_wakeup to allow consumers to use it. */
genpd->gd->next_hrtimer = domain_wakeup;
/*
* Find the deepest idle state that has its residency value satisfied
* and by also taking into account the power off latency for the state.
* Start at the state picked by the dev PM QoS constraint validation.
*/
i = genpd->state_idx;
do {
if (idle_duration_ns >= (genpd->states[i].residency_ns +
genpd->states[i].power_off_latency_ns)) {
genpd->state_idx = i;
return true;
}
} while (--i >= 0);
return false;
}
struct dev_power_governor pm_domain_cpu_gov = {
.suspend_ok = default_suspend_ok,
.power_down_ok = cpu_power_down_ok,
};
#endif
struct dev_power_governor simple_qos_governor = {
.suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
};
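/*
 * Example (illustrative sketch): a genpd provider picks one of these
 * governors when initializing its domain; pm_genpd_init() is the real
 * registration entry point, while the "foo" names and callbacks are
 * hypothetical.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	static int foo_pd_probe(struct platform_device *pdev)
 *	{
 *		return pm_genpd_init(&foo_pd, &simple_qos_governor, true);
 *	}
 */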
/**
 * pm_domain_always_on_gov - A governor implementing an always-on policy.
 */
struct dev_power_governor pm_domain_always_on_gov = {
.suspend_ok = default_suspend_ok,
};
| linux-master | drivers/base/power/domain_governor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain.c - Common code related to device power domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <[email protected]>, Renesas Electronics Corp.
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include "power.h"
#define GENPD_RETRY_MAX_MS 250 /* Approximate */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
({ \
type (*__routine)(struct device *__d); \
type __ret = (type)0; \
\
__routine = genpd->dev_ops.callback; \
if (__routine) { \
__ret = __routine(dev); \
} \
__ret; \
})
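/*
 * For instance, GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to a
 * statement expression that calls genpd->dev_ops.stop(dev) if that
 * callback is set and evaluates to 0 otherwise; see genpd_stop_dev()
 * below for a real user.
 */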
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
struct genpd_lock_ops {
void (*lock)(struct generic_pm_domain *genpd);
void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
int (*lock_interruptible)(struct generic_pm_domain *genpd);
void (*unlock)(struct generic_pm_domain *genpd);
};
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
mutex_lock(&genpd->mlock);
}
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
int depth)
{
mutex_lock_nested(&genpd->mlock, depth);
}
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
return mutex_lock_interruptible(&genpd->mlock);
}
static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
return mutex_unlock(&genpd->mlock);
}
static const struct genpd_lock_ops genpd_mtx_ops = {
.lock = genpd_lock_mtx,
.lock_nested = genpd_lock_nested_mtx,
.lock_interruptible = genpd_lock_interruptible_mtx,
.unlock = genpd_unlock_mtx,
};
static void genpd_lock_spin(struct generic_pm_domain *genpd)
__acquires(&genpd->slock)
{
unsigned long flags;
spin_lock_irqsave(&genpd->slock, flags);
genpd->lock_flags = flags;
}
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
int depth)
__acquires(&genpd->slock)
{
unsigned long flags;
spin_lock_irqsave_nested(&genpd->slock, flags, depth);
genpd->lock_flags = flags;
}
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
__acquires(&genpd->slock)
{
unsigned long flags;
spin_lock_irqsave(&genpd->slock, flags);
genpd->lock_flags = flags;
return 0;
}
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
__releases(&genpd->slock)
{
spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
static const struct genpd_lock_ops genpd_spin_ops = {
.lock = genpd_lock_spin,
.lock_nested = genpd_lock_nested_spin,
.lock_interruptible = genpd_lock_interruptible_spin,
.unlock = genpd_unlock_spin,
};
#define genpd_lock(p) p->lock_ops->lock(p)
#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p) p->lock_ops->unlock(p)
#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
{
bool ret;
ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on
	 * domain.
	 */
if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
return ret;
if (ret)
dev_warn_once(dev, "PM domain %s will not be powered off\n",
genpd->name);
return ret;
}
static int genpd_runtime_suspend(struct device *dev);
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
* and checks that the PM domain pointer is a real generic PM domain.
* Any failure results in NULL being returned.
*/
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
return NULL;
	/* A genpd always has its ->runtime_suspend() callback assigned. */
if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
return pd_to_genpd(dev->pm_domain);
return NULL;
}
/*
* This should only be used where we are certain that the pm_domain
* attached to the device is a genpd domain.
*/
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
return pd_to_genpd(dev->pm_domain);
}
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
struct device *dev)
{
return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
static int genpd_start_dev(const struct generic_pm_domain *genpd,
struct device *dev)
{
return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
ret = !!atomic_dec_and_test(&genpd->sd_count);
return ret;
}
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
atomic_inc(&genpd->sd_count);
smp_mb__after_atomic();
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;
static void genpd_debug_add(struct generic_pm_domain *genpd);
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
if (!genpd_debugfs_dir)
return;
debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
u64 delta, now;
now = ktime_get_mono_fast_ns();
if (now <= genpd->accounting_time)
return;
delta = now - genpd->accounting_time;
	/*
	 * If genpd->status is on, the domain has just come out of the off
	 * state, so charge the elapsed time to the idle time; otherwise
	 * charge it to the on time.
	 */
if (genpd->status == GENPD_STATE_ON)
genpd->states[genpd->state_idx].idle_time += delta;
else
genpd->on_time += delta;
genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
unsigned int state)
{
struct generic_pm_domain_data *pd_data;
struct pm_domain_data *pdd;
struct gpd_link *link;
/* New requested state is same as Max requested state */
if (state == genpd->performance_state)
return state;
/* New requested state is higher than Max requested state */
if (state > genpd->performance_state)
return state;
/* Traverse all devices within the domain */
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
pd_data = to_gpd_data(pdd);
if (pd_data->performance_state > state)
state = pd_data->performance_state;
}
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
* field is protected by the parent genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
* requirement to parent domain) is different from
* link->child->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (link->performance_state > state)
state = link->performance_state;
}
return state;
}
static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
struct generic_pm_domain *parent,
unsigned int pstate)
{
if (!parent->set_performance_state)
return pstate;
return dev_pm_opp_xlate_performance_state(genpd->opp_table,
parent->opp_table,
pstate);
}
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
struct generic_pm_domain *parent;
struct gpd_link *link;
int parent_state, ret;
if (state == genpd->performance_state)
return 0;
/* Propagate to parents of genpd */
list_for_each_entry(link, &genpd->child_links, child_node) {
parent = link->parent;
/* Find parent's performance state */
ret = genpd_xlate_performance_state(genpd, parent, state);
if (unlikely(ret < 0))
goto err;
parent_state = ret;
genpd_lock_nested(parent, depth + 1);
link->prev_performance_state = link->performance_state;
link->performance_state = parent_state;
parent_state = _genpd_reeval_performance_state(parent,
parent_state);
ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
genpd_unlock(parent);
if (ret)
goto err;
}
if (genpd->set_performance_state) {
ret = genpd->set_performance_state(genpd, state);
if (ret)
goto err;
}
genpd->performance_state = state;
return 0;
err:
	/* Encountered an error, let's roll back. */
list_for_each_entry_continue_reverse(link, &genpd->child_links,
child_node) {
parent = link->parent;
genpd_lock_nested(parent, depth + 1);
parent_state = link->prev_performance_state;
link->performance_state = parent_state;
parent_state = _genpd_reeval_performance_state(parent,
parent_state);
if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
parent->name, parent_state);
}
genpd_unlock(parent);
}
return ret;
}
static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd = dev_to_genpd(dev);
struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
unsigned int prev_state;
int ret;
prev_state = gpd_data->performance_state;
if (prev_state == state)
return 0;
gpd_data->performance_state = state;
state = _genpd_reeval_performance_state(genpd, state);
ret = _genpd_set_performance_state(genpd, state, 0);
if (ret)
gpd_data->performance_state = prev_state;
return ret;
}
static int genpd_drop_performance_state(struct device *dev)
{
unsigned int prev_state = dev_gpd_data(dev)->performance_state;
if (!genpd_set_performance_state(dev, 0))
return prev_state;
return 0;
}
static void genpd_restore_performance_state(struct device *dev,
unsigned int state)
{
if (state)
genpd_set_performance_state(dev, state);
}
/**
* dev_pm_genpd_set_performance_state- Set performance state of device's power
* domain.
*
* @dev: Device for which the performance-state needs to be set.
* @state: Target performance state of the device. This can be set as 0 when the
* device doesn't have any performance state constraints left (And so
* the device wouldn't participate anymore to find the target
* performance state of the genpd).
*
* It is assumed that the users guarantee that the genpd wouldn't be detached
* while this routine is getting called.
*
* Returns 0 on success and negative error values on failures.
*/
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
int ret = 0;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -ENODEV;
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
genpd_lock(genpd);
if (pm_runtime_suspended(dev)) {
dev_gpd_data(dev)->rpm_pstate = state;
} else {
ret = genpd_set_performance_state(dev, state);
if (!ret)
dev_gpd_data(dev)->rpm_pstate = 0;
}
genpd_unlock(genpd);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
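/*
 * Example (illustrative sketch): a driver attached to a performance-state
 * aware genpd can request a state around a period of activity and drop the
 * request afterwards. The driver names and the state value 2 are
 * hypothetical.
 *
 *	static int foo_run_transfer(struct device *dev)
 *	{
 *		int ret = dev_pm_genpd_set_performance_state(dev, 2);
 *
 *		if (ret)
 *			return ret;
 *		foo_do_transfer(dev);
 *		return dev_pm_genpd_set_performance_state(dev, 0);
 *	}
 */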
/**
* dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
*
* @dev: Device to handle
* @next: impending interrupt/wakeup for the device
*
 * Allow devices to inform the PM framework of their next wakeup. It is
 * assumed that the users guarantee that the genpd won't be detached while
 * this routine is getting called. Additionally, it is assumed that @dev is
 * not runtime suspended (RPM_SUSPENDED).
 *
 * Although devices are also expected to update the next_wakeup after the
 * end of their use case, they may not always know when that happens, so a
 * stale @next will be ignored when powering off the domain.
*/
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
struct generic_pm_domain *genpd;
struct gpd_timing_data *td;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return;
td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
if (td)
td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
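/*
 * Example (illustrative sketch): a driver with a predictable wakeup
 * pattern can publish its next expected wakeup before going idle, letting
 * the governor pick a domain idle state whose residency can still be met.
 * The 5 ms horizon is hypothetical.
 *
 *	static void foo_enter_idle(struct device *dev)
 *	{
 *		dev_pm_genpd_set_next_wakeup(dev,
 *					     ktime_add_ms(ktime_get(), 5));
 *	}
 */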
/**
* dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
* @dev: A device that is attached to the genpd.
*
 * This routine should typically be called for a device at the point when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
*
 * Returns the aggregated value of the genpd's next hrtimer, or KTIME_MAX if no
 * valid value has been set.
*/
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
struct generic_pm_domain *genpd;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return KTIME_MAX;
if (genpd->gd)
return genpd->gd->next_hrtimer;
return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
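/*
 * Example (illustrative sketch): a genpd power notifier can consult the
 * aggregated next hrtimer on GENPD_NOTIFY_PRE_OFF to prepare for the
 * expected sleep duration. foo_dev and foo_prepare_for_off() are
 * hypothetical.
 *
 *	static int foo_pd_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_prepare_for_off(dev_pm_genpd_get_next_hrtimer(foo_dev));
 *		return NOTIFY_OK;
 *	}
 */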
/*
* dev_pm_genpd_synced_poweroff - Next power off should be synchronous
*
* @dev: A device that is attached to the genpd.
*
* Allows a consumer of the genpd to notify the provider that the next power off
* should be synchronous.
*
* It is assumed that the users guarantee that the genpd wouldn't be detached
* while this routine is getting called.
*/
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
struct generic_pm_domain *genpd;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return;
genpd_lock(genpd);
genpd->synced_poweroff = true;
genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
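/*
 * Example (illustrative sketch): a consumer that is about to hand control
 * over to firmware can request that its final power off be carried out
 * synchronously before dropping its runtime PM reference:
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	pm_runtime_put_sync(dev);
 */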
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
/* Notify consumers that we are about to power on. */
ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
GENPD_NOTIFY_PRE_ON,
GENPD_NOTIFY_OFF, NULL);
ret = notifier_to_errno(ret);
if (ret)
return ret;
if (!genpd->power_on)
goto out;
timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
if (!timed) {
ret = genpd->power_on(genpd);
if (ret)
goto err;
goto out;
}
time_start = ktime_get();
ret = genpd->power_on(genpd);
if (ret)
goto err;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
goto out;
genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
genpd->synced_poweroff = false;
return 0;
err:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
NULL);
return ret;
}
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
/* Notify consumers that we are about to power off. */
ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
GENPD_NOTIFY_PRE_OFF,
GENPD_NOTIFY_ON, NULL);
ret = notifier_to_errno(ret);
if (ret)
return ret;
if (!genpd->power_off)
goto out;
timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
if (!timed) {
ret = genpd->power_off(genpd);
if (ret)
goto busy;
goto out;
}
time_start = ktime_get();
ret = genpd->power_off(genpd);
if (ret)
goto busy;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
goto out;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->gd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
out:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
NULL);
return 0;
busy:
raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
return ret;
}
/**
* genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
* @genpd: PM domain to power off.
*
* Queue up the execution of genpd_power_off() unless it's already been done
* before.
*/
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
queue_work(pm_wq, &genpd->power_off_work);
}
/**
* genpd_power_off - Remove power from a given PM domain.
* @genpd: PM domain to power down.
* @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
* into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
* be RPM_SUSPENDED, while it tries to power off the PM domain.
* @depth: nesting count for lockdep.
*
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
*/
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
unsigned int depth)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
unsigned int not_suspended = 0;
int ret;
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
* (2) System suspend is in progress.
*/
if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
return 0;
/*
* Abort power off for the PM domain in the following situations:
* (1) The domain is configured as always on.
* (2) When the domain has a subdomain being powered on.
*/
if (genpd_is_always_on(genpd) ||
genpd_is_rpm_always_on(genpd) ||
atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
/*
* The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
struct generic_pm_domain *child = link->child;
if (child->state_idx < child->state_count - 1)
return -EBUSY;
}
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
/*
* Do not allow PM domain to be powered off, when an IRQ safe
* device is part of a non-IRQ safe domain.
*/
if (!pm_runtime_suspended(pdd->dev) ||
irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
return -EBUSY;
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
/* Default to shallowest state. */
if (!genpd->gov)
genpd->state_idx = 0;
/* Don't power off, if a child domain is waiting to power on. */
if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
ret = _genpd_power_off(genpd, true);
if (ret) {
genpd->states[genpd->state_idx].rejected++;
return ret;
}
genpd->status = GENPD_STATE_OFF;
genpd_update_accounting(genpd);
genpd->states[genpd->state_idx].usage++;
list_for_each_entry(link, &genpd->child_links, child_node) {
genpd_sd_counter_dec(link->parent);
genpd_lock_nested(link->parent, depth + 1);
genpd_power_off(link->parent, false, depth + 1);
genpd_unlock(link->parent);
}
return 0;
}
/**
* genpd_power_on - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
* Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
struct gpd_link *link;
int ret = 0;
if (genpd_status_on(genpd))
return 0;
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the parents' .power_on() callbacks fiddles
* with it.
*/
list_for_each_entry(link, &genpd->child_links, child_node) {
struct generic_pm_domain *parent = link->parent;
genpd_sd_counter_inc(parent);
genpd_lock_nested(parent, depth + 1);
ret = genpd_power_on(parent, depth + 1);
genpd_unlock(parent);
if (ret) {
genpd_sd_counter_dec(parent);
goto err;
}
}
ret = _genpd_power_on(genpd, true);
if (ret)
goto err;
genpd->status = GENPD_STATE_ON;
genpd_update_accounting(genpd);
return 0;
err:
list_for_each_entry_continue_reverse(link,
&genpd->child_links,
child_node) {
genpd_sd_counter_dec(link->parent);
genpd_lock_nested(link->parent, depth + 1);
genpd_power_off(link->parent, false, depth + 1);
genpd_unlock(link->parent);
}
return ret;
}
static int genpd_dev_pm_start(struct device *dev)
{
struct generic_pm_domain *genpd = dev_to_genpd(dev);
return genpd_start_dev(genpd, dev);
}
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
struct generic_pm_domain_data *gpd_data;
struct device *dev;
gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
dev = gpd_data->base.dev;
for (;;) {
struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
struct pm_domain_data *pdd;
struct gpd_timing_data *td;
spin_lock_irq(&dev->power.lock);
pdd = dev->power.subsys_data ?
dev->power.subsys_data->domain_data : NULL;
if (pdd) {
td = to_gpd_data(pdd)->td;
if (td) {
td->constraint_changed = true;
genpd = dev_to_genpd(dev);
}
}
spin_unlock_irq(&dev->power.lock);
if (!IS_ERR(genpd)) {
genpd_lock(genpd);
genpd->gd->max_off_time_changed = true;
genpd_unlock(genpd);
}
dev = dev->parent;
if (!dev || dev->power.ignore_children)
break;
}
return NOTIFY_DONE;
}
/**
* genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
* @work: Work structure used for scheduling the execution of this function.
*/
static void genpd_power_off_work_fn(struct work_struct *work)
{
struct generic_pm_domain *genpd;
genpd = container_of(work, struct generic_pm_domain, power_off_work);
genpd_lock(genpd);
genpd_power_off(genpd, false, 0);
genpd_unlock(genpd);
}
/**
* __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
* @dev: Device to handle.
*/
static int __genpd_runtime_suspend(struct device *dev)
{
int (*cb)(struct device *__dev);
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
cb = dev->class->pm->runtime_suspend;
else if (dev->bus && dev->bus->pm)
cb = dev->bus->pm->runtime_suspend;
else
cb = NULL;
if (!cb && dev->driver && dev->driver->pm)
cb = dev->driver->pm->runtime_suspend;
return cb ? cb(dev) : 0;
}
/**
* __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
* @dev: Device to handle.
*/
static int __genpd_runtime_resume(struct device *dev)
{
int (*cb)(struct device *__dev);
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
cb = dev->class->pm->runtime_resume;
else if (dev->bus && dev->bus->pm)
cb = dev->bus->pm->runtime_resume;
else
cb = NULL;
if (!cb && dev->driver && dev->driver->pm)
cb = dev->driver->pm->runtime_resume;
return cb ? cb(dev) : 0;
}
/**
* genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a runtime suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
static int genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
bool (*suspend_ok)(struct device *__dev);
struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
struct gpd_timing_data *td = gpd_data->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start = 0;
s64 elapsed_ns;
int ret;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
/*
* A runtime PM centric subsystem/driver may re-use the runtime PM
* callbacks for other purposes than runtime PM. In those scenarios
* runtime PM is disabled. Under these circumstances, we shall skip
* validating/measuring the PM QoS latency.
*/
suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
if (runtime_pm && suspend_ok && !suspend_ok(dev))
return -EBUSY;
/* Measure suspend latency. */
if (td && runtime_pm)
time_start = ktime_get();
ret = __genpd_runtime_suspend(dev);
if (ret)
return ret;
ret = genpd_stop_dev(genpd, dev);
if (ret) {
__genpd_runtime_resume(dev);
return ret;
}
/* Update suspend latency value if the measured time exceeds it. */
if (td && runtime_pm) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->suspend_latency_ns) {
td->suspend_latency_ns = elapsed_ns;
dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
elapsed_ns);
genpd->gd->max_off_time_changed = true;
td->constraint_changed = true;
}
}
/*
* If power.irq_safe is set, this routine may be run with
* IRQs disabled, so suspend only if the PM domain also is irq_safe.
*/
if (irq_safe_dev_in_sleep_domain(dev, genpd))
return 0;
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
genpd_unlock(genpd);
return 0;
}
/**
* genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
*
* Carry out a runtime resume of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
static int genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
struct gpd_timing_data *td = gpd_data->td;
bool timed = td && pm_runtime_enabled(dev);
ktime_t time_start = 0;
s64 elapsed_ns;
int ret;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
/*
* As we don't power off a non IRQ safe domain, which holds
* an IRQ safe device, we don't need to restore power to it.
*/
if (irq_safe_dev_in_sleep_domain(dev, genpd))
goto out;
genpd_lock(genpd);
genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
ret = genpd_power_on(genpd, 0);
genpd_unlock(genpd);
if (ret)
return ret;
out:
/* Measure resume latency. */
if (timed)
time_start = ktime_get();
ret = genpd_start_dev(genpd, dev);
if (ret)
goto err_poweroff;
ret = __genpd_runtime_resume(dev);
if (ret)
goto err_stop;
/* Update resume latency value if the measured time exceeds it. */
if (timed) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->resume_latency_ns) {
td->resume_latency_ns = elapsed_ns;
dev_dbg(dev, "resume latency exceeded, %lld ns\n",
elapsed_ns);
genpd->gd->max_off_time_changed = true;
td->constraint_changed = true;
}
}
return 0;
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
genpd_unlock(genpd);
}
return ret;
}
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
pd_ignore_unused = true;
return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
/**
* genpd_power_off_unused - Power off all PM domains with no devices in use.
*/
static int __init genpd_power_off_unused(void)
{
struct generic_pm_domain *genpd;
if (pd_ignore_unused) {
pr_warn("genpd: Not disabling unused power domains\n");
return 0;
}
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node)
genpd_queue_power_off_work(genpd);
mutex_unlock(&gpd_list_lock);
return 0;
}
late_initcall(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
/**
* genpd_sync_power_off - Synchronously power off a PM domain and its parents.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its parents.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
* these cases the lock must be held.
*/
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
unsigned int depth)
{
struct gpd_link *link;
if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
return;
if (genpd->suspended_count != genpd->device_count
|| atomic_read(&genpd->sd_count) > 0)
return;
/* Check that the children are in their deepest (powered-off) state. */
list_for_each_entry(link, &genpd->parent_links, parent_node) {
struct generic_pm_domain *child = link->child;
if (child->state_idx < child->state_count - 1)
return;
}
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
if (_genpd_power_off(genpd, false))
return;
genpd->status = GENPD_STATE_OFF;
list_for_each_entry(link, &genpd->child_links, child_node) {
genpd_sd_counter_dec(link->parent);
if (use_lock)
genpd_lock_nested(link->parent, depth + 1);
genpd_sync_power_off(link->parent, use_lock, depth + 1);
if (use_lock)
genpd_unlock(link->parent);
}
}
/**
* genpd_sync_power_on - Synchronously power on a PM domain and its parents.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
* these cases the lock must be held.
*/
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
unsigned int depth)
{
struct gpd_link *link;
if (genpd_status_on(genpd))
return;
list_for_each_entry(link, &genpd->child_links, child_node) {
genpd_sd_counter_inc(link->parent);
if (use_lock)
genpd_lock_nested(link->parent, depth + 1);
genpd_sync_power_on(link->parent, use_lock, depth + 1);
if (use_lock)
genpd_unlock(link->parent);
}
_genpd_power_on(genpd, false);
genpd->status = GENPD_STATE_ON;
}
/**
* genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
* Start a power transition of a device (during a system-wide power transition)
* under the assumption that its pm_domain field points to the domain member of
* an object of type struct generic_pm_domain representing a PM domain
* consisting of I/O devices.
*/
static int genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
genpd_lock(genpd);
if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
if (ret < 0) {
genpd_lock(genpd);
genpd->prepared_count--;
genpd_unlock(genpd);
}
	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
return ret >= 0 ? 0 : ret;
}
/**
* genpd_finish_suspend - Completion of suspend or hibernation of device in an
* I/O pm domain.
* @dev: Device to suspend.
* @suspend_noirq: Generic suspend_noirq callback.
* @resume_noirq: Generic resume_noirq callback.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
static int genpd_finish_suspend(struct device *dev,
int (*suspend_noirq)(struct device *dev),
int (*resume_noirq)(struct device *dev))
{
struct generic_pm_domain *genpd;
int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
ret = suspend_noirq(dev);
if (ret)
return ret;
if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
return 0;
if (genpd->dev_ops.stop && genpd->dev_ops.start &&
!pm_runtime_status_suspended(dev)) {
ret = genpd_stop_dev(genpd, dev);
if (ret) {
resume_noirq(dev);
return ret;
}
}
genpd_lock(genpd);
genpd->suspended_count++;
genpd_sync_power_off(genpd, true, 0);
genpd_unlock(genpd);
return 0;
}
/**
* genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
static int genpd_suspend_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_suspend(dev,
pm_generic_suspend_noirq,
pm_generic_resume_noirq);
}
/**
* genpd_finish_resume - Completion of resume of device in an I/O PM domain.
* @dev: Device to resume.
* @resume_noirq: Generic resume_noirq callback.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
static int genpd_finish_resume(struct device *dev,
int (*resume_noirq)(struct device *dev))
{
struct generic_pm_domain *genpd;
int ret;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
return resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
if (genpd->dev_ops.stop && genpd->dev_ops.start &&
!pm_runtime_status_suspended(dev)) {
ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
	return resume_noirq(dev);
}
/**
* genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
static int genpd_resume_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_resume(dev, pm_generic_resume_noirq);
}
/**
* genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
static int genpd_freeze_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_suspend(dev,
pm_generic_freeze_noirq,
pm_generic_thaw_noirq);
}
/**
* genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
static int genpd_thaw_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}
/**
* genpd_poweroff_noirq - Completion of hibernation of device in an
* I/O PM domain.
* @dev: Device to poweroff.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
static int genpd_poweroff_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_suspend(dev,
pm_generic_poweroff_noirq,
pm_generic_restore_noirq);
}
/**
* genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
 * Make sure the domain will be in the same power state as it was before the
 * hibernation the system is resuming from, and start the device if necessary.
*/
static int genpd_restore_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
return genpd_finish_resume(dev, pm_generic_restore_noirq);
}
/**
* genpd_complete - Complete power transition of a device in a power domain.
* @dev: Device to complete the transition of.
*
* Complete a power transition of a device (during a system-wide power
* transition) under the assumption that its pm_domain field points to the
* domain member of an object of type struct generic_pm_domain representing
* a power domain consisting of I/O devices.
*/
static void genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return;
pm_generic_complete(dev);
genpd_lock(genpd);
genpd->prepared_count--;
if (!genpd->prepared_count)
genpd_queue_power_off_work(genpd);
genpd_unlock(genpd);
}
static void genpd_switch_state(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
bool use_lock;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return;
use_lock = genpd_is_irq_safe(genpd);
if (use_lock)
genpd_lock(genpd);
if (suspend) {
genpd->suspended_count++;
genpd_sync_power_off(genpd, use_lock, 0);
} else {
genpd_sync_power_on(genpd, use_lock, 0);
genpd->suspended_count--;
}
if (use_lock)
genpd_unlock(genpd);
}
/**
* dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
* @dev: The device that is attached to the genpd, that can be suspended.
*
* This routine should typically be called for a device that needs to be
* suspended during the syscore suspend phase. It may also be called during
* suspend-to-idle to suspend a corresponding CPU device that is attached to a
* genpd.
*/
void dev_pm_genpd_suspend(struct device *dev)
{
genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
/**
* dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
* @dev: The device that is attached to the genpd, which needs to be resumed.
*
* This routine should typically be called for a device that needs to be resumed
* during the syscore resume phase. It may also be called during suspend-to-idle
* to resume a corresponding CPU device that is attached to a genpd.
*/
void dev_pm_genpd_resume(struct device *dev)
{
genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
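/*
 * Illustrative sketch, not part of the original file: a syscore-stage user
 * could pair the two helpers above around the point of no return. foo_dev and
 * the foo_syscore_* functions are hypothetical; the block is compiled out.
 */
#if 0
static struct device *foo_dev;	/* hypothetical device attached to a genpd */

static int foo_syscore_suspend(void)
{
	dev_pm_genpd_suspend(foo_dev);
	return 0;
}

static void foo_syscore_resume(void)
{
	dev_pm_genpd_resume(foo_dev);
}
#endif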
#else /* !CONFIG_PM_SLEEP */
#define genpd_prepare NULL
#define genpd_suspend_noirq NULL
#define genpd_resume_noirq NULL
#define genpd_freeze_noirq NULL
#define genpd_thaw_noirq NULL
#define genpd_poweroff_noirq NULL
#define genpd_restore_noirq NULL
#define genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
bool has_governor)
{
struct generic_pm_domain_data *gpd_data;
struct gpd_timing_data *td;
int ret;
ret = dev_pm_get_subsys_data(dev);
if (ret)
return ERR_PTR(ret);
gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
if (!gpd_data) {
ret = -ENOMEM;
goto err_put;
}
gpd_data->base.dev = dev;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
/* Allocate data used by a governor. */
if (has_governor) {
td = kzalloc(sizeof(*td), GFP_KERNEL);
if (!td) {
ret = -ENOMEM;
goto err_free;
}
td->constraint_changed = true;
td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
td->next_wakeup = KTIME_MAX;
gpd_data->td = td;
}
spin_lock_irq(&dev->power.lock);
if (dev->power.subsys_data->domain_data)
ret = -EINVAL;
else
dev->power.subsys_data->domain_data = &gpd_data->base;
spin_unlock_irq(&dev->power.lock);
if (ret)
goto err_free;
return gpd_data;
err_free:
kfree(gpd_data->td);
kfree(gpd_data);
err_put:
dev_pm_put_subsys_data(dev);
return ERR_PTR(ret);
}
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
spin_lock_irq(&dev->power.lock);
dev->power.subsys_data->domain_data = NULL;
spin_unlock_irq(&dev->power.lock);
kfree(gpd_data->td);
kfree(gpd_data);
dev_pm_put_subsys_data(dev);
}
static void genpd_update_cpumask(struct generic_pm_domain *genpd,
int cpu, bool set, unsigned int depth)
{
struct gpd_link *link;
if (!genpd_is_cpu_domain(genpd))
return;
list_for_each_entry(link, &genpd->child_links, child_node) {
struct generic_pm_domain *parent = link->parent;
genpd_lock_nested(parent, depth + 1);
genpd_update_cpumask(parent, cpu, set, depth + 1);
genpd_unlock(parent);
}
if (set)
cpumask_set_cpu(cpu, genpd->cpus);
else
cpumask_clear_cpu(cpu, genpd->cpus);
}
static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
if (cpu >= 0)
genpd_update_cpumask(genpd, cpu, true, 0);
}
static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
if (cpu >= 0)
genpd_update_cpumask(genpd, cpu, false, 0);
}
static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
int cpu;
if (!genpd_is_cpu_domain(genpd))
return -1;
for_each_possible_cpu(cpu) {
if (get_cpu_device(cpu) == dev)
return cpu;
}
return -1;
}
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct device *base_dev)
{
struct genpd_governor_data *gd = genpd->gd;
struct generic_pm_domain_data *gpd_data;
int ret;
dev_dbg(dev, "%s()\n", __func__);
gpd_data = genpd_alloc_dev_data(dev, gd);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
genpd_lock(genpd);
genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
if (gd)
gd->max_off_time_changed = true;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
genpd_unlock(genpd);
out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
else
dev_pm_qos_add_notifier(dev, &gpd_data->nb,
DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
/**
* pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
* @dev: Device to be added.
*/
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
int ret;
if (!genpd || !dev)
return -EINVAL;
mutex_lock(&gpd_list_lock);
ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
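/*
 * Illustrative sketch, not part of the original file: a platform driver that
 * has initialized a domain could add a newly probed device to it. foo_pd and
 * foo_bind_device() are hypothetical; the block is compiled out.
 */
#if 0
static struct generic_pm_domain foo_pd;	/* initialized via pm_genpd_init() */

static int foo_bind_device(struct device *dev)
{
	int ret;

	ret = pm_genpd_add_device(&foo_pd, dev);
	if (ret)
		dev_err(dev, "failed to add to PM domain: %d\n", ret);
	return ret;
}
#endif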
static int genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
pdd = dev->power.subsys_data->domain_data;
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
DEV_PM_QOS_RESUME_LATENCY);
genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
goto out;
}
genpd->device_count--;
if (genpd->gd)
genpd->gd->max_off_time_changed = true;
genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
genpd_unlock(genpd);
dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @dev: Device to be removed.
*/
int pm_genpd_remove_device(struct device *dev)
{
struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -EINVAL;
return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
/**
* dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
*
* @dev: Device that should be associated with the notifier
* @nb: The notifier block to register
*
* Users may call this function to add a genpd power on/off notifier for an
* attached @dev. Only one notifier per device is allowed. The notifier is
* sent when genpd is powering on/off the PM domain.
*
 * It is assumed that the user guarantees that the genpd wouldn't be detached
* while this routine is getting called.
*
* Returns 0 on success and negative error values on failures.
*/
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data;
int ret;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -ENODEV;
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
if (gpd_data->power_nb)
return -EEXIST;
genpd_lock(genpd);
ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
genpd_unlock(genpd);
if (ret) {
dev_warn(dev, "failed to add notifier for PM domain %s\n",
genpd->name);
return ret;
}
gpd_data->power_nb = nb;
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
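/*
 * Illustrative sketch, not part of the original file: a consumer that must
 * save and restore context around domain power transitions could register a
 * notifier like this. The foo_pd_* names are hypothetical; the block is
 * compiled out.
 */
#if 0
static int foo_pd_notify(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* ... save any context the power off would lose ... */
		break;
	case GENPD_NOTIFY_ON:
		/* ... restore the context after power on ... */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_pd_nb = {
	.notifier_call = foo_pd_notify,
};

/* In probe, once the device is attached to its genpd:
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_pd_nb);
 */
#endif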
/**
* dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
*
* @dev: Device that is associated with the notifier
*
* Users may call this function to remove a genpd power on/off notifier for an
* attached @dev.
*
 * It is assumed that the user guarantees that the genpd wouldn't be detached
* while this routine is getting called.
*
* Returns 0 on success and negative error values on failures.
*/
int dev_pm_genpd_remove_notifier(struct device *dev)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data;
int ret;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -ENODEV;
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
if (!gpd_data->power_nb)
return -ENODEV;
genpd_lock(genpd);
ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
gpd_data->power_nb);
genpd_unlock(genpd);
if (ret) {
dev_warn(dev, "failed to remove notifier for PM domain %s\n",
genpd->name);
return ret;
}
gpd_data->power_nb = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
struct gpd_link *link, *itr;
int ret = 0;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
|| genpd == subdomain)
return -EINVAL;
/*
* If the domain can be powered on/off in an IRQ safe
* context, ensure that the subdomain can also be
* powered on/off in that context.
*/
if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
genpd->name, subdomain->name);
return -EINVAL;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
ret = -EINVAL;
goto out;
}
list_for_each_entry(itr, &genpd->parent_links, parent_node) {
if (itr->child == subdomain && itr->parent == genpd) {
ret = -EINVAL;
goto out;
}
}
link->parent = genpd;
list_add_tail(&link->parent_node, &genpd->parent_links);
link->child = subdomain;
list_add_tail(&link->child_node, &subdomain->child_links);
if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd);
out:
genpd_unlock(genpd);
genpd_unlock(subdomain);
if (ret)
kfree(link);
return ret;
}
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Leader PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
int ret;
mutex_lock(&gpd_list_lock);
ret = genpd_add_subdomain(genpd, subdomain);
mutex_unlock(&gpd_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Leader PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
struct gpd_link *l, *link;
int ret = -EINVAL;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
if (link->child != subdomain)
continue;
list_del(&link->parent_node);
list_del(&link->child_node);
kfree(link);
if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd);
ret = 0;
break;
}
out:
genpd_unlock(genpd);
genpd_unlock(subdomain);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
static void genpd_free_default_power_state(struct genpd_power_state *states,
unsigned int state_count)
{
kfree(states);
}
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
struct genpd_power_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
genpd->states = state;
genpd->state_count = 1;
genpd->free_states = genpd_free_default_power_state;
return 0;
}
static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
struct genpd_governor_data *gd = NULL;
int ret;
if (genpd_is_cpu_domain(genpd) &&
!zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
return -ENOMEM;
if (genpd->gov) {
gd = kzalloc(sizeof(*gd), GFP_KERNEL);
if (!gd) {
ret = -ENOMEM;
goto free;
}
gd->max_off_time_ns = -1;
gd->max_off_time_changed = true;
gd->next_wakeup = KTIME_MAX;
gd->next_hrtimer = KTIME_MAX;
}
/* Use only one "off" state if there were no states declared */
if (genpd->state_count == 0) {
ret = genpd_set_default_power_state(genpd);
if (ret)
goto free;
}
genpd->gd = gd;
return 0;
free:
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
kfree(gd);
return ret;
}
static void genpd_free_data(struct generic_pm_domain *genpd)
{
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
if (genpd->free_states)
genpd->free_states(genpd->states, genpd->state_count);
kfree(genpd->gd);
}
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
spin_lock_init(&genpd->slock);
genpd->lock_ops = &genpd_spin_ops;
} else {
mutex_init(&genpd->mlock);
genpd->lock_ops = &genpd_mtx_ops;
}
}
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
* @is_off: Initial value of the domain's power_is_off field.
*
* Returns 0 on successful initialization, else a negative error code.
*/
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
int ret;
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
INIT_LIST_HEAD(&genpd->parent_links);
INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
genpd->device_count = 0;
genpd->provider = NULL;
genpd->has_provider = false;
genpd->accounting_time = ktime_get_mono_fast_ns();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = genpd_prepare;
genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = genpd_resume_noirq;
genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
genpd->domain.start = genpd_dev_pm_start;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
genpd->dev_ops.start = pm_clk_resume;
}
/* The always-on governor works better with the corresponding flag. */
if (gov == &pm_domain_always_on_gov)
genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
/* Always-on domains must be powered on at initialization. */
if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
!genpd_status_on(genpd)) {
pr_err("always-on PM domain %s is not on\n", genpd->name);
return -EINVAL;
}
/* Multiple states but no governor doesn't make sense. */
if (!gov && genpd->state_count > 1)
pr_warn("%s: no governor for states\n", genpd->name);
ret = genpd_alloc_data(genpd);
if (ret)
return ret;
device_initialize(&genpd->dev);
dev_set_name(&genpd->dev, "%s", genpd->name);
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
genpd_debug_add(genpd);
return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
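/*
 * Illustrative sketch, not part of the original file: a minimal provider
 * driver initializing a domain, starting in the "off" state with the simple
 * QoS governor. The foo_* callbacks would poke platform-specific registers;
 * all foo_* names are hypothetical and the block is compiled out.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	/* ... enable the power switch for this island ... */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	/* ... disable the power switch ... */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-island",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_probe(struct platform_device *pdev)
{
	return pm_genpd_init(&foo_pd, &simple_qos_governor, true);
}
#endif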
static int genpd_remove(struct generic_pm_domain *genpd)
{
struct gpd_link *l, *link;
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
genpd_lock(genpd);
if (genpd->has_provider) {
genpd_unlock(genpd);
pr_err("Provider present, unable to remove %s\n", genpd->name);
return -EBUSY;
}
if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
list_del(&link->parent_node);
list_del(&link->child_node);
kfree(link);
}
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
genpd_free_data(genpd);
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
}
/**
* pm_genpd_remove - Remove a generic I/O PM domain
* @genpd: Pointer to PM domain that is to be removed.
*
* To remove the PM domain, this function:
* - Removes the PM domain as a subdomain to any parent domains,
* if it was added.
* - Removes the PM domain from the list of registered PM domains.
*
* The PM domain will only be removed, if the associated provider has
* been removed, it is not a parent to any other PM domain and has no
* devices associated with it.
*/
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
int ret;
mutex_lock(&gpd_list_lock);
ret = genpd_remove(genpd);
mutex_unlock(&gpd_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
* Device Tree based PM domain providers.
*
* The code below implements generic device tree based PM domain providers that
* bind device tree nodes with generic PM domains registered in the system.
*
* Any driver that registers generic PM domains and needs to support binding of
* devices to these domains is supposed to register a PM domain provider, which
* maps a PM domain specifier retrieved from the device tree to a PM domain.
*
* Two simple mapping functions have been provided for convenience:
* - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
* - genpd_xlate_onecell() for mapping of multiple PM domains per node by
* index.
*/
/**
* struct of_genpd_provider - PM domain provider registration structure
* @link: Entry in global list of PM domain providers
* @node: Pointer to device tree node of PM domain provider
* @xlate: Provider-specific xlate callback mapping a set of specifier cells
* into a PM domain.
* @data: context pointer to be passed into @xlate callback
*/
struct of_genpd_provider {
struct list_head link;
struct device_node *node;
genpd_xlate_t xlate;
void *data;
};
/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);
/**
* genpd_xlate_simple() - Xlate function for direct node-domain mapping
* @genpdspec: OF phandle args to map into a PM domain
* @data: xlate function private data - pointer to struct generic_pm_domain
*
* This is a generic xlate function that can be used to model PM domains that
* have their own device tree nodes. The private data of xlate function needs
* to be a valid pointer to struct generic_pm_domain.
*/
static struct generic_pm_domain *genpd_xlate_simple(
struct of_phandle_args *genpdspec,
void *data)
{
return data;
}
/**
* genpd_xlate_onecell() - Xlate function using a single index.
* @genpdspec: OF phandle args to map into a PM domain
* @data: xlate function private data - pointer to struct genpd_onecell_data
*
* This is a generic xlate function that can be used to model simple PM domain
* controllers that have one device tree node and provide multiple PM domains.
* A single cell is used as an index into an array of PM domains specified in
* the genpd_onecell_data struct when registering the provider.
*/
static struct generic_pm_domain *genpd_xlate_onecell(
struct of_phandle_args *genpdspec,
void *data)
{
struct genpd_onecell_data *genpd_data = data;
unsigned int idx = genpdspec->args[0];
if (genpdspec->args_count != 1)
return ERR_PTR(-EINVAL);
if (idx >= genpd_data->num_domains) {
pr_err("%s: invalid domain index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
if (!genpd_data->domains[idx])
return ERR_PTR(-ENOENT);
return genpd_data->domains[idx];
}
/**
* genpd_add_provider() - Register a PM domain provider for a node
* @np: Device node pointer associated with the PM domain provider.
* @xlate: Callback for decoding PM domain from phandle arguments.
* @data: Context pointer for @xlate callback.
*/
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
void *data)
{
struct of_genpd_provider *cp;
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
cp->node = of_node_get(np);
cp->data = data;
cp->xlate = xlate;
fwnode_dev_initialized(&np->fwnode, true);
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
mutex_unlock(&of_genpd_mutex);
pr_debug("Added domain provider from %pOF\n", np);
return 0;
}
static bool genpd_present(const struct generic_pm_domain *genpd)
{
bool ret = false;
const struct generic_pm_domain *gpd;
mutex_lock(&gpd_list_lock);
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
if (gpd == genpd) {
ret = true;
break;
}
}
mutex_unlock(&gpd_list_lock);
return ret;
}
/**
* of_genpd_add_provider_simple() - Register a simple PM domain provider
* @np: Device node pointer associated with the PM domain provider.
* @genpd: Pointer to PM domain associated with the PM domain provider.
*/
int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
{
int ret;
if (!np || !genpd)
return -EINVAL;
if (!genpd_present(genpd))
return -EINVAL;
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
if (ret)
return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
/*
* Save table for faster processing while setting performance
* state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
WARN_ON(IS_ERR(genpd->opp_table));
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
if (genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
return ret;
}
genpd->provider = &np->fwnode;
genpd->has_provider = true;
return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
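/*
 * Illustrative sketch, not part of the original file: once a domain has been
 * initialized, a provider driver could expose it to DT consumers.
 * foo_pd_register_provider() and foo_pd are hypothetical; the block is
 * compiled out.
 */
#if 0
static int foo_pd_register_provider(struct platform_device *pdev)
{
	int ret;

	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
	if (ret)
		dev_err(&pdev->dev, "failed to register provider: %d\n", ret);
	return ret;
}
#endif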
/**
* of_genpd_add_provider_onecell() - Register a onecell PM domain provider
* @np: Device node pointer associated with the PM domain provider.
* @data: Pointer to the data associated with the PM domain provider.
*/
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data)
{
struct generic_pm_domain *genpd;
unsigned int i;
int ret = -EINVAL;
if (!np || !data)
return -EINVAL;
if (!data->xlate)
data->xlate = genpd_xlate_onecell;
for (i = 0; i < data->num_domains; i++) {
genpd = data->domains[i];
if (!genpd)
continue;
if (!genpd_present(genpd))
goto error;
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
dev_err_probe(&genpd->dev, ret,
"Failed to add OPP table for index %d\n", i);
goto error;
}
/*
* Save table for faster processing while setting
* performance state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
WARN_ON(IS_ERR(genpd->opp_table));
}
genpd->provider = &np->fwnode;
genpd->has_provider = true;
}
ret = genpd_add_provider(np, data->xlate, data);
if (ret < 0)
goto error;
return 0;
error:
while (i--) {
genpd = data->domains[i];
if (!genpd)
continue;
genpd->provider = NULL;
genpd->has_provider = false;
if (genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
}
return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
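/*
 * Illustrative sketch, not part of the original file: a provider exposing
 * several domains selected by a single DT cell. All foo_* names are
 * hypothetical; the block is compiled out.
 */
#if 0
static struct generic_pm_domain foo_pd_cpu, foo_pd_gpu;	/* initialized */

static struct generic_pm_domain *foo_domains[] = {
	&foo_pd_cpu,
	&foo_pd_gpu,
};

static struct genpd_onecell_data foo_onecell_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
};

/* In probe: of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_onecell_data); */
#endif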
/**
* of_genpd_del_provider() - Remove a previously registered PM domain provider
* @np: Device node pointer associated with the PM domain provider
*/
void of_genpd_del_provider(struct device_node *np)
{
struct of_genpd_provider *cp, *tmp;
struct generic_pm_domain *gpd;
mutex_lock(&gpd_list_lock);
mutex_lock(&of_genpd_mutex);
list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
if (cp->node == np) {
/*
* For each PM domain associated with the
* provider, set the 'has_provider' to false
* so that the PM domain can be safely removed.
*/
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
if (gpd->provider == &np->fwnode) {
gpd->has_provider = false;
if (!gpd->set_performance_state)
continue;
dev_pm_opp_put_opp_table(gpd->opp_table);
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
fwnode_dev_initialized(&cp->node->fwnode, false);
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);
break;
}
}
mutex_unlock(&of_genpd_mutex);
mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
/**
* genpd_get_from_provider() - Look-up PM domain
* @genpdspec: OF phandle args to use for look-up
*
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if found, uses the xlate function of the provider to map phandle args to a
 * PM domain.
*
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
static struct generic_pm_domain *genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
struct of_genpd_provider *provider;
if (!genpdspec)
return ERR_PTR(-EINVAL);
mutex_lock(&of_genpd_mutex);
/* Check if we have such a provider in our array */
list_for_each_entry(provider, &of_genpd_providers, link) {
if (provider->node == genpdspec->np)
genpd = provider->xlate(genpdspec, provider->data);
if (!IS_ERR(genpd))
break;
}
mutex_unlock(&of_genpd_mutex);
return genpd;
}
/**
* of_genpd_add_device() - Add a device to an I/O PM domain
* @genpdspec: OF phandle args to use for look-up PM domain
* @dev: Device to be added.
*
* Looks-up an I/O PM domain based upon phandle args provided and adds
* the device to the PM domain. Returns a negative error code on failure.
*/
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
if (!dev)
return -EINVAL;
mutex_lock(&gpd_list_lock);
genpd = genpd_get_from_provider(genpdspec);
if (IS_ERR(genpd)) {
ret = PTR_ERR(genpd);
goto out;
}
ret = genpd_add_device(genpd, dev, dev);
out:
mutex_unlock(&gpd_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);
/**
* of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @parent_spec: OF phandle args to use for parent PM domain look-up
* @subdomain_spec: OF phandle args to use for subdomain look-up
*
* Looks-up a parent PM domain and subdomain based upon phandle args
* provided and adds the subdomain to the parent PM domain. Returns a
* negative error code on failure.
*/
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
struct of_phandle_args *subdomain_spec)
{
struct generic_pm_domain *parent, *subdomain;
int ret;
mutex_lock(&gpd_list_lock);
parent = genpd_get_from_provider(parent_spec);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
goto out;
}
subdomain = genpd_get_from_provider(subdomain_spec);
if (IS_ERR(subdomain)) {
ret = PTR_ERR(subdomain);
goto out;
}
ret = genpd_add_subdomain(parent, subdomain);
out:
mutex_unlock(&gpd_list_lock);
return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
/**
* of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @parent_spec: OF phandle args to use for parent PM domain look-up
* @subdomain_spec: OF phandle args to use for subdomain look-up
*
* Looks-up a parent PM domain and subdomain based upon phandle args
* provided and removes the subdomain from the parent PM domain. Returns a
* negative error code on failure.
*/
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
struct of_phandle_args *subdomain_spec)
{
struct generic_pm_domain *parent, *subdomain;
int ret;
mutex_lock(&gpd_list_lock);
parent = genpd_get_from_provider(parent_spec);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
goto out;
}
subdomain = genpd_get_from_provider(subdomain_spec);
if (IS_ERR(subdomain)) {
ret = PTR_ERR(subdomain);
goto out;
}
ret = pm_genpd_remove_subdomain(parent, subdomain);
out:
mutex_unlock(&gpd_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
/**
* of_genpd_remove_last - Remove the last PM domain registered for a provider
* @np: Pointer to device node associated with provider
*
* Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only be
 * removed if the provider associated with the domain has been removed.
*
* Returns a valid pointer to struct generic_pm_domain on success or
* ERR_PTR() on failure.
*/
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
int ret;
if (IS_ERR_OR_NULL(np))
return ERR_PTR(-EINVAL);
mutex_lock(&gpd_list_lock);
list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
if (gpd->provider == &np->fwnode) {
ret = genpd_remove(gpd);
genpd = ret ? ERR_PTR(ret) : gpd;
break;
}
}
mutex_unlock(&gpd_list_lock);
return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
static void genpd_release_dev(struct device *dev)
{
of_node_put(dev->of_node);
kfree(dev);
}
static struct bus_type genpd_bus_type = {
.name = "genpd",
};
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Currently not used
*
* Try to locate a corresponding generic PM domain, which the device was
* attached to previously. If such is found, the device is detached from it.
*/
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
unsigned int i;
int ret = 0;
pd = dev_to_genpd(dev);
if (IS_ERR(pd))
return;
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
/* Drop the default performance state */
if (dev_gpd_data(dev)->default_pstate) {
dev_pm_genpd_set_performance_state(dev, 0);
dev_gpd_data(dev)->default_pstate = 0;
}
for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
break;
mdelay(i);
cond_resched();
}
if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
return;
}
/* Check if PM domain can be powered off after removing this device. */
genpd_queue_power_off_work(pd);
/* Unregister the device if it was created by genpd. */
if (dev->bus == &genpd_bus_type)
device_unregister(dev);
}
static void genpd_dev_pm_sync(struct device *dev)
{
struct generic_pm_domain *pd;
pd = dev_to_genpd(dev);
if (IS_ERR(pd))
return;
genpd_queue_power_off_work(pd);
}
static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int pstate;
int ret;
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", index, &pd_args);
if (ret < 0)
return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
of_node_put(pd_args.np);
if (IS_ERR(pd)) {
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
/* Set the default performance state */
pstate = of_get_required_opp_performance_state(dev->of_node, index);
if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
ret = pstate;
goto err;
} else if (pstate > 0) {
ret = dev_pm_genpd_set_performance_state(dev, pstate);
if (ret)
goto err;
dev_gpd_data(dev)->default_pstate = pstate;
}
if (power_on) {
genpd_lock(pd);
ret = genpd_power_on(pd, 0);
genpd_unlock(pd);
}
if (ret) {
/* Drop the default performance state */
if (dev_gpd_data(dev)->default_pstate) {
dev_pm_genpd_set_performance_state(dev, 0);
dev_gpd_data(dev)->default_pstate = 0;
}
genpd_remove_device(pd, dev);
return -EPROBE_DEFER;
}
return 1;
err:
dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
pd->name, ret);
genpd_remove_device(pd, dev);
return ret;
}
/**
* genpd_dev_pm_attach - Attach a device to its PM domain using DT.
* @dev: Device to attach.
*
* Parse device's OF node to find a PM domain specifier. If such is found,
* attaches the device to retrieved pm_domain ops.
*
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry later.
*/
int genpd_dev_pm_attach(struct device *dev)
{
if (!dev->of_node)
return 0;
/*
* Devices with multiple PM domains must be attached separately, as we
* can only attach one PM domain per device.
*/
if (of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells") != 1)
return 0;
return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
/**
* genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
* @dev: The device used to lookup the PM domain.
* @index: The index of the PM domain.
*
* Parse device's OF node to find a PM domain specifier at the provided @index.
* If such is found, creates a virtual device and attaches it to the retrieved
* pm_domain ops. To deal with detaching of the virtual device, the ->detach()
* callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
*
 * Returns the created virtual device on a successfully attached PM domain,
 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
 * is not probed and to retry later.
*/
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index)
{
struct device *virt_dev;
int num_domains;
int ret;
if (!dev->of_node)
return NULL;
/* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
if (index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
if (!virt_dev)
return ERR_PTR(-ENOMEM);
dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
virt_dev->bus = &genpd_bus_type;
virt_dev->release = genpd_release_dev;
virt_dev->of_node = of_node_get(dev->of_node);
ret = device_register(virt_dev);
if (ret) {
put_device(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
if (ret < 1) {
device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
}
pm_runtime_enable(virt_dev);
genpd_queue_power_off_work(dev_to_genpd(virt_dev));
return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
/**
* genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
* @dev: The device used to lookup the PM domain.
* @name: The name of the PM domain.
*
* Parse device's OF node to find a PM domain specifier using the
* power-domain-names DT property. For further description see
* genpd_dev_pm_attach_by_id().
*/
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
int index;
if (!dev->of_node)
return NULL;
index = of_property_match_string(dev->of_node, "power-domain-names",
name);
if (index < 0)
return NULL;
return genpd_dev_pm_attach_by_id(dev, index);
}
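/*
 * Illustrative sketch, not part of the original file: a consumer with several
 * power domains could attach one by its DT name and tie it to the device with
 * a runtime PM device link. foo_attach_perf_domain() and the "perf" name are
 * hypothetical; the block is compiled out.
 */
#if 0
static int foo_attach_perf_domain(struct device *dev)
{
	struct device *pd_dev;
	struct device_link *link;

	pd_dev = genpd_dev_pm_attach_by_name(dev, "perf");
	if (IS_ERR_OR_NULL(pd_dev))
		return PTR_ERR(pd_dev) ? : -ENODEV;

	/* Make the virtual domain device follow the consumer's RPM state. */
	link = device_link_add(dev, pd_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;
	return 0;
}
#endif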
static const struct of_device_id idle_state_match[] = {
{ .compatible = "domain-idle-state", },
{ }
};
static int genpd_parse_state(struct genpd_power_state *genpd_state,
struct device_node *state_node)
{
int err;
u32 residency;
u32 entry_latency, exit_latency;
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
pr_debug(" * %pOF missing entry-latency-us property\n",
state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
pr_debug(" * %pOF missing exit-latency-us property\n",
state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "min-residency-us", &residency);
if (!err)
genpd_state->residency_ns = 1000LL * residency;
genpd_state->power_on_latency_ns = 1000LL * exit_latency;
genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
return 0;
}
static int genpd_iterate_idle_states(struct device_node *dn,
struct genpd_power_state *states)
{
int ret;
struct of_phandle_iterator it;
struct device_node *np;
int i = 0;
ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
if (ret <= 0)
return ret == -ENOENT ? 0 : ret;
	/* Loop over the phandles until all the requested entries are found */
of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
if (!of_device_is_available(np))
continue;
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {
pr_err("Parsing idle state node %pOF failed with err %d\n",
np, ret);
of_node_put(np);
return ret;
}
}
i++;
}
return i;
}
/**
* of_genpd_parse_idle_states: Return array of idle states for the genpd.
*
* @dn: The genpd device node
* @states: The pointer to which the state array will be saved.
* @n: The count of elements in the array returned from this function.
*
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the responsibility of the caller to
 * free it after use. Whether any or zero compatible domain idle states are
 * found, it returns 0; in case of errors, a negative error code is returned.
*/
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
{
struct genpd_power_state *st;
int ret;
ret = genpd_iterate_idle_states(dn, NULL);
if (ret < 0)
return ret;
if (!ret) {
*states = NULL;
*n = 0;
return 0;
}
st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
ret = genpd_iterate_idle_states(dn, st);
if (ret <= 0) {
kfree(st);
return ret < 0 ? ret : -EINVAL;
}
*states = st;
*n = ret;
return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
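/*
 * Illustrative sketch (not part of this file): a genpd provider parsing its
 * domain-idle-states from DT and handing them to the domain before calling
 * pm_genpd_init(). All names are hypothetical; only the helpers shown above
 * are assumed.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret < 0)
 *		return ret;
 *	if (nr_states) {
 *		genpd->states = states;
 *		genpd->state_count = nr_states;
 *	}
 *
 *	ret = pm_genpd_init(genpd, NULL, false);
 */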
/**
* pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
*
* @genpd_dev: Genpd's device for which the performance-state needs to be found.
* @opp: struct dev_pm_opp of the OPP for which we need to find performance
* state.
*
 * Returns the performance state encoded in the OPP of the genpd. This calls
 * the platform-specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
*/
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
struct dev_pm_opp *opp)
{
struct generic_pm_domain *genpd = NULL;
int state;
genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
if (unlikely(!genpd->opp_to_performance_state))
return 0;
genpd_lock(genpd);
state = genpd->opp_to_performance_state(genpd, opp);
genpd_unlock(genpd);
return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
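/*
 * Illustrative sketch (not part of this file): a genpd provider wiring up
 * the ->opp_to_performance_state() callback consumed above. Mapping the OPP
 * to its "opp-level" via dev_pm_opp_get_level() is one common choice, but
 * the translation is entirely platform-specific.
 *
 *	static unsigned int foo_opp_to_performance_state(
 *				struct generic_pm_domain *genpd,
 *				struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	...
 *	genpd->opp_to_performance_state = foo_opp_to_performance_state;
 */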
static int __init genpd_bus_init(void)
{
return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
/*** debugfs support ***/
#ifdef CONFIG_DEBUG_FS
/*
* TODO: This function is a slightly modified version of rtpm_status_show
* from sysfs.c, so generalize it.
*/
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
static const char * const status_lookup[] = {
[RPM_ACTIVE] = "active",
[RPM_RESUMING] = "resuming",
[RPM_SUSPENDED] = "suspended",
[RPM_SUSPENDING] = "suspending"
};
const char *p = "";
if (dev->power.runtime_error)
p = "error";
else if (dev->power.disable_depth)
p = "unsupported";
else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
p = status_lookup[dev->power.runtime_status];
else
WARN_ON(1);
seq_printf(s, "%-25s ", p);
}
static void perf_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
seq_put_decimal_ull(s, "", gpd_data->performance_state);
}
static int genpd_summary_one(struct seq_file *s,
struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GENPD_STATE_ON] = "on",
[GENPD_STATE_OFF] = "off"
};
struct pm_domain_data *pm_data;
const char *kobj_path;
struct gpd_link *link;
char state[16];
int ret;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
if (!genpd_status_on(genpd))
snprintf(state, sizeof(state), "%s-%u",
status_lookup[genpd->status], genpd->state_idx);
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
* parent and child, so we are safe.
* Also genpd->name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (list_is_first(&link->parent_node, &genpd->parent_links))
seq_printf(s, "\n%48s", " ");
seq_printf(s, "%s", link->child->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj,
genpd_is_irq_safe(genpd) ?
GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
perf_status_str(s, pm_data->dev);
kfree(kobj_path);
}
seq_puts(s, "\n");
exit:
genpd_unlock(genpd);
return 0;
}
static int summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
ret = genpd_summary_one(s, genpd);
if (ret)
break;
}
mutex_unlock(&gpd_list_lock);
return ret;
}
static int status_show(struct seq_file *s, void *data)
{
static const char * const status_lookup[] = {
[GENPD_STATE_ON] = "on",
[GENPD_STATE_OFF] = "off"
};
struct generic_pm_domain *genpd = s->private;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
if (genpd->status == GENPD_STATE_OFF)
seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
genpd->state_idx);
else
seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
genpd_unlock(genpd);
return ret;
}
static int sub_domains_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct gpd_link *link;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(link, &genpd->parent_links, parent_node)
seq_printf(s, "%s\n", link->child->name);
genpd_unlock(genpd);
return ret;
}
static int idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
u64 now, delta, idle_time = 0;
unsigned int i;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
idle_time += genpd->states[i].idle_time;
if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
now = ktime_get_mono_fast_ns();
if (now > genpd->accounting_time) {
delta = now - genpd->accounting_time;
idle_time += delta;
}
}
do_div(idle_time, NSEC_PER_MSEC);
seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
genpd->states[i].usage, genpd->states[i].rejected);
}
genpd_unlock(genpd);
return ret;
}
static int active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
u64 now, on_time, delta = 0;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
if (genpd->status == GENPD_STATE_ON) {
now = ktime_get_mono_fast_ns();
if (now > genpd->accounting_time)
delta = now - genpd->accounting_time;
}
on_time = genpd->on_time + delta;
do_div(on_time, NSEC_PER_MSEC);
seq_printf(s, "%llu ms\n", on_time);
genpd_unlock(genpd);
return ret;
}
static int total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
u64 now, delta, total = 0;
unsigned int i;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
for (i = 0; i < genpd->state_count; i++) {
total += genpd->states[i].idle_time;
if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
now = ktime_get_mono_fast_ns();
if (now > genpd->accounting_time) {
delta = now - genpd->accounting_time;
total += delta;
}
}
}
do_div(total, NSEC_PER_MSEC);
seq_printf(s, "%llu ms\n", total);
genpd_unlock(genpd);
return ret;
}
static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
const char *kobj_path;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj,
genpd_is_irq_safe(genpd) ?
GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
seq_printf(s, "%s\n", kobj_path);
kfree(kobj_path);
}
genpd_unlock(genpd);
return ret;
}
static int perf_state_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
if (genpd_lock_interruptible(genpd))
return -ERESTARTSYS;
seq_printf(s, "%u\n", genpd->performance_state);
genpd_unlock(genpd);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);
static void genpd_debug_add(struct generic_pm_domain *genpd)
{
struct dentry *d;
if (!genpd_debugfs_dir)
return;
d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
debugfs_create_file("current_state", 0444,
d, genpd, &status_fops);
debugfs_create_file("sub_domains", 0444,
d, genpd, &sub_domains_fops);
debugfs_create_file("idle_states", 0444,
d, genpd, &idle_states_fops);
debugfs_create_file("active_time", 0444,
d, genpd, &active_time_fops);
debugfs_create_file("total_idle_time", 0444,
d, genpd, &total_idle_time_fops);
debugfs_create_file("devices", 0444,
d, genpd, &devices_fops);
if (genpd->set_performance_state)
debugfs_create_file("perf_state", 0444,
d, genpd, &perf_state_fops);
}
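/*
 * For reference, the resulting debugfs layout (assuming debugfs is mounted
 * at /sys/kernel/debug) is one directory per registered domain:
 *
 *	pm_genpd/pm_genpd_summary
 *	pm_genpd/<domain>/current_state
 *	pm_genpd/<domain>/sub_domains
 *	pm_genpd/<domain>/idle_states
 *	pm_genpd/<domain>/active_time
 *	pm_genpd/<domain>/total_idle_time
 *	pm_genpd/<domain>/devices
 *	pm_genpd/<domain>/perf_state	(only with ->set_performance_state)
 */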
static int __init genpd_debug_init(void)
{
struct generic_pm_domain *genpd;
genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
NULL, &summary_fops);
list_for_each_entry(genpd, &gpd_list, gpd_list_node)
genpd_debug_add(genpd);
return 0;
}
late_initcall(genpd_debug_init);
static void __exit genpd_debug_exit(void)
{
debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
| linux-master | drivers/base/power/domain.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright 2023 Maxime Ripard <[email protected]>
#include <kunit/resource.h>
#include <linux/device.h>
#define DEVICE_NAME "test"
struct test_priv {
bool probe_done;
bool release_done;
wait_queue_head_t release_wq;
struct device *dev;
};
static int root_device_devm_init(struct kunit *test)
{
struct test_priv *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
init_waitqueue_head(&priv->release_wq);
test->priv = priv;
return 0;
}
static void devm_device_action(void *ptr)
{
struct test_priv *priv = ptr;
priv->release_done = true;
wake_up_interruptible(&priv->release_wq);
}
#define RELEASE_TIMEOUT_MS 100
/*
* Tests that a bus-less, non-probed device will run its device-managed
* actions when unregistered.
*/
static void root_device_devm_register_unregister_test(struct kunit *test)
{
struct test_priv *priv = test->priv;
int ret;
priv->dev = root_device_register(DEVICE_NAME);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
root_device_unregister(priv->dev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
static void devm_put_device_action(void *ptr)
{
struct test_priv *priv = ptr;
put_device(priv->dev);
priv->release_done = true;
wake_up_interruptible(&priv->release_wq);
}
/*
* Tests that a bus-less, non-probed device will run its device-managed
* actions when unregistered, even if someone still holds a reference to
* it.
*/
static void root_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
{
struct test_priv *priv = test->priv;
int ret;
priv->dev = root_device_register(DEVICE_NAME);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
get_device(priv->dev);
ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
root_device_unregister(priv->dev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
static struct kunit_case root_device_devm_tests[] = {
KUNIT_CASE(root_device_devm_register_unregister_test),
KUNIT_CASE(root_device_devm_register_get_unregister_with_devm_test),
{}
};
static struct kunit_suite root_device_devm_test_suite = {
.name = "root-device-devm",
.init = root_device_devm_init,
.test_cases = root_device_devm_tests,
};
kunit_test_suite(root_device_devm_test_suite);
MODULE_DESCRIPTION("Test module for root devices");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/base/test/root-device-test.c |
// SPDX-License-Identifier: GPL-2.0
// Unit tests for property entries API
//
// Copyright 2019 Google LLC.
#include <kunit/test.h>
#include <linux/property.h>
#include <linux/types.h>
static void pe_test_uints(struct kunit *test)
{
static const struct property_entry entries[] = {
PROPERTY_ENTRY_U8("prop-u8", 8),
PROPERTY_ENTRY_U16("prop-u16", 16),
PROPERTY_ENTRY_U32("prop-u32", 32),
PROPERTY_ENTRY_U64("prop-u64", 64),
{ }
};
struct fwnode_handle *node;
u8 val_u8, array_u8[2];
u16 val_u16, array_u16[2];
u32 val_u32, array_u32[2];
u64 val_u64, array_u64[2];
int error;
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
error = fwnode_property_count_u8(node, "prop-u8");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u8, 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u8[0], 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u16, 16);
error = fwnode_property_count_u16(node, "prop-u16");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u16[0], 16);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u32, 32);
error = fwnode_property_count_u32(node, "prop-u32");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u32[0], 32);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u64, 64);
error = fwnode_property_count_u64(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u64[0], 64);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
KUNIT_EXPECT_NE(test, error, 0);
/* Count 64-bit values as 16-bit */
error = fwnode_property_count_u16(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 4);
fwnode_remove_software_node(node);
}
static void pe_test_uint_arrays(struct kunit *test)
{
static const u8 a_u8[10] = { 8, 9 };
static const u16 a_u16[10] = { 16, 17 };
static const u32 a_u32[10] = { 32, 33 };
static const u64 a_u64[10] = { 64, 65 };
static const struct property_entry entries[] = {
PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
{ }
};
struct fwnode_handle *node;
u8 val_u8, array_u8[32];
u16 val_u16, array_u16[32];
u32 val_u32, array_u32[32];
u64 val_u64, array_u64[32];
int error;
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u8, 8);
error = fwnode_property_count_u8(node, "prop-u8");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u8[0], 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u8[0], 8);
KUNIT_EXPECT_EQ(test, array_u8[1], 9);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u16, 16);
error = fwnode_property_count_u16(node, "prop-u16");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u16[0], 16);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u16[0], 16);
KUNIT_EXPECT_EQ(test, array_u16[1], 17);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u32, 32);
error = fwnode_property_count_u32(node, "prop-u32");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u32[0], 32);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u32[0], 32);
KUNIT_EXPECT_EQ(test, array_u32[1], 33);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, val_u64, 64);
error = fwnode_property_count_u64(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u64[0], 64);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, array_u64[0], 64);
KUNIT_EXPECT_EQ(test, array_u64[1], 65);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
KUNIT_EXPECT_NE(test, error, 0);
/* Count 64-bit values as 16-bit */
error = fwnode_property_count_u16(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 40);
/* Other way around */
error = fwnode_property_count_u64(node, "prop-u16");
KUNIT_EXPECT_EQ(test, error, 2);
fwnode_remove_software_node(node);
}
static void pe_test_strings(struct kunit *test)
{
static const char *strings[] = {
"string-a",
"string-b",
};
static const struct property_entry entries[] = {
PROPERTY_ENTRY_STRING("str", "single"),
PROPERTY_ENTRY_STRING("empty", ""),
PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
{ }
};
struct fwnode_handle *node;
const char *str;
const char *strs[10];
int error;
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
error = fwnode_property_read_string(node, "str", &str);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_STREQ(test, str, "single");
error = fwnode_property_string_array_count(node, "str");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_string_array(node, "str", strs, 1);
KUNIT_EXPECT_EQ(test, error, 1);
KUNIT_EXPECT_STREQ(test, strs[0], "single");
/* asking for more data returns what we have */
error = fwnode_property_read_string_array(node, "str", strs, 2);
KUNIT_EXPECT_EQ(test, error, 1);
KUNIT_EXPECT_STREQ(test, strs[0], "single");
error = fwnode_property_read_string(node, "no-str", &str);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_read_string_array(node, "no-str", strs, 1);
KUNIT_EXPECT_LT(test, error, 0);
error = fwnode_property_read_string(node, "empty", &str);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_STREQ(test, str, "");
error = fwnode_property_string_array_count(node, "strs");
KUNIT_EXPECT_EQ(test, error, 2);
error = fwnode_property_read_string_array(node, "strs", strs, 3);
KUNIT_EXPECT_EQ(test, error, 2);
KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
error = fwnode_property_read_string_array(node, "strs", strs, 1);
KUNIT_EXPECT_EQ(test, error, 1);
KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
/* NULL argument -> returns size */
error = fwnode_property_read_string_array(node, "strs", NULL, 0);
KUNIT_EXPECT_EQ(test, error, 2);
/* accessing array as single value */
error = fwnode_property_read_string(node, "strs", &str);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_STREQ(test, str, "string-a");
fwnode_remove_software_node(node);
}
static void pe_test_bool(struct kunit *test)
{
static const struct property_entry entries[] = {
PROPERTY_ENTRY_BOOL("prop"),
{ }
};
struct fwnode_handle *node;
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
fwnode_remove_software_node(node);
}
/* Verifies that small U8 array is stored inline when property is copied */
static void pe_test_move_inline_u8(struct kunit *test)
{
static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
static const struct property_entry entries[] = {
PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
{ }
};
struct property_entry *copy;
const u8 *data_ptr;
copy = property_entries_dup(entries);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
	data_ptr = (u8 *)&copy[0].value;
KUNIT_EXPECT_EQ(test, data_ptr[0], 1);
KUNIT_EXPECT_EQ(test, data_ptr[1], 2);
KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
data_ptr = copy[1].pointer;
KUNIT_EXPECT_EQ(test, data_ptr[0], 5);
KUNIT_EXPECT_EQ(test, data_ptr[1], 6);
property_entries_free(copy);
}
/* Verifies that single string array is stored inline when property is copied */
static void pe_test_move_inline_str(struct kunit *test)
{
static char *str_array_small[] = { "a" };
static char *str_array_big[] = { "b", "c", "d", "e" };
static char *str_array_small_empty[] = { "" };
static struct property_entry entries[] = {
PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
{ }
};
struct property_entry *copy;
const char * const *data_ptr;
copy = property_entries_dup(entries);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
data_ptr = copy[1].pointer;
KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
property_entries_free(copy);
}
/* Handling of reference properties */
static void pe_test_reference(struct kunit *test)
{
static const struct software_node node1 = { .name = "1" };
static const struct software_node node2 = { .name = "2" };
static const struct software_node *group[] = { &node1, &node2, NULL };
static const struct software_node_ref_args refs[] = {
SOFTWARE_NODE_REFERENCE(&node1),
SOFTWARE_NODE_REFERENCE(&node2, 3, 4),
};
const struct property_entry entries[] = {
PROPERTY_ENTRY_REF("ref-1", &node1),
PROPERTY_ENTRY_REF("ref-2", &node2, 1, 2),
PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
{ }
};
struct fwnode_handle *node;
struct fwnode_reference_args ref;
int error;
error = software_node_register_node_group(group);
KUNIT_ASSERT_EQ(test, error, 0);
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* wrong index */
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
0, 1, &ref);
KUNIT_EXPECT_NE(test, error, 0);
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
1, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
/* asking for more args, padded with zero data */
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
3, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
/* wrong index */
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
2, 1, &ref);
KUNIT_EXPECT_NE(test, error, 0);
/* array of references */
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* second reference in the array */
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
2, 1, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
/* wrong index */
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
0, 2, &ref);
KUNIT_EXPECT_NE(test, error, 0);
fwnode_remove_software_node(node);
software_node_unregister_node_group(group);
}
static struct kunit_case property_entry_test_cases[] = {
KUNIT_CASE(pe_test_uints),
KUNIT_CASE(pe_test_uint_arrays),
KUNIT_CASE(pe_test_strings),
KUNIT_CASE(pe_test_bool),
KUNIT_CASE(pe_test_move_inline_u8),
KUNIT_CASE(pe_test_move_inline_str),
KUNIT_CASE(pe_test_reference),
{ }
};
static struct kunit_suite property_entry_test_suite = {
.name = "property-entry",
.test_cases = property_entry_test_cases,
};
kunit_test_suite(property_entry_test_suite);
| linux-master | drivers/base/test/property-entry-test.c |
// SPDX-License-Identifier: GPL-2.0
#include <kunit/resource.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
struct test_priv {
bool probe_done;
bool release_done;
wait_queue_head_t probe_wq;
wait_queue_head_t release_wq;
struct device *dev;
};
static int platform_device_devm_init(struct kunit *test)
{
struct test_priv *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
init_waitqueue_head(&priv->probe_wq);
init_waitqueue_head(&priv->release_wq);
test->priv = priv;
return 0;
}
static void devm_device_action(void *ptr)
{
struct test_priv *priv = ptr;
priv->release_done = true;
wake_up_interruptible(&priv->release_wq);
}
static void devm_put_device_action(void *ptr)
{
struct test_priv *priv = ptr;
put_device(priv->dev);
priv->release_done = true;
wake_up_interruptible(&priv->release_wq);
}
#define RELEASE_TIMEOUT_MS 100
/*
* Tests that a platform bus, non-probed device will run its
* device-managed actions when unregistered.
*/
static void platform_device_devm_register_unregister_test(struct kunit *test)
{
struct platform_device *pdev;
struct test_priv *priv = test->priv;
int ret;
pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
ret = platform_device_add(pdev);
KUNIT_ASSERT_EQ(test, ret, 0);
priv->dev = &pdev->dev;
ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
platform_device_unregister(pdev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
/*
* Tests that a platform bus, non-probed device will run its
* device-managed actions when unregistered, even if someone still holds
* a reference to it.
*/
static void platform_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
{
struct platform_device *pdev;
struct test_priv *priv = test->priv;
int ret;
pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
ret = platform_device_add(pdev);
KUNIT_ASSERT_EQ(test, ret, 0);
priv->dev = &pdev->dev;
get_device(priv->dev);
ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
platform_device_unregister(pdev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
}
static int fake_probe(struct platform_device *pdev)
{
struct test_priv *priv = platform_get_drvdata(pdev);
priv->probe_done = true;
wake_up_interruptible(&priv->probe_wq);
return 0;
}
static struct platform_driver fake_driver = {
.probe = fake_probe,
.driver = {
.name = DEVICE_NAME,
},
};
/*
* Tests that a platform bus, probed device will run its device-managed
* actions when unregistered.
*/
static void probed_platform_device_devm_register_unregister_test(struct kunit *test)
{
struct platform_device *pdev;
struct test_priv *priv = test->priv;
int ret;
ret = platform_driver_register(&fake_driver);
KUNIT_ASSERT_EQ(test, ret, 0);
pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);
ret = platform_device_add(pdev);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = wait_event_interruptible_timeout(priv->probe_wq, priv->probe_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
platform_device_unregister(pdev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
platform_driver_unregister(&fake_driver);
}
/*
* Tests that a platform bus, probed device will run its device-managed
* actions when unregistered, even if someone still holds a reference to
* it.
*/
static void probed_platform_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
{
struct platform_device *pdev;
struct test_priv *priv = test->priv;
int ret;
ret = platform_driver_register(&fake_driver);
KUNIT_ASSERT_EQ(test, ret, 0);
pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);
ret = platform_device_add(pdev);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = wait_event_interruptible_timeout(priv->probe_wq, priv->probe_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
get_device(priv->dev);
ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
KUNIT_ASSERT_EQ(test, ret, 0);
platform_device_unregister(pdev);
ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
msecs_to_jiffies(RELEASE_TIMEOUT_MS));
KUNIT_EXPECT_GT(test, ret, 0);
platform_driver_unregister(&fake_driver);
}
static struct kunit_case platform_device_devm_tests[] = {
KUNIT_CASE(platform_device_devm_register_unregister_test),
KUNIT_CASE(platform_device_devm_register_get_unregister_with_devm_test),
KUNIT_CASE(probed_platform_device_devm_register_unregister_test),
KUNIT_CASE(probed_platform_device_devm_register_get_unregister_with_devm_test),
{}
};
static struct kunit_suite platform_device_devm_test_suite = {
.name = "platform-device-devm",
.init = platform_device_devm_init,
.test_cases = platform_device_devm_tests,
};
kunit_test_suite(platform_device_devm_test_suite);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/base/test/platform-device-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Google, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
static atomic_t warnings, errors, timeout, async_completed;
static int test_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
/*
	 * Determine if we have hit the "timeout" limit for the test; if we
	 * have, then report it as an error, otherwise we will sleep for the
* required amount of time and then report completion.
*/
if (atomic_read(&timeout)) {
dev_err(dev, "async probe took too long\n");
atomic_inc(&errors);
} else {
dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n",
TEST_PROBE_DELAY);
msleep(TEST_PROBE_DELAY);
dev_dbg(&pdev->dev, "done sleeping\n");
}
/*
* Report NUMA mismatch if device node is set and we are not
* performing an async init on that node.
*/
if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
if (IS_ENABLED(CONFIG_NUMA) &&
dev_to_node(dev) != numa_node_id()) {
dev_warn(dev, "NUMA node mismatch %d != %d\n",
dev_to_node(dev), numa_node_id());
atomic_inc(&warnings);
}
atomic_inc(&async_completed);
}
return 0;
}
static struct platform_driver async_driver = {
.driver = {
.name = "test_async_driver",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_driver sync_driver = {
.driver = {
.name = "test_sync_driver",
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_device *async_dev[NR_CPUS * 2];
static struct platform_device *sync_dev[2];
static struct platform_device *
test_platform_device_register_node(char *name, int id, int nid)
{
struct platform_device *pdev;
int ret;
pdev = platform_device_alloc(name, id);
if (!pdev)
return ERR_PTR(-ENOMEM);
if (nid != NUMA_NO_NODE)
set_dev_node(&pdev->dev, nid);
ret = platform_device_add(pdev);
if (ret) {
platform_device_put(pdev);
return ERR_PTR(ret);
}
return pdev;
}
static int __init test_async_probe_init(void)
{
struct platform_device **pdev = NULL;
int async_id = 0, sync_id = 0;
unsigned long long duration;
ktime_t calltime;
int err, nid, cpu;
pr_info("registering first set of asynchronous devices...\n");
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,
nid);
if (IS_ERR(*pdev)) {
err = PTR_ERR(*pdev);
*pdev = NULL;
pr_err("failed to create async_dev: %d\n", err);
goto err_unregister_async_devs;
}
async_id++;
}
pr_info("registering asynchronous driver...\n");
calltime = ktime_get();
err = platform_driver_register(&async_driver);
if (err) {
pr_err("Failed to register async_driver: %d\n", err);
goto err_unregister_async_devs;
}
duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
err = -ETIMEDOUT;
goto err_unregister_async_driver;
}
pr_info("registering second set of asynchronous devices...\n");
calltime = ktime_get();
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,
nid);
if (IS_ERR(*pdev)) {
err = PTR_ERR(*pdev);
*pdev = NULL;
pr_err("failed to create async_dev: %d\n", err);
goto err_unregister_async_driver;
}
async_id++;
}
duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
"test failed: probe took too long\n");
err = -ETIMEDOUT;
goto err_unregister_async_driver;
}
pr_info("registering first synchronous device...\n");
pdev = &sync_dev[sync_id];
*pdev = test_platform_device_register_node("test_sync_driver",
sync_id,
NUMA_NO_NODE);
if (IS_ERR(*pdev)) {
err = PTR_ERR(*pdev);
*pdev = NULL;
pr_err("failed to create sync_dev: %d\n", err);
goto err_unregister_async_driver;
}
sync_id++;
pr_info("registering synchronous driver...\n");
calltime = ktime_get();
err = platform_driver_register(&sync_driver);
if (err) {
pr_err("Failed to register async_driver: %d\n", err);
goto err_unregister_sync_devs;
}
duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
"test failed: probe was too quick\n");
err = -ETIMEDOUT;
goto err_unregister_sync_driver;
}
pr_info("registering second synchronous device...\n");
pdev = &sync_dev[sync_id];
calltime = ktime_get();
*pdev = test_platform_device_register_node("test_sync_driver",
sync_id,
NUMA_NO_NODE);
if (IS_ERR(*pdev)) {
err = PTR_ERR(*pdev);
*pdev = NULL;
pr_err("failed to create sync_dev: %d\n", err);
goto err_unregister_sync_driver;
}
sync_id++;
duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
"test failed: probe was too quick\n");
err = -ETIMEDOUT;
goto err_unregister_sync_driver;
}
/*
* The async events should have completed while we were taking care
* of the synchronous events. We will now terminate any outstanding
	 * asynchronous probe calls remaining by forcing a timeout and
	 * removing the driver before we return, which should force the
	 * flush of the pending asynchronous probe calls.
*
* Otherwise if they completed without errors or warnings then
* report successful completion.
*/
if (atomic_read(&async_completed) != async_id) {
pr_err("async events still pending, forcing timeout\n");
atomic_inc(&timeout);
err = -ETIMEDOUT;
} else if (!atomic_read(&errors) && !atomic_read(&warnings)) {
pr_info("completed successfully\n");
return 0;
}
err_unregister_sync_driver:
platform_driver_unregister(&sync_driver);
err_unregister_sync_devs:
while (sync_id--)
platform_device_unregister(sync_dev[sync_id]);
err_unregister_async_driver:
platform_driver_unregister(&async_driver);
err_unregister_async_devs:
while (async_id--)
platform_device_unregister(async_dev[async_id]);
/*
* If err is already set then count that as an additional error for
* the test. Otherwise we will report an invalid argument error and
	 * not count it, as we should have reached this point as a result of
* errors or warnings being reported by the probe routine.
*/
if (err)
atomic_inc(&errors);
else
err = -EINVAL;
pr_err("Test failed with %d errors and %d warnings\n",
atomic_read(&errors), atomic_read(&warnings));
return err;
}
module_init(test_async_probe_init);
static void __exit test_async_probe_exit(void)
{
int id = 2;
platform_driver_unregister(&async_driver);
platform_driver_unregister(&sync_driver);
while (id--)
platform_device_unregister(sync_dev[id]);
id = NR_CPUS * 2;
while (id--)
platform_device_unregister(async_dev[id]);
}
module_exit(test_async_probe_exit);
MODULE_DESCRIPTION("Test module for asynchronous driver probing");
MODULE_AUTHOR("Dmitry Torokhov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/base/test/test_async_driver_probe.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
#include <linux/regmap.h>
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
#include <linux/module.h>
static int regmap_i3c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
struct i3c_priv_xfer xfers[] = {
{
.rnw = false,
.len = count,
.data.out = data,
},
};
return i3c_device_do_priv_xfers(i3c, xfers, 1);
}
static int regmap_i3c_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
struct i3c_priv_xfer xfers[2];
xfers[0].rnw = false;
xfers[0].len = reg_size;
xfers[0].data.out = reg;
xfers[1].rnw = true;
xfers[1].len = val_size;
xfers[1].data.in = val;
return i3c_device_do_priv_xfers(i3c, xfers, 2);
}
static const struct regmap_bus regmap_i3c = {
.write = regmap_i3c_write,
.read = regmap_i3c_read,
};
struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
	return __devm_regmap_init(&i3c->dev, &regmap_i3c, &i3c->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
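/*
 * Illustrative sketch (not part of this file): an I3C device driver
 * creating its regmap from probe via the devm_regmap_init_i3c() wrapper
 * macro from <linux/regmap.h>. The 8-bit register/value layout is only an
 * example.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *	};
 *
 *	static int foo_probe(struct i3c_device *i3cdev)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i3c(i3cdev, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		...
 *	}
 */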
MODULE_AUTHOR("Vitor Soares <[email protected]>");
MODULE_DESCRIPTION("Regmap I3C Module");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-i3c.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - AC'97 support
//
// Copyright 2013 Linaro Ltd. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/ac97_codec.h>
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
case AC97_RESET:
case AC97_POWERDOWN:
case AC97_INT_PAGING:
case AC97_EXTENDED_ID:
case AC97_EXTENDED_STATUS:
case AC97_EXTENDED_MID:
case AC97_EXTENDED_MSTATUS:
case AC97_GPIO_STATUS:
case AC97_MISC_AFE:
case AC97_VENDOR_ID1:
case AC97_VENDOR_ID2:
case AC97_CODEC_CLASS_REV:
case AC97_PCI_SVID:
case AC97_PCI_SID:
case AC97_FUNC_SELECT:
case AC97_FUNC_INFO:
case AC97_SENSE_INFO:
return true;
default:
return false;
}
}
EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
static int regmap_ac97_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct snd_ac97 *ac97 = context;
*val = ac97->bus->ops->read(ac97, reg);
return 0;
}
static int regmap_ac97_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct snd_ac97 *ac97 = context;
ac97->bus->ops->write(ac97, reg, val);
return 0;
}
static const struct regmap_bus ac97_regmap_bus = {
.reg_write = regmap_ac97_reg_write,
.reg_read = regmap_ac97_reg_read,
};
struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_ac97);
struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
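/*
 * Illustrative sketch (not part of this file): a codec driver creating an
 * AC'97 regmap and reusing regmap_ac97_default_volatile() from above so the
 * standard volatile registers bypass the cache. The config values mirror the
 * usual AC'97 register layout (16-bit registers at even addresses) but are
 * only an example.
 *
 *	static const struct regmap_config foo_ac97_regmap_config = {
 *		.reg_bits = 16,
 *		.reg_stride = 2,
 *		.val_bits = 16,
 *		.max_register = 0x7e,
 *		.volatile_reg = regmap_ac97_default_volatile,
 *		.cache_type = REGCACHE_MAPLE,
 *	};
 *
 *	map = devm_regmap_init_ac97(ac97, &foo_ac97_regmap_config);
 */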
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-ac97.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <[email protected]>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>
#include "internal.h"
static int regcache_maple_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, reg, reg);
unsigned long *entry;
rcu_read_lock();
entry = mas_walk(&mas);
if (!entry) {
rcu_read_unlock();
return -ENOENT;
}
*value = entry[reg - mas.index];
rcu_read_unlock();
return 0;
}
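/*
 * Writes keep the tree as a set of maximal contiguous register blocks, each
 * stored as a single array entry. A write to an uncached register allocates
 * a new block and, when the registers immediately below or above are already
 * cached, merges it with those neighbours. For example (illustrative only):
 * with cached blocks [3..4] and [6..8], writing register 5 leaves a single
 * block [3..8] in the tree.
 */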
static int regcache_maple_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, reg, reg);
unsigned long *entry, *upper, *lower;
unsigned long index, last;
size_t lower_sz, upper_sz;
int ret;
rcu_read_lock();
entry = mas_walk(&mas);
if (entry) {
entry[reg - mas.index] = val;
rcu_read_unlock();
return 0;
}
/* Any adjacent entries to extend/merge? */
mas_set_range(&mas, reg - 1, reg + 1);
index = reg;
last = reg;
lower = mas_find(&mas, reg - 1);
if (lower) {
index = mas.index;
lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
}
upper = mas_find(&mas, reg + 1);
if (upper) {
last = mas.last;
upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
}
rcu_read_unlock();
entry = kmalloc((last - index + 1) * sizeof(unsigned long),
map->alloc_flags);
if (!entry)
return -ENOMEM;
if (lower)
memcpy(entry, lower, lower_sz);
entry[reg - index] = val;
if (upper)
memcpy(&entry[reg - index + 1], upper, upper_sz);
/*
* This is safe because the regmap lock means the Maple lock
* is redundant, but we need to take it due to lockdep asserts
* in the maple tree code.
*/
mas_lock(&mas);
mas_set_range(&mas, index, last);
ret = mas_store_gfp(&mas, entry, map->alloc_flags);
mas_unlock(&mas);
if (ret == 0) {
kfree(lower);
kfree(upper);
}
return ret;
}
static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned int max)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, min, max);
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
	int ret = 0;
lower = NULL;
upper = NULL;
mas_lock(&mas);
mas_for_each(&mas, entry, max) {
/*
* This is safe because the regmap lock means the
* Maple lock is redundant, but we need to take it due
* to lockdep asserts in the maple tree code.
*/
mas_unlock(&mas);
/* Do we need to save any of this entry? */
if (mas.index < min) {
lower_index = mas.index;
			lower_last = min - 1;
lower = kmemdup(entry, ((min - mas.index) *
sizeof(unsigned long)),
map->alloc_flags);
if (!lower) {
ret = -ENOMEM;
goto out_unlocked;
}
}
if (mas.last > max) {
upper_index = max + 1;
upper_last = mas.last;
upper = kmemdup(&entry[max + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
if (!upper) {
ret = -ENOMEM;
goto out_unlocked;
}
}
kfree(entry);
mas_lock(&mas);
mas_erase(&mas);
/* Insert new nodes with the saved data */
if (lower) {
mas_set_range(&mas, lower_index, lower_last);
ret = mas_store_gfp(&mas, lower, map->alloc_flags);
if (ret != 0)
goto out;
lower = NULL;
}
if (upper) {
mas_set_range(&mas, upper_index, upper_last);
ret = mas_store_gfp(&mas, upper, map->alloc_flags);
if (ret != 0)
goto out;
upper = NULL;
}
}
out:
mas_unlock(&mas);
out_unlocked:
kfree(lower);
kfree(upper);
return ret;
}
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
struct ma_state *mas,
unsigned int min, unsigned int max)
{
void *buf;
unsigned long r;
size_t val_bytes = map->format.val_bytes;
int ret = 0;
mas_pause(mas);
rcu_read_unlock();
/*
* Use a raw write if writing more than one register to a
* device that supports raw writes to reduce transaction
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
}
/* Render the data for a raw write */
for (r = min; r < max; r++) {
regcache_set_val(map, buf, r - min,
entry[r - mas->index]);
}
ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
false);
kfree(buf);
} else {
for (r = min; r < max; r++) {
ret = _regmap_write(map, r,
entry[r - mas->index]);
if (ret != 0)
goto out;
}
}
out:
rcu_read_lock();
return ret;
}
static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned int max)
{
struct maple_tree *mt = map->cache;
unsigned long *entry;
MA_STATE(mas, mt, min, max);
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
	int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
rcu_read_lock();
mas_for_each(&mas, entry, max) {
for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
v = entry[r - mas.index];
if (regcache_reg_needs_sync(map, r, v)) {
if (!sync_needed) {
sync_start = r;
sync_needed = true;
}
continue;
}
if (!sync_needed)
continue;
ret = regcache_maple_sync_block(map, entry, &mas,
sync_start, r);
if (ret != 0)
goto out;
sync_needed = false;
}
if (sync_needed) {
ret = regcache_maple_sync_block(map, entry, &mas,
sync_start, r);
if (ret != 0)
goto out;
sync_needed = false;
}
}
out:
rcu_read_unlock();
map->cache_bypass = false;
return ret;
}
static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned long *entry;
/* if we've already been called then just return */
if (!mt)
return 0;
mas_lock(&mas);
mas_for_each(&mas, entry, UINT_MAX)
kfree(entry);
__mt_destroy(mt);
mas_unlock(&mas);
kfree(mt);
map->cache = NULL;
return 0;
}
static int regcache_maple_insert_block(struct regmap *map, int first,
int last)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, first, last);
unsigned long *entry;
int i, ret;
entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
if (!entry)
return -ENOMEM;
for (i = 0; i < last - first + 1; i++)
entry[i] = map->reg_defaults[first + i].def;
mas_lock(&mas);
mas_set_range(&mas, map->reg_defaults[first].reg,
map->reg_defaults[last].reg);
ret = mas_store_gfp(&mas, entry, map->alloc_flags);
mas_unlock(&mas);
if (ret)
kfree(entry);
return ret;
}
static int regcache_maple_init(struct regmap *map)
{
struct maple_tree *mt;
int i;
int ret;
int range_start;
mt = kmalloc(sizeof(*mt), GFP_KERNEL);
if (!mt)
return -ENOMEM;
map->cache = mt;
mt_init(mt);
if (!map->num_reg_defaults)
return 0;
range_start = 0;
/* Scan for ranges of contiguous registers */
for (i = 1; i < map->num_reg_defaults; i++) {
if (map->reg_defaults[i].reg !=
map->reg_defaults[i - 1].reg + 1) {
ret = regcache_maple_insert_block(map, range_start,
i - 1);
if (ret != 0)
goto err;
range_start = i;
}
}
/* Add the last block */
ret = regcache_maple_insert_block(map, range_start,
map->num_reg_defaults - 1);
if (ret != 0)
goto err;
return 0;
err:
regcache_maple_exit(map);
return ret;
}
struct regcache_ops regcache_maple_ops = {
.type = REGCACHE_MAPLE,
.name = "maple",
.init = regcache_maple_init,
.exit = regcache_maple_exit,
.read = regcache_maple_read,
.write = regcache_maple_write,
.drop = regcache_maple_drop,
.sync = regcache_maple_sync,
};
| linux-master | drivers/base/regmap/regcache-maple.c |
// SPDX-License-Identifier: GPL-2.0
// Register map access API - SCCB support
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include "internal.h"
/**
* sccb_is_available - Check if the adapter supports SCCB protocol
* @adap: I2C adapter
*
* Return true if the I2C adapter is capable of using SCCB helper functions,
* false otherwise.
*/
static bool sccb_is_available(struct i2c_adapter *adap)
{
u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
/*
* If we ever want support for hardware doing SCCB natively, we will
* introduce a sccb_xfer() callback to struct i2c_algorithm and check
* for it here.
*/
return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
}
/**
* regmap_sccb_read - Read data from SCCB slave device
* @context: Device that will be interacted with
* @reg: Register to be read from
* @val: Pointer to store read value
*
* This executes the 2-phase write transmission cycle that is followed by a
* 2-phase read transmission cycle, returning negative errno else zero on
* success.
*/
static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
union i2c_smbus_data data;
i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
if (ret < 0)
goto out;
ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
if (ret < 0)
goto out;
*val = data.byte;
out:
i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
return ret;
}
/**
* regmap_sccb_write - Write data to SCCB slave device
* @context: Device that will be interacted with
* @reg: Register to write to
* @val: Value to be written
*
* This executes the SCCB 3-phase write transmission cycle, returning negative
* errno else zero on success.
*/
static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
return i2c_smbus_write_byte_data(i2c, reg, val);
}
static const struct regmap_bus regmap_sccb_bus = {
.reg_write = regmap_sccb_write,
.reg_read = regmap_sccb_read,
};
static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
if (config->val_bits == 8 && config->reg_bits == 8 &&
sccb_is_available(i2c->adapter))
		return &regmap_sccb_bus;
return ERR_PTR(-ENOTSUPP);
}
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sccb);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
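/*
* Example usage (a minimal, hypothetical sketch, not part of this driver):
* a camera sensor driver would normally create its SCCB regmap from probe()
* through the devm_regmap_init_sccb() wrapper in <linux/regmap.h>, which
* expands to __devm_regmap_init_sccb() above. SCCB requires 8-bit registers
* and 8-bit values, as enforced by regmap_get_sccb_bus(); the register
* address and value below are hypothetical.
*
*	static const struct regmap_config example_sccb_config = {
*		.reg_bits = 8,
*		.val_bits = 8,
*	};
*
*	static int example_probe(struct i2c_client *client)
*	{
*		struct regmap *map;
*
*		map = devm_regmap_init_sccb(client, &example_sccb_config);
*		if (IS_ERR(map))
*			return PTR_ERR(map);
*
*		return regmap_write(map, 0x12, 0x80);
*	}
*/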
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-sccb.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - I2C support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include "internal.h"
static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
if (reg > 0xff)
return -EINVAL;
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret < 0)
return ret;
*val = ret;
return 0;
}
static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
if (val > 0xff || reg > 0xff)
return -EINVAL;
return i2c_smbus_write_byte_data(i2c, reg, val);
}
static const struct regmap_bus regmap_smbus_byte = {
.reg_write = regmap_smbus_byte_reg_write,
.reg_read = regmap_smbus_byte_reg_read,
};
static int regmap_smbus_word_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
if (reg > 0xff)
return -EINVAL;
ret = i2c_smbus_read_word_data(i2c, reg);
if (ret < 0)
return ret;
*val = ret;
return 0;
}
static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
if (val > 0xffff || reg > 0xff)
return -EINVAL;
return i2c_smbus_write_word_data(i2c, reg, val);
}
static const struct regmap_bus regmap_smbus_word = {
.reg_write = regmap_smbus_word_reg_write,
.reg_read = regmap_smbus_word_reg_read,
};
static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
if (reg > 0xff)
return -EINVAL;
ret = i2c_smbus_read_word_swapped(i2c, reg);
if (ret < 0)
return ret;
*val = ret;
return 0;
}
static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
if (val > 0xffff || reg > 0xff)
return -EINVAL;
return i2c_smbus_write_word_swapped(i2c, reg, val);
}
static const struct regmap_bus regmap_smbus_word_swapped = {
.reg_write = regmap_smbus_word_write_swapped,
.reg_read = regmap_smbus_word_read_swapped,
};
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
ret = i2c_master_send(i2c, data, count);
if (ret == count)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static int regmap_i2c_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct i2c_msg xfer[2];
int ret;
/* If the I2C controller can't do a gather, tell the core; it
* will substitute a linear write for us.
*/
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
xfer[0].flags = 0;
xfer[0].len = reg_size;
xfer[0].buf = (void *)reg;
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_NOSTART;
xfer[1].len = val_size;
xfer[1].buf = (void *)val;
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret == 2)
return 0;
if (ret < 0)
return ret;
else
return -EIO;
}
static int regmap_i2c_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct i2c_msg xfer[2];
int ret;
xfer[0].addr = i2c->addr;
xfer[0].flags = 0;
xfer[0].len = reg_size;
xfer[0].buf = (void *)reg;
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].len = val_size;
xfer[1].buf = val;
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret == 2)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static const struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
size_t count)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
if (count < 1)
return -EINVAL;
--count;
return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
((u8 *)data + 1));
}
static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
size_t reg_size, void *val,
size_t val_size)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
if (reg_size != 1 || val_size < 1)
return -EINVAL;
ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
if (ret == val_size)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
};
static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
size_t count)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
if (count < 2)
return -EINVAL;
count--;
return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
(u8 *)data + 1);
}
static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
size_t reg_size, void *val,
size_t val_size)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret, count, len = val_size;
if (reg_size != 2)
return -EINVAL;
ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
((u16 *)reg)[0] >> 8);
if (ret < 0)
return ret;
count = 0;
do {
/* Current Address Read */
ret = i2c_smbus_read_byte(i2c);
if (ret < 0)
break;
*((u8 *)val++) = ret;
count++;
len--;
} while (len > 0);
if (count == val_size)
return 0;
else if (ret < 0)
return ret;
else
return -EIO;
}
static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
.write = regmap_i2c_smbus_i2c_write_reg16,
.read = regmap_i2c_smbus_i2c_read_reg16,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
};
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
const struct i2c_adapter_quirks *quirks;
const struct regmap_bus *bus = NULL;
struct regmap_bus *ret_bus;
u16 max_read = 0, max_write = 0;
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
bus = &regmap_i2c;
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
bus = &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 8 && config->reg_bits == 16 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
bus = &regmap_i2c_smbus_i2c_block_reg16;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
case REGMAP_ENDIAN_LITTLE:
bus = &regmap_smbus_word;
break;
case REGMAP_ENDIAN_BIG:
bus = &regmap_smbus_word_swapped;
break;
default: /* everything else is not supported */
break;
}
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
bus = &regmap_smbus_byte;
if (!bus)
return ERR_PTR(-ENOTSUPP);
quirks = i2c->adapter->quirks;
if (quirks) {
if (quirks->max_read_len &&
(bus->max_raw_read == 0 || bus->max_raw_read > quirks->max_read_len))
max_read = quirks->max_read_len;
if (quirks->max_write_len &&
(bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
max_write = quirks->max_write_len;
if (max_read || max_write) {
ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
if (!ret_bus)
return ERR_PTR(-ENOMEM);
ret_bus->free_on_exit = true;
ret_bus->max_raw_read = max_read;
ret_bus->max_raw_write = max_write;
bus = ret_bus;
}
}
return bus;
}
struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_i2c);
struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
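/*
* Example usage (a minimal, hypothetical sketch): most client drivers go
* through the devm_regmap_init_i2c() wrapper in <linux/regmap.h>, letting
* regmap_get_i2c_bus() above pick the best transfer method the adapter
* supports (raw I2C, SMBus I2C block, or SMBus byte/word access).
*
*	static const struct regmap_config example_i2c_config = {
*		.reg_bits = 8,
*		.val_bits = 8,
*		.max_register = 0x7f,
*	};
*
*	map = devm_regmap_init_i2c(client, &example_i2c_config);
*	if (IS_ERR(map))
*		return PTR_ERR(map);
*/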
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-i2c.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2015-17 Intel Corporation.
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/types.h>
#include "internal.h"
static int regmap_sdw_write(void *context, const void *val_buf, size_t val_size)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
/* First word of buffer contains the destination address */
u32 addr = le32_to_cpu(*(const __le32 *)val_buf);
const u8 *val = val_buf;
return sdw_nwrite_no_pm(slave, addr, val_size - sizeof(addr), val + sizeof(addr));
}
static int regmap_sdw_gather_write(void *context,
const void *reg_buf, size_t reg_size,
const void *val_buf, size_t val_size)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
return sdw_nwrite_no_pm(slave, addr, val_size, val_buf);
}
static int regmap_sdw_read(void *context,
const void *reg_buf, size_t reg_size,
void *val_buf, size_t val_size)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
return sdw_nread_no_pm(slave, addr, val_size, val_buf);
}
static const struct regmap_bus regmap_sdw = {
.write = regmap_sdw_write,
.gather_write = regmap_sdw_gather_write,
.read = regmap_sdw_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static int regmap_sdw_config_check(const struct regmap_config *config)
{
/* Register addresses are 32 bits wide */
if (config->reg_bits != 32)
return -ENOTSUPP;
if (config->pad_bits != 0)
return -ENOTSUPP;
/* Only bulk writes are supported, not multi-register writes */
if (config->can_multi_write)
return -ENOTSUPP;
return 0;
}
struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
int ret;
ret = regmap_sdw_config_check(config);
if (ret)
return ERR_PTR(ret);
return __regmap_init(&sdw->dev, &regmap_sdw,
&sdw->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw);
struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
int ret;
ret = regmap_sdw_config_check(config);
if (ret)
return ERR_PTR(ret);
return __devm_regmap_init(&sdw->dev, &regmap_sdw,
&sdw->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
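/*
* Example usage (a minimal, hypothetical sketch): a SoundWire codec driver
* would typically use the devm_regmap_init_sdw() wrapper in <linux/regmap.h>.
* Per regmap_sdw_config_check() above, the config must use 32-bit register
* addresses, no padding and no multi-register writes.
*
*	static const struct regmap_config example_sdw_config = {
*		.reg_bits = 32,
*		.val_bits = 8,
*	};
*
*	map = devm_regmap_init_sdw(slave, &example_sdw_config);
*/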
MODULE_DESCRIPTION("Regmap SoundWire Module");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-sdw.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <[email protected]>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
/* block of adjacent registers */
void *block;
/* Which registers are present */
unsigned long *cache_present;
/* base register handled by this block */
unsigned int base_reg;
/* number of registers available in the block */
unsigned int blklen;
/* the actual rbtree node holding this block */
struct rb_node node;
};
struct regcache_rbtree_ctx {
struct rb_root root;
struct regcache_rbtree_node *cached_rbnode;
};
static inline void regcache_rbtree_get_base_top_reg(
struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int *base, unsigned int *top)
{
*base = rbnode->base_reg;
*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
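/*
* Worked example (hypothetical values): with base_reg == 0x10, blklen == 4
* and reg_stride == 2, the block covers registers 0x10, 0x12, 0x14 and 0x16,
* so *base == 0x10 and *top == 0x10 + (4 - 1) * 2 == 0x16.
*/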
static unsigned int regcache_rbtree_get_register(struct regmap *map,
struct regcache_rbtree_node *rbnode, unsigned int idx)
{
return regcache_get_val(map, rbnode->block, idx);
}
static void regcache_rbtree_set_register(struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int idx, unsigned int val)
{
set_bit(idx, rbnode->cache_present);
regcache_set_val(map, rbnode->block, idx, val);
}
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
unsigned int reg)
{
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
unsigned int base_reg, top_reg;
rbnode = rbtree_ctx->cached_rbnode;
if (rbnode) {
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
&top_reg);
if (reg >= base_reg && reg <= top_reg)
return rbnode;
}
node = rbtree_ctx->root.rb_node;
while (node) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
&top_reg);
if (reg >= base_reg && reg <= top_reg) {
rbtree_ctx->cached_rbnode = rbnode;
return rbnode;
} else if (reg > top_reg) {
node = node->rb_right;
} else if (reg < base_reg) {
node = node->rb_left;
}
}
return NULL;
}
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
struct regcache_rbtree_node *rbnode)
{
struct rb_node **new, *parent;
struct regcache_rbtree_node *rbnode_tmp;
unsigned int base_reg_tmp, top_reg_tmp;
unsigned int base_reg;
parent = NULL;
new = &root->rb_node;
while (*new) {
rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
/* base and top registers of the current rbnode */
regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
&top_reg_tmp);
/* base register of the rbnode to be added */
base_reg = rbnode->base_reg;
parent = *new;
/* if this register has already been inserted, just return */
if (base_reg >= base_reg_tmp &&
base_reg <= top_reg_tmp)
return 0;
else if (base_reg > top_reg_tmp)
new = &((*new)->rb_right);
else if (base_reg < base_reg_tmp)
new = &((*new)->rb_left);
}
/* insert the node into the rbtree */
rb_link_node(&rbnode->node, parent, new);
rb_insert_color(&rbnode->node, root);
return 1;
}
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
struct regmap *map = s->private;
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
struct regcache_rbtree_node *n;
struct rb_node *node;
unsigned int base, top;
size_t mem_size;
int nodes = 0;
int registers = 0;
int this_registers, average;
map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = rb_entry(node, struct regcache_rbtree_node, node);
mem_size += sizeof(*n);
mem_size += (n->blklen * map->cache_word_size);
mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
this_registers = ((top - base) / map->reg_stride) + 1;
seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
nodes++;
registers += this_registers;
}
if (nodes)
average = registers / nodes;
else
average = 0;
seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
nodes, registers, average, mem_size);
map->unlock(map->lock_arg);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rbtree);
static void rbtree_debugfs_init(struct regmap *map)
{
debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
int i;
int ret;
map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
rbtree_ctx = map->cache;
rbtree_ctx->root = RB_ROOT;
rbtree_ctx->cached_rbnode = NULL;
for (i = 0; i < map->num_reg_defaults; i++) {
ret = regcache_rbtree_write(map,
map->reg_defaults[i].reg,
map->reg_defaults[i].def);
if (ret)
goto err;
}
return 0;
err:
regcache_rbtree_exit(map);
return ret;
}
static int regcache_rbtree_exit(struct regmap *map)
{
struct rb_node *next;
struct regcache_rbtree_ctx *rbtree_ctx;
struct regcache_rbtree_node *rbtree_node;
/* if we've already been called then just return */
rbtree_ctx = map->cache;
if (!rbtree_ctx)
return 0;
/* free up the rbtree */
next = rb_first(&rbtree_ctx->root);
while (next) {
rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
next = rb_next(&rbtree_node->node);
rb_erase(&rbtree_node->node, &rbtree_ctx->root);
kfree(rbtree_node->cache_present);
kfree(rbtree_node->block);
kfree(rbtree_node);
}
/* release the resources */
kfree(map->cache);
map->cache = NULL;
return 0;
}
static int regcache_rbtree_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
struct regcache_rbtree_node *rbnode;
unsigned int reg_tmp;
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
if (!test_bit(reg_tmp, rbnode->cache_present))
return -ENOENT;
*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
} else {
return -ENOENT;
}
return 0;
}
static int regcache_rbtree_insert_to_block(struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int base_reg,
unsigned int top_reg,
unsigned int reg,
unsigned int value)
{
unsigned int blklen;
unsigned int pos, offset;
unsigned long *present;
u8 *blk;
blklen = (top_reg - base_reg) / map->reg_stride + 1;
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
blk = krealloc(rbnode->block,
blklen * map->cache_word_size,
map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
map->alloc_flags);
if (!present)
return -ENOMEM;
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
* sizeof(*present));
} else {
present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
if (pos == 0) {
memmove(blk + offset * map->cache_word_size,
blk, rbnode->blklen * map->cache_word_size);
bitmap_shift_left(present, present, offset, blklen);
}
/* update the rbnode block, its size and the base register */
rbnode->blklen = blklen;
rbnode->base_reg = base_reg;
rbnode->cache_present = present;
regcache_rbtree_set_register(map, rbnode, pos, value);
return 0;
}
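/*
* Worked example for regcache_rbtree_insert_to_block() (hypothetical values,
* reg_stride == 1): extending a block with base_reg == 0x14 and blklen == 4
* downwards to cover a write at reg == 0x10 gives base_reg == 0x10 and
* top_reg == 0x17, hence blklen == 8, pos == 0 and offset == 4. The old data
* is moved up by four cache words, the presence bitmap is shifted left by
* four bits and the new value lands at index 0.
*/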
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
struct regcache_rbtree_node *rbnode;
const struct regmap_range *range;
int i;
rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
if (!rbnode)
return NULL;
/* If there is a read table then use it to guess at an allocation */
if (map->rd_table) {
for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
if (regmap_reg_in_range(reg,
&map->rd_table->yes_ranges[i]))
break;
}
if (i != map->rd_table->n_yes_ranges) {
range = &map->rd_table->yes_ranges[i];
rbnode->blklen = (range->range_max - range->range_min) /
map->reg_stride + 1;
rbnode->base_reg = range->range_min;
}
}
if (!rbnode->blklen) {
rbnode->blklen = 1;
rbnode->base_reg = reg;
}
rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
map->alloc_flags);
if (!rbnode->block)
goto err_free;
rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
sizeof(*rbnode->cache_present),
map->alloc_flags);
if (!rbnode->cache_present)
goto err_free_block;
return rbnode;
err_free_block:
kfree(rbnode->block);
err_free:
kfree(rbnode);
return NULL;
}
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
struct regcache_rbtree_ctx *rbtree_ctx;
struct regcache_rbtree_node *rbnode, *rbnode_tmp;
struct rb_node *node;
unsigned int reg_tmp;
int ret;
rbtree_ctx = map->cache;
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it.
*/
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
} else {
unsigned int base_reg, top_reg;
unsigned int new_base_reg, new_top_reg;
unsigned int min, max;
unsigned int max_dist;
unsigned int dist, best_dist = UINT_MAX;
max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
map->cache_word_size;
if (reg < max_dist)
min = 0;
else
min = reg - max_dist;
max = reg + max_dist;
/* look for an adjacent register to the one we are about to add */
node = rbtree_ctx->root.rb_node;
while (node) {
rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
node);
regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
&base_reg, &top_reg);
if (base_reg <= max && top_reg >= min) {
if (reg < base_reg)
dist = base_reg - reg;
else if (reg > top_reg)
dist = reg - top_reg;
else
dist = 0;
if (dist < best_dist) {
rbnode = rbnode_tmp;
best_dist = dist;
new_base_reg = min(reg, base_reg);
new_top_reg = max(reg, top_reg);
}
}
/*
* Keep looking, we want to choose the closest block,
* otherwise we might end up creating overlapping
* blocks, which breaks the rbtree.
*/
if (reg < base_reg)
node = node->rb_left;
else if (reg > top_reg)
node = node->rb_right;
else
break;
}
if (rbnode) {
ret = regcache_rbtree_insert_to_block(map, rbnode,
new_base_reg,
new_top_reg, reg,
value);
if (ret)
return ret;
rbtree_ctx->cached_rbnode = rbnode;
return 0;
}
/* We did not manage to find a place to insert it in
* an existing block so create a new rbnode.
*/
rbnode = regcache_rbtree_node_alloc(map, reg);
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
return 0;
}
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
unsigned int max)
{
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
unsigned int base_reg, top_reg;
unsigned int start, end;
int ret;
map->async = true;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
&top_reg);
if (base_reg > max)
break;
if (top_reg < min)
continue;
if (min > base_reg)
start = (min - base_reg) / map->reg_stride;
else
start = 0;
if (max < top_reg)
end = (max - base_reg) / map->reg_stride + 1;
else
end = rbnode->blklen;
ret = regcache_sync_block(map, rbnode->block,
rbnode->cache_present,
rbnode->base_reg, start, end);
if (ret != 0)
return ret;
}
map->async = false;
return regmap_async_complete(map);
}
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
unsigned int max)
{
struct regcache_rbtree_ctx *rbtree_ctx;
struct regcache_rbtree_node *rbnode;
struct rb_node *node;
unsigned int base_reg, top_reg;
unsigned int start, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
&top_reg);
if (base_reg > max)
break;
if (top_reg < min)
continue;
if (min > base_reg)
start = (min - base_reg) / map->reg_stride;
else
start = 0;
if (max < top_reg)
end = (max - base_reg) / map->reg_stride + 1;
else
end = rbnode->blklen;
bitmap_clear(rbnode->cache_present, start, end - start);
}
return 0;
}
struct regcache_ops regcache_rbtree_ops = {
.type = REGCACHE_RBTREE,
.name = "rbtree",
.init = regcache_rbtree_init,
.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = rbtree_debugfs_init,
#endif
.read = regcache_rbtree_read,
.write = regcache_rbtree_write,
.sync = regcache_rbtree_sync,
.drop = regcache_rbtree_drop,
};
| linux-master | drivers/base/regmap/regcache-rbtree.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include "internal.h"
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int ret;
ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
if (ret < 0)
return ret;
return sdw_write_no_pm(slave, reg, val & 0xff);
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read0;
int read1;
read0 = sdw_read_no_pm(slave, reg);
if (read0 < 0)
return read0;
read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
if (read1 < 0)
return read1;
*val = (read1 << 8) | read0;
return 0;
}
static const struct regmap_bus regmap_sdw_mbq = {
.reg_read = regmap_sdw_mbq_read,
.reg_write = regmap_sdw_mbq_write,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
/* MBQ-based controls are only 16-bits for now */
if (config->val_bits != 16)
return -ENOTSUPP;
/* Registers are 32 bits wide */
if (config->reg_bits != 32)
return -ENOTSUPP;
if (config->pad_bits != 0)
return -ENOTSUPP;
return 0;
}
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
&sdw->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
&sdw->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
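/*
* Example usage (a minimal, hypothetical sketch): an SDCA codec driver would
* typically use the devm_regmap_init_sdw_mbq() wrapper in <linux/regmap.h>.
* Per regmap_sdw_mbq_config_check() above, the config must use 32-bit
* register addresses and 16-bit values.
*
*	static const struct regmap_config example_mbq_config = {
*		.reg_bits = 32,
*		.val_bits = 16,
*	};
*
*	map = devm_regmap_init_sdw_mbq(slave, &example_mbq_config);
*/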
MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-sdw-mbq.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - W1 (1-Wire) support
//
// Copyright (c) 2017 Radioavionica Corporation
// Author: Alex A. Mihaylov <[email protected]>
#include <linux/regmap.h>
#include <linux/module.h>
#include <linux/w1.h>
#include "internal.h"
#define W1_CMD_READ_DATA 0x69
#define W1_CMD_WRITE_DATA 0x6C
/*
* 1-Wire slave registers with 8-bit address and 8-bit data
*/
static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 255)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_READ_DATA);
w1_write_8(sl->master, reg);
*val = w1_read_8(sl->master);
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 255)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_WRITE_DATA);
w1_write_8(sl->master, reg);
w1_write_8(sl->master, val);
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
/*
* 1-Wire slave registers with 8-bit address and 16-bit data
*/
static int w1_reg_a8_v16_read(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 255)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_READ_DATA);
w1_write_8(sl->master, reg);
*val = w1_read_8(sl->master);
*val |= w1_read_8(sl->master)<<8;
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static int w1_reg_a8_v16_write(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 255)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_WRITE_DATA);
w1_write_8(sl->master, reg);
w1_write_8(sl->master, val & 0x00FF);
w1_write_8(sl->master, val>>8 & 0x00FF);
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
/*
* 1-Wire slave registers with 16-bit address and 16-bit data
*/
static int w1_reg_a16_v16_read(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 65535)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_READ_DATA);
w1_write_8(sl->master, reg & 0x00FF);
w1_write_8(sl->master, reg>>8 & 0x00FF);
*val = w1_read_8(sl->master);
*val |= w1_read_8(sl->master)<<8;
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
static int w1_reg_a16_v16_write(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
int ret = 0;
if (reg > 65535)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
if (!w1_reset_select_slave(sl)) {
w1_write_8(sl->master, W1_CMD_WRITE_DATA);
w1_write_8(sl->master, reg & 0x00FF);
w1_write_8(sl->master, reg>>8 & 0x00FF);
w1_write_8(sl->master, val & 0x00FF);
w1_write_8(sl->master, val>>8 & 0x00FF);
} else {
ret = -ENODEV;
}
mutex_unlock(&sl->master->bus_mutex);
return ret;
}
/*
* Various types of supported bus addressing
*/
static const struct regmap_bus regmap_w1_bus_a8_v8 = {
.reg_read = w1_reg_a8_v8_read,
.reg_write = w1_reg_a8_v8_write,
};
static const struct regmap_bus regmap_w1_bus_a8_v16 = {
.reg_read = w1_reg_a8_v16_read,
.reg_write = w1_reg_a8_v16_write,
};
static const struct regmap_bus regmap_w1_bus_a16_v16 = {
.reg_read = w1_reg_a16_v16_read,
.reg_write = w1_reg_a16_v16_write,
};
static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev,
const struct regmap_config *config)
{
if (config->reg_bits == 8 && config->val_bits == 8)
return &regmap_w1_bus_a8_v8;
if (config->reg_bits == 8 && config->val_bits == 16)
return &regmap_w1_bus_a8_v16;
if (config->reg_bits == 16 && config->val_bits == 16)
return &regmap_w1_bus_a16_v16;
return ERR_PTR(-ENOTSUPP);
}
struct regmap *__regmap_init_w1(struct device *w1_dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_w1);
struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(w1_dev, bus, w1_dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
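/*
* Example usage (a minimal, hypothetical sketch): a 1-Wire slave driver
* would typically use the devm_regmap_init_w1() wrapper in <linux/regmap.h>,
* passing the slave's struct device. regmap_get_w1_bus() above picks the
* 8/8, 8/16 or 16/16 bit addressing variant based on the config.
*
*	static const struct regmap_config example_w1_config = {
*		.reg_bits = 8,
*		.val_bits = 8,
*	};
*
*	map = devm_regmap_init_w1(&sl->dev, &example_w1_config);
*/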
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-w1.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "internal.h"
struct regmap_async_spi {
struct regmap_async core;
struct spi_message m;
struct spi_transfer t[2];
};
static void regmap_spi_complete(void *data)
{
struct regmap_async_spi *async = data;
regmap_async_complete_cb(&async->core, async->m.status);
}
static int regmap_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
return spi_write(spi, data, count);
}
static int regmap_spi_gather_write(void *context,
const void *reg, size_t reg_len,
const void *val, size_t val_len)
{
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
{ .tx_buf = val, .len = val_len, }, };
spi_message_init(&m);
spi_message_add_tail(&t[0], &m);
spi_message_add_tail(&t[1], &m);
return spi_sync(spi, &m);
}
static int regmap_spi_async_write(void *context,
const void *reg, size_t reg_len,
const void *val, size_t val_len,
struct regmap_async *a)
{
struct regmap_async_spi *async = container_of(a,
struct regmap_async_spi,
core);
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
async->t[0].tx_buf = reg;
async->t[0].len = reg_len;
async->t[1].tx_buf = val;
async->t[1].len = val_len;
spi_message_init(&async->m);
spi_message_add_tail(&async->t[0], &async->m);
if (val)
spi_message_add_tail(&async->t[1], &async->m);
async->m.complete = regmap_spi_complete;
async->m.context = async;
return spi_async(spi, &async->m);
}
static struct regmap_async *regmap_spi_async_alloc(void)
{
struct regmap_async_spi *async_spi;
async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
if (!async_spi)
return NULL;
return &async_spi->core;
}
static int regmap_spi_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
static const struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.async_write = regmap_spi_async_write,
.async_alloc = regmap_spi_async_alloc,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
bus = kmemdup(&regmap_spi, sizeof(*bus), GFP_KERNEL);
if (!bus)
return ERR_PTR(-ENOMEM);
max_msg_size = spi_max_message_size(spi);
reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ config->pad_bits / BITS_PER_BYTE;
if (max_size + reg_reserve_size > max_msg_size)
max_size -= reg_reserve_size;
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
return bus;
}
return &regmap_spi;
}
struct regmap *__regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spi);
struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
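/*
* Example usage (a minimal, hypothetical sketch): SPI client drivers
* normally use the devm_regmap_init_spi() wrapper in <linux/regmap.h>. Note
* the default 0x80 read flag mask in regmap_spi above, matching the common
* convention of setting the top bit of the register address for reads.
*
*	static const struct regmap_config example_spi_config = {
*		.reg_bits = 8,
*		.val_bits = 8,
*	};
*
*	map = devm_regmap_init_spi(spi, &example_spi_config);
*/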
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-spi.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - flat caching support
//
// Copyright 2012 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
static inline unsigned int regcache_flat_get_index(const struct regmap *map,
unsigned int reg)
{
return regcache_get_index_by_order(map, reg);
}
static int regcache_flat_init(struct regmap *map)
{
int i;
unsigned int *cache;
if (!map || map->reg_stride_order < 0 || !map->max_register)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+ 1, sizeof(unsigned int), GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
cache = map->cache;
for (i = 0; i < map->num_reg_defaults; i++) {
unsigned int reg = map->reg_defaults[i].reg;
unsigned int index = regcache_flat_get_index(map, reg);
cache[index] = map->reg_defaults[i].def;
}
return 0;
}
static int regcache_flat_exit(struct regmap *map)
{
kfree(map->cache);
map->cache = NULL;
return 0;
}
static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
*value = cache[index];
return 0;
}
static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
cache[index] = value;
return 0;
}
struct regcache_ops regcache_flat_ops = {
.type = REGCACHE_FLAT,
.name = "flat",
.init = regcache_flat_init,
.exit = regcache_flat_exit,
.read = regcache_flat_read,
.write = regcache_flat_write,
};
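/*
* Example usage (a minimal, hypothetical sketch): the flat cache is selected
* via the regmap config. regcache_flat_init() above requires max_register
* to be set and a power-of-two register stride.
*
*	static const struct regmap_config example_config = {
*		.reg_bits = 8,
*		.val_bits = 8,
*		.max_register = 0x7f,
*		.cache_type = REGCACHE_FLAT,
*	};
*/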
| linux-master | drivers/base/regmap/regcache-flat.c |
// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "internal.h"
struct regmap_irq_chip_data {
struct mutex lock;
struct irq_chip irq_chip;
struct regmap *map;
const struct regmap_irq_chip *chip;
int irq_base;
struct irq_domain *domain;
int irq;
int wake_count;
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
unsigned int *type_buf;
unsigned int *type_buf_def;
unsigned int **config_buf;
unsigned int irq_reg_stride;
unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
unsigned int base, int index);
unsigned int clear_status:1;
};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
int irq)
{
return &data->chip->irqs[irq];
}
static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
struct regmap *map = data->map;
/*
* While possible that a user-defined ->get_irq_reg() callback might
* be linear enough to support bulk reads, most of the time it won't.
* Therefore only allow them if the default callback is being used.
*/
return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
!map->use_single_read;
}
static void regmap_irq_lock(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
mutex_lock(&d->lock);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
int i, j, ret;
u32 reg;
u32 val;
if (d->chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
if (ret < 0)
dev_err(map->dev, "IRQ sync failed to resume: %d\n",
ret);
}
if (d->clear_status) {
for (i = 0; i < d->chip->num_regs; i++) {
reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &val);
if (ret)
dev_err(d->map->dev,
"Failed to clear the interrupt status bits\n");
}
d->clear_status = false;
}
/*
* If there's been a change in the mask write it back to the
* hardware. We rely on the use of the regmap core cache to
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
if (d->chip->handle_mask_sync)
d->chip->handle_mask_sync(i, d->mask_buf_def[i],
d->mask_buf[i],
d->chip->irq_drv_data);
if (d->chip->mask_base && !d->chip->handle_mask_sync) {
reg = d->get_irq_reg(d, d->chip->mask_base, i);
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->mask_buf[i]);
if (ret)
dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
}
if (d->chip->unmask_base && !d->chip->handle_mask_sync) {
reg = d->get_irq_reg(d, d->chip->unmask_base, i);
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
if (ret)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
}
reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (d->wake_buf) {
if (d->chip->wake_invert)
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
~d->wake_buf[i]);
else
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0)
dev_err(d->map->dev,
"Failed to sync wakes in %x: %d\n",
reg, ret);
}
if (!d->chip->init_ack_masked)
continue;
/*
* Ack all the masked interrupts unconditionally; otherwise a masked
* interrupt that hasn't been acked will be ignored by the IRQ handler
* and may trigger an IRQ storm.
*/
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
reg = d->get_irq_reg(d, d->chip->ack_base, i);
/* some chips ack by writing 0 */
if (d->chip->ack_invert)
ret = regmap_write(map, reg, ~d->mask_buf[i]);
else
ret = regmap_write(map, reg, d->mask_buf[i]);
if (d->chip->clear_ack) {
if (d->chip->ack_invert && !ret)
ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
}
}
for (i = 0; i < d->chip->num_config_bases; i++) {
for (j = 0; j < d->chip->num_config_regs; j++) {
reg = d->get_irq_reg(d, d->chip->config_base[i], j);
ret = regmap_write(map, reg, d->config_buf[i][j]);
if (ret)
dev_err(d->map->dev,
"Failed to write config %x: %d\n",
reg, ret);
}
}
if (d->chip->runtime_pm)
pm_runtime_put(map->dev);
/* If we've changed our wakeup count, propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
irq_set_irq_wake(d->irq, 0);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
irq_set_irq_wake(d->irq, 1);
d->wake_count = 0;
mutex_unlock(&d->lock);
}
static void regmap_irq_enable(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
unsigned int reg = irq_data->reg_offset / map->reg_stride;
unsigned int mask;
/*
* The type_in_mask flag means that the underlying hardware uses
* separate mask bits for each interrupt trigger type, but we want
* to have a single logical interrupt with a configurable type.
*
* If the interrupt we're enabling defines any supported types
* then instead of using the regular mask bits for this interrupt,
* use the value previously written to the type buffer at the
* corresponding offset in regmap_irq_set_type().
*/
if (d->chip->type_in_mask && irq_data->type.types_supported)
mask = d->type_buf[reg] & irq_data->mask;
else
mask = irq_data->mask;
if (d->chip->clear_on_unmask)
d->clear_status = true;
d->mask_buf[reg] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
int reg, ret;
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
return 0;
reg = t->type_reg_offset / map->reg_stride;
if (d->chip->type_in_mask) {
ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
irq_data, reg, d->chip->irq_drv_data);
if (ret)
return ret;
}
if (d->chip->set_type_config) {
ret = d->chip->set_type_config(d->config_buf, type, irq_data,
reg, d->chip->irq_drv_data);
if (ret)
return ret;
}
return 0;
}
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
if (on) {
if (d->wake_buf)
d->wake_buf[irq_data->reg_offset / map->reg_stride]
&= ~irq_data->mask;
d->wake_count++;
} else {
if (d->wake_buf)
d->wake_buf[irq_data->reg_offset / map->reg_stride]
|= irq_data->mask;
d->wake_count--;
}
return 0;
}
static const struct irq_chip regmap_irq_chip = {
.irq_bus_lock = regmap_irq_lock,
.irq_bus_sync_unlock = regmap_irq_sync_unlock,
.irq_disable = regmap_irq_disable,
.irq_enable = regmap_irq_enable,
.irq_set_type = regmap_irq_set_type,
.irq_set_wake = regmap_irq_set_wake,
};
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
unsigned int b)
{
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
struct regmap_irq_sub_irq_map *subreg;
unsigned int reg;
int i, ret = 0;
if (!chip->sub_reg_offsets) {
reg = data->get_irq_reg(data, chip->status_base, b);
ret = regmap_read(map, reg, &data->status_buf[b]);
} else {
/*
* Note we can't use ->get_irq_reg() here because the offsets
* in 'subreg' are *not* interchangeable with indices.
*/
subreg = &chip->sub_reg_offsets[b];
for (i = 0; i < subreg->num_regs; i++) {
unsigned int offset = subreg->offset[i];
unsigned int index = offset / map->reg_stride;
ret = regmap_read(map, chip->status_base + offset,
&data->status_buf[index]);
if (ret)
break;
}
}
return ret;
}
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
bool handled = false;
u32 reg;
if (chip->handle_pre_irq)
chip->handle_pre_irq(chip->irq_drv_data);
if (chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
goto exit;
}
}
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
* possible in order to reduce the I/O overheads.
*/
if (chip->no_status) {
/* no status register so default to all active */
memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
} else if (chip->num_main_regs) {
unsigned int max_main_bits;
unsigned long size;
size = chip->num_regs * sizeof(unsigned int);
max_main_bits = (chip->num_main_status_bits) ?
chip->num_main_status_bits : chip->num_regs;
/* Clear the status buf as we don't read all status regs */
memset(data->status_buf, 0, size);
/* We could support bulk reads for main status registers, but
* devices with very many main status registers are not expected,
* so only single reads are supported for the sake of simplicity;
* bulk reads can be added later if ever needed.
*/
for (i = 0; i < chip->num_main_regs; i++) {
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
dev_err(map->dev,
"Failed to read IRQ status %d\n",
ret);
goto exit;
}
}
/* Read sub registers with active IRQs */
for (i = 0; i < chip->num_main_regs; i++) {
unsigned int b;
const unsigned long mreg = data->main_status_buf[i];
for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
if (i * map->format.val_bytes * 8 + b >
max_main_bits)
break;
ret = read_sub_irq_data(data, b);
if (ret != 0) {
dev_err(map->dev,
"Failed to read IRQ status %d\n",
ret);
goto exit;
}
}
}
} else if (regmap_irq_can_bulk_read_status(data)) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
u32 *buf32 = data->status_reg_buf;
BUG_ON(!data->status_reg_buf);
ret = regmap_bulk_read(map, chip->status_base,
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
goto exit;
}
for (i = 0; i < data->chip->num_regs; i++) {
switch (map->format.val_bytes) {
case 1:
data->status_buf[i] = buf8[i];
break;
case 2:
data->status_buf[i] = buf16[i];
break;
case 4:
data->status_buf[i] = buf32[i];
break;
default:
BUG();
goto exit;
}
}
} else {
for (i = 0; i < data->chip->num_regs; i++) {
unsigned int reg = data->get_irq_reg(data,
data->chip->status_base, i);
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
dev_err(map->dev,
"Failed to read IRQ status: %d\n",
ret);
goto exit;
}
}
}
if (chip->status_invert)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
* interrupt. We assume that typically few of the interrupts
* will fire simultaneously so don't worry about overhead from
* doing a write per register.
*/
for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = data->get_irq_reg(data, data->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
~data->status_buf[i]);
else
ret = regmap_write(map, reg,
data->status_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
}
}
for (i = 0; i < chip->num_irqs; i++) {
if (data->status_buf[chip->irqs[i].reg_offset /
map->reg_stride] & chip->irqs[i].mask) {
handle_nested_irq(irq_find_mapping(data->domain, i));
handled = true;
}
}
exit:
if (chip->handle_post_irq)
chip->handle_post_irq(chip->irq_drv_data);
if (chip->runtime_pm)
pm_runtime_put(map->dev);
if (handled)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
irq_set_noprobe(virq);
return 0;
}
static const struct irq_domain_ops regmap_domain_ops = {
.map = regmap_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
/**
* regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
* @data: Data for the &struct regmap_irq_chip
* @base: Base register
* @index: Register index
*
* Returns the register address corresponding to the given @base and @index
* by the formula ``base + index * reg_stride * irq_reg_stride``.
*/
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
unsigned int base, int index)
{
struct regmap *map = data->map;
return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
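/*
* Worked example (hypothetical values): with base == 0x20, a regmap
* reg_stride of 1 and an irq_reg_stride of 2, index 3 maps to register
* 0x20 + 3 * 1 * 2 == 0x26.
*/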
/**
* regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
* @buf: Buffer containing configuration register values; this is a 2D array of
* `num_config_bases` rows, each of `num_config_regs` elements.
* @type: The requested IRQ type.
* @irq_data: The IRQ being configured.
* @idx: Index of the irq's config registers within each array `buf[i]`
* @irq_drv_data: Driver specific IRQ data
*
* This is a &struct regmap_irq_chip->set_type_config callback suitable for
* chips with one config register. Register values are updated according to
* the &struct regmap_irq_type data associated with an IRQ.
*/
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
const struct regmap_irq *irq_data,
int idx, void *irq_drv_data)
{
const struct regmap_irq_type *t = &irq_data->type;
if (t->type_reg_mask)
buf[0][idx] &= ~t->type_reg_mask;
else
buf[0][idx] &= ~(t->type_falling_val |
t->type_rising_val |
t->type_level_low_val |
t->type_level_high_val);
switch (type) {
case IRQ_TYPE_EDGE_FALLING:
buf[0][idx] |= t->type_falling_val;
break;
case IRQ_TYPE_EDGE_RISING:
buf[0][idx] |= t->type_rising_val;
break;
case IRQ_TYPE_EDGE_BOTH:
buf[0][idx] |= (t->type_falling_val |
t->type_rising_val);
break;
case IRQ_TYPE_LEVEL_HIGH:
buf[0][idx] |= t->type_level_high_val;
break;
case IRQ_TYPE_LEVEL_LOW:
buf[0][idx] |= t->type_level_low_val;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
/**
* regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
* @fwnode: The firmware node where the IRQ domain should be added to.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
* @irq_base: Allocate at specific IRQ number if irq_base > 0.
* @chip: Configuration for the interrupt controller.
* @data: Runtime data structure for the controller, allocated on success.
*
* Returns 0 on success or an errno on failure.
*
* In order for this to be efficient the chip really should use a
* register cache. The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume.
*/
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
int i;
int ret = -ENOMEM;
u32 reg;
if (chip->num_regs <= 0)
return -EINVAL;
if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
return -EINVAL;
if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted)
return -EINVAL;
for (i = 0; i < chip->num_irqs; i++) {
if (chip->irqs[i].reg_offset % map->reg_stride)
return -EINVAL;
if (chip->irqs[i].reg_offset / map->reg_stride >=
chip->num_regs)
return -EINVAL;
}
if (irq_base) {
irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
if (irq_base < 0) {
dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
irq_base);
return irq_base;
}
}
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
if (chip->num_main_regs) {
d->main_status_buf = kcalloc(chip->num_main_regs,
sizeof(*d->main_status_buf),
GFP_KERNEL);
if (!d->main_status_buf)
goto err_alloc;
}
d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
GFP_KERNEL);
if (!d->status_buf)
goto err_alloc;
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
goto err_alloc;
d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
GFP_KERNEL);
if (!d->mask_buf_def)
goto err_alloc;
if (chip->wake_base) {
d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
GFP_KERNEL);
if (!d->wake_buf)
goto err_alloc;
}
if (chip->type_in_mask) {
d->type_buf_def = kcalloc(chip->num_regs,
sizeof(*d->type_buf_def), GFP_KERNEL);
if (!d->type_buf_def)
goto err_alloc;
d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL);
if (!d->type_buf)
goto err_alloc;
}
if (chip->num_config_bases && chip->num_config_regs) {
/*
* Create config_buf[num_config_bases][num_config_regs]
*/
d->config_buf = kcalloc(chip->num_config_bases,
sizeof(*d->config_buf), GFP_KERNEL);
if (!d->config_buf)
goto err_alloc;
for (i = 0; i < chip->num_config_bases; i++) {
d->config_buf[i] = kcalloc(chip->num_config_regs,
sizeof(**d->config_buf),
GFP_KERNEL);
if (!d->config_buf[i])
goto err_alloc;
}
}
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
d->irq = irq;
d->map = map;
d->chip = chip;
d->irq_base = irq_base;
if (chip->irq_reg_stride)
d->irq_reg_stride = chip->irq_reg_stride;
else
d->irq_reg_stride = 1;
if (chip->get_irq_reg)
d->get_irq_reg = chip->get_irq_reg;
else
d->get_irq_reg = regmap_irq_get_irq_reg_linear;
if (regmap_irq_can_bulk_read_status(d)) {
d->status_reg_buf = kmalloc_array(chip->num_regs,
map->format.val_bytes,
GFP_KERNEL);
if (!d->status_reg_buf)
goto err_alloc;
}
mutex_init(&d->lock);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
|= chip->irqs[i].mask;
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
if (chip->handle_mask_sync) {
ret = chip->handle_mask_sync(i, d->mask_buf_def[i],
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
goto err_alloc;
}
if (chip->mask_base && !chip->handle_mask_sync) {
reg = d->get_irq_reg(d, chip->mask_base, i);
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->mask_buf[i]);
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
}
if (chip->unmask_base && !chip->handle_mask_sync) {
reg = d->get_irq_reg(d, chip->unmask_base, i);
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
}
if (!chip->init_ack_masked)
continue;
		/* Ack interrupts that are masked but currently set */
if (d->chip->no_status) {
/* no status register so default to all active */
d->status_buf[i] = GENMASK(31, 0);
} else {
reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
goto err_alloc;
}
}
if (chip->status_invert)
d->status_buf[i] = ~d->status_buf[i];
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = d->get_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
~(d->status_buf[i] & d->mask_buf[i]));
else
ret = regmap_write(map, reg,
d->status_buf[i] & d->mask_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
ret = regmap_write(map, reg, 0);
}
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
}
}
/* Wake is disabled by default */
if (d->wake_buf) {
for (i = 0; i < chip->num_regs; i++) {
d->wake_buf[i] = d->mask_buf_def[i];
reg = d->get_irq_reg(d, d->chip->wake_base, i);
if (chip->wake_invert)
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
0);
else
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i],
d->wake_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
}
}
if (irq_base)
d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
irq_base, 0,
						     &regmap_domain_ops, d);
else
d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
						     &regmap_domain_ops, d);
if (!d->domain) {
dev_err(map->dev, "Failed to create IRQ domain\n");
ret = -ENOMEM;
goto err_alloc;
}
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
chip->name, d);
if (ret != 0) {
dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
irq, chip->name, ret);
goto err_domain;
}
*data = d;
return 0;
err_domain:
/* Should really dispose of the domain but... */
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
kfree(d);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
/**
* regmap_add_irq_chip() - Use standard regmap IRQ controller handling
*
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
* @irq_base: Allocate at specific IRQ number if irq_base > 0.
* @chip: Configuration for the interrupt controller.
* @data: Runtime data structure for the controller, allocated on success.
*
* Returns 0 on success or an errno on failure.
*
* This is the same as regmap_add_irq_chip_fwnode, except that the firmware
* node of the regmap is used.
*/
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
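/*
 * Illustrative sketch (not part of this file upstream): how a client
 * driver might describe and register an interrupt controller with
 * regmap_add_irq_chip().  Every register address, bit mask and "demo"
 * name below is a hypothetical placeholder, not taken from any real
 * device.
 */
static const struct regmap_irq demo_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* hwirq 0: first status reg, bit 0 */
	REGMAP_IRQ_REG(1, 0, BIT(1)),	/* hwirq 1: first status reg, bit 1 */
};

static const struct regmap_irq_chip demo_irq_chip = {
	.name = "demo",
	.status_base = 0x10,		/* hypothetical status register */
	.mask_base = 0x14,		/* hypothetical mask register */
	.num_regs = 1,
	.irqs = demo_irqs,
	.num_irqs = ARRAY_SIZE(demo_irqs),
};

static int demo_register_irq_chip(struct regmap *map, int irq,
				  struct regmap_irq_chip_data **irq_data)
{
	/* irq_base == 0 requests a linear (dynamically mapped) domain */
	return regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW, 0,
				   &demo_irq_chip, irq_data);
}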
/**
* regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
*
* @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
*
* This function also disposes of all mapped IRQs on the chip.
*/
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
unsigned int virq;
int i, hwirq;
if (!d)
return;
free_irq(irq, d);
	/* Dispose of all virtual irqs in the domain before removing it */
for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Skip this hwirq if it is a hole in the IRQ list */
if (!d->chip->irqs[hwirq].mask)
continue;
		/*
		 * Find the virtual irq mapped to this hwirq, if any,
		 * and dispose of it
		 */
virq = irq_find_mapping(d->domain, hwirq);
if (virq)
irq_dispose_mapping(virq);
}
irq_domain_remove(d->domain);
kfree(d->type_buf);
kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
regmap_del_irq_chip(d->irq, d);
}
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
struct regmap_irq_chip_data **r = res;
if (!r || !*r) {
WARN_ON(!r || !*r);
return 0;
}
return *r == data;
}
/**
* devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
*
 * @dev: The device pointer to which the irq_chip belongs.
 * @fwnode: The firmware node to which the IRQ domain should be added.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
*
* Returns 0 on success or an errno on failure.
*
 * The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
struct fwnode_handle *fwnode,
struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data **ptr, *d;
int ret;
ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
chip, &d);
if (ret < 0) {
devres_free(ptr);
return ret;
}
*ptr = d;
devres_add(dev, ptr);
*data = d;
return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
* devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
 * @dev: The device pointer to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
*
* Returns 0 on success or an errno on failure.
*
 * The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
irq, irq_flags, irq_base, chip,
data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
* devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
* @dev: Device for which the resource was allocated.
* @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
*
* A resource managed version of regmap_del_irq_chip().
*/
void devm_regmap_del_irq_chip(struct device *dev, int irq,
struct regmap_irq_chip_data *data)
{
int rc;
WARN_ON(irq != data->irq);
rc = devres_release(dev, devm_regmap_irq_chip_release,
devm_regmap_irq_chip_match, data);
if (rc != 0)
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
/**
* regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
*
* @data: regmap irq controller to operate on.
*
* Useful for drivers to request their own IRQs.
*/
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
WARN_ON(!data->irq_base);
return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
* regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
*
* @data: regmap irq controller to operate on.
* @irq: index of the interrupt requested in the chip IRQs.
*
* Useful for drivers to request their own IRQs.
*/
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
/* Handle holes in the IRQ list */
if (!data->chip->irqs[irq].mask)
return -EINVAL;
return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
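/*
 * Illustrative sketch: translating a hardware IRQ index into a Linux
 * virtual IRQ with regmap_irq_get_virq() and requesting it.  The
 * handler, device name and hwirq index 0 are hypothetical.
 */
static irqreturn_t demo_sub_irq_handler(int irq, void *dev_id)
{
	/* device-specific handling would go here */
	return IRQ_HANDLED;
}

static int demo_request_sub_irq(struct device *dev,
				struct regmap_irq_chip_data *irq_data)
{
	int virq = regmap_irq_get_virq(irq_data, 0);

	/* negative errno for holes, zero if the mapping failed */
	if (virq <= 0)
		return virq ? virq : -EINVAL;

	return devm_request_threaded_irq(dev, virq, NULL,
					 demo_sub_irq_handler, IRQF_ONESHOT,
					 "demo-sub-irq", NULL);
}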
/**
* regmap_irq_get_domain() - Retrieve the irq_domain for the chip
*
* @data: regmap_irq controller to operate on.
*
* Useful for drivers to request their own IRQs and for integration
* with subsystems. For ease of integration NULL is accepted as a
* domain, allowing devices to just call this even if no domain is
* allocated.
*/
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
if (data)
return data->domain;
else
return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
| linux-master | drivers/base/regmap/regmap-irq.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/regmap.h>
#define REGVAL_MASK GENMASK(15, 0)
#define REGNUM_C22_MASK GENMASK(4, 0)
/* The Clause-45 mask includes the device type (5 bits) and the actual register number (16 bits) */
#define REGNUM_C45_MASK GENMASK(20, 0)
static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
{
struct mdio_device *mdio_dev = context;
int ret;
if (unlikely(reg & ~REGNUM_C22_MASK))
return -ENXIO;
ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
*val = ret & REGVAL_MASK;
return 0;
}
static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
{
struct mdio_device *mdio_dev = context;
if (unlikely(reg & ~REGNUM_C22_MASK))
return -ENXIO;
return mdiodev_write(mdio_dev, reg, val);
}
static const struct regmap_bus regmap_mdio_c22_bus = {
.reg_write = regmap_mdio_c22_write,
.reg_read = regmap_mdio_c22_read,
};
static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
{
struct mdio_device *mdio_dev = context;
unsigned int devad;
int ret;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
ret = mdiodev_c45_read(mdio_dev, devad, reg);
if (ret < 0)
return ret;
*val = ret & REGVAL_MASK;
return 0;
}
static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
{
struct mdio_device *mdio_dev = context;
unsigned int devad;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
return mdiodev_c45_write(mdio_dev, devad, reg, val);
}
static const struct regmap_bus regmap_mdio_c45_bus = {
.reg_write = regmap_mdio_c45_write,
.reg_read = regmap_mdio_c45_read,
};
struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
const struct regmap_config *config, struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus;
if (config->reg_bits == 5 && config->val_bits == 16)
		bus = &regmap_mdio_c22_bus;
else if (config->reg_bits == 21 && config->val_bits == 16)
		bus = &regmap_mdio_c45_bus;
else
return ERR_PTR(-EOPNOTSUPP);
return __regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_mdio);
struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
const struct regmap_config *config, struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus;
if (config->reg_bits == 5 && config->val_bits == 16)
		bus = &regmap_mdio_c22_bus;
else if (config->reg_bits == 21 && config->val_bits == 16)
		bus = &regmap_mdio_c45_bus;
else
return ERR_PTR(-EOPNOTSUPP);
return __devm_regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
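/*
 * Illustrative sketch: how a caller ends up on the Clause-22 bus above,
 * by describing 5-bit register numbers and 16-bit values in its
 * regmap_config.  The probe wiring and register write are hypothetical;
 * only the reg_bits/val_bits pair is what the bus selection checks.
 */
static const struct regmap_config demo_c22_config = {
	.reg_bits = 5,			/* Clause-22 register numbers */
	.val_bits = 16,			/* 16-bit register values */
};

static int demo_mdio_probe(struct mdio_device *mdio_dev)
{
	struct regmap *map;

	map = devm_regmap_init_mdio(mdio_dev, &demo_c22_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* e.g. set a hypothetical control bit in register 0 */
	return regmap_write(map, 0, 0x8000);
}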
MODULE_AUTHOR("Sander Vanheule <[email protected]>");
MODULE_DESCRIPTION("Regmap MDIO Module");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-mdio.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "internal.h"
/*
* Sometimes for failures during very early init the trace
* infrastructure isn't available early enough to be used. For this
* sort of problem defining LOG_DEVICE will add printks for basic
* register I/O on a specific device.
*/
#undef LOG_DEVICE
#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write);
static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val);
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
{
const struct regmap_range *r;
int i;
for (i = 0, r = ranges; i < nranges; i++, r++)
if (regmap_reg_in_range(reg, r))
return true;
return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table)
{
/* Check "no ranges" first */
if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
return false;
/* In case zero "yes ranges" are supplied, any reg is OK */
if (!table->n_yes_ranges)
return true;
return regmap_reg_in_ranges(reg, table->yes_ranges,
table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
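/*
 * Illustrative sketch: the kind of driver-supplied access table the
 * helper above evaluates.  The register ranges are hypothetical, and
 * such a table would be wired up via the .rd_table member of a
 * struct regmap_config.
 */
static const struct regmap_range demo_readable_ranges[] = {
	regmap_reg_range(0x00, 0x0f),	/* hypothetical control block */
	regmap_reg_range(0x20, 0x2f),	/* hypothetical status block */
};

static const struct regmap_access_table demo_readable_table = {
	.yes_ranges = demo_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(demo_readable_ranges),
};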
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
if (map->max_register && reg > map->max_register)
return false;
if (map->writeable_reg)
return map->writeable_reg(map->dev, reg);
if (map->wr_table)
return regmap_check_range_table(map, reg, map->wr_table);
return true;
}
bool regmap_cached(struct regmap *map, unsigned int reg)
{
int ret;
unsigned int val;
if (map->cache_type == REGCACHE_NONE)
return false;
if (!map->cache_ops)
return false;
if (map->max_register && reg > map->max_register)
return false;
map->lock(map->lock_arg);
ret = regcache_read(map, reg, &val);
map->unlock(map->lock_arg);
if (ret)
return false;
return true;
}
bool regmap_readable(struct regmap *map, unsigned int reg)
{
if (!map->reg_read)
return false;
if (map->max_register && reg > map->max_register)
return false;
if (map->format.format_write)
return false;
if (map->readable_reg)
return map->readable_reg(map->dev, reg);
if (map->rd_table)
return regmap_check_range_table(map, reg, map->rd_table);
return true;
}
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
if (!map->format.format_write && !regmap_readable(map, reg))
return false;
if (map->volatile_reg)
return map->volatile_reg(map->dev, reg);
if (map->volatile_table)
return regmap_check_range_table(map, reg, map->volatile_table);
if (map->cache_ops)
return false;
else
return true;
}
bool regmap_precious(struct regmap *map, unsigned int reg)
{
if (!regmap_readable(map, reg))
return false;
if (map->precious_reg)
return map->precious_reg(map->dev, reg);
if (map->precious_table)
return regmap_check_range_table(map, reg, map->precious_table);
return false;
}
bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
if (map->writeable_noinc_reg)
return map->writeable_noinc_reg(map->dev, reg);
if (map->wr_noinc_table)
return regmap_check_range_table(map, reg, map->wr_noinc_table);
return true;
}
bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
if (map->readable_noinc_reg)
return map->readable_noinc_reg(map->dev, reg);
if (map->rd_noinc_table)
return regmap_check_range_table(map, reg, map->rd_noinc_table);
return true;
}
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
size_t num)
{
unsigned int i;
for (i = 0; i < num; i++)
if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
return false;
return true;
}
static void regmap_format_12_20_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
u8 *out = map->work_buf;
out[0] = reg >> 4;
out[1] = (reg << 4) | (val >> 16);
out[2] = val >> 8;
out[3] = val;
}
static void regmap_format_2_6_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
u8 *out = map->work_buf;
*out = (reg << 6) | val;
}
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
__be16 *out = map->work_buf;
*out = cpu_to_be16((reg << 12) | val);
}
static void regmap_format_7_9_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
__be16 *out = map->work_buf;
*out = cpu_to_be16((reg << 9) | val);
}
static void regmap_format_7_17_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
u8 *out = map->work_buf;
out[2] = val;
out[1] = val >> 8;
out[0] = (val >> 16) | (reg << 1);
}
static void regmap_format_10_14_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
u8 *out = map->work_buf;
out[2] = val;
out[1] = (val >> 8) | (reg << 6);
out[0] = reg >> 2;
}
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
u8 *b = buf;
b[0] = val << shift;
}
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
u16 v = val << shift;
memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
put_unaligned_be24(val << shift, buf);
}
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
u32 v = val << shift;
memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_inplace_noop(void *buf)
{
}
static unsigned int regmap_parse_8(const void *buf)
{
const u8 *b = buf;
return b[0];
}
static unsigned int regmap_parse_16_be(const void *buf)
{
return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
u16 v = get_unaligned_be16(buf);
memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
u16 v = get_unaligned_le16(buf);
memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
u16 v;
memcpy(&v, buf, sizeof(v));
return v;
}
static unsigned int regmap_parse_24_be(const void *buf)
{
return get_unaligned_be24(buf);
}
static unsigned int regmap_parse_32_be(const void *buf)
{
return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
u32 v = get_unaligned_be32(buf);
memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
u32 v = get_unaligned_le32(buf);
memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
u32 v;
memcpy(&v, buf, sizeof(v));
return v;
}
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
static void regmap_lock_hwlock_irq(void *__map)
{
struct regmap *map = __map;
hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}
static void regmap_lock_hwlock_irqsave(void *__map)
{
struct regmap *map = __map;
hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
&map->spinlock_flags);
}
static void regmap_unlock_hwlock(void *__map)
{
struct regmap *map = __map;
hwspin_unlock(map->hwlock);
}
static void regmap_unlock_hwlock_irq(void *__map)
{
struct regmap *map = __map;
hwspin_unlock_irq(map->hwlock);
}
static void regmap_unlock_hwlock_irqrestore(void *__map)
{
struct regmap *map = __map;
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
static void regmap_lock_unlock_none(void *__map)
{
}
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
mutex_lock(&map->mutex);
}
static void regmap_unlock_mutex(void *__map)
{
struct regmap *map = __map;
mutex_unlock(&map->mutex);
}
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
struct regmap *map = __map;
unsigned long flags;
spin_lock_irqsave(&map->spinlock, flags);
map->spinlock_flags = flags;
}
static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
struct regmap *map = __map;
spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
struct regmap *map = __map;
unsigned long flags;
raw_spin_lock_irqsave(&map->raw_spinlock, flags);
map->raw_spinlock_flags = flags;
}
static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
struct regmap *map = __map;
raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}
static void dev_get_regmap_release(struct device *dev, void *res)
{
/*
* We don't actually have anything to do here; the goal here
* is not to manage the regmap but to provide a simple way to
* get the regmap back given a struct device.
*/
}
static bool _regmap_range_add(struct regmap *map,
struct regmap_range_node *data)
{
struct rb_root *root = &map->range_tree;
struct rb_node **new = &(root->rb_node), *parent = NULL;
while (*new) {
struct regmap_range_node *this =
rb_entry(*new, struct regmap_range_node, node);
parent = *new;
if (data->range_max < this->range_min)
new = &((*new)->rb_left);
else if (data->range_min > this->range_max)
new = &((*new)->rb_right);
else
return false;
}
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
return true;
}
static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
unsigned int reg)
{
struct rb_node *node = map->range_tree.rb_node;
while (node) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
if (reg < this->range_min)
node = node->rb_left;
else if (reg > this->range_max)
node = node->rb_right;
else
return this;
}
return NULL;
}
static void regmap_range_exit(struct regmap *map)
{
struct rb_node *next;
struct regmap_range_node *range_node;
next = rb_first(&map->range_tree);
while (next) {
range_node = rb_entry(next, struct regmap_range_node, node);
next = rb_next(&range_node->node);
rb_erase(&range_node->node, &map->range_tree);
kfree(range_node);
}
kfree(map->selector_work_buf);
}
static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
if (config->name) {
const char *name = kstrdup_const(config->name, GFP_KERNEL);
if (!name)
return -ENOMEM;
kfree_const(map->name);
map->name = name;
}
return 0;
}
int regmap_attach_dev(struct device *dev, struct regmap *map,
const struct regmap_config *config)
{
struct regmap **m;
int ret;
map->dev = dev;
ret = regmap_set_name(map, config);
if (ret)
return ret;
regmap_debugfs_exit(map);
regmap_debugfs_init(map);
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
regmap_debugfs_exit(map);
return -ENOMEM;
}
*m = map;
devres_add(dev, m);
return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
enum regmap_endian endian;
/* Retrieve the endianness specification from the regmap config */
endian = config->reg_format_endian;
/* If the regmap config specified a non-default value, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
/* Retrieve the endianness specification from the bus config */
if (bus && bus->reg_format_endian_default)
endian = bus->reg_format_endian_default;
/* If the bus specified a non-default value, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
/* Use this if no other value was found */
return REGMAP_ENDIAN_BIG;
}
enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config)
{
struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
enum regmap_endian endian;
/* Retrieve the endianness specification from the regmap config */
endian = config->val_format_endian;
/* If the regmap config specified a non-default value, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
	/* If the firmware node exists, try to get endianness from it */
if (fwnode_property_read_bool(fwnode, "big-endian"))
endian = REGMAP_ENDIAN_BIG;
else if (fwnode_property_read_bool(fwnode, "little-endian"))
endian = REGMAP_ENDIAN_LITTLE;
else if (fwnode_property_read_bool(fwnode, "native-endian"))
endian = REGMAP_ENDIAN_NATIVE;
/* If the endianness was specified in fwnode, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
/* Retrieve the endianness specification from the bus config */
if (bus && bus->val_format_endian_default)
endian = bus->val_format_endian_default;
/* If the bus specified a non-default value, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
/* Use this if no other value was found */
return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
struct regmap *__regmap_init(struct device *dev,
const struct regmap_bus *bus,
void *bus_context,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap *map;
int ret = -EINVAL;
enum regmap_endian reg_endian, val_endian;
int i, j;
if (!config)
goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
ret = -ENOMEM;
goto err;
}
ret = regmap_set_name(map, config);
if (ret)
goto err_map;
ret = -EINVAL; /* Later error paths rely on this */
if (config->disable_locking) {
map->lock = map->unlock = regmap_lock_unlock_none;
map->can_sleep = config->can_sleep;
regmap_debugfs_disable(map);
} else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
map->can_sleep = config->can_sleep;
} else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
goto err_name;
}
switch (config->hwlock_mode) {
case HWLOCK_IRQSTATE:
map->lock = regmap_lock_hwlock_irqsave;
map->unlock = regmap_unlock_hwlock_irqrestore;
break;
case HWLOCK_IRQ:
map->lock = regmap_lock_hwlock_irq;
map->unlock = regmap_unlock_hwlock_irq;
break;
default:
map->lock = regmap_lock_hwlock;
map->unlock = regmap_unlock_hwlock;
break;
}
map->lock_arg = map;
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
if (config->use_raw_spinlock) {
raw_spin_lock_init(&map->raw_spinlock);
map->lock = regmap_lock_raw_spinlock;
map->unlock = regmap_unlock_raw_spinlock;
lockdep_set_class_and_name(&map->raw_spinlock,
lock_key, lock_name);
} else {
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
lockdep_set_class_and_name(&map->spinlock,
lock_key, lock_name);
}
} else {
mutex_init(&map->mutex);
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
map->can_sleep = true;
lockdep_set_class_and_name(&map->mutex,
lock_key, lock_name);
}
map->lock_arg = map;
}
/*
	 * When we write in fast paths with regmap_bulk_write(), don't allocate
* scratch buffers with sleeping allocations.
*/
if ((bus && bus->fast_io) || config->fast_io)
map->alloc_flags = GFP_ATOMIC;
else
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
map->reg_shift = config->pad_bits % 8;
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
map->reg_stride = 1;
if (is_power_of_2(map->reg_stride))
map->reg_stride_order = ilog2(map->reg_stride);
else
map->reg_stride_order = -1;
map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
if (bus) {
map->max_raw_read = bus->max_raw_read;
map->max_raw_write = bus->max_raw_write;
} else if (config->max_raw_read && config->max_raw_write) {
map->max_raw_read = config->max_raw_read;
map->max_raw_write = config->max_raw_write;
}
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
map->wr_table = config->wr_table;
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
map->precious_table = config->precious_table;
map->wr_noinc_table = config->wr_noinc_table;
map->rd_noinc_table = config->rd_noinc_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->writeable_noinc_reg = config->writeable_noinc_reg;
map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
if (config->read_flag_mask ||
config->write_flag_mask ||
config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
map->read_flag_mask = bus->read_flag_mask;
}
if (config && config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
/* Bulk read/write */
map->read = config->read;
map->write = config->write;
reg_endian = REGMAP_ENDIAN_NATIVE;
val_endian = REGMAP_ENDIAN_NATIVE;
} else if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
} else if (!bus->read || !bus->write) {
map->reg_read = _regmap_bus_reg_read;
map->reg_write = _regmap_bus_reg_write;
map->reg_update_bits = bus->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
} else {
map->reg_read = _regmap_bus_read;
map->reg_update_bits = bus->reg_update_bits;
/* Bulk read/write */
map->read = bus->read;
map->write = bus->write;
reg_endian = regmap_get_reg_endian(bus, config);
val_endian = regmap_get_val_endian(dev, bus, config);
}
switch (config->reg_bits + map->reg_shift) {
case 2:
switch (config->val_bits) {
case 6:
map->format.format_write = regmap_format_2_6_write;
break;
default:
goto err_hwlock;
}
break;
case 4:
switch (config->val_bits) {
case 12:
map->format.format_write = regmap_format_4_12_write;
break;
default:
goto err_hwlock;
}
break;
case 7:
switch (config->val_bits) {
case 9:
map->format.format_write = regmap_format_7_9_write;
break;
case 17:
map->format.format_write = regmap_format_7_17_write;
break;
default:
goto err_hwlock;
}
break;
case 10:
switch (config->val_bits) {
case 14:
map->format.format_write = regmap_format_10_14_write;
break;
default:
goto err_hwlock;
}
break;
case 12:
switch (config->val_bits) {
case 20:
map->format.format_write = regmap_format_12_20_write;
break;
default:
goto err_hwlock;
}
break;
case 8:
map->format.format_reg = regmap_format_8;
break;
case 16:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_16_be;
break;
case REGMAP_ENDIAN_LITTLE:
map->format.format_reg = regmap_format_16_le;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_reg = regmap_format_16_native;
break;
default:
goto err_hwlock;
}
break;
case 24:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_24_be;
break;
default:
goto err_hwlock;
}
break;
case 32:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_32_be;
break;
case REGMAP_ENDIAN_LITTLE:
map->format.format_reg = regmap_format_32_le;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_reg = regmap_format_32_native;
break;
default:
goto err_hwlock;
}
break;
default:
goto err_hwlock;
}
if (val_endian == REGMAP_ENDIAN_NATIVE)
map->format.parse_inplace = regmap_parse_inplace_noop;
switch (config->val_bits) {
case 8:
map->format.format_val = regmap_format_8;
map->format.parse_val = regmap_parse_8;
map->format.parse_inplace = regmap_parse_inplace_noop;
break;
case 16:
switch (val_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_16_be;
map->format.parse_val = regmap_parse_16_be;
map->format.parse_inplace = regmap_parse_16_be_inplace;
break;
case REGMAP_ENDIAN_LITTLE:
map->format.format_val = regmap_format_16_le;
map->format.parse_val = regmap_parse_16_le;
map->format.parse_inplace = regmap_parse_16_le_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_16_native;
map->format.parse_val = regmap_parse_16_native;
break;
default:
goto err_hwlock;
}
break;
case 24:
switch (val_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_24_be;
map->format.parse_val = regmap_parse_24_be;
break;
default:
goto err_hwlock;
}
break;
case 32:
switch (val_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_32_be;
map->format.parse_val = regmap_parse_32_be;
map->format.parse_inplace = regmap_parse_32_be_inplace;
break;
case REGMAP_ENDIAN_LITTLE:
map->format.format_val = regmap_format_32_le;
map->format.parse_val = regmap_parse_32_le;
map->format.parse_inplace = regmap_parse_32_le_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_32_native;
map->format.parse_val = regmap_parse_32_native;
break;
default:
goto err_hwlock;
}
break;
}
if (map->format.format_write) {
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
goto err_hwlock;
map->use_single_write = true;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
goto err_hwlock;
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
goto err_hwlock;
}
if (map->format.format_write) {
map->defer_caching = false;
map->reg_write = _regmap_bus_formatted_write;
} else if (map->format.format_val) {
map->defer_caching = true;
map->reg_write = _regmap_bus_raw_write;
}
skip_format_initialization:
map->range_tree = RB_ROOT;
for (i = 0; i < config->num_ranges; i++) {
const struct regmap_range_cfg *range_cfg = &config->ranges[i];
struct regmap_range_node *new;
/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}
if (range_cfg->range_max > map->max_register) {
dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}
if (range_cfg->selector_reg > map->max_register) {
dev_err(map->dev,
"Invalid range %d: selector out of map\n", i);
goto err_range;
}
if (range_cfg->window_len == 0) {
dev_err(map->dev, "Invalid range %d: window_len 0\n",
i);
goto err_range;
}
		/* Make sure that this register range has no selector
		   or data window within its boundary */
for (j = 0; j < config->num_ranges; j++) {
unsigned int sel_reg = config->ranges[j].selector_reg;
unsigned int win_min = config->ranges[j].window_start;
unsigned int win_max = win_min +
config->ranges[j].window_len - 1;
/* Allow data window inside its own virtual range */
if (j == i)
continue;
if (range_cfg->range_min <= sel_reg &&
sel_reg <= range_cfg->range_max) {
dev_err(map->dev,
"Range %d: selector for %d in window\n",
i, j);
goto err_range;
}
if (!(win_max < range_cfg->range_min ||
win_min > range_cfg->range_max)) {
dev_err(map->dev,
"Range %d: window for %d in window\n",
i, j);
goto err_range;
}
}
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (new == NULL) {
ret = -ENOMEM;
goto err_range;
}
new->map = map;
new->name = range_cfg->name;
new->range_min = range_cfg->range_min;
new->range_max = range_cfg->range_max;
new->selector_reg = range_cfg->selector_reg;
new->selector_mask = range_cfg->selector_mask;
new->selector_shift = range_cfg->selector_shift;
new->window_start = range_cfg->window_start;
new->window_len = range_cfg->window_len;
if (!_regmap_range_add(map, new)) {
dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
}
if (map->selector_work_buf == NULL) {
map->selector_work_buf =
kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->selector_work_buf == NULL) {
ret = -ENOMEM;
goto err_range;
}
}
}
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
if (dev) {
ret = regmap_attach_dev(dev, map, config);
if (ret != 0)
goto err_regcache;
} else {
regmap_debugfs_init(map);
}
return map;
err_regcache:
regcache_exit(map);
err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
if (map->hwlock)
hwspin_lock_free(map->hwlock);
err_name:
kfree_const(map->name);
err_map:
kfree(map);
err:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
static void devm_regmap_release(struct device *dev, void *res)
{
regmap_exit(*(struct regmap **)res);
}
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
void *bus_context,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap **ptr, *regmap;
ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
regmap = __regmap_init(dev, bus, bus_context, config,
lock_key, lock_name);
if (!IS_ERR(regmap)) {
*ptr = regmap;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
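/*
 * Illustrative sketch: the kind of configuration that the bus-specific
 * wrappers (devm_regmap_init_i2c() and friends) pass down to
 * __devm_regmap_init().  All values are hypothetical.
 */
static const struct regmap_config demo_map_config = {
	.reg_bits = 8,			/* 8-bit register addresses */
	.val_bits = 8,			/* 8-bit register values */
	.max_register = 0x7f,		/* last valid register */
	.cache_type = REGCACHE_RBTREE,	/* keep a shadow of the registers */
};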
static void regmap_field_init(struct regmap_field *rm_field,
struct regmap *regmap, struct reg_field reg_field)
{
rm_field->regmap = regmap;
rm_field->reg = reg_field.reg;
rm_field->shift = reg_field.lsb;
rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
rm_field->id_size = reg_field.id_size;
rm_field->id_offset = reg_field.id_offset;
}
/**
* devm_regmap_field_alloc() - Allocate and initialise a register field.
*
* @dev: Device that will be interacted with
* @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap_field. The regmap_field will be automatically freed
* by the device management code.
*/
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
struct regmap *regmap, struct reg_field reg_field)
{
struct regmap_field *rm_field = devm_kzalloc(dev,
sizeof(*rm_field), GFP_KERNEL);
if (!rm_field)
return ERR_PTR(-ENOMEM);
regmap_field_init(rm_field, regmap, reg_field);
return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
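/*
 * Illustrative sketch: defining a field with the REG_FIELD() helper
 * and writing it through the allocator above.  The register address,
 * bit positions and "gain" semantics are hypothetical.
 */
static int demo_set_gain(struct device *dev, struct regmap *map,
			 unsigned int gain)
{
	/* bits [6:4] of hypothetical register 0x12 */
	struct reg_field gain_field = REG_FIELD(0x12, 4, 6);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, gain_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* the value is shifted into bits 6:4 by the field helpers */
	return regmap_field_write(field, gain);
}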
/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
*
* @regmap: regmap bank in which this register field is located.
* @rm_field: regmap register fields within the bank.
* @reg_field: Register fields within the bank.
* @num_fields: Number of register fields.
*
 * The return value will be -ENOMEM on error or zero on success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
*/
int regmap_field_bulk_alloc(struct regmap *regmap,
struct regmap_field **rm_field,
const struct reg_field *reg_field,
int num_fields)
{
struct regmap_field *rf;
int i;
rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
if (!rf)
return -ENOMEM;
for (i = 0; i < num_fields; i++) {
regmap_field_init(&rf[i], regmap, reg_field[i]);
rm_field[i] = &rf[i];
}
return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
*
* @dev: Device that will be interacted with
* @regmap: regmap bank in which this register field is located.
* @rm_field: regmap register fields within the bank.
* @reg_field: Register fields within the bank.
* @num_fields: Number of register fields.
*
 * The return value will be -ENOMEM on error or zero on success.
* Newly allocated regmap_fields will be automatically freed by the
* device management code.
*/
int devm_regmap_field_bulk_alloc(struct device *dev,
struct regmap *regmap,
struct regmap_field **rm_field,
const struct reg_field *reg_field,
int num_fields)
{
struct regmap_field *rf;
int i;
rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
if (!rf)
return -ENOMEM;
for (i = 0; i < num_fields; i++) {
regmap_field_init(&rf[i], regmap, reg_field[i]);
rm_field[i] = &rf[i];
}
return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
*
* @field: regmap fields which should be freed.
*/
void regmap_field_bulk_free(struct regmap_field *field)
{
kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
/**
 * devm_regmap_field_bulk_free() - Free register fields allocated using
 * devm_regmap_field_bulk_alloc().
*
* @dev: Device that will be interacted with
* @field: regmap field which should be freed.
*
* Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
* drivers need not call this function, as the memory allocated via devm
* will be freed as per device-driver life-cycle.
*/
void devm_regmap_field_bulk_free(struct device *dev,
struct regmap_field *field)
{
devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
/**
* devm_regmap_field_free() - Free a register field allocated using
* devm_regmap_field_alloc.
*
* @dev: Device that will be interacted with
* @field: regmap field which should be freed.
*
* Free register field allocated using devm_regmap_field_alloc(). Usually
* drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
*/
void devm_regmap_field_free(struct device *dev,
struct regmap_field *field)
{
devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
/**
* regmap_field_alloc() - Allocate and initialise a register field.
*
* @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
*
* The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed with
 * regmap_field_free() once the user has finished working with it.
*/
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
struct reg_field reg_field)
{
struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
if (!rm_field)
return ERR_PTR(-ENOMEM);
regmap_field_init(rm_field, regmap, reg_field);
return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);
/**
* regmap_field_free() - Free register field allocated using
* regmap_field_alloc.
*
* @field: regmap field which should be freed.
*/
void regmap_field_free(struct regmap_field *field)
{
kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
/**
* regmap_reinit_cache() - Reinitialise the current register cache
*
* @map: Register map to operate on.
* @config: New configuration. Only the cache data will be used.
*
* Discard any existing register cache for the map and initialize a
* new cache. This can be used to restore the cache to defaults or to
* update the cache configuration to reflect runtime discovery of the
* hardware.
*
* No explicit locking is done here, the user needs to ensure that
* this function will not race with other calls to regmap.
*/
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
int ret;
regcache_exit(map);
regmap_debugfs_exit(map);
map->max_register = config->max_register;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->writeable_noinc_reg = config->writeable_noinc_reg;
map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
ret = regmap_set_name(map, config);
if (ret)
return ret;
regmap_debugfs_init(map);
map->cache_bypass = false;
map->cache_only = false;
return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
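/*
 * Illustrative sketch: adopting a register layout discovered at
 * runtime, per the kernel-doc above.  The notion of a "v2" config is
 * hypothetical, and the caller must ensure no concurrent regmap
 * access while the cache is rebuilt.
 */
static int demo_switch_to_v2_layout(struct regmap *map,
				    const struct regmap_config *v2_config)
{
	return regmap_reinit_cache(map, v2_config);
}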
/**
* regmap_exit() - Free a previously allocated register map
*
* @map: Register map to operate on.
*/
void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
while (!list_empty(&map->async_free)) {
async = list_first_entry_or_null(&map->async_free,
struct regmap_async,
list);
list_del(&async->list);
kfree(async->work_buf);
kfree(async);
}
if (map->hwlock)
hwspin_lock_free(map->hwlock);
if (map->lock == regmap_lock_mutex)
mutex_destroy(&map->mutex);
kfree_const(map->name);
kfree(map->patch);
if (map->bus && map->bus->free_on_exit)
kfree(map->bus);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
struct regmap **r = res;
if (!r || !*r) {
WARN_ON(!r || !*r);
return 0;
}
	/* If the user didn't specify a name, match any */
if (data)
return !strcmp((*r)->name, data);
else
return 1;
}
/**
* dev_get_regmap() - Obtain the regmap (if any) for a device
*
* @dev: Device to retrieve the map for
* @name: Optional name for the register map, usually NULL.
*
* Returns the regmap for the device if one is present, or NULL. If
* name is specified then it must match the name specified when
 * registering the device; if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * so generic code should normally not need to specify a name.
*/
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
struct regmap **r = devres_find(dev, dev_get_regmap_release,
dev_get_regmap_match, (void *)name);
if (!r)
return NULL;
return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
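/*
 * Illustrative sketch: an MFD child function retrieving the regmap
 * registered by its parent device.  Passing NULL matches the first
 * (typically only) map.
 */
static struct regmap *demo_get_parent_regmap(struct device *child)
{
	return dev_get_regmap(child->parent, NULL);
}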
/**
* regmap_get_device() - Obtain the device from a regmap
*
* @map: Register map to operate on.
*
* Returns the underlying device that the regmap has been created for.
*/
struct device *regmap_get_device(struct regmap *map)
{
return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
struct regmap_range_node *range,
unsigned int val_num)
{
void *orig_work_buf;
unsigned int win_offset;
unsigned int win_page;
bool page_chg;
int ret;
win_offset = (*reg - range->range_min) % range->window_len;
win_page = (*reg - range->range_min) / range->window_len;
if (val_num > 1) {
/* Bulk write shouldn't cross range boundary */
if (*reg + val_num - 1 > range->range_max)
return -EINVAL;
/* ... or single page boundary */
if (val_num > range->window_len - win_offset)
return -EINVAL;
}
	/* It is possible to have the selector register inside the data
	   window. In that case the selector register appears on every
	   page and needs no page switching when accessed alone. */
if (val_num > 1 ||
range->window_start + win_offset != range->selector_reg) {
/* Use separate work_buf during page switching */
orig_work_buf = map->work_buf;
map->work_buf = map->selector_work_buf;
ret = _regmap_update_bits(map, range->selector_reg,
range->selector_mask,
win_page << range->selector_shift,
&page_chg, false);
map->work_buf = orig_work_buf;
if (ret != 0)
return ret;
}
*reg = range->window_start + win_offset;
return 0;
}
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
unsigned long mask)
{
u8 *buf;
int i;
if (!mask || !map->work_buf)
return;
buf = map->work_buf;
for (i = 0; i < max_bytes; i++)
buf[i] |= (mask >> (8 * i)) & 0xff;
}
static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
reg += map->reg_base;
if (map->format.reg_shift > 0)
reg >>= map->format.reg_shift;
else if (map->format.reg_shift < 0)
reg <<= -(map->format.reg_shift);
return reg;
}
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
const void *val, size_t val_len, bool noinc)
{
struct regmap_range_node *range;
unsigned long flags;
void *work_val = map->work_buf + map->format.reg_bytes +
map->format.pad_bytes;
void *buf;
int ret = -ENOTSUPP;
size_t len;
int i;
/* Check for unwritable or noinc registers in range
* before we start
*/
if (!regmap_writeable_noinc(map, reg)) {
for (i = 0; i < val_len / map->format.val_bytes; i++) {
unsigned int element =
reg + regmap_get_offset(map, i);
if (!regmap_writeable(map, element) ||
regmap_writeable_noinc(map, element))
return -EINVAL;
}
}
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map,
reg + regmap_get_offset(map, i),
ival);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
reg + regmap_get_offset(map, i), ret);
return ret;
}
}
if (map->cache_only) {
map->cache_dirty = true;
return 0;
}
}
range = _regmap_range_lookup(map, reg);
if (range) {
int val_num = val_len / map->format.val_bytes;
int win_offset = (reg - range->range_min) % range->window_len;
int win_residue = range->window_len - win_offset;
/* If the write goes beyond the end of the window split it */
while (val_num > win_residue) {
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write_impl(map, reg, val,
win_residue *
map->format.val_bytes, noinc);
if (ret != 0)
return ret;
reg += win_residue;
val_num -= win_residue;
val += win_residue * map->format.val_bytes;
val_len -= win_residue * map->format.val_bytes;
win_offset = (reg - range->range_min) %
range->window_len;
win_residue = range->window_len - win_offset;
}
		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
if (ret != 0)
return ret;
}
reg = regmap_reg_addr(map, reg);
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
/*
* Essentially all I/O mechanisms will be faster with a single
* buffer to write. Since register syncs often generate raw
	 * writes of single registers, optimise that case.
*/
if (val != work_val && val_len == map->format.val_bytes) {
memcpy(work_val, val, map->format.val_bytes);
val = work_val;
}
if (map->async && map->bus && map->bus->async_write) {
struct regmap_async *async;
trace_regmap_async_write_start(map, reg, val_len);
spin_lock_irqsave(&map->async_lock, flags);
async = list_first_entry_or_null(&map->async_free,
struct regmap_async,
list);
if (async)
list_del(&async->list);
spin_unlock_irqrestore(&map->async_lock, flags);
if (!async) {
async = map->bus->async_alloc();
if (!async)
return -ENOMEM;
async->work_buf = kzalloc(map->format.buf_size,
GFP_KERNEL | GFP_DMA);
if (!async->work_buf) {
kfree(async);
return -ENOMEM;
}
}
async->map = map;
/* If the caller supplied the value we can use it safely. */
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
map->format.reg_bytes + map->format.val_bytes);
spin_lock_irqsave(&map->async_lock, flags);
list_add_tail(&async->list, &map->async_list);
spin_unlock_irqrestore(&map->async_lock, flags);
if (val != work_val)
ret = map->bus->async_write(map->bus_context,
async->work_buf,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len, async);
else
ret = map->bus->async_write(map->bus_context,
async->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
val_len, NULL, 0, async);
if (ret != 0) {
dev_err(map->dev, "Failed to schedule write: %d\n",
ret);
spin_lock_irqsave(&map->async_lock, flags);
list_move(&async->list, &map->async_free);
spin_unlock_irqrestore(&map->async_lock, flags);
}
return ret;
}
trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
/* If we're doing a single register write we can probably just
	 * send the work_buf directly; otherwise try to do a gather
* write.
*/
if (val == work_val)
ret = map->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
val_len);
else if (map->bus && map->bus->gather_write)
ret = map->bus->gather_write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len);
else
ret = -ENOTSUPP;
	/* If that didn't work, fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
len = map->format.reg_bytes + map->format.pad_bytes + val_len;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, map->work_buf, map->format.reg_bytes);
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
ret = map->write(map->bus_context, buf, len);
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes a lock that we already hold,
		 * thus call map->cache_ops->drop() directly
*/
if (map->cache_ops && map->cache_ops->drop)
map->cache_ops->drop(map, reg, reg + 1);
}
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
/**
* regmap_can_raw_write - Test if regmap_raw_write() is supported
*
* @map: Map to check.
*/
bool regmap_can_raw_write(struct regmap *map)
{
return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
/**
* regmap_get_raw_read_max - Get the maximum size we can read
*
* @map: Map to check.
*/
size_t regmap_get_raw_read_max(struct regmap *map)
{
return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
*
* @map: Map to check.
*/
size_t regmap_get_raw_write_max(struct regmap *map)
{
return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
int ret;
struct regmap_range_node *range;
struct regmap *map = context;
WARN_ON(!map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
if (ret != 0)
return ret;
}
reg = regmap_reg_addr(map, reg);
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
trace_regmap_hw_write_done(map, reg, 1);
return ret;
}
static int _regmap_bus_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct regmap *map = context;
struct regmap_range_node *range;
int ret;
range = _regmap_range_lookup(map, reg);
if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
if (ret != 0)
return ret;
}
reg = regmap_reg_addr(map, reg);
return map->bus->reg_write(map->bus_context, reg, val);
}
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val)
{
struct regmap *map = context;
WARN_ON(!map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
return _regmap_raw_write_impl(map, reg,
map->work_buf +
map->format.reg_bytes +
map->format.pad_bytes,
map->format.val_bytes,
false);
}
static inline void *_regmap_map_get_context(struct regmap *map)
{
return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
}
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
int ret;
void *context = _regmap_map_get_context(map);
if (!regmap_writeable(map, reg))
return -EIO;
if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
if (map->cache_only) {
map->cache_dirty = true;
return 0;
}
}
ret = map->reg_write(context, reg, val);
if (ret == 0) {
if (regmap_should_log(map))
dev_info(map->dev, "%x <= %x\n", reg, val);
trace_regmap_reg_write(map, reg, val);
}
return ret;
}
/**
* regmap_write() - Write a value to a single register
*
* @map: Register map to write to
* @reg: Register to write to
* @val: Value to be written
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
ret = _regmap_write(map, reg, val);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
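/*
 * Illustrative sketch (not part of this file): a driver would typically
 * call regmap_write() like the fragment below. The map pointer and the
 * register/value pair are assumptions made up for the example:
 *
 *	ret = regmap_write(map, 0x04, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to write reg 0x04: %d\n", ret);
 */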
/**
* regmap_write_async() - Write a value to a single register asynchronously
*
* @map: Register map to write to
* @reg: Register to write to
* @val: Value to be written
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
map->async = true;
ret = _regmap_write(map, reg, val);
map->async = false;
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len, bool noinc)
{
size_t val_bytes = map->format.val_bytes;
size_t val_count = val_len / val_bytes;
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
int ret, i;
if (!val_count)
return -EINVAL;
if (map->use_single_write)
chunk_regs = 1;
else if (map->max_raw_write && val_len > map->max_raw_write)
chunk_regs = map->max_raw_write / val_bytes;
chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
/* Write as many whole chunks of chunk_bytes as possible */
for (i = 0; i < chunk_count; i++) {
ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
if (ret)
return ret;
reg += regmap_get_offset(map, chunk_regs);
val += chunk_bytes;
val_len -= chunk_bytes;
}
/* Write remaining bytes */
if (val_len)
ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
return ret;
}
/**
* regmap_raw_write() - Write raw values to one or more registers
*
* @map: Register map to write to
* @reg: Initial register to write to
* @val: Block of data to be written, laid out for direct transmission to the
* device
* @val_len: Length of data pointed to by val.
*
* This function is intended to be used for things like firmware
* download where a large block of data needs to be transferred to the
* device. No formatting will be done on the data provided.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
int ret;
if (!regmap_can_raw_write(map))
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
map->lock(map->lock_arg);
ret = _regmap_raw_write(map, reg, val, val_len, false);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
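/*
 * Illustrative sketch (not part of this file): the classic use of
 * regmap_raw_write() is a firmware download. FW_BASE_REG and the
 * firmware name are assumptions made up for the example; fw->data must
 * already be in the device's wire format and fw->size a multiple of the
 * value size:
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "dev-fw.bin", dev);
 *	if (ret)
 *		return ret;
 *	ret = regmap_raw_write(map, FW_BASE_REG, fw->data, fw->size);
 *	release_firmware(fw);
 */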
static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
void *val, unsigned int val_len, bool write)
{
size_t val_bytes = map->format.val_bytes;
size_t val_count = val_len / val_bytes;
unsigned int lastval;
u8 *u8p;
u16 *u16p;
u32 *u32p;
int ret;
int i;
switch (val_bytes) {
case 1:
u8p = val;
if (write)
lastval = (unsigned int)u8p[val_count - 1];
break;
case 2:
u16p = val;
if (write)
lastval = (unsigned int)u16p[val_count - 1];
break;
case 4:
u32p = val;
if (write)
lastval = (unsigned int)u32p[val_count - 1];
break;
default:
return -EINVAL;
}
/*
 * Update the cache with the last value we wrote; the rest has gone
 * down into the hardware FIFO. We can't cache FIFOs, so this makes
 * sure a single read from the cache will work.
*/
if (write) {
if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, lastval);
if (ret != 0)
return ret;
if (map->cache_only) {
map->cache_dirty = true;
return 0;
}
}
ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
} else {
ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
}
if (!ret && regmap_should_log(map)) {
dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
for (i = 0; i < val_count; i++) {
switch (val_bytes) {
case 1:
pr_cont("%x", u8p[i]);
break;
case 2:
pr_cont("%x", u16p[i]);
break;
case 4:
pr_cont("%x", u32p[i]);
break;
default:
break;
}
if (i == (val_count - 1))
pr_cont("]\n");
else
pr_cont(",");
}
}
return 0;
}
/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 * register number
*
* @map: Register map to write to
* @reg: Register to write to
* @val: Pointer to data buffer
 * @val_len: Length of the data pointed to by val, in bytes.
*
* The regmap API usually assumes that bulk bus write operations will write a
* range of registers. Some devices have certain registers for which a write
* operation can write to an internal FIFO.
*
* The target register must be volatile but registers after it can be
* completely unrelated cacheable registers.
*
* This will attempt multiple writes as required to write val_len bytes.
*
* A value of zero will be returned on success, a negative errno will be
* returned in error cases.
*/
int regmap_noinc_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
size_t write_len;
int ret;
if (!map->write && !(map->bus && map->bus->reg_noinc_write))
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (val_len == 0)
return -EINVAL;
map->lock(map->lock_arg);
if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
ret = -EINVAL;
goto out_unlock;
}
/*
* Use the accelerated operation if we can. The val drops the const
* typing in order to facilitate code reuse in regmap_noinc_readwrite().
*/
if (map->bus && map->bus->reg_noinc_write) {
ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
goto out_unlock;
}
while (val_len) {
if (map->max_raw_write && map->max_raw_write < val_len)
write_len = map->max_raw_write;
else
write_len = val_len;
ret = _regmap_raw_write(map, reg, val, write_len, true);
if (ret)
goto out_unlock;
val = ((u8 *)val) + write_len;
val_len -= write_len;
}
out_unlock:
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
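/*
 * Illustrative sketch (not part of this file): pushing a buffer into a
 * device FIFO exposed through a single register. FIFO_REG is an
 * assumption made up for the example and must be marked volatile and
 * writeable_noinc via the regmap_config callbacks:
 *
 *	ret = regmap_noinc_write(map, FIFO_REG, buf, len);
 */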
/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 * register field.
*
* @field: Register field to write to
* @mask: Bitmask to change
* @val: Value to be written
* @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         has not changed
 *
 * Perform a read/modify/write cycle on the register field with the change,
 * async and force options.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
{
mask = (mask << field->shift) & field->mask;
return regmap_update_bits_base(field->regmap, field->reg,
mask, val << field->shift,
change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
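/*
 * Illustrative sketch (not part of this file): updating a two-bit mode
 * field via the regmap_field helpers. The register layout is an
 * assumption made up for the example; the mask and value are relative
 * to the field, the shifting is done internally:
 *
 *	static const struct reg_field mode_field = REG_FIELD(0x10, 2, 3);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, map, mode_field);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	ret = regmap_field_update_bits_base(f, 0x3, 0x2, NULL, false, false);
 */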
/**
* regmap_field_test_bits() - Check if all specified bits are set in a
* register field.
*
* @field: Register field to operate on
* @bits: Bits to test
*
 * Returns a negative error number if the underlying regmap_field_read()
 * fails, 0 if at least one of the tested bits is not set and 1 if all
 * tested bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val;
	int ret;
ret = regmap_field_read(field, &val);
if (ret)
return ret;
return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);
/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 * register field with port ID
*
* @field: Register field to write to
* @id: port ID
* @mask: Bitmask to change
* @val: Value to be written
* @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         has not changed
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
{
if (id >= field->id_size)
return -EINVAL;
mask = (mask << field->shift) & field->mask;
return regmap_update_bits_base(field->regmap,
field->reg + (field->id_offset * id),
mask, val << field->shift,
change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
/**
* regmap_bulk_write() - Write multiple registers to the device
*
* @map: Register map to write to
 * @reg: First register to be written to
* @val: Block of data to be written, in native register size for device
* @val_count: Number of registers to write
*
* This function is intended to be used for writing a large block of
 * data to the device, either in a single transfer or in multiple transfers.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count)
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
/*
* Some devices don't support bulk write, for them we have a series of
* single write operations.
*/
if (!map->write || !map->format.parse_inplace) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
switch (val_bytes) {
case 1:
ival = *(u8 *)(val + (i * val_bytes));
break;
case 2:
ival = *(u16 *)(val + (i * val_bytes));
break;
case 4:
ival = *(u32 *)(val + (i * val_bytes));
break;
default:
ret = -EINVAL;
goto out;
}
ret = _regmap_write(map,
reg + regmap_get_offset(map, i),
ival);
if (ret != 0)
goto out;
}
out:
map->unlock(map->lock_arg);
} else {
void *wval;
wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
if (!wval)
return -ENOMEM;
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
kfree(wval);
}
if (!ret)
trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
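/*
 * Illustrative sketch (not part of this file): writing four consecutive
 * 16-bit coefficient registers in one call. The values are passed in
 * native CPU format and regmap does the wire formatting; COEF_BASE is
 * an assumption made up for the example:
 *
 *	u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, COEF_BASE, coefs, ARRAY_SIZE(coefs));
 */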
/*
* _regmap_raw_multi_reg_write()
*
* the (register,newvalue) pairs in regs have not been formatted, but
* they are all in the same page and have been changed to being page
* relative. The page register has been written if that was necessary.
*/
static int _regmap_raw_multi_reg_write(struct regmap *map,
const struct reg_sequence *regs,
size_t num_regs)
{
int ret;
void *buf;
int i;
u8 *u8;
size_t val_bytes = map->format.val_bytes;
size_t reg_bytes = map->format.reg_bytes;
size_t pad_bytes = map->format.pad_bytes;
size_t pair_size = reg_bytes + pad_bytes + val_bytes;
size_t len = pair_size * num_regs;
if (!len)
return -EINVAL;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* We have to linearise by hand. */
u8 = buf;
for (i = 0; i < num_regs; i++) {
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
reg = regmap_reg_addr(map, reg);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
u8 += val_bytes;
}
u8 = buf;
*u8 |= map->write_flag_mask;
ret = map->write(map->bus_context, buf, len);
kfree(buf);
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
trace_regmap_hw_write_done(map, reg, 1);
}
return ret;
}
static unsigned int _regmap_register_page(struct regmap *map,
unsigned int reg,
struct regmap_range_node *range)
{
unsigned int win_page = (reg - range->range_min) / range->window_len;
return win_page;
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
struct reg_sequence *regs,
size_t num_regs)
{
int ret;
int i, n;
struct reg_sequence *base;
unsigned int this_page = 0;
unsigned int page_change = 0;
/*
 * The set of registers is not necessarily in order, but
 * since the order of writes must be preserved this algorithm
* chops the set each time the page changes. This also applies
* if there is a delay required at any point in the sequence.
*/
base = regs;
for (i = 0, n = 0; i < num_regs; i++, n++) {
unsigned int reg = regs[i].reg;
struct regmap_range_node *range;
range = _regmap_range_lookup(map, reg);
if (range) {
unsigned int win_page = _regmap_register_page(map, reg,
range);
if (i == 0)
this_page = win_page;
if (win_page != this_page) {
this_page = win_page;
page_change = 1;
}
}
/* If we have both a page change and a delay make sure to
* write the regs and apply the delay before we change the
* page.
*/
if (page_change || regs[i].delay_us) {
/* For situations where the first write requires
* a delay we need to make sure we don't call
 * raw_multi_reg_write with n=0.
* This can't occur with page breaks as we
* never write on the first iteration
*/
if (regs[i].delay_us && i == 0)
n = 1;
ret = _regmap_raw_multi_reg_write(map, base, n);
if (ret != 0)
return ret;
if (regs[i].delay_us) {
if (map->can_sleep)
fsleep(regs[i].delay_us);
else
udelay(regs[i].delay_us);
}
base += n;
n = 0;
if (page_change) {
ret = _regmap_select_page(map,
&base[n].reg,
range, 1);
if (ret != 0)
return ret;
page_change = 0;
}
}
}
if (n > 0)
return _regmap_raw_multi_reg_write(map, base, n);
return 0;
}
static int _regmap_multi_reg_write(struct regmap *map,
const struct reg_sequence *regs,
size_t num_regs)
{
int i;
int ret;
if (!map->can_multi_write) {
for (i = 0; i < num_regs; i++) {
ret = _regmap_write(map, regs[i].reg, regs[i].def);
if (ret != 0)
return ret;
if (regs[i].delay_us) {
if (map->can_sleep)
fsleep(regs[i].delay_us);
else
udelay(regs[i].delay_us);
}
}
return 0;
}
if (!map->format.parse_inplace)
return -EINVAL;
if (map->writeable_reg)
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
if (!map->writeable_reg(map->dev, reg))
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
}
if (!map->cache_bypass) {
for (i = 0; i < num_regs; i++) {
unsigned int val = regs[i].def;
unsigned int reg = regs[i].reg;
ret = regcache_write(map, reg, val);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
reg, ret);
return ret;
}
}
if (map->cache_only) {
map->cache_dirty = true;
return 0;
}
}
WARN_ON(!map->bus);
for (i = 0; i < num_regs; i++) {
unsigned int reg = regs[i].reg;
struct regmap_range_node *range;
/* Coalesce all the writes between a page break or a delay
* in a sequence
*/
range = _regmap_range_lookup(map, reg);
if (range || regs[i].delay_us) {
size_t len = sizeof(struct reg_sequence)*num_regs;
struct reg_sequence *base = kmemdup(regs, len,
GFP_KERNEL);
if (!base)
return -ENOMEM;
ret = _regmap_range_multi_paged_reg_write(map, base,
num_regs);
kfree(base);
return ret;
}
}
return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
/**
* regmap_multi_reg_write() - Write multiple registers to the device
*
* @map: Register map to write to
* @regs: Array of structures containing register,value to be written
* @num_regs: Number of registers to write
*
* Write multiple registers to the device where the set of register, value
* pairs are supplied in any order, possibly not all in a single range.
*
 * The 'normal' block write mode will ultimately send data on the
* target bus as R,V1,V2,V3,..,Vn where successively higher registers are
* addressed. However, this alternative block multi write mode will send
* the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
* must of course support the mode.
*
* A value of zero will be returned on success, a negative errno will be
* returned in error cases.
*/
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
int ret;
map->lock(map->lock_arg);
ret = _regmap_multi_reg_write(map, regs, num_regs);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
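/*
 * Illustrative sketch (not part of this file): an init sequence of
 * scattered register/value pairs, with an optional delay expressed via
 * the third struct field (delay_us). The addresses and values are
 * assumptions made up for the example:
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ 0x30, 0x01 },
 *		{ 0x02, 0xff, 10 },
 *		{ 0x41, 0x80 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
 */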
/**
* regmap_multi_reg_write_bypassed() - Write multiple registers to the
* device but not the cache
*
* @map: Register map to write to
* @regs: Array of structures containing register,value to be written
* @num_regs: Number of registers to write
*
 * Write multiple registers to the device but not the cache, where the set
 * of registers is supplied in any order.
*
* This function is intended to be used for writing a large block of data
* atomically to the device in single transfer for those I2C client devices
* that implement this alternative block write mode.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_multi_reg_write_bypassed(struct regmap *map,
const struct reg_sequence *regs,
int num_regs)
{
int ret;
bool bypass;
map->lock(map->lock_arg);
bypass = map->cache_bypass;
map->cache_bypass = true;
ret = _regmap_multi_reg_write(map, regs, num_regs);
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
* regmap_raw_write_async() - Write raw values to one or more registers
* asynchronously
*
* @map: Register map to write to
* @reg: Initial register to write to
* @val: Block of data to be written, laid out for direct transmission to the
* device. Must be valid until regmap_async_complete() is called.
* @val_len: Length of data pointed to by val.
*
* This function is intended to be used for things like firmware
* download where a large block of data needs to be transferred to the
* device. No formatting will be done on the data provided.
*
* If supported by the underlying bus the write will be scheduled
* asynchronously, helping maximise I/O speed on higher speed buses
* like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
int ret;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
map->async = true;
ret = _regmap_raw_write(map, reg, val, val_len, false);
map->async = false;
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
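/*
 * Illustrative sketch (not part of this file): streaming a large DSP
 * image asynchronously and then draining the queue. The buffer must
 * stay valid until regmap_async_complete() returns; DSP_MEM_BASE and
 * the image variables are assumptions made up for the example:
 *
 *	ret = regmap_raw_write_async(map, DSP_MEM_BASE, img, img_len);
 *	if (ret)
 *		return ret;
 *	ret = regmap_async_complete(map);
 */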
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len, bool noinc)
{
struct regmap_range_node *range;
int ret;
if (!map->read)
return -EINVAL;
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
noinc ? 1 : val_len / map->format.val_bytes);
if (ret != 0)
return ret;
}
reg = regmap_reg_addr(map, reg);
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct regmap *map = context;
struct regmap_range_node *range;
int ret;
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range, 1);
if (ret != 0)
return ret;
}
reg = regmap_reg_addr(map, reg);
return map->bus->reg_read(map->bus_context, reg, val);
}
static int _regmap_bus_read(void *context, unsigned int reg,
unsigned int *val)
{
int ret;
struct regmap *map = context;
void *work_val = map->work_buf + map->format.reg_bytes +
map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
if (ret == 0)
*val = map->format.parse_val(work_val);
return ret;
}
static int _regmap_read(struct regmap *map, unsigned int reg,
unsigned int *val)
{
int ret;
void *context = _regmap_map_get_context(map);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
return 0;
}
if (map->cache_only)
return -EBUSY;
if (!regmap_readable(map, reg))
return -EIO;
ret = map->reg_read(context, reg, val);
if (ret == 0) {
if (regmap_should_log(map))
dev_info(map->dev, "%x => %x\n", reg, *val);
trace_regmap_reg_read(map, reg, *val);
if (!map->cache_bypass)
regcache_write(map, reg, *val);
}
return ret;
}
/**
* regmap_read() - Read a value from a single register
*
* @map: Register map to read from
* @reg: Register to be read from
* @val: Pointer to store read value
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
int ret;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
ret = _regmap_read(map, reg, val);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
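/*
 * Illustrative sketch (not part of this file): polling a status
 * register; the address and ready bit are assumptions made up for the
 * example:
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, 0x08, &status);
 *	if (ret)
 *		return ret;
 *	if (status & BIT(0))
 *		dev_dbg(dev, "device ready\n");
 */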
/**
* regmap_raw_read() - Read raw data from the device
*
* @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value
* @val_len: Size of data to read
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
size_t val_bytes = map->format.val_bytes;
size_t val_count = val_len / val_bytes;
unsigned int v;
int ret, i;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (val_count == 0)
return -EINVAL;
map->lock(map->lock_arg);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
if (!map->cache_bypass && map->cache_only) {
ret = -EBUSY;
goto out;
}
if (!map->read) {
ret = -ENOTSUPP;
goto out;
}
if (map->use_single_read)
chunk_regs = 1;
else if (map->max_raw_read && val_len > map->max_raw_read)
chunk_regs = map->max_raw_read / val_bytes;
chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
/* Read bytes that fit into whole chunks */
for (i = 0; i < chunk_count; i++) {
ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
if (ret != 0)
goto out;
reg += regmap_get_offset(map, chunk_regs);
val += chunk_bytes;
val_len -= chunk_bytes;
}
/* Read remaining bytes */
if (val_len) {
ret = _regmap_raw_read(map, reg, val, val_len, false);
if (ret != 0)
goto out;
}
} else {
/* Otherwise go word by word for the cache; should be low
* cost as we expect to hit the cache.
*/
for (i = 0; i < val_count; i++) {
ret = _regmap_read(map, reg + regmap_get_offset(map, i),
&v);
if (ret != 0)
goto out;
map->format.format_val(val + (i * val_bytes), v, 0);
}
}
out:
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
* register number
*
* @map: Register map to read from
* @reg: Register to read from
* @val: Pointer to data buffer
* @val_len: Length of output buffer in bytes.
*
* The regmap API usually assumes that bulk read operations will read a
* range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
*
* The target register must be volatile but registers after it can be
* completely unrelated cacheable registers.
*
* This will attempt multiple reads as required to read val_len bytes.
*
* A value of zero will be returned on success, a negative errno will be
* returned in error cases.
*/
int regmap_noinc_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len)
{
size_t read_len;
int ret;
if (!map->read)
return -ENOTSUPP;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (val_len == 0)
return -EINVAL;
map->lock(map->lock_arg);
if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
ret = -EINVAL;
goto out_unlock;
}
/*
* We have not defined the FIFO semantics for cache, as the
* cache is just one value deep. Should we return the last
* written value? Just avoid this by always reading the FIFO
 * even when using the cache. Cache-only mode will not work.
*/
if (!map->cache_bypass && map->cache_only) {
ret = -EBUSY;
goto out_unlock;
}
/* Use the accelerated operation if we can */
if (map->bus && map->bus->reg_noinc_read) {
ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
goto out_unlock;
}
while (val_len) {
if (map->max_raw_read && map->max_raw_read < val_len)
read_len = map->max_raw_read;
else
read_len = val_len;
ret = _regmap_raw_read(map, reg, val, read_len, true);
if (ret)
goto out_unlock;
val = ((u8 *)val) + read_len;
val_len -= read_len;
}
out_unlock:
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
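/*
 * Illustrative sketch (not part of this file): draining a device FIFO,
 * here sized from a level register first. FIFO_LEVEL_REG, FIFO_REG and
 * the two-byte sample size are assumptions made up for the example;
 * FIFO_REG must be volatile and readable_noinc:
 *
 *	unsigned int avail;
 *
 *	ret = regmap_read(map, FIFO_LEVEL_REG, &avail);
 *	if (ret)
 *		return ret;
 *	ret = regmap_noinc_read(map, FIFO_REG, buf, avail * 2);
 */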
/**
 * regmap_field_read() - Read a value from a single register field
*
* @field: Register field to read from
* @val: Pointer to store read value
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
int ret;
unsigned int reg_val;
ret = regmap_read(field->regmap, field->reg, &reg_val);
if (ret != 0)
return ret;
reg_val &= field->mask;
reg_val >>= field->shift;
*val = reg_val;
return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
/**
 * regmap_fields_read() - Read a value from a single register field with port ID
*
* @field: Register field to read from
* @id: port ID
* @val: Pointer to store read value
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val)
{
int ret;
unsigned int reg_val;
if (id >= field->id_size)
return -EINVAL;
ret = regmap_read(field->regmap,
field->reg + (field->id_offset * id),
&reg_val);
if (ret != 0)
return ret;
reg_val &= field->mask;
reg_val >>= field->shift;
*val = reg_val;
return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
* regmap_bulk_read() - Read multiple registers from the device
*
* @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value, in native register size for device
* @val_count: Number of registers to read
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count)
{
int ret, i;
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (val_count == 0)
return -EINVAL;
if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
return ret;
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
u32 *u32 = val;
u16 *u16 = val;
u8 *u8 = val;
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
ret = _regmap_read(map, reg + regmap_get_offset(map, i),
&ival);
if (ret != 0)
goto out;
switch (map->format.val_bytes) {
case 4:
u32[i] = ival;
break;
case 2:
u16[i] = ival;
break;
case 1:
u8[i] = ival;
break;
default:
ret = -EINVAL;
goto out;
}
}
out:
map->unlock(map->lock_arg);
}
if (!ret)
trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write)
{
int ret;
unsigned int tmp, orig;
if (change)
*change = false;
if (regmap_volatile(map, reg) && map->reg_update_bits) {
reg = regmap_reg_addr(map, reg);
ret = map->reg_update_bits(map->bus_context, reg, mask, val);
if (ret == 0 && change)
*change = true;
} else {
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
return ret;
tmp = orig & ~mask;
tmp |= val & mask;
if (force_write || (tmp != orig) || map->force_write_field) {
ret = _regmap_write(map, reg, tmp);
if (ret == 0 && change)
*change = true;
}
}
return ret;
}
/**
* regmap_update_bits_base() - Perform a read/modify/write cycle on a register
*
* @map: Register map to update
* @reg: Register to update
* @mask: Bitmask to change
* @val: New value for bitmask
* @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *         has not changed
*
* Perform a read/modify/write cycle on a register map with change, async, force
* options.
*
* If async is true:
*
* With most buses the read must be done synchronously so this is most useful
* for devices with a cache which do not need to interact with the hardware to
* determine the current register value.
*
* Returns zero for success, a negative number on error.
*/
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
{
int ret;
map->lock(map->lock_arg);
map->async = async;
ret = _regmap_update_bits(map, reg, mask, val, change, force);
map->async = false;
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
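/*
 * Illustrative sketch (not part of this file): most callers use the
 * regmap_update_bits() wrapper from <linux/regmap.h>, which passes
 * change = NULL, async = false and force = false. The register and bit
 * are assumptions made up for the example (set bit 3 of reg 0x20
 * without touching the other bits):
 *
 *	ret = regmap_update_bits(map, 0x20, BIT(3), BIT(3));
 */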
/**
* regmap_test_bits() - Check if all specified bits are set in a register.
*
* @map: Register map to operate on
* @reg: Register to read from
* @bits: Bits to test
*
* Returns 0 if at least one of the tested bits is not set, 1 if all tested
* bits are set and a negative error number if the underlying regmap_read()
* fails.
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;
ret = regmap_read(map, reg, &val);
if (ret)
return ret;
return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
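/*
 * Illustrative sketch (not part of this file): the three-way return
 * value needs an explicit error check before the boolean test. The
 * register and bits are assumptions made up for the example:
 *
 *	ret = regmap_test_bits(map, 0x0c, BIT(1) | BIT(2));
 *	if (ret < 0)
 *		return ret;
 *	if (ret)
 *		dev_dbg(dev, "both ready bits are set\n");
 */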
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
bool wake;
trace_regmap_async_io_complete(map);
spin_lock(&map->async_lock);
list_move(&async->list, &map->async_free);
wake = list_empty(&map->async_list);
if (ret != 0)
map->async_ret = ret;
spin_unlock(&map->async_lock);
if (wake)
wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
static int regmap_async_is_done(struct regmap *map)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&map->async_lock, flags);
ret = list_empty(&map->async_list);
spin_unlock_irqrestore(&map->async_lock, flags);
return ret;
}
/**
* regmap_async_complete - Ensure all asynchronous I/O has completed.
*
* @map: Map to operate on.
*
* Blocks until any pending asynchronous I/O has completed. Returns
* an error code for any failed I/O operations.
*/
int regmap_async_complete(struct regmap *map)
{
unsigned long flags;
int ret;
/* Nothing to do with no async support */
if (!map->bus || !map->bus->async_write)
return 0;
trace_regmap_async_complete_start(map);
wait_event(map->async_waitq, regmap_async_is_done(map));
spin_lock_irqsave(&map->async_lock, flags);
ret = map->async_ret;
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
trace_regmap_async_complete_done(map);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
/**
* regmap_register_patch - Register and apply register updates to be applied
 * on device initialisation
*
* @map: Register map to apply updates to.
* @regs: Values to update.
* @num_regs: Number of entries in regs.
*
* Register a set of register updates to be applied to the device
* whenever the device registers are synchronised with the cache and
* apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such
* as the updates some vendors provide to undocumented registers.
*
* The caller must ensure that this function cannot be called
* concurrently with either itself or regcache_sync().
*/
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
struct reg_sequence *p;
int ret;
bool bypass;
if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
num_regs))
return 0;
p = krealloc(map->patch,
sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
GFP_KERNEL);
if (p) {
memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
map->patch = p;
map->patch_regs += num_regs;
} else {
return -ENOMEM;
}
map->lock(map->lock_arg);
bypass = map->cache_bypass;
map->cache_bypass = true;
map->async = true;
ret = _regmap_multi_reg_write(map, regs, num_regs);
map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
regmap_async_complete(map);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
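/*
 * Illustrative sketch (not part of this file): registering a vendor
 * erratum fix-up so it is applied once at probe and re-applied by every
 * regcache_sync(). The undocumented registers are assumptions made up
 * for the example:
 *
 *	static const struct reg_sequence errata[] = {
 *		{ 0xf5, 0x01 },
 *		{ 0xf6, 0x80 },
 *	};
 *
 *	ret = regmap_register_patch(map, errata, ARRAY_SIZE(errata));
 */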
/**
* regmap_get_val_bytes() - Report the size of a register value
*
* @map: Register map to operate on.
*
 * Report the size of a register value, mainly intended for use by
* generic infrastructure built on top of regmap.
*/
int regmap_get_val_bytes(struct regmap *map)
{
if (map->format.format_write)
return -EINVAL;
return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
/**
* regmap_get_max_register() - Report the max register value
*
* @map: Register map to operate on.
*
 * Report the max register value, mainly intended for use by
* generic infrastructure built on top of regmap.
*/
int regmap_get_max_register(struct regmap *map)
{
return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
/**
* regmap_get_reg_stride() - Report the register address stride
*
* @map: Register map to operate on.
*
 * Report the register address stride, mainly intended for use by
* generic infrastructure built on top of regmap.
*/
int regmap_get_reg_stride(struct regmap *map)
{
return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
/**
* regmap_might_sleep() - Returns whether a regmap access might sleep.
*
* @map: Register map to operate on.
*
* Returns true if an access to the register might sleep, else false.
*/
bool regmap_might_sleep(struct regmap *map)
{
return map->can_sleep;
}
EXPORT_SYMBOL_GPL(regmap_might_sleep);
int regmap_parse_val(struct regmap *map, const void *buf,
unsigned int *val)
{
if (!map->format.parse_val)
return -EINVAL;
*val = map->format.parse_val(buf);
return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
return 0;
}
postcore_initcall(regmap_initcall);
| linux-master | drivers/base/regmap/regmap.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - Memory region with raw access
//
// This is intended for testing only
//
// Copyright (c) 2023, Arm Ltd
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include "internal.h"
static unsigned int decode_reg(enum regmap_endian endian, const void *reg)
{
const u16 *r = reg;
if (endian == REGMAP_ENDIAN_BIG)
return be16_to_cpu(*r);
else
return le16_to_cpu(*r);
}
static int regmap_raw_ram_gather_write(void *context,
const void *reg, size_t reg_len,
const void *val, size_t val_len)
{
struct regmap_ram_data *data = context;
unsigned int r;
u16 *our_buf = (u16 *)data->vals;
int i;
if (reg_len != 2)
return -EINVAL;
if (val_len % 2)
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
memcpy(&our_buf[r], val, val_len);
for (i = 0; i < val_len / 2; i++)
data->written[r + i] = true;
return 0;
}
static int regmap_raw_ram_write(void *context, const void *data, size_t count)
{
return regmap_raw_ram_gather_write(context, data, 2,
data + 2, count - 2);
}
static int regmap_raw_ram_read(void *context,
const void *reg, size_t reg_len,
void *val, size_t val_len)
{
struct regmap_ram_data *data = context;
unsigned int r;
u16 *our_buf = (u16 *)data->vals;
int i;
if (reg_len != 2)
return -EINVAL;
if (val_len % 2)
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
memcpy(val, &our_buf[r], val_len);
for (i = 0; i < val_len / 2; i++)
data->read[r + i] = true;
return 0;
}
static void regmap_raw_ram_free_context(void *context)
{
struct regmap_ram_data *data = context;
kfree(data->vals);
kfree(data->read);
kfree(data->written);
kfree(data);
}
static const struct regmap_bus regmap_raw_ram = {
.fast_io = true,
.write = regmap_raw_ram_write,
.gather_write = regmap_raw_ram_gather_write,
.read = regmap_raw_ram_read,
.free_context = regmap_raw_ram_free_context,
};
struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap *map;
if (config->reg_bits != 16)
return ERR_PTR(-EINVAL);
if (!config->max_register) {
pr_crit("No max_register specified for RAM regmap\n");
return ERR_PTR(-EINVAL);
}
data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
data->reg_endian = config->reg_format_endian;
map = __regmap_init(NULL, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_raw_ram);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-raw-ram.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - FSI support
//
// Copyright 2022 IBM Corp
//
// Author: Eddie James <[email protected]>
#include <linux/fsi.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include "internal.h"
static int regmap_fsi32_reg_read(void *context, unsigned int reg, unsigned int *val)
{
u32 v;
int ret;
ret = fsi_slave_read(context, reg, &v, sizeof(v));
if (ret)
return ret;
*val = v;
return 0;
}
static int regmap_fsi32_reg_write(void *context, unsigned int reg, unsigned int val)
{
u32 v = val;
return fsi_slave_write(context, reg, &v, sizeof(v));
}
static const struct regmap_bus regmap_fsi32 = {
.reg_write = regmap_fsi32_reg_write,
.reg_read = regmap_fsi32_reg_read,
};
static int regmap_fsi32le_reg_read(void *context, unsigned int reg, unsigned int *val)
{
__be32 v;
int ret;
ret = fsi_slave_read(context, reg, &v, sizeof(v));
if (ret)
return ret;
*val = be32_to_cpu(v);
return 0;
}
static int regmap_fsi32le_reg_write(void *context, unsigned int reg, unsigned int val)
{
__be32 v = cpu_to_be32(val);
return fsi_slave_write(context, reg, &v, sizeof(v));
}
static const struct regmap_bus regmap_fsi32le = {
.reg_write = regmap_fsi32le_reg_write,
.reg_read = regmap_fsi32le_reg_read,
};
static int regmap_fsi16_reg_read(void *context, unsigned int reg, unsigned int *val)
{
u16 v;
int ret;
ret = fsi_slave_read(context, reg, &v, sizeof(v));
if (ret)
return ret;
*val = v;
return 0;
}
static int regmap_fsi16_reg_write(void *context, unsigned int reg, unsigned int val)
{
u16 v;
if (val > 0xffff)
return -EINVAL;
v = val;
return fsi_slave_write(context, reg, &v, sizeof(v));
}
static const struct regmap_bus regmap_fsi16 = {
.reg_write = regmap_fsi16_reg_write,
.reg_read = regmap_fsi16_reg_read,
};
static int regmap_fsi16le_reg_read(void *context, unsigned int reg, unsigned int *val)
{
__be16 v;
int ret;
ret = fsi_slave_read(context, reg, &v, sizeof(v));
if (ret)
return ret;
*val = be16_to_cpu(v);
return 0;
}
static int regmap_fsi16le_reg_write(void *context, unsigned int reg, unsigned int val)
{
__be16 v;
if (val > 0xffff)
return -EINVAL;
v = cpu_to_be16(val);
return fsi_slave_write(context, reg, &v, sizeof(v));
}
static const struct regmap_bus regmap_fsi16le = {
.reg_write = regmap_fsi16le_reg_write,
.reg_read = regmap_fsi16le_reg_read,
};
static int regmap_fsi8_reg_read(void *context, unsigned int reg, unsigned int *val)
{
u8 v;
int ret;
ret = fsi_slave_read(context, reg, &v, sizeof(v));
if (ret)
return ret;
*val = v;
return 0;
}
static int regmap_fsi8_reg_write(void *context, unsigned int reg, unsigned int val)
{
u8 v;
if (val > 0xff)
return -EINVAL;
v = val;
return fsi_slave_write(context, reg, &v, sizeof(v));
}
static const struct regmap_bus regmap_fsi8 = {
.reg_write = regmap_fsi8_reg_write,
.reg_read = regmap_fsi8_reg_read,
};
static const struct regmap_bus *regmap_get_fsi_bus(struct fsi_device *fsi_dev,
const struct regmap_config *config)
{
const struct regmap_bus *bus = NULL;
if (config->reg_bits == 8 || config->reg_bits == 16 || config->reg_bits == 32) {
switch (config->val_bits) {
case 8:
bus = &regmap_fsi8;
break;
case 16:
switch (regmap_get_val_endian(&fsi_dev->dev, NULL, config)) {
case REGMAP_ENDIAN_LITTLE:
#ifdef __LITTLE_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
bus = &regmap_fsi16le;
break;
case REGMAP_ENDIAN_DEFAULT:
case REGMAP_ENDIAN_BIG:
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
bus = &regmap_fsi16;
break;
default:
break;
}
break;
case 32:
switch (regmap_get_val_endian(&fsi_dev->dev, NULL, config)) {
case REGMAP_ENDIAN_LITTLE:
#ifdef __LITTLE_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
bus = &regmap_fsi32le;
break;
case REGMAP_ENDIAN_DEFAULT:
case REGMAP_ENDIAN_BIG:
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
bus = &regmap_fsi32;
break;
default:
break;
}
break;
}
}
return bus ?: ERR_PTR(-EOPNOTSUPP);
}
struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev, const struct regmap_config *config,
struct lock_class_key *lock_key, const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_fsi_bus(fsi_dev, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&fsi_dev->dev, bus, fsi_dev->slave, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_fsi);
struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev,
const struct regmap_config *config,
struct lock_class_key *lock_key, const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_fsi_bus(fsi_dev, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&fsi_dev->dev, bus, fsi_dev->slave, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_fsi);
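/*
 * Illustrative sketch (not part of this file): an FSI client driver
 * would normally go through the devm_regmap_init_fsi() wrapper from
 * <linux/regmap.h>; the config values below are assumptions made up
 * for the example:
 *
 *	static const struct regmap_config cfg = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *	};
 *
 *	map = devm_regmap_init_fsi(fsi_dev, &cfg);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */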
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-fsi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017, Linaro Ltd.
#include <linux/regmap.h>
#include <linux/slimbus.h>
#include <linux/module.h>
#include "internal.h"
static int regmap_slimbus_write(void *context, const void *data, size_t count)
{
struct slim_device *sdev = context;
return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
}
static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
void *val, size_t val_size)
{
struct slim_device *sdev = context;
return slim_read(sdev, *(u16 *)reg, val_size, val);
}
static const struct regmap_bus regmap_slimbus_bus = {
.write = regmap_slimbus_write,
.read = regmap_slimbus_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim,
const struct regmap_config *config)
{
if (config->val_bits == 8 && config->reg_bits == 16)
return &regmap_slimbus_bus;
return ERR_PTR(-ENOTSUPP);
}
struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-slimbus.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPMI support
//
// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
//
// Based on regmap-i2c.c:
// Copyright 2011 Wolfson Microelectronics plc
// Author: Mark Brown <[email protected]>
#include <linux/regmap.h>
#include <linux/spmi.h>
#include <linux/module.h>
#include <linux/init.h>
static int regmap_spmi_base_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
u8 addr = *(u8 *)reg;
int err = 0;
BUG_ON(reg_size != 1);
while (val_size-- && !err)
err = spmi_register_read(context, addr++, val++);
return err;
}
static int regmap_spmi_base_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
const u8 *data = val;
u8 addr = *(u8 *)reg;
int err = 0;
BUG_ON(reg_size != 1);
/*
 * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
* use it when possible.
*/
if (addr == 0 && val_size) {
err = spmi_register_zero_write(context, *data);
if (err)
goto err_out;
data++;
addr++;
val_size--;
}
while (val_size) {
err = spmi_register_write(context, addr, *data);
if (err)
goto err_out;
data++;
addr++;
val_size--;
}
err_out:
return err;
}
static int regmap_spmi_base_write(void *context, const void *data,
size_t count)
{
BUG_ON(count < 1);
return regmap_spmi_base_gather_write(context, data, 1, data + 1,
count - 1);
}
static const struct regmap_bus regmap_spmi_base = {
.read = regmap_spmi_base_read,
.write = regmap_spmi_base_write,
.gather_write = regmap_spmi_base_gather_write,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
static int regmap_spmi_ext_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
int err = 0;
size_t len;
u16 addr;
BUG_ON(reg_size != 2);
addr = *(u16 *)reg;
/*
* Split accesses into two to take advantage of the more
* bandwidth-efficient 'Extended Register Read' command when possible
*/
while (addr <= 0xFF && val_size) {
len = min_t(size_t, val_size, 16);
err = spmi_ext_register_read(context, addr, val, len);
if (err)
goto err_out;
addr += len;
val += len;
val_size -= len;
}
while (val_size) {
len = min_t(size_t, val_size, 8);
err = spmi_ext_register_readl(context, addr, val, len);
if (err)
goto err_out;
addr += len;
val += len;
val_size -= len;
}
err_out:
return err;
}
static int regmap_spmi_ext_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
int err = 0;
size_t len;
u16 addr;
BUG_ON(reg_size != 2);
addr = *(u16 *)reg;
while (addr <= 0xFF && val_size) {
len = min_t(size_t, val_size, 16);
err = spmi_ext_register_write(context, addr, val, len);
if (err)
goto err_out;
addr += len;
val += len;
val_size -= len;
}
while (val_size) {
len = min_t(size_t, val_size, 8);
err = spmi_ext_register_writel(context, addr, val, len);
if (err)
goto err_out;
addr += len;
val += len;
val_size -= len;
}
err_out:
return err;
}
static int regmap_spmi_ext_write(void *context, const void *data,
size_t count)
{
BUG_ON(count < 2);
return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
count - 2);
}
static const struct regmap_bus regmap_spmi_ext = {
.read = regmap_spmi_ext_read,
.write = regmap_spmi_ext_write,
.gather_write = regmap_spmi_ext_gather_write,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
| linux-master | drivers/base/regmap/regmap-spmi.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <[email protected]>
#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "trace.h"
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};
static int regcache_hw_init(struct regmap *map)
{
int i, j;
int ret;
int count;
unsigned int reg, val;
void *tmp_buf;
if (!map->num_reg_defaults_raw)
return -EINVAL;
/* calculate the size of reg_defaults */
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
if (regmap_readable(map, i * map->reg_stride) &&
!regmap_volatile(map, i * map->reg_stride))
count++;
/* all registers are unreadable or volatile, so just bypass */
if (!count) {
map->cache_bypass = true;
return 0;
}
map->num_reg_defaults = count;
map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
GFP_KERNEL);
if (!map->reg_defaults)
return -ENOMEM;
if (!map->reg_defaults_raw) {
bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
/* Bypass the cache access till data read from HW */
map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
ret = -ENOMEM;
goto err_free;
}
ret = regmap_raw_read(map, 0, tmp_buf,
map->cache_size_raw);
map->cache_bypass = cache_bypass;
if (ret == 0) {
map->reg_defaults_raw = tmp_buf;
map->cache_free = true;
} else {
kfree(tmp_buf);
}
}
/* fill the reg_defaults */
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
reg = i * map->reg_stride;
if (!regmap_readable(map, reg))
continue;
if (regmap_volatile(map, reg))
continue;
if (map->reg_defaults_raw) {
val = regcache_get_val(map, map->reg_defaults_raw, i);
} else {
bool cache_bypass = map->cache_bypass;
map->cache_bypass = true;
ret = regmap_read(map, reg, &val);
map->cache_bypass = cache_bypass;
if (ret != 0) {
dev_err(map->dev, "Failed to read %d: %d\n",
reg, ret);
goto err_free;
}
}
map->reg_defaults[j].reg = reg;
map->reg_defaults[j].def = val;
j++;
}
return 0;
err_free:
kfree(map->reg_defaults);
return ret;
}
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
int ret;
int i;
void *tmp_buf;
if (map->cache_type == REGCACHE_NONE) {
if (config->reg_defaults || config->num_reg_defaults_raw)
dev_warn(map->dev,
"No cache used with register defaults set!\n");
map->cache_bypass = true;
return 0;
}
if (config->reg_defaults && !config->num_reg_defaults) {
dev_err(map->dev,
"Register defaults are set without the number!\n");
return -EINVAL;
}
if (config->num_reg_defaults && !config->reg_defaults) {
dev_err(map->dev,
"Register defaults number are set without the reg!\n");
return -EINVAL;
}
for (i = 0; i < config->num_reg_defaults; i++)
if (config->reg_defaults[i].reg % map->reg_stride)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(cache_types); i++)
if (cache_types[i]->type == map->cache_type)
break;
if (i == ARRAY_SIZE(cache_types)) {
dev_err(map->dev, "Could not match cache type: %d\n",
map->cache_type);
return -EINVAL;
}
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
map->cache_ops = cache_types[i];
if (!map->cache_ops->read ||
!map->cache_ops->write ||
!map->cache_ops->name)
return -EINVAL;
/* We still need to ensure that the reg_defaults
* won't vanish from under us. We'll need to make
* a copy of it.
*/
if (config->reg_defaults) {
tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
sizeof(struct reg_default), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
map->reg_defaults = tmp_buf;
} else if (map->num_reg_defaults_raw) {
/* Some devices such as PMICs don't have cache defaults,
* we cope with this by reading back the HW registers and
* crafting the cache defaults by hand.
*/
ret = regcache_hw_init(map);
if (ret < 0)
return ret;
if (map->cache_bypass)
return 0;
}
if (!map->max_register && map->num_reg_defaults_raw)
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
ret = map->cache_ops->init(map);
if (ret)
goto err_free;
}
return 0;
err_free:
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
return ret;
}
void regcache_exit(struct regmap *map)
{
if (map->cache_type == REGCACHE_NONE)
return;
BUG_ON(!map->cache_ops);
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
if (map->cache_ops->exit) {
dev_dbg(map->dev, "Destroying %s cache\n",
map->cache_ops->name);
map->cache_ops->exit(map);
}
}
/**
* regcache_read - Fetch the value of a given register from the cache.
*
* @map: map to configure.
* @reg: The register index.
* @value: The value to be returned.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
int ret;
if (map->cache_type == REGCACHE_NONE)
return -EINVAL;
BUG_ON(!map->cache_ops);
if (!regmap_volatile(map, reg)) {
ret = map->cache_ops->read(map, reg, value);
if (ret == 0)
trace_regmap_reg_read_cache(map, reg, *value);
return ret;
}
return -EINVAL;
}
/**
* regcache_write - Set the value of a given register in the cache.
*
* @map: map to configure.
* @reg: The register index.
* @value: The new register value.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_write(struct regmap *map,
unsigned int reg, unsigned int value)
{
if (map->cache_type == REGCACHE_NONE)
return 0;
BUG_ON(!map->cache_ops);
if (!regmap_volatile(map, reg))
return map->cache_ops->write(map, reg, value);
return 0;
}
bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
unsigned int val)
{
int ret;
if (!regmap_writeable(map, reg))
return false;
/* If we don't know the chip just got reset, then sync everything. */
if (!map->no_sync_defaults)
return true;
/* Is this the hardware default? If so skip. */
ret = regcache_lookup_reg(map, reg);
if (ret >= 0 && val == map->reg_defaults[ret].def)
return false;
return true;
}
static int regcache_default_sync(struct regmap *map, unsigned int min,
unsigned int max)
{
unsigned int reg;
for (reg = min; reg <= max; reg += map->reg_stride) {
unsigned int val;
int ret;
if (regmap_volatile(map, reg) ||
!regmap_writeable(map, reg))
continue;
ret = regcache_read(map, reg, &val);
if (ret == -ENOENT)
continue;
if (ret)
return ret;
if (!regcache_reg_needs_sync(map, reg, val))
continue;
map->cache_bypass = true;
ret = _regmap_write(map, reg, val);
map->cache_bypass = false;
if (ret) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
reg, ret);
return ret;
}
dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
}
return 0;
}
/**
* regcache_sync - Sync the register cache with the hardware.
*
* @map: map to configure.
*
* Any registers that should not be synced should be marked as
* volatile. In general drivers can choose not to use the provided
* syncing functionality if they so require.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_sync(struct regmap *map)
{
int ret = 0;
unsigned int i;
const char *name;
bool bypass;
if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL;
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map, name, "start");
if (!map->cache_dirty)
goto out;
/* Apply any patch first */
map->cache_bypass = true;
for (i = 0; i < map->patch_regs; i++) {
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
map->patch[i].reg, map->patch[i].def, ret);
goto out;
}
}
map->cache_bypass = false;
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, 0, map->max_register);
else
ret = regcache_default_sync(map, 0, map->max_register);
if (ret == 0)
map->cache_dirty = false;
out:
/* Restore the bypass state */
map->cache_bypass = bypass;
map->no_sync_defaults = false;
map->unlock(map->lock_arg);
regmap_async_complete(map);
trace_regcache_sync(map, name, "stop");
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
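/*
* Illustrative sketch, not part of this file's API: a typical resume path
* in a hypothetical driver with private struct "foo" pairs
* regcache_mark_dirty() with regcache_sync() once power is restored:
*
*    static int foo_resume(struct device *dev)
*    {
*        struct foo *foo = dev_get_drvdata(dev);
*
*        foo_power_on(foo);
*        regcache_cache_only(foo->map, false);
*        regcache_mark_dirty(foo->map);
*        return regcache_sync(foo->map);
*    }
*/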
/**
* regcache_sync_region - Sync part of the register cache with the hardware.
*
* @map: map to sync.
* @min: first register to sync
* @max: last register to sync
*
* Write all non-default register values in the specified region to
* the hardware.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_sync_region(struct regmap *map, unsigned int min,
unsigned int max)
{
int ret = 0;
const char *name;
bool bypass;
if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL;
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
name = map->cache_ops->name;
dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
trace_regcache_sync(map, name, "start region");
if (!map->cache_dirty)
goto out;
map->async = true;
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, min, max);
else
ret = regcache_default_sync(map, min, max);
out:
/* Restore the bypass state */
map->cache_bypass = bypass;
map->async = false;
map->no_sync_defaults = false;
map->unlock(map->lock_arg);
regmap_async_complete(map);
trace_regcache_sync(map, name, "stop region");
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
/**
* regcache_drop_region - Discard part of the register cache
*
* @map: map to operate on
* @min: first register to discard
* @max: last register to discard
*
* Discard part of the register cache.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_drop_region(struct regmap *map, unsigned int min,
unsigned int max)
{
int ret = 0;
if (!map->cache_ops || !map->cache_ops->drop)
return -EINVAL;
map->lock(map->lock_arg);
trace_regcache_drop_region(map, min, max);
ret = map->cache_ops->drop(map, min, max);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
/**
* regcache_cache_only - Put a register map into cache only mode
*
* @map: map to configure
* @enable: flag if writes should update only the cache and not the hardware
*
* When a register map is marked as cache only writes to the register
* map API will only update the register cache, they will not cause
* any hardware changes. This is useful for allowing portions of
* drivers to act as though the device were functioning as normal when
* it is disabled for power saving reasons.
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
map->lock(map->lock_arg);
WARN_ON(map->cache_type != REGCACHE_NONE &&
map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
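/*
* Illustrative sketch (hypothetical "foo" driver, the suspend-side
* counterpart of a regcache_sync() based resume): enter cache only mode
* before cutting power, so writes issued while the device is off land in
* the cache and are replayed by the next sync:
*
*    regcache_cache_only(foo->map, true);
*    foo_power_off(foo);
*/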
/**
* regcache_mark_dirty - Indicate that HW registers were reset to default values
*
* @map: map to mark
*
* Inform regcache that the device has been powered down or reset, so that
* on resume, regcache_sync() knows to write out all non-default values
* stored in the cache.
*
* If this function is not called, regcache_sync() will assume that
* the hardware state still matches the cache state, modulo any writes that
* happened when cache_only was true.
*/
void regcache_mark_dirty(struct regmap *map)
{
map->lock(map->lock_arg);
map->cache_dirty = true;
map->no_sync_defaults = true;
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
/**
* regcache_cache_bypass - Put a register map into cache bypass mode
*
* @map: map to configure
* @enable: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
* to the register map API will only update the hardware and not
* the cache directly. This is useful when syncing the cache back to
* the hardware.
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
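/*
* Illustrative sketch (hypothetical map and FOO_SCRATCH register): bypass
* mode pushes a value straight to the hardware without disturbing the
* cached state that a later regcache_sync() would restore:
*
*    regcache_cache_bypass(foo->map, true);
*    regmap_write(foo->map, FOO_SCRATCH, val);  (reaches the hardware only)
*    regcache_cache_bypass(foo->map, false);
*/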
/**
* regcache_reg_cached - Check if a register is cached
*
* @map: map to check
* @reg: register to check
*
* Reports if a register is cached.
*/
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
{
unsigned int val;
int ret;
map->lock(map->lock_arg);
ret = regcache_read(map, reg, &val);
map->unlock(map->lock_arg);
return ret == 0;
}
EXPORT_SYMBOL_GPL(regcache_reg_cached);
void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val)
{
/* Use device native format if possible */
if (map->format.format_val) {
map->format.format_val(base + (map->cache_word_size * idx),
val, 0);
return;
}
switch (map->cache_word_size) {
case 1: {
u8 *cache = base;
cache[idx] = val;
break;
}
case 2: {
u16 *cache = base;
cache[idx] = val;
break;
}
case 4: {
u32 *cache = base;
cache[idx] = val;
break;
}
default:
BUG();
}
}
unsigned int regcache_get_val(struct regmap *map, const void *base,
unsigned int idx)
{
if (!base)
return -EINVAL;
/* Use device native format if possible */
if (map->format.parse_val)
return map->format.parse_val(regcache_get_val_addr(map, base,
idx));
switch (map->cache_word_size) {
case 1: {
const u8 *cache = base;
return cache[idx];
}
case 2: {
const u16 *cache = base;
return cache[idx];
}
case 4: {
const u32 *cache = base;
return cache[idx];
}
default:
BUG();
}
/* unreachable */
return -1;
}
static int regcache_default_cmp(const void *a, const void *b)
{
const struct reg_default *_a = a;
const struct reg_default *_b = b;
return _a->reg - _b->reg;
}
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
struct reg_default key;
struct reg_default *r;
key.reg = reg;
key.def = 0;
r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
sizeof(struct reg_default), regcache_default_cmp);
if (r)
return r - map->reg_defaults;
else
return -ENOENT;
}
static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
if (!cache_present)
return true;
return test_bit(idx, cache_present);
}
int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
if (!regcache_reg_needs_sync(map, reg, val))
return 0;
map->cache_bypass = true;
ret = _regmap_write(map, reg, val);
map->cache_bypass = false;
if (ret != 0) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
reg, ret);
return ret;
}
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
reg, val);
return 0;
}
static int regcache_sync_block_single(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base,
unsigned int start, unsigned int end)
{
unsigned int i, regtmp, val;
int ret;
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
if (!regcache_reg_present(cache_present, i) ||
!regmap_writeable(map, regtmp))
continue;
val = regcache_get_val(map, block, i);
ret = regcache_sync_val(map, regtmp, val);
if (ret != 0)
return ret;
}
return 0;
}
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
unsigned int base, unsigned int cur)
{
size_t val_bytes = map->format.val_bytes;
int ret, count;
if (*data == NULL)
return 0;
count = (cur - base) / map->reg_stride;
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
count * val_bytes, count, base, cur - map->reg_stride);
map->cache_bypass = true;
ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
if (ret)
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
base, cur - map->reg_stride, ret);
map->cache_bypass = false;
*data = NULL;
return ret;
}
static int regcache_sync_block_raw(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
unsigned int i, val;
unsigned int regtmp = 0;
unsigned int base = 0;
const void *data = NULL;
int ret;
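/*
* Accumulate the longest possible run of contiguous registers that need
* writing; any register that is absent from the cache, not writeable or
* already holding the required value flushes the pending run to the
* hardware as a single raw write.
*/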
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
if (!regcache_reg_present(cache_present, i) ||
!regmap_writeable(map, regtmp)) {
ret = regcache_sync_block_raw_flush(map, &data,
base, regtmp);
if (ret != 0)
return ret;
continue;
}
val = regcache_get_val(map, block, i);
if (!regcache_reg_needs_sync(map, regtmp, val)) {
ret = regcache_sync_block_raw_flush(map, &data,
base, regtmp);
if (ret != 0)
return ret;
continue;
}
if (!data) {
data = regcache_get_val_addr(map, block, i);
base = regtmp;
}
}
return regcache_sync_block_raw_flush(map, &data, base, regtmp +
map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
return regcache_sync_block_single(map, block, cache_present,
block_base, start, end);
}
| linux-master | drivers/base/regmap/regcache.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - MMIO support
//
// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include "internal.h"
struct regmap_mmio_context {
void __iomem *regs;
unsigned int val_bytes;
bool big_endian;
bool attached_clk;
struct clk *clk;
void (*reg_write)(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val);
unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
unsigned int reg);
};
static int regmap_mmio_regbits_check(size_t reg_bits)
{
switch (reg_bits) {
case 8:
case 16:
case 32:
return 0;
default:
return -EINVAL;
}
}
static int regmap_mmio_get_min_stride(size_t val_bits)
{
int min_stride;
switch (val_bits) {
case 8:
/* The core treats 0 as 1 */
min_stride = 0;
break;
case 16:
min_stride = 2;
break;
case 32:
min_stride = 4;
break;
default:
return -EINVAL;
}
return min_stride;
}
static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writeb(val, ctx->regs + reg);
}
static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writeb_relaxed(val, ctx->regs + reg);
}
static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val)
{
iowrite8(val, ctx->regs + reg);
}
static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writew(val, ctx->regs + reg);
}
static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writew_relaxed(val, ctx->regs + reg);
}
static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val)
{
iowrite16(val, ctx->regs + reg);
}
static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writew(swab16(val), ctx->regs + reg);
}
static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val)
{
iowrite16be(val, ctx->regs + reg);
}
static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writel(val, ctx->regs + reg);
}
static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writel_relaxed(val, ctx->regs + reg);
}
static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val)
{
iowrite32(val, ctx->regs + reg);
}
static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
writel(swab32(val), ctx->regs + reg);
}
static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
unsigned int reg, unsigned int val)
{
iowrite32be(val, ctx->regs + reg);
}
static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_mmio_context *ctx = context;
int ret;
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
ctx->reg_write(ctx, reg, val);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
}
static int regmap_mmio_noinc_write(void *context, unsigned int reg,
const void *val, size_t val_count)
{
struct regmap_mmio_context *ctx = context;
int ret = 0;
int i;
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
/*
* There are no native, assembly-optimized write single register
* operations for big endian, so fall back to emulation if this
* is needed. (Single bytes are fine, they are not affected by
* endianness.)
*/
if (ctx->big_endian && (ctx->val_bytes > 1)) {
switch (ctx->val_bytes) {
case 2:
{
const u16 *valp = (const u16 *)val;
for (i = 0; i < val_count; i++)
writew(swab16(valp[i]), ctx->regs + reg);
goto out_clk;
}
case 4:
{
const u32 *valp = (const u32 *)val;
for (i = 0; i < val_count; i++)
writel(swab32(valp[i]), ctx->regs + reg);
goto out_clk;
}
default:
ret = -EINVAL;
goto out_clk;
}
}
switch (ctx->val_bytes) {
case 1:
writesb(ctx->regs + reg, (const u8 *)val, val_count);
break;
case 2:
writesw(ctx->regs + reg, (const u16 *)val, val_count);
break;
case 4:
writesl(ctx->regs + reg, (const u32 *)val, val_count);
break;
default:
ret = -EINVAL;
break;
}
out_clk:
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return ret;
}
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readb(ctx->regs + reg);
}
static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readb_relaxed(ctx->regs + reg);
}
static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return ioread8(ctx->regs + reg);
}
static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readw(ctx->regs + reg);
}
static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readw_relaxed(ctx->regs + reg);
}
static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return ioread16(ctx->regs + reg);
}
static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return swab16(readw(ctx->regs + reg));
}
static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return ioread16be(ctx->regs + reg);
}
static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readl(ctx->regs + reg);
}
static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return readl_relaxed(ctx->regs + reg);
}
static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return ioread32(ctx->regs + reg);
}
static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return swab32(readl(ctx->regs + reg));
}
static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
return ioread32be(ctx->regs + reg);
}
static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_mmio_context *ctx = context;
int ret;
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
*val = ctx->reg_read(ctx, reg);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
}
static int regmap_mmio_noinc_read(void *context, unsigned int reg,
void *val, size_t val_count)
{
struct regmap_mmio_context *ctx = context;
int ret = 0;
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
switch (ctx->val_bytes) {
case 1:
readsb(ctx->regs + reg, (u8 *)val, val_count);
break;
case 2:
readsw(ctx->regs + reg, (u16 *)val, val_count);
break;
case 4:
readsl(ctx->regs + reg, (u32 *)val, val_count);
break;
default:
ret = -EINVAL;
goto out_clk;
}
/*
* There are no native, assembly-optimized read single register
* operations for big endian, so swab the values after reading if
* this is needed. (Single bytes are fine, they are not affected
* by endianness.)
*/
if (ctx->big_endian && (ctx->val_bytes > 1)) {
switch (ctx->val_bytes) {
case 2:
swab16_array(val, val_count);
break;
case 4:
swab32_array(val, val_count);
break;
default:
ret = -EINVAL;
break;
}
}
out_clk:
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return ret;
}
static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
if (!IS_ERR(ctx->clk)) {
clk_unprepare(ctx->clk);
if (!ctx->attached_clk)
clk_put(ctx->clk);
}
kfree(context);
}
static const struct regmap_bus regmap_mmio = {
.fast_io = true,
.reg_write = regmap_mmio_write,
.reg_read = regmap_mmio_read,
.reg_noinc_write = regmap_mmio_noinc_write,
.reg_noinc_read = regmap_mmio_noinc_read,
.free_context = regmap_mmio_free_context,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
const char *clk_id,
void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
int min_stride;
int ret;
ret = regmap_mmio_regbits_check(config->reg_bits);
if (ret)
return ERR_PTR(ret);
if (config->pad_bits)
return ERR_PTR(-EINVAL);
min_stride = regmap_mmio_get_min_stride(config->val_bits);
if (min_stride < 0)
return ERR_PTR(min_stride);
if (config->reg_stride && config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
if (config->use_relaxed_mmio && config->io_port)
return ERR_PTR(-EINVAL);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
case REGMAP_ENDIAN_DEFAULT:
case REGMAP_ENDIAN_LITTLE:
#ifdef __LITTLE_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
switch (config->val_bits) {
case 8:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread8;
ctx->reg_write = regmap_mmio_iowrite8;
} else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read8_relaxed;
ctx->reg_write = regmap_mmio_write8_relaxed;
} else {
ctx->reg_read = regmap_mmio_read8;
ctx->reg_write = regmap_mmio_write8;
}
break;
case 16:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread16le;
ctx->reg_write = regmap_mmio_iowrite16le;
} else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read16le_relaxed;
ctx->reg_write = regmap_mmio_write16le_relaxed;
} else {
ctx->reg_read = regmap_mmio_read16le;
ctx->reg_write = regmap_mmio_write16le;
}
break;
case 32:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread32le;
ctx->reg_write = regmap_mmio_iowrite32le;
} else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read32le_relaxed;
ctx->reg_write = regmap_mmio_write32le_relaxed;
} else {
ctx->reg_read = regmap_mmio_read32le;
ctx->reg_write = regmap_mmio_write32le;
}
break;
default:
ret = -EINVAL;
goto err_free;
}
break;
case REGMAP_ENDIAN_BIG:
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
ctx->big_endian = true;
switch (config->val_bits) {
case 8:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread8;
ctx->reg_write = regmap_mmio_iowrite8;
} else {
ctx->reg_read = regmap_mmio_read8;
ctx->reg_write = regmap_mmio_write8;
}
break;
case 16:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread16be;
ctx->reg_write = regmap_mmio_iowrite16be;
} else {
ctx->reg_read = regmap_mmio_read16be;
ctx->reg_write = regmap_mmio_write16be;
}
break;
case 32:
if (config->io_port) {
ctx->reg_read = regmap_mmio_ioread32be;
ctx->reg_write = regmap_mmio_iowrite32be;
} else {
ctx->reg_read = regmap_mmio_read32be;
ctx->reg_write = regmap_mmio_write32be;
}
break;
default:
ret = -EINVAL;
goto err_free;
}
break;
default:
ret = -EINVAL;
goto err_free;
}
if (clk_id == NULL)
return ctx;
ctx->clk = clk_get(dev, clk_id);
if (IS_ERR(ctx->clk)) {
ret = PTR_ERR(ctx->clk);
goto err_free;
}
ret = clk_prepare(ctx->clk);
if (ret < 0) {
clk_put(ctx->clk);
goto err_free;
}
return ctx;
err_free:
kfree(ctx);
return ERR_PTR(ret);
}
struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap_mmio_context *ctx;
ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return __regmap_init(dev, &regmap_mmio, ctx, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
const char *clk_id,
void __iomem *regs,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap_mmio_context *ctx;
ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
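/*
* Illustrative probe-time usage (hypothetical "foo" platform driver; the
* devm_regmap_init_mmio() wrapper macro expands to the _clk variant above
* with a NULL clk_id):
*
*    base = devm_platform_ioremap_resource(pdev, 0);
*    if (IS_ERR(base))
*        return PTR_ERR(base);
*    map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
*    if (IS_ERR(map))
*        return PTR_ERR(map);
*/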
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
{
struct regmap_mmio_context *ctx = map->bus_context;
ctx->clk = clk;
ctx->attached_clk = true;
return clk_prepare(ctx->clk);
}
EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
void regmap_mmio_detach_clk(struct regmap *map)
{
struct regmap_mmio_context *ctx = map->bus_context;
clk_unprepare(ctx->clk);
ctx->attached_clk = false;
ctx->clk = NULL;
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-mmio.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - Memory region
//
// This is intended for testing only
//
// Copyright (c) 2023, Arm Ltd
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include "internal.h"
static int regmap_ram_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_ram_data *data = context;
data->vals[reg] = val;
data->written[reg] = true;
return 0;
}
static int regmap_ram_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_ram_data *data = context;
*val = data->vals[reg];
data->read[reg] = true;
return 0;
}
static void regmap_ram_free_context(void *context)
{
struct regmap_ram_data *data = context;
kfree(data->vals);
kfree(data->read);
kfree(data->written);
kfree(data);
}
static const struct regmap_bus regmap_ram = {
.fast_io = true,
.reg_write = regmap_ram_write,
.reg_read = regmap_ram_read,
.free_context = regmap_ram_free_context,
};
struct regmap *__regmap_init_ram(const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap *map;
if (!config->max_register) {
pr_crit("No max_register specified for RAM regmap\n");
return ERR_PTR(-EINVAL);
}
data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written) {
kfree(data->read);
return ERR_PTR(-ENOMEM);
}
map = __regmap_init(NULL, &regmap_ram, data, config,
lock_key, lock_name);
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_ram);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-ram.c |
// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd
#include <kunit/test.h>
#include "internal.h"
#define BLOCK_TEST_SIZE 12
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
.val_bits = sizeof(unsigned int) * 8,
};
struct regcache_types {
enum regcache_type type;
const char *name;
};
static void case_to_desc(const struct regcache_types *t, char *desc)
{
strcpy(desc, t->name);
}
static const struct regcache_types regcache_types_list[] = {
{ REGCACHE_NONE, "none" },
{ REGCACHE_FLAT, "flat" },
{ REGCACHE_RBTREE, "rbtree" },
{ REGCACHE_MAPLE, "maple" },
};
KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
static const struct regcache_types real_cache_types_list[] = {
{ REGCACHE_FLAT, "flat" },
{ REGCACHE_RBTREE, "rbtree" },
{ REGCACHE_MAPLE, "maple" },
};
KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
static const struct regcache_types sparse_cache_types_list[] = {
{ REGCACHE_RBTREE, "rbtree" },
{ REGCACHE_MAPLE, "maple" },
};
KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
static struct regmap *gen_regmap(struct regmap_config *config,
struct regmap_ram_data **data)
{
unsigned int *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * sizeof(unsigned int);
int i;
struct reg_default *defaults;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
get_random_bytes(buf, size);
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data)) {
kfree(buf);
return ERR_PTR(-ENOMEM);
}
(*data)->vals = buf;
if (config->num_reg_defaults) {
defaults = kcalloc(config->num_reg_defaults,
sizeof(struct reg_default),
GFP_KERNEL);
if (!defaults) {
kfree(buf);
kfree(*data);
return ERR_PTR(-ENOMEM);
}
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i * config->reg_stride;
defaults[i].def = buf[i * config->reg_stride];
}
}
ret = regmap_init_ram(config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
}
return ret;
}
static bool reg_5_false(struct device *context, unsigned int reg)
{
return reg != 5;
}
static void basic_read_write(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* If we write a value to a register we can read it back */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
regmap_exit(map);
}
static void bulk_write(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/*
* Data written via the bulk API can be read back with single
* reads.
*/
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
regmap_exit(map);
}
static void bulk_read(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* Data written as single writes can be read via the bulk API */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
regmap_exit(map);
}
static void write_readonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
/* Change the value of all registers, readonly should fail */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.readable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[i] = false;
/*
* Try to read all the registers, the writeonly one should
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
if (t->type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
}
}
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
regmap_exit(map);
}
static void reg_defaults(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read back the expected default data */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
regmap_exit(map);
}
static void reg_defaults_read_dev(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* We should have read the cache defaults back from the map */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
/* Read back the expected default data */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
regmap_exit(map);
}
static void register_patch(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
struct reg_sequence patch[2];
unsigned int rval[BLOCK_TEST_SIZE];
int i;
/* We need defaults so readback works */
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
patch[0].reg = 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
patch[1].reg = 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
ARRAY_SIZE(patch)));
/* Only the patched registers are written */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
switch (i) {
case 2:
case 5:
KUNIT_EXPECT_TRUE(test, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
break;
default:
KUNIT_EXPECT_FALSE(test, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
break;
}
}
regmap_exit(map);
}
static void stride(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Only even registers can be accessed, try both read and write */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
data->read[i] = false;
data->written[i] = false;
if (i % 2) {
KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
regmap_exit(map);
}
static struct regmap_range_cfg test_range = {
.selector_reg = 1,
.selector_mask = 0xff,
.window_start = 4,
.window_len = 10,
.range_min = 20,
.range_max = 40,
};
static bool test_range_volatile(struct device *dev, unsigned int reg)
{
if (reg >= test_range.window_start &&
reg <= test_range.window_start + test_range.window_len)
return true;
if (reg >= test_range.range_min && reg <= test_range.range_max)
return true;
return false;
}
static void basic_ranges(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.volatile_reg = test_range_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = test_range.range_min; i < test_range.range_max; i++) {
data->read[i] = false;
data->written[i] = false;
}
/* Reset the page to a non-zero value to trigger a change */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
test_range.range_max));
/* Check we set the page and use the window for writes */
data->written[test_range.selector_reg] = false;
data->written[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
data->written[test_range.selector_reg] = false;
data->written[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
test_range.range_min +
test_range.window_len,
0));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
/* Same for reads */
data->written[test_range.selector_reg] = false;
data->read[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
data->written[test_range.selector_reg] = false;
data->read[test_range.window_start] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
test_range.range_min +
test_range.window_len,
&val));
KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
/* No physical access triggered in the virtual range */
for (i = test_range.range_min; i < test_range.range_max; i++) {
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
regmap_exit(map);
}
/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval, *vals;
size_t buf_sz;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.max_register = 300;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
vals = kunit_kcalloc(test, config.max_register, sizeof(unsigned int),
GFP_KERNEL);
KUNIT_ASSERT_FALSE(test, vals == NULL);
buf_sz = sizeof(unsigned int) * config.max_register;
get_random_bytes(vals, buf_sz);
/* Write data into the map/cache in ever decreasing strides */
for (i = 0; i < config.max_register; i += 100)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 50)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 25)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 10)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 5)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 3)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i += 2)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
for (i = 0; i < config.max_register; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
/* Do reads from the cache (if there is one) match? */
for (i = 0; i < config.max_register; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, rval, vals[i]);
KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
}
regmap_exit(map);
}
static void cache_bypass(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
/* Read the bypassed value */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
KUNIT_EXPECT_EQ(test, data->vals[0], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
regmap_exit(map);
}
static void cache_sync(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
memset(data->vals, 0, sizeof(val));
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, true, data->written[i]);
regmap_exit(map);
}
static void cache_sync_defaults(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
/* Resync */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
regmap_exit(map);
}
static void cache_sync_readonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.writeable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
regmap_exit(map);
}
static void cache_sync_patch(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
struct reg_sequence patch[2];
unsigned int rval[BLOCK_TEST_SIZE], val;
int i;
/* We need defaults so readback works */
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
patch[0].reg = 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
patch[1].reg = 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
ARRAY_SIZE(patch)));
/* Sync the cache */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
switch (i) {
case 2:
case 5:
KUNIT_EXPECT_EQ(test, true, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
break;
default:
KUNIT_EXPECT_EQ(test, false, data->written[i]);
KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
break;
}
}
regmap_exit(map);
}
static void cache_drop(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval[BLOCK_TEST_SIZE];
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Ensure the data is read from the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[i] = false;
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
KUNIT_EXPECT_FALSE(test, data->read[i]);
data->read[i] = false;
}
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/* Drop some registers */
KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
/* Reread and check only the dropped registers hit the device. */
KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
regmap_exit(map);
}
static void cache_present(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[i] = false;
/* No defaults so no registers cached. */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
/* We didn't trigger any reads */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_FALSE(test, data->read[i]);
/* Fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
/* Now everything should be cached */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
regmap_exit(map);
}
struct raw_test_types {
const char *name;
enum regcache_type cache_type;
enum regmap_endian val_endian;
};
static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
strcpy(desc, t->name);
}
static const struct raw_test_types raw_types_list[] = {
{ "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
{ "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};
KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
static const struct raw_test_types raw_cache_types_list[] = {
{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};
KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.reg_bits = 16,
.val_bits = 16,
};
static struct regmap *gen_raw_regmap(struct regmap_config *config,
struct raw_test_types *test_type,
struct regmap_ram_data **data)
{
u16 *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * config->reg_bits / 8;
int i;
struct reg_default *defaults;
config->cache_type = test_type->cache_type;
config->val_format_endian = test_type->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
get_random_bytes(buf, size);
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data)) {
kfree(buf);
return ERR_PTR(-ENOMEM);
}
(*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1;
defaults = kcalloc(config->num_reg_defaults,
sizeof(struct reg_default),
GFP_KERNEL);
if (!defaults) {
kfree(buf);
kfree(*data);
return ERR_PTR(-ENOMEM);
}
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
switch (test_type->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
case REGMAP_ENDIAN_BIG:
defaults[i].def = be16_to_cpu(buf[i]);
break;
default:
kfree(buf);
kfree(*data);
kfree(defaults);
return ERR_PTR(-EINVAL);
}
}
/*
* We use the defaults in the tests but they don't make sense
* to the core if there's no cache.
*/
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
}
return ret;
}
static void raw_read_defaults_single(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Check that we can read the defaults via the API */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 *rval;
u16 def;
size_t val_len;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
val_len = sizeof(*rval) * (config.max_register + 1);
rval = kmalloc(val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
} else {
KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
}
}
kfree(rval);
regmap_exit(map);
}
static void raw_write_read_single(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 val;
unsigned int rval;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* If we write a value to a register we can read it back */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 *hw_buf;
u16 val[2];
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
/* Do a raw write */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
switch (i) {
case 2:
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2]));
}
break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 val[2];
u16 *hw_buf;
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
if (config.val_format_endian == REGMAP_ENDIAN_BIG)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
be16_to_cpu(val[0])));
else
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
le16_to_cpu(val[0])));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
switch (i) {
case 2:
case 3:
case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2]));
}
break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
/* Do the sync */
regcache_cache_only(map, false);
regcache_mark_dirty(map);
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
regmap_exit(map);
}
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
{}
};
static struct kunit_suite regmap_test_suite = {
.name = "regmap",
.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-kunit.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
//
// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>
/*
* This driver implements the regmap operations for a generic SPI
* master to access the registers of an SPI slave chip which has an
* Avalon bus in it.
*
* The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
* in the spi slave chip. The IP acts as a bridge to convert encoded streams of
* bytes from the host to the internal register read/write on Avalon bus. In
* order to issue register access requests to the slave chip, the host should
* send formatted bytes that conform to the transfer protocol.
* The transfer protocol contains 3 layers: transaction layer, packet layer
* and physical layer.
*
* Reference documents can be found at:
* https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
*
* Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
* introduction to the protocol.
*
* Chapter "Avalon Packets to Transactions Converter Core" describes
* the transaction layer.
*
* Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
* describes the packet layer.
*
* Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
* physical layer.
*
*
* When the host issues a regmap read/write, the driver transforms the request
* into a byte stream layer by layer. It formats the register addr, value and
* length into a transaction layer request, converts that request into a packet
* layer byte stream and then into a physical layer byte stream. Finally the
* driver sends the formatted byte stream over the SPI bus to the slave chip.
*
* The spi-avmm IP on the slave chip decodes the byte stream and initiates
* register read/write on its internal Avalon bus, and then encodes the
* response to byte stream and sends back to host.
*
* The driver receives the byte stream, reverses the 3 layers transformation,
* and finally gets the response value (read out data for register read,
* successful written size for register write).
*/
#define PKT_SOP 0x7a
#define PKT_EOP 0x7b
#define PKT_CHANNEL 0x7c
#define PKT_ESC 0x7d
#define PHY_IDLE 0x4a
#define PHY_ESC 0x4d
#define TRANS_CODE_WRITE 0x0
#define TRANS_CODE_SEQ_WRITE 0x4
#define TRANS_CODE_READ 0x10
#define TRANS_CODE_SEQ_READ 0x14
#define TRANS_CODE_NO_TRANS 0x7f
#define SPI_AVMM_XFER_TIMEOUT (msecs_to_jiffies(200))
/* slave's register addr is 32 bits */
#define SPI_AVMM_REG_SIZE 4UL
/* slave's register value is 32 bits */
#define SPI_AVMM_VAL_SIZE 4UL
/*
 * The max rx size could be larger, but to limit the buffer consumption
 * we cap a single transfer at 1KB.
*/
#define MAX_READ_CNT 256UL
#define MAX_WRITE_CNT 1UL
struct trans_req_header {
u8 code;
u8 rsvd;
__be16 size;
__be32 addr;
} __packed;
struct trans_resp_header {
u8 r_code;
u8 rsvd;
__be16 size;
} __packed;
#define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header))
#define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header))
/*
* In transaction layer,
* the write request format is: Transaction request header + data
* the read request format is: Transaction request header
* the write response format is: Transaction response header
* the read response format is: pure data, no Transaction response header
*/
#define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
#define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE
#define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT)
#define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
#define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE
#define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT)
/* tx & rx share one transaction layer buffer */
#define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \
TRANS_TX_MAX : TRANS_RX_MAX)
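/*
 * A worked example derived from the definitions above (values are
 * illustrative): a single-value write of 0x12345678 to register 0x100
 * occupies TRANS_WR_TX_SIZE(1) == 12 bytes at the transaction layer:
 *
 *	code  rsvd  size(be16)  addr(be32)           data(le32)
 *	0x00  0x00  0x00 0x04   0x00 0x00 0x01 0x00  0x78 0x56 0x34 0x12
 */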
/*
* In tx phase, the host prepares all the phy layer bytes of a request in the
* phy buffer and sends them in a batch.
*
 * The packet layer and physical layer define several special chars for
 * various purposes; when a transaction layer byte hits one of these special
 * chars, it must be escaped. The escape rule is: send the escape char first,
 * followed by the original byte XOR'ed with 0x20.
*
* This macro defines the max possible length of the phy data. In the worst
* case, all transaction layer bytes need to be escaped (so the data length
* doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
* we should make sure the length is aligned to SPI BPW.
*/
#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4)
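/*
 * An illustrative instance of the escape rule above: a transaction layer
 * byte 0x7a collides with PKT_SOP and is sent as the pair 0x7d 0x5a
 * (PKT_ESC, then 0x7a ^ 0x20); a 0x4a byte collides with PHY_IDLE and is
 * sent as 0x4d 0x6a (PHY_ESC, then 0x4a ^ 0x20).
 */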
/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave, so
 * the max length of the rx byte stream is unpredictable. The driver therefore
 * reads the words one by one and parses each word immediately into the
 * transaction layer buffer. Only one word's worth of phy buffer is used
 * for rx.
*/
#define PHY_BUF_SIZE PHY_TX_MAX
/**
* struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
*
* @spi: spi slave associated with this bridge.
* @word_len: bytes of word for spi transfer.
* @trans_len: length of valid data in trans_buf.
* @phy_len: length of valid data in phy_buf.
* @trans_buf: the bridge buffer for transaction layer data.
* @phy_buf: the bridge buffer for physical layer data.
* @swap_words: the word swapping cb for phy data. NULL if not needed.
*
 * As a device's registers are implemented on the AVMM bus address space, the
 * driver issues formatted requests to the SPI slave to AVMM bus master bridge
 * to perform register accesses.
*/
struct spi_avmm_bridge {
struct spi_device *spi;
unsigned char word_len;
unsigned int trans_len;
unsigned int phy_len;
/* bridge buffer used in translation between protocol layers */
char trans_buf[TRANS_BUF_SIZE];
char phy_buf[PHY_BUF_SIZE];
void (*swap_words)(void *buf, unsigned int len);
};
static void br_swap_words_32(void *buf, unsigned int len)
{
swab32_array(buf, len / 4);
}
/*
* Format transaction layer data in br->trans_buf according to the register
 * access request, and store the valid transaction layer data length in
 * br->trans_len.
*/
static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
u32 *wr_val, u32 count)
{
struct trans_req_header *header;
unsigned int trans_len;
u8 code;
__le32 *data;
int i;
if (is_read) {
if (count == 1)
code = TRANS_CODE_READ;
else
code = TRANS_CODE_SEQ_READ;
} else {
if (count == 1)
code = TRANS_CODE_WRITE;
else
code = TRANS_CODE_SEQ_WRITE;
}
header = (struct trans_req_header *)br->trans_buf;
header->code = code;
header->rsvd = 0;
header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
header->addr = cpu_to_be32(reg);
trans_len = TRANS_REQ_HD_SIZE;
if (!is_read) {
trans_len += SPI_AVMM_VAL_SIZE * count;
if (trans_len > sizeof(br->trans_buf))
return -ENOMEM;
data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);
for (i = 0; i < count; i++)
*data++ = cpu_to_le32(*wr_val++);
}
/* Store valid trans data length for next layer */
br->trans_len = trans_len;
return 0;
}
/*
 * Convert the transaction layer data in br->trans_buf to phy layer data and
 * store it in br->phy_buf. Pad phy_buf so its length is aligned with the SPI
 * BPW, and store the valid phy layer data length in br->phy_len.
*
 * The phy_buf length must be aligned with the SPI BPW. Spare bytes are padded
 * with PHY_IDLE, which the slave simply drops.
*
 * The driver does not simply pad 4a at the tail. The concern is that the
 * driver does not store MISO data during the tx phase, so if it padded 4a at
 * the tail, the slave might be fast enough to respond during the padding
 * time, and those rx bytes would be lost. In the following case, 7a,7c,00
 * would be lost.
* MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
* MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
*
* So the driver moves EOP and bytes after EOP to the end of the aligned size,
* then fill the hole with PHY_IDLE. As following:
* before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
* after pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
 * Then the slave cannot get the entire packet before the tx phase is over,
 * so it cannot respond to anything during it either.
*/
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
unsigned int aligned_phy_len, move_size;
bool need_esc = false;
tb = br->trans_buf;
tb_end = tb + br->trans_len;
pb = br->phy_buf;
pb_limit = pb + ARRAY_SIZE(br->phy_buf);
*pb++ = PKT_SOP;
/*
* The driver doesn't support multiple channels so the channel number
* is always 0.
*/
*pb++ = PKT_CHANNEL;
*pb++ = 0x0;
for (; pb < pb_limit && tb < tb_end; pb++) {
if (need_esc) {
*pb = *tb++ ^ 0x20;
need_esc = false;
continue;
}
/* EOP should be inserted before the last valid char */
if (tb == tb_end - 1 && !pb_eop) {
*pb = PKT_EOP;
pb_eop = pb;
continue;
}
/*
* insert an ESCAPE char if the data value equals any special
* char.
*/
switch (*tb) {
case PKT_SOP:
case PKT_EOP:
case PKT_CHANNEL:
case PKT_ESC:
*pb = PKT_ESC;
need_esc = true;
break;
case PHY_IDLE:
case PHY_ESC:
*pb = PHY_ESC;
need_esc = true;
break;
default:
*pb = *tb++;
break;
}
}
	/* The phy buffer has been used up but transaction layer data remains */
if (tb < tb_end)
return -ENOMEM;
/* Store valid phy data length for spi transfer */
br->phy_len = pb - br->phy_buf;
if (br->word_len == 1)
return 0;
/* Do phy buf padding if word_len > 1 byte. */
aligned_phy_len = ALIGN(br->phy_len, br->word_len);
if (aligned_phy_len > sizeof(br->phy_buf))
return -ENOMEM;
if (aligned_phy_len == br->phy_len)
return 0;
/* move EOP and bytes after EOP to the end of aligned size */
move_size = pb - pb_eop;
memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);
/* fill the hole with PHY_IDLEs */
memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);
/* update the phy data length */
br->phy_len = aligned_phy_len;
return 0;
}
/*
 * In the tx phase, the slave only returns PHY_IDLE (0x4a), so the driver
 * ignores rx during the tx phase.
*/
static int br_do_tx(struct spi_avmm_bridge *br)
{
/* reorder words for spi transfer */
if (br->swap_words)
br->swap_words(br->phy_buf, br->phy_len);
/* send all data in phy_buf */
return spi_write(br->spi, br->phy_buf, br->phy_len);
}
/*
 * This function reads the rx byte stream from SPI word by word and converts
 * it to transaction layer data in br->trans_buf. It also stores the length
 * of the rx transaction layer data in br->trans_len.
*
* The slave may send an unknown number of PHY_IDLEs in rx phase, so we cannot
* prepare a fixed length buffer to receive all of the rx data in a batch. We
 * have to read word by word and convert each word to transaction layer data
 * immediately.
*/
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
bool eop_found = false, channel_found = false, esc_found = false;
bool valid_word = false, last_try = false;
struct device *dev = &br->spi->dev;
char *pb, *tb_limit, *tb = NULL;
unsigned long poll_timeout;
int ret, i;
tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
pb = br->phy_buf;
poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
while (tb < tb_limit) {
ret = spi_read(br->spi, pb, br->word_len);
if (ret)
return ret;
/* reorder the word back */
if (br->swap_words)
br->swap_words(pb, br->word_len);
valid_word = false;
for (i = 0; i < br->word_len; i++) {
/* drop everything before first SOP */
if (!tb && pb[i] != PKT_SOP)
continue;
/* drop PHY_IDLE */
if (pb[i] == PHY_IDLE)
continue;
valid_word = true;
/*
* We don't support multiple channels, so error out if
* a non-zero channel number is found.
*/
if (channel_found) {
if (pb[i] != 0) {
dev_err(dev, "%s channel num != 0\n",
__func__);
return -EFAULT;
}
channel_found = false;
continue;
}
switch (pb[i]) {
case PKT_SOP:
/*
* reset the parsing if a second SOP appears.
*/
tb = br->trans_buf;
eop_found = false;
channel_found = false;
esc_found = false;
break;
case PKT_EOP:
/*
* No special char is expected after ESC char.
* No special char (except ESC & PHY_IDLE) is
* expected after EOP char.
*
* The special chars are all dropped.
*/
if (esc_found || eop_found)
return -EFAULT;
eop_found = true;
break;
case PKT_CHANNEL:
if (esc_found || eop_found)
return -EFAULT;
channel_found = true;
break;
case PKT_ESC:
case PHY_ESC:
if (esc_found)
return -EFAULT;
esc_found = true;
break;
default:
/* Record the normal byte in trans_buf. */
if (esc_found) {
*tb++ = pb[i] ^ 0x20;
esc_found = false;
} else {
*tb++ = pb[i];
}
/*
				 * Once we get the last normal byte after
				 * EOP it is time to finish; normally the
				 * function returns here.
*/
if (eop_found) {
br->trans_len = tb - br->trans_buf;
return 0;
}
}
}
if (valid_word) {
/* update poll timeout when we get valid word */
poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
last_try = false;
} else {
/*
			 * We time out when rx stays invalid for some time.
			 * But it is possible that we were scheduled out for
			 * a long time after a spi_read, so a SW timeout
			 * fires when we are scheduled back in even though
			 * the HW may have worked fine and been ready long
			 * ago. So we do one extra read: if we get a valid
			 * word we continue rx, otherwise a real HW issue
			 * has happened.
*/
if (last_try)
return -ETIMEDOUT;
if (time_after(jiffies, poll_timeout))
last_try = true;
}
}
/*
	 * We have used up the entire transaction layer buffer but could not
	 * find the end of the byte stream.
*/
dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
__func__);
return -EFAULT;
}
/*
 * For read transactions, the avmm bus will directly return register values
 * without a transaction response header.
*/
static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
u32 *val, unsigned int expected_count)
{
unsigned int i, trans_len = br->trans_len;
__le32 *data;
if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
return -EFAULT;
data = (__le32 *)br->trans_buf;
for (i = 0; i < expected_count; i++)
*val++ = le32_to_cpu(*data++);
return 0;
}
/*
* For write transactions, the slave will return a transaction response
* header.
*/
static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
unsigned int expected_count)
{
unsigned int trans_len = br->trans_len;
struct trans_resp_header *resp;
u8 code;
u16 val_len;
if (trans_len != TRANS_RESP_HD_SIZE)
return -EFAULT;
resp = (struct trans_resp_header *)br->trans_buf;
code = resp->r_code ^ 0x80;
val_len = be16_to_cpu(resp->size);
if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
return -EFAULT;
/* error out if the trans code doesn't align with the val size */
if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
(val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
return -EFAULT;
return 0;
}
static int do_reg_access(void *context, bool is_read, unsigned int reg,
unsigned int *value, unsigned int count)
{
struct spi_avmm_bridge *br = context;
int ret;
/* invalidate bridge buffers first */
br->trans_len = 0;
br->phy_len = 0;
ret = br_trans_tx_prepare(br, is_read, reg, value, count);
if (ret)
return ret;
ret = br_pkt_phy_tx_prepare(br);
if (ret)
return ret;
ret = br_do_tx(br);
if (ret)
return ret;
ret = br_do_rx_and_pkt_phy_parse(br);
if (ret)
return ret;
if (is_read)
return br_rd_trans_rx_parse(br, value, count);
else
return br_wr_trans_rx_parse(br, count);
}
static int regmap_spi_avmm_gather_write(void *context,
const void *reg_buf, size_t reg_len,
const void *val_buf, size_t val_len)
{
if (reg_len != SPI_AVMM_REG_SIZE)
return -EINVAL;
if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
return -EINVAL;
return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
val_len / SPI_AVMM_VAL_SIZE);
}
static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
{
if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
return -EINVAL;
return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
data + SPI_AVMM_REG_SIZE,
bytes - SPI_AVMM_REG_SIZE);
}
static int regmap_spi_avmm_read(void *context,
const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
if (reg_len != SPI_AVMM_REG_SIZE)
return -EINVAL;
if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
return -EINVAL;
return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
(val_len / SPI_AVMM_VAL_SIZE));
}
static struct spi_avmm_bridge *
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
struct spi_avmm_bridge *br;
if (!spi)
return ERR_PTR(-ENODEV);
/* Only support BPW == 8 or 32 now. Try 32 BPW first. */
spi->mode = SPI_MODE_1;
spi->bits_per_word = 32;
if (spi_setup(spi)) {
spi->bits_per_word = 8;
if (spi_setup(spi))
return ERR_PTR(-EINVAL);
}
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br)
return ERR_PTR(-ENOMEM);
br->spi = spi;
br->word_len = spi->bits_per_word / 8;
if (br->word_len == 4) {
/*
* The protocol requires little endian byte order but MSB
		 * first. So the driver needs to swap the byte order word by
		 * word if the word length > 1.
*/
br->swap_words = br_swap_words_32;
}
return br;
}
static void spi_avmm_bridge_ctx_free(void *context)
{
kfree(context);
}
static const struct regmap_bus regmap_spi_avmm_bus = {
.write = regmap_spi_avmm_write,
.gather_write = regmap_spi_avmm_gather_write,
.read = regmap_spi_avmm_read,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.free_context = spi_avmm_bridge_ctx_free,
};
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct spi_avmm_bridge *bridge;
struct regmap *map;
bridge = spi_avmm_bridge_ctx_gen(spi);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
map = __regmap_init(&spi->dev, ®map_spi_avmm_bus,
bridge, config, lock_key, lock_name);
if (IS_ERR(map)) {
spi_avmm_bridge_ctx_free(bridge);
return ERR_CAST(map);
}
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);
struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct spi_avmm_bridge *bridge;
struct regmap *map;
bridge = spi_avmm_bridge_ctx_gen(spi);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
map = __devm_regmap_init(&spi->dev, ®map_spi_avmm_bus,
bridge, config, lock_key, lock_name);
if (IS_ERR(map)) {
spi_avmm_bridge_ctx_free(bridge);
return ERR_CAST(map);
}
return map;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
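/*
 * A minimal usage sketch for a client driver. It assumes the usual
 * devm_regmap_init_spi_avmm() wrapper macro from <linux/regmap.h>; the
 * config values and names below are illustrative, not from a real device.
 * Guarded out so it is never built.
 */
#if 0
static const struct regmap_config example_avmm_regmap_config = {
	.reg_bits = 32,		/* AVMM register addresses are 32 bits */
	.val_bits = 32,		/* AVMM register values are 32 bits */
	.reg_stride = 4,
	.max_register = 0x1000,	/* hypothetical end of the address space */
};

static int example_probe(struct spi_device *spi)
{
	struct regmap *map;

	map = devm_regmap_init_spi_avmm(spi, &example_avmm_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Register accesses now go through the bridge layers above. */
	return regmap_write(map, 0x100, 0x12345678);
}
#endif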
MODULE_LICENSE("GPL v2");
| linux-master | drivers/base/regmap/regmap-spi-avmm.c |
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>
#include "internal.h"
struct regmap_debugfs_node {
struct regmap *map;
struct list_head link;
};
static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
return snprintf(NULL, 0, "%x", max_val);
}
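/*
 * Illustrative example of the snprintf(NULL, 0, ...) idiom above: passing a
 * NULL buffer with size 0 makes snprintf() return the number of characters
 * it would have written, so max_val == 0x3fff formats as "3fff" and the
 * function returns 4.
 */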
static ssize_t regmap_name_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct regmap *map = file->private_data;
const char *name = "nodev";
int ret;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (map->dev && map->dev->driver)
name = map->dev->driver->name;
ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
if (ret < 0) {
kfree(buf);
return ret;
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
kfree(buf);
return ret;
}
static const struct file_operations regmap_name_fops = {
.open = simple_open,
.read = regmap_name_read_file,
.llseek = default_llseek,
};
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
struct regmap_debugfs_off_cache *c;
while (!list_empty(&map->debugfs_off_cache)) {
c = list_first_entry(&map->debugfs_off_cache,
struct regmap_debugfs_off_cache,
list);
list_del(&c->list);
kfree(c);
}
}
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
if (regmap_precious(map, reg))
return false;
if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
return false;
return true;
}
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
*/
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int base,
loff_t from,
loff_t *pos)
{
struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
unsigned int i, ret;
unsigned int fpos_offset;
unsigned int reg_offset;
/* Suppress the cache if we're using a subrange */
if (base)
return base;
/*
* If we don't have a cache build one so we don't have to do a
* linear scan each time.
*/
mutex_lock(&map->cache_lock);
i = base;
if (list_empty(&map->debugfs_off_cache)) {
for (; i <= map->max_register; i += map->reg_stride) {
/* Skip unprinted registers, closing off cache entry */
if (!regmap_printable(map, i)) {
if (c) {
c->max = p - 1;
c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
}
continue;
}
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
return base;
}
c->min = p;
c->base_reg = i;
}
p += map->debugfs_tot_len;
}
}
/* Close the last entry off if we didn't scan beyond it */
if (c) {
c->max = p - 1;
c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
}
/*
* This should never happen; we return above if we fail to
* allocate and we should never be in this code if there are
* no registers at all.
*/
WARN_ON(list_empty(&map->debugfs_off_cache));
ret = base;
/* Find the relevant block:offset */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
if (from >= c->min && from <= c->max) {
fpos_offset = from - c->min;
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
mutex_unlock(&map->cache_lock);
return c->base_reg + (reg_offset * map->reg_stride);
}
*pos = c->max;
ret = c->max_reg;
}
mutex_unlock(&map->cache_lock);
return ret;
}
static inline void regmap_calc_tot_len(struct regmap *map,
void *buf, size_t count)
{
/* Calculate the length of a fixed format */
if (!map->debugfs_tot_len) {
map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
map->debugfs_val_len = 2 * map->format.val_bytes;
map->debugfs_tot_len = map->debugfs_reg_len +
map->debugfs_val_len + 3; /* : \n */
}
}
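/*
 * A worked example with illustrative values: for max_register == 0xff and
 * 16-bit values, debugfs_reg_len == 2 and debugfs_val_len == 4, so every
 * dump line "rr: vvvv\n" is 2 + 4 + 3 == 9 bytes; a file offset of 18 then
 * lands exactly at the start of the third printed register.
 */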
static int regmap_next_readable_reg(struct regmap *map, int reg)
{
struct regmap_debugfs_off_cache *c;
int ret = -EINVAL;
if (regmap_printable(map, reg + map->reg_stride)) {
ret = reg + map->reg_stride;
} else {
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
if (reg > c->max_reg)
continue;
if (reg < c->base_reg) {
ret = c->base_reg;
break;
}
}
mutex_unlock(&map->cache_lock);
}
return ret;
}
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
{
size_t buf_pos = 0;
loff_t p = *ppos;
ssize_t ret;
int i;
char *buf;
unsigned int val, start_reg;
if (*ppos < 0 || !count)
return -EINVAL;
if (count > (PAGE_SIZE << MAX_ORDER))
count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
regmap_calc_tot_len(map, buf, count);
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
for (i = start_reg; i >= 0 && i <= to;
i = regmap_next_readable_reg(map, i)) {
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
if (buf_pos + map->debugfs_tot_len > count)
break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
map->debugfs_reg_len, i - from);
buf_pos += map->debugfs_reg_len + 2;
/* Format the value, write all X if we can't read */
ret = regmap_read(map, i, &val);
if (ret == 0)
snprintf(buf + buf_pos, count - buf_pos,
"%.*x", map->debugfs_val_len, val);
else
memset(buf + buf_pos, 'X',
map->debugfs_val_len);
buf_pos += 2 * map->format.val_bytes;
buf[buf_pos++] = '\n';
}
p += map->debugfs_tot_len;
}
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT;
goto out;
}
*ppos += buf_pos;
out:
kfree(buf);
return ret;
}
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct regmap *map = file->private_data;
return regmap_read_debugfs(map, 0, map->max_register, user_buf,
count, ppos);
}
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
* This can be dangerous especially when we have clients such as
* PMICs, therefore don't provide any real compile time configuration option
* for this feature, people who want to use this will need to modify
* the source code directly.
*/
static ssize_t regmap_map_write_file(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[32];
size_t buf_size;
char *start = buf;
unsigned long reg, value;
struct regmap *map = file->private_data;
int ret;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
while (*start == ' ')
start++;
reg = simple_strtoul(start, &start, 16);
while (*start == ' ')
start++;
if (kstrtoul(start, 16, &value))
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
ret = regmap_write(map, reg, value);
if (ret < 0)
return ret;
return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
static const struct file_operations regmap_map_fops = {
.open = simple_open,
.read = regmap_map_read_file,
.write = regmap_map_write_file,
.llseek = default_llseek,
};
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct regmap_range_node *range = file->private_data;
struct regmap *map = range->map;
return regmap_read_debugfs(map, range->range_min, range->range_max,
user_buf, count, ppos);
}
static const struct file_operations regmap_range_fops = {
.open = simple_open,
.read = regmap_range_read_file,
.llseek = default_llseek,
};
static ssize_t regmap_reg_ranges_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct regmap *map = file->private_data;
struct regmap_debugfs_off_cache *c;
loff_t p = 0;
size_t buf_pos = 0;
char *buf;
char *entry;
int ret;
unsigned int entry_len;
if (*ppos < 0 || !count)
return -EINVAL;
if (count > (PAGE_SIZE << MAX_ORDER))
count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!entry) {
kfree(buf);
return -ENOMEM;
}
/* While we are at it, build the register dump cache
* now so the read() operation on the `registers' file
* can benefit from using the cache. We do not care
* about the file position information that is contained
* in the cache, just about the actual register blocks */
regmap_calc_tot_len(map, buf, count);
regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
/* Reset file pointer as the fixed-format of the `registers'
* file is not compatible with the `range' file */
p = 0;
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
c->base_reg, c->max_reg);
if (p >= *ppos) {
if (buf_pos + entry_len > count)
break;
memcpy(buf + buf_pos, entry, entry_len);
buf_pos += entry_len;
}
p += entry_len;
}
mutex_unlock(&map->cache_lock);
kfree(entry);
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT;
goto out_buf;
}
*ppos += buf_pos;
out_buf:
kfree(buf);
return ret;
}
static const struct file_operations regmap_reg_ranges_fops = {
.open = simple_open,
.read = regmap_reg_ranges_read_file,
.llseek = default_llseek,
};
static int regmap_access_show(struct seq_file *s, void *ignored)
{
struct regmap *map = s->private;
int i, reg_len;
reg_len = regmap_calc_reg_len(map->max_register);
for (i = 0; i <= map->max_register; i += map->reg_stride) {
/* Ignore registers which are neither readable nor writable */
if (!regmap_readable(map, i) && !regmap_writeable(map, i))
continue;
/* Format the register */
seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
regmap_readable(map, i) ? 'y' : 'n',
regmap_writeable(map, i) ? 'y' : 'n',
regmap_volatile(map, i) ? 'y' : 'n',
regmap_precious(map, i) ? 'y' : 'n');
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(regmap_access);
static ssize_t regmap_cache_only_write_file(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
bool new_val, require_sync = false;
int err;
err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
if (err)
return count;
err = debugfs_file_get(file->f_path.dentry);
if (err)
return err;
map->lock(map->lock_arg);
if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
} else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
map->cache_only = new_val;
map->unlock(map->lock_arg);
debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
if (err)
dev_err(map->dev, "Failed to sync cache %d\n", err);
}
return count;
}
static const struct file_operations regmap_cache_only_fops = {
.open = simple_open,
.read = debugfs_read_file_bool,
.write = regmap_cache_only_write_file,
};
static ssize_t regmap_cache_bypass_write_file(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
bool new_val;
int err;
err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
if (err)
return count;
err = debugfs_file_get(file->f_path.dentry);
if (err)
return err;
map->lock(map->lock_arg);
if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
} else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
map->cache_bypass = new_val;
map->unlock(map->lock_arg);
debugfs_file_put(file->f_path.dentry);
return count;
}
static const struct file_operations regmap_cache_bypass_fops = {
.open = simple_open,
.read = debugfs_read_file_bool,
.write = regmap_cache_bypass_write_file,
};
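/*
 * Illustrative use of the two cache control files above from user space
 * (the debugfs mount point and the map directory name are assumptions):
 *
 *	echo 1 > /sys/kernel/debug/regmap/<dev>-<name>/cache_only
 *	echo 0 > /sys/kernel/debug/regmap/<dev>-<name>/cache_bypass
 */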
void regmap_debugfs_init(struct regmap *map)
{
struct rb_node *next;
struct regmap_range_node *range_node;
const char *devname = "dummy";
const char *name = map->name;
/*
* Userspace can initiate reads from the hardware over debugfs.
* Normally internal regmap structures and buffers are protected with
* a mutex or a spinlock, but if the regmap owner decided to disable
* all locking mechanisms, this is no longer the case. For safety:
* don't create the debugfs entries if locking is disabled.
*/
if (map->debugfs_disable) {
dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
return;
}
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return;
node->map = map;
mutex_lock(®map_debugfs_early_lock);
list_add(&node->link, ®map_debugfs_early_list);
mutex_unlock(®map_debugfs_early_lock);
return;
}
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
if (map->dev)
devname = dev_name(map->dev);
if (name) {
if (!map->debugfs_name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
if (!map->debugfs_name)
return;
}
name = map->debugfs_name;
} else {
name = devname;
}
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
if (!map->debugfs_name)
return;
name = map->debugfs_name;
dummy_index++;
}
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
debugfs_create_file("name", 0400, map->debugfs,
map, ®map_name_fops);
debugfs_create_file("range", 0400, map->debugfs,
map, ®map_reg_ranges_fops);
if (map->max_register || regmap_readable(map, 0)) {
umode_t registers_mode;
#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
registers_mode = 0600;
#else
registers_mode = 0400;
#endif
debugfs_create_file("registers", registers_mode, map->debugfs,
map, ®map_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
map, ®map_access_fops);
}
if (map->cache_type) {
debugfs_create_file("cache_only", 0600, map->debugfs,
&map->cache_only, ®map_cache_only_fops);
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
&map->cache_dirty);
debugfs_create_file("cache_bypass", 0600, map->debugfs,
&map->cache_bypass,
®map_cache_bypass_fops);
}
/*
* This could interfere with driver operation. Therefore, don't provide
* any real compile time configuration option for this feature. One will
* have to modify the source code directly in order to use it.
*/
#undef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
#ifdef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
debugfs_create_bool("force_write_field", 0600, map->debugfs,
&map->force_write_field);
#endif
next = rb_first(&map->range_tree);
while (next) {
range_node = rb_entry(next, struct regmap_range_node, node);
if (range_node->name)
debugfs_create_file(range_node->name, 0400,
map->debugfs, range_node,
®map_range_fops);
next = rb_next(&range_node->node);
}
if (map->cache_ops && map->cache_ops->debugfs_init)
map->cache_ops->debugfs_init(map);
}
void regmap_debugfs_exit(struct regmap *map)
{
if (map->debugfs) {
debugfs_remove_recursive(map->debugfs);
mutex_lock(&map->cache_lock);
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
map->debugfs_name = NULL;
} else {
struct regmap_debugfs_node *node, *tmp;
mutex_lock(®map_debugfs_early_lock);
list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list,
link) {
if (node->map == map) {
list_del(&node->link);
kfree(node);
}
}
mutex_unlock(®map_debugfs_early_lock);
}
}
void regmap_debugfs_initcall(void)
{
struct regmap_debugfs_node *node, *tmp;
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
mutex_lock(®map_debugfs_early_lock);
list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, link) {
regmap_debugfs_init(node->map);
list_del(&node->link);
kfree(node);
}
mutex_unlock(®map_debugfs_early_lock);
}
| linux-master | drivers/base/regmap/regmap-debugfs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "sysfs.h"
/*
* sysfs support for firmware loader
*/
void __fw_load_abort(struct fw_priv *fw_priv)
{
/*
	 * There is a small window in which a user can write to 'loading'
	 * between the load being done/aborted and the disappearance of
	 * 'loading'.
if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
return;
fw_state_aborted(fw_priv);
}
#ifdef CONFIG_FW_LOADER_USER_HELPER
static ssize_t timeout_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}
/**
* timeout_store() - set number of seconds to wait for firmware
* @class: device class pointer
* @attr: device attribute pointer
* @buf: buffer to scan for timeout value
* @count: number of bytes in @buf
*
* Sets the number of seconds to wait for the firmware. Once
* this expires an error will be returned to the driver and no
* firmware will be provided.
*
* Note: zero means 'wait forever'.
**/
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
__fw_fallback_set_timeout(tmp_loading_timeout);
return count;
}
static CLASS_ATTR_RW(timeout);
static struct attribute *firmware_class_attrs[] = {
&class_attr_timeout.attr,
NULL,
};
ATTRIBUTE_GROUPS(firmware_class);
static int do_firmware_uevent(const struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
return -ENOMEM;
if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
return -ENOMEM;
return 0;
}
static int firmware_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
int err = 0;
mutex_lock(&fw_lock);
if (fw_sysfs->fw_priv)
err = do_firmware_uevent(fw_sysfs, env);
mutex_unlock(&fw_lock);
return err;
}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
if (fw_sysfs->fw_upload_priv)
fw_upload_free(fw_sysfs);
kfree(fw_sysfs);
}
static struct class firmware_class = {
.name = "firmware",
#ifdef CONFIG_FW_LOADER_USER_HELPER
.class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
#endif
.dev_release = fw_dev_release,
};
int register_sysfs_loader(void)
{
int ret = class_register(&firmware_class);
if (ret != 0)
return ret;
return register_firmware_config_sysctl();
}
void unregister_sysfs_loader(void)
{
unregister_firmware_config_sysctl();
class_unregister(&firmware_class);
}
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
int loading = 0;
mutex_lock(&fw_lock);
if (fw_sysfs->fw_priv)
loading = fw_state_is_loading(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
return sysfs_emit(buf, "%d\n", loading);
}
/**
* firmware_loading_store() - set value in the 'loading' control file
* @dev: device pointer
* @attr: device attribute pointer
* @buf: buffer to scan for loading control value
* @count: number of bytes in @buf
*
* The relevant values are:
*
* 1: Start a load, discarding any previous partial load.
* 0: Conclude the load and hand the data to the driver code.
* -1: Conclude the load with an error and discard any written data.
**/
static ssize_t firmware_loading_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
int loading = simple_strtol(buf, NULL, 10);
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
goto out;
switch (loading) {
case 1:
/* discarding any previous partial load */
fw_free_paged_buf(fw_priv);
fw_state_start(fw_priv);
break;
case 0:
if (fw_state_is_loading(fw_priv)) {
int rc;
/*
* Several loading requests may be pending on
			 * the same firmware buf, so let all requests
* see the mapped 'buf->data' once the loading
* is completed.
*/
rc = fw_map_paged_buf(fw_priv);
if (rc)
dev_err(dev, "%s: map pages failed\n",
__func__);
else
rc = security_kernel_post_load_data(fw_priv->data,
fw_priv->size,
LOADING_FIRMWARE,
"blob");
/*
* Same logic as fw_load_abort, only the DONE bit
* is ignored and we set ABORT only on failure.
*/
if (rc) {
fw_state_aborted(fw_priv);
written = rc;
} else {
fw_state_done(fw_priv);
/*
* If this is a user-initiated firmware upload
* then start the upload in a worker thread now.
*/
rc = fw_upload_start(fw_sysfs);
if (rc)
written = rc;
}
break;
}
fallthrough;
default:
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
fallthrough;
case -1:
fw_load_abort(fw_sysfs);
if (fw_sysfs->fw_upload_priv)
fw_state_init(fw_sysfs->fw_priv);
break;
}
out:
mutex_unlock(&fw_lock);
return written;
}
DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
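/*
 * For reference, the classic user-space sequence that drives the 'loading'
 * and 'data' attributes looks like this (paths are illustrative):
 *
 *	echo 1 > /sys/class/firmware/$NAME/loading
 *	cat firmware.bin > /sys/class/firmware/$NAME/data
 *	echo 0 > /sys/class/firmware/$NAME/loading
 */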
static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
loff_t offset, size_t count, bool read)
{
if (read)
memcpy(buffer, fw_priv->data + offset, count);
else
memcpy(fw_priv->data + offset, buffer, count);
}
static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
loff_t offset, size_t count, bool read)
{
while (count) {
int page_nr = offset >> PAGE_SHIFT;
int page_ofs = offset & (PAGE_SIZE - 1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
if (read)
memcpy_from_page(buffer, fw_priv->pages[page_nr],
page_ofs, page_cnt);
else
memcpy_to_page(fw_priv->pages[page_nr], page_ofs,
buffer, page_cnt);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
}
}
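/*
 * A worked example of the chunking above, assuming PAGE_SIZE == 4096: for
 * offset == 4090 and count == 100, the first iteration copies
 * min(4096 - 4090, 100) == 6 bytes within page 0, and the second iteration
 * copies the remaining 94 bytes from the start of page 1.
 */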
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t ret_count;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
if (!fw_priv || fw_state_is_done(fw_priv)) {
ret_count = -ENODEV;
goto out;
}
if (offset > fw_priv->size) {
ret_count = 0;
goto out;
}
if (count > fw_priv->size - offset)
count = fw_priv->size - offset;
ret_count = count;
if (fw_priv->data)
firmware_rw_data(fw_priv, buffer, offset, count, true);
else
firmware_rw(fw_priv, buffer, offset, count, true);
out:
mutex_unlock(&fw_lock);
return ret_count;
}
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
int err;
err = fw_grow_paged_buf(fw_sysfs->fw_priv,
PAGE_ALIGN(min_size) >> PAGE_SHIFT);
if (err)
fw_load_abort(fw_sysfs);
return err;
}
/**
* firmware_data_write() - write method for firmware
* @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
* @offset: buffer offset for write in total data store area
* @count: buffer size
*
* Data written to the 'data' attribute will be later handed to
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t retval;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
if (!fw_priv || fw_state_is_done(fw_priv)) {
retval = -ENODEV;
goto out;
}
if (fw_priv->data) {
if (offset + count > fw_priv->allocated_size) {
retval = -ENOMEM;
goto out;
}
firmware_rw_data(fw_priv, buffer, offset, count, false);
retval = count;
} else {
retval = fw_realloc_pages(fw_sysfs, offset + count);
if (retval)
goto out;
retval = count;
firmware_rw(fw_priv, buffer, offset, count, false);
}
fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
mutex_unlock(&fw_lock);
return retval;
}
static struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
.write = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
&dev_attr_loading.attr,
#ifdef CONFIG_FW_UPLOAD
&dev_attr_cancel.attr,
&dev_attr_status.attr,
&dev_attr_error.attr,
&dev_attr_remaining_size.attr,
#endif
NULL
};
static struct bin_attribute *fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
.bin_attrs = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
};
static const struct attribute_group *fw_dev_attr_groups[] = {
&fw_dev_attr_group,
NULL
};
struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
struct device *device, u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
struct device *f_dev;
fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
if (!fw_sysfs) {
fw_sysfs = ERR_PTR(-ENOMEM);
goto exit;
}
fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
fw_sysfs->fw = firmware;
f_dev = &fw_sysfs->dev;
device_initialize(f_dev);
dev_set_name(f_dev, "%s", fw_name);
f_dev->parent = device;
f_dev->class = &firmware_class;
f_dev->groups = fw_dev_attr_groups;
exit:
return fw_sysfs;
}
| linux-master | drivers/base/firmware_loader/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include "fallback.h"
#include "firmware.h"
/*
* firmware fallback configuration table
*/
struct firmware_fallback_config fw_fallback_config = {
.force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
.loading_timeout = 60,
.old_timeout = 60,
};
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
static struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "ignore_sysfs_fallback",
.data = &fw_fallback_config.ignore_sysfs_fallback,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
static struct ctl_table_header *firmware_config_sysct_table_header;
int register_firmware_config_sysctl(void)
{
firmware_config_sysct_table_header =
register_sysctl("kernel/firmware_config",
firmware_config_table);
if (!firmware_config_sysct_table_header)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
void unregister_firmware_config_sysctl(void)
{
unregister_sysctl_table(firmware_config_sysct_table_header);
firmware_config_sysct_table_header = NULL;
}
EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
#endif /* CONFIG_SYSCTL */
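/*
 * Illustrative run-time usage of the knobs above; the sysctl path follows
 * from the register_sysctl() call:
 *
 *	sysctl kernel.firmware_config.force_sysfs_fallback=1
 *	sysctl kernel.firmware_config.ignore_sysfs_fallback=0
 */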
| linux-master | drivers/base/firmware_loader/fallback_table.c |