// SPDX-License-Identifier: GPL-2.0
/*
* Test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
#define FLAG_USE_DMA BIT(0)
#define TIMER_RESOLUTION 1
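/*
 * Host/endpoint handshake: the host writes one of the COMMAND_* values to
 * the command register in the test BAR; pci_epf_test_cmd_handler(), polled
 * roughly every millisecond via a delayed work item, executes the command,
 * reports the result through the STATUS_* bits and raises the requested
 * IRQ type. The host-side counterpart is the pci_endpoint_test misc driver.
 */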
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
size_t msix_table_offset;
struct delayed_work cmd_handler;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
struct dma_chan *transfer_chan;
dma_cookie_t transfer_cookie;
enum dma_status transfer_status;
struct completion transfer_complete;
bool dma_supported;
bool dma_private;
const struct pci_epc_features *epc_features;
};
struct pci_epf_test_reg {
u32 magic;
u32 command;
u32 status;
u64 src_addr;
u64 dst_addr;
u32 size;
u32 checksum;
u32 irq_type;
u32 irq_number;
u32 flags;
} __packed;
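/*
 * This register block is exposed to the host through test_reg_bar, so its
 * layout is part of the host-visible ABI and must stay in sync with the
 * host-side driver; hence __packed.
 */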
static struct pci_epf_header test_header = {
.vendorid = PCI_ANY_ID,
.deviceid = PCI_ANY_ID,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
static void pci_epf_test_dma_callback(void *param)
{
struct pci_epf_test *epf_test = param;
struct dma_tx_state state;
epf_test->transfer_status =
dmaengine_tx_status(epf_test->transfer_chan,
epf_test->transfer_cookie, &state);
if (epf_test->transfer_status == DMA_COMPLETE ||
epf_test->transfer_status == DMA_ERROR)
complete(&epf_test->transfer_complete);
}
/**
* pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
* data between PCIe EP and remote PCIe RC
* @epf_test: the EPF test device that performs the data transfer operation
* @dma_dst: The destination address of the data transfer. It can be a physical
* address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
* @dma_src: The source address of the data transfer. It can be a physical
* address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
* @len: The size of the data transfer
* @dma_remote: remote RC physical address
* @dir: DMA transfer direction
*
* Function that uses dmaengine API to transfer data between PCIe EP and remote
* PCIe RC. The source and destination address can be a physical address given
* by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
*
* The function returns '0' on success and a negative value on failure.
*/
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, dma_addr_t dma_remote,
enum dma_transfer_direction dir)
{
struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
epf_test->dma_chan_tx : epf_test->dma_chan_rx;
dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct pci_epf *epf = epf_test->epf;
struct dma_async_tx_descriptor *tx;
struct dma_slave_config sconf = {};
struct device *dev = &epf->dev;
int ret;
if (IS_ERR_OR_NULL(chan)) {
dev_err(dev, "Invalid DMA memcpy channel\n");
return -EINVAL;
}
if (epf_test->dma_private) {
sconf.direction = dir;
if (dir == DMA_MEM_TO_DEV)
sconf.dst_addr = dma_remote;
else
sconf.src_addr = dma_remote;
if (dmaengine_slave_config(chan, &sconf)) {
dev_err(dev, "DMA slave config fail\n");
return -EIO;
}
tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
flags);
} else {
tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
flags);
}
if (!tx) {
dev_err(dev, "Failed to prepare DMA memcpy\n");
return -EIO;
}
reinit_completion(&epf_test->transfer_complete);
epf_test->transfer_chan = chan;
tx->callback = pci_epf_test_dma_callback;
tx->callback_param = epf_test;
epf_test->transfer_cookie = dmaengine_submit(tx);
ret = dma_submit_error(epf_test->transfer_cookie);
if (ret) {
dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
goto terminate;
}
dma_async_issue_pending(chan);
ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
if (ret < 0) {
dev_err(dev, "DMA wait_for_completion interrupted\n");
goto terminate;
}
if (epf_test->transfer_status == DMA_ERROR) {
dev_err(dev, "DMA transfer failed\n");
ret = -EIO;
}
terminate:
dmaengine_terminate_sync(chan);
return ret;
}
struct epf_dma_filter {
struct device *dev;
u32 dma_mask;
};
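/*
 * Filter callback for dma_request_channel(): accept only channels that
 * belong to the endpoint controller's DMA device and that support the
 * requested transfer direction (DMA_DEV_TO_MEM or DMA_MEM_TO_DEV).
 */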
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
struct epf_dma_filter *filter = node;
struct dma_slave_caps caps;
memset(&caps, 0, sizeof(caps));
dma_get_slave_caps(chan, &caps);
return chan->device->dev == filter->dev
&& (filter->dma_mask & caps.directions);
}
/**
* pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
* @epf_test: the EPF test device that performs data transfer operation
*
* Function to initialize EPF test DMA channel.
*/
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct epf_dma_filter filter;
struct dma_chan *dma_chan;
dma_cap_mask_t mask;
int ret;
filter.dev = epf->epc->dev.parent;
filter.dma_mask = BIT(DMA_DEV_TO_MEM);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
if (!dma_chan) {
dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
goto fail_back_tx;
}
epf_test->dma_chan_rx = dma_chan;
filter.dma_mask = BIT(DMA_MEM_TO_DEV);
dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
if (!dma_chan) {
dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
goto fail_back_rx;
}
epf_test->dma_chan_tx = dma_chan;
epf_test->dma_private = true;
init_completion(&epf_test->transfer_complete);
return 0;
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
epf_test->dma_chan_tx = NULL;
fail_back_tx:
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
dma_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get DMA channel\n");
return ret;
}
init_completion(&epf_test->transfer_complete);
epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
return 0;
}
/**
* pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
* @epf_test: the EPF test device that performs data transfer operation
*
* Helper to cleanup EPF test DMA channel.
*/
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
if (!epf_test->dma_supported)
return;
dma_release_channel(epf_test->dma_chan_tx);
if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
epf_test->dma_chan_tx = NULL;
epf_test->dma_chan_rx = NULL;
return;
}
dma_release_channel(epf_test->dma_chan_rx);
epf_test->dma_chan_rx = NULL;
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
const char *op, u64 size,
struct timespec64 *start,
struct timespec64 *end, bool dma)
{
struct timespec64 ts = timespec64_sub(*end, *start);
u64 rate = 0, ns;
/* calculate the rate */
ns = timespec64_to_ns(&ts);
if (ns)
rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
dev_info(&epf_test->epf->dev,
"%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
op, size, dma ? "YES" : "NO",
(u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
int ret;
void __iomem *src_addr;
void __iomem *dst_addr;
phys_addr_t src_phys_addr;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
if (!src_addr) {
dev_err(dev, "Failed to allocate source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
}
ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_src_addr;
}
dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
if (!dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err_src_map_addr;
}
ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_dst_addr;
}
ktime_get_ts64(&start);
if (reg->flags & FLAG_USE_DMA) {
if (epf_test->dma_private) {
dev_err(dev, "Cannot transfer data using DMA\n");
ret = -EINVAL;
goto err_map_addr;
}
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
src_phys_addr, reg->size, 0,
DMA_MEM_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
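/*
 * No DMA: bounce through an intermediate kernel buffer, since the
 * source and destination are both __iomem windows and cannot be
 * handed to a plain memcpy().
 */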
void *buf;
buf = kzalloc(reg->size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_map_addr;
}
memcpy_fromio(buf, src_addr, reg->size);
memcpy_toio(dst_addr, buf, reg->size);
kfree(buf);
}
ktime_get_ts64(&end);
pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
reg->flags & FLAG_USE_DMA);
err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
err:
if (!ret)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
}
static void pci_epf_test_read(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
int ret;
void __iomem *src_addr;
void *buf;
u32 crc32;
phys_addr_t phys_addr;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
struct device *dma_dev = epf->epc->dev.parent;
src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!src_addr) {
dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
}
ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_addr;
}
buf = kzalloc(reg->size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_map_addr;
}
if (reg->flags & FLAG_USE_DMA) {
dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dst_phys_addr)) {
dev_err(dev, "Failed to map destination buffer addr\n");
ret = -ENOMEM;
goto err_dma_map;
}
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
phys_addr, reg->size,
reg->src_addr, DMA_DEV_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
DMA_FROM_DEVICE);
} else {
ktime_get_ts64(&start);
memcpy_fromio(buf, src_addr, reg->size);
ktime_get_ts64(&end);
}
pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
reg->flags & FLAG_USE_DMA);
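/*
 * Verify the transferred data against the CRC32 the host computed over
 * the source buffer and stored in the checksum register before issuing
 * COMMAND_READ.
 */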
crc32 = crc32_le(~0, buf, reg->size);
if (crc32 != reg->checksum)
ret = -EIO;
err_dma_map:
kfree(buf);
err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
err:
if (!ret)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
}
static void pci_epf_test_write(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
int ret;
void __iomem *dst_addr;
void *buf;
phys_addr_t phys_addr;
phys_addr_t src_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
struct device *dma_dev = epf->epc->dev.parent;
dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!dst_addr) {
dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err;
}
ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_addr;
}
buf = kzalloc(reg->size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_map_addr;
}
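/*
 * Fill the buffer with random data and publish its CRC32 in the checksum
 * register so that the host can verify what lands in its memory once
 * COMMAND_WRITE completes.
 */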
get_random_bytes(buf, reg->size);
reg->checksum = crc32_le(~0, buf, reg->size);
if (reg->flags & FLAG_USE_DMA) {
src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, src_phys_addr)) {
dev_err(dev, "Failed to map source buffer addr\n");
ret = -ENOMEM;
goto err_dma_map;
}
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test, phys_addr,
src_phys_addr, reg->size,
reg->dst_addr,
DMA_MEM_TO_DEV);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
dma_unmap_single(dma_dev, src_phys_addr, reg->size,
DMA_TO_DEVICE);
} else {
ktime_get_ts64(&start);
memcpy_toio(dst_addr, buf, reg->size);
ktime_get_ts64(&end);
}
pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
reg->flags & FLAG_USE_DMA);
/*
* Wait 1ms in order for the write to complete. Without this delay, an
* L3 error is observed in the host system.
*/
usleep_range(1000, 2000);
err_dma_map:
kfree(buf);
err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
err:
if (!ret)
reg->status |= STATUS_WRITE_SUCCESS;
else
reg->status |= STATUS_WRITE_FAIL;
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
u32 status = reg->status | STATUS_IRQ_RAISED;
int count;
/*
* Set the status before raising the IRQ to ensure that the host sees
* the updated value when it gets the IRQ.
*/
WRITE_ONCE(reg->status, status);
switch (reg->irq_type) {
case IRQ_TYPE_LEGACY:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
reg->irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_EPC_IRQ_MSI, reg->irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
reg->irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_EPC_IRQ_MSIX, reg->irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
break;
}
}
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
u32 command;
struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
cmd_handler.work);
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
command = READ_ONCE(reg->command);
if (!command)
goto reset_handler;
WRITE_ONCE(reg->command, 0);
WRITE_ONCE(reg->status, 0);
if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
goto reset_handler;
}
if (reg->irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
switch (command) {
case COMMAND_RAISE_LEGACY_IRQ:
case COMMAND_RAISE_MSI_IRQ:
case COMMAND_RAISE_MSIX_IRQ:
pci_epf_test_raise_irq(epf_test, reg);
break;
case COMMAND_WRITE:
pci_epf_test_write(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
case COMMAND_READ:
pci_epf_test_read(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
case COMMAND_COPY:
pci_epf_test_copy(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
default:
dev_err(dev, "Invalid command 0x%x\n", command);
break;
}
reset_handler:
queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
msecs_to_jiffies(1));
}
static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
struct pci_epf_bar *epf_bar;
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
epf_bar);
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
}
}
}
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
int bar, add;
int ret;
struct pci_epf_bar *epf_bar;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
const struct pci_epc_features *epc_features;
epc_features = epf_test->epc_features;
for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
/*
* pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
* if the specific implementation required a 64-bit BAR,
* even if we only requested a 32-bit BAR.
*/
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
}
}
return 0;
}
static int pci_epf_test_core_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
const struct pci_epc_features *epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
bool msix_capable = false;
bool msi_capable = true;
int ret;
epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
if (ret) {
dev_err(dev, "Configuration header write failed\n");
return ret;
}
}
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
if (msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
}
}
if (msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
dev_err(dev, "MSI-X configuration failed\n");
return ret;
}
}
return 0;
}
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
msecs_to_jiffies(1));
return 0;
}
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
.core_init = pci_epf_test_core_init,
.link_up = pci_epf_test_link_up,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
bool msix_capable;
void *base;
int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
const struct pci_epc_features *epc_features;
size_t test_reg_size;
epc_features = epf_test->epc_features;
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
msix_capable = epc_features->msix_capable;
if (msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
epf_test->msix_table_offset = test_reg_bar_size;
/* Align to QWORD or 8 Bytes */
pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
}
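/*
 * Resulting layout of the test BAR when MSI-X is supported:
 * [test registers][MSI-X table][MSI-X PBA], with the table starting
 * right after the 128-byte-aligned register block.
 */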
test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
if (epc_features->bar_fixed_size[test_reg_bar]) {
if (test_reg_size > bar_size[test_reg_bar])
return -ENOMEM;
test_reg_size = bar_size[test_reg_bar];
}
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
epc_features->align, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
}
epf_test->reg[test_reg_bar] = base;
for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
if (bar == test_reg_bar)
continue;
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
epc_features->align,
PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
}
return 0;
}
static void pci_epf_configure_bar(struct pci_epf *epf,
const struct pci_epc_features *epc_features)
{
struct pci_epf_bar *epf_bar;
bool bar_fixed_64bit;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
if (bar_fixed_64bit)
epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
if (epc_features->bar_fixed_size[i])
bar_size[i] = epc_features->bar_fixed_size[i];
}
}
static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
bool linkup_notifier = false;
bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
}
linkup_notifier = epc_features->linkup_notifier;
core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
ret = pci_epf_test_alloc_space(epf);
if (ret)
return ret;
if (!core_init_notifier) {
ret = pci_epf_test_core_init(epf);
if (ret)
return ret;
}
epf_test->dma_supported = true;
ret = pci_epf_test_init_dma_chan(epf_test);
if (ret)
epf_test->dma_supported = false;
if (!linkup_notifier && !core_init_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
return 0;
}
static const struct pci_epf_device_id pci_epf_test_ids[] = {
{
.name = "pci_epf_test",
},
{},
};
static int pci_epf_test_probe(struct pci_epf *epf,
const struct pci_epf_device_id *id)
{
struct pci_epf_test *epf_test;
struct device *dev = &epf->dev;
epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
if (!epf_test)
return -ENOMEM;
epf->header = &test_header;
epf_test->epf = epf;
INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
epf->event_ops = &pci_epf_test_event_ops;
epf_set_drvdata(epf, epf_test);
return 0;
}
static struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
};
static struct pci_epf_driver test_driver = {
.driver.name = "pci_epf_test",
.probe = pci_epf_test_probe,
.id_table = pci_epf_test_ids,
.ops = &ops,
.owner = THIS_MODULE,
};
static int __init pci_epf_test_init(void)
{
int ret;
kpcitest_workqueue = alloc_workqueue("kpcitest",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!kpcitest_workqueue) {
pr_err("Failed to allocate the kpcitest work queue\n");
return -ENOMEM;
}
ret = pci_epf_register_driver(&test_driver);
if (ret) {
destroy_workqueue(kpcitest_workqueue);
pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
return 0;
}
module_init(pci_epf_test_init);
static void __exit pci_epf_test_exit(void)
{
if (kpcitest_workqueue)
destroy_workqueue(kpcitest_workqueue);
pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/pci/endpoint/functions/pci-epf-test.c */
// SPDX-License-Identifier: GPL-2.0
/*
* PCI EPF driver for MHI Endpoint devices
*
* Copyright (C) 2023 Linaro Ltd.
* Author: Manivannan Sadhasivam <[email protected]>
*/
#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#define MHI_VERSION_1_0 0x01000000
#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)
struct pci_epf_mhi_ep_info {
const struct mhi_ep_cntrl_config *config;
struct pci_epf_header *epf_header;
enum pci_barno bar_num;
u32 epf_flags;
u32 msi_count;
u32 mru;
u32 flags;
};
#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction) \
{ \
.num = ch_num, \
.name = ch_name, \
.dir = direction, \
}
#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)
#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)
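/*
 * Channels are defined in pairs: MHI_EP_CHANNEL_CONFIG_UL() for uplink
 * (DMA_TO_DEVICE, even channel IDs) and MHI_EP_CHANNEL_CONFIG_DL() for
 * downlink (DMA_FROM_DEVICE, odd channel IDs).
 */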
static const struct mhi_ep_channel_config mhi_v1_channels[] = {
MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};
static const struct mhi_ep_cntrl_config mhi_v1_config = {
.max_channels = 128,
.num_channels = ARRAY_SIZE(mhi_v1_channels),
.ch_cfg = mhi_v1_channels,
.mhi_version = MHI_VERSION_1_0,
};
static struct pci_epf_header sdx55_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
.deviceid = 0x0306,
.baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
.subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
static const struct pci_epf_mhi_ep_info sdx55_info = {
.config = &mhi_v1_config,
.epf_header = &sdx55_header,
.bar_num = BAR_0,
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
};
static struct pci_epf_header sm8450_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
.deviceid = 0x0306,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
static const struct pci_epf_mhi_ep_info sm8450_info = {
.config = &mhi_v1_config,
.epf_header = &sm8450_header,
.bar_num = BAR_0,
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
.flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {
const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
struct mhi_ep_cntrl mhi_cntrl;
struct pci_epf *epf;
struct mutex lock;
void __iomem *mmio;
resource_size_t mmio_phys;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
u32 mmio_size;
int irq;
};
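/*
 * Endpoint controllers typically require outbound iATU mappings to be
 * aligned to epc_features->align, so transfers map an aligned window and
 * index into it. get_align_offset() returns how far the requested PCI
 * address sits past the aligned base.
 */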
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
return addr & (epf_mhi->epc_features->align - 1);
}
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
phys_addr_t *paddr, void __iomem **vaddr,
size_t offset, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct pci_epf *epf = epf_mhi->epf;
struct pci_epc *epc = epf->epc;
int ret;
*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
if (!*vaddr)
return -ENOMEM;
ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
pci_addr - offset, size + offset);
if (ret) {
pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
return ret;
}
*paddr = *paddr + offset;
*vaddr = *vaddr + offset;
return 0;
}
static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
phys_addr_t *paddr, void __iomem **vaddr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
size_t offset = get_align_offset(epf_mhi, pci_addr);
return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
offset, size);
}
static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
u64 pci_addr, phys_addr_t paddr,
void __iomem *vaddr, size_t offset,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct pci_epf *epf = epf_mhi->epf;
struct pci_epc *epc = epf->epc;
pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
size + offset);
}
static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
phys_addr_t paddr, void __iomem *vaddr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
size_t offset = get_align_offset(epf_mhi, pci_addr);
__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
size);
}
static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct pci_epf *epf = epf_mhi->epf;
struct pci_epc *epc = epf->epc;
/*
* MHI supplies 0-based MSI vectors but the API expects the vector
* number to start from 1, so we need to increment the vector by 1.
*/
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
vector + 1);
}
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
void *to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
size_t offset = get_align_offset(epf_mhi, from);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
offset, size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
memcpy_fromio(to, tre_buf, size);
__pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
size);
mutex_unlock(&epf_mhi->lock);
return 0;
}
static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
void *from, u64 to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
size_t offset = get_align_offset(epf_mhi, to);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
offset, size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
memcpy_toio(tre_buf, from, size);
__pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
size);
mutex_unlock(&epf_mhi->lock);
return 0;
}
static void pci_epf_mhi_dma_callback(void *param)
{
complete(param);
}
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
void *to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct dma_chan *chan = epf_mhi->dma_chan_rx;
struct device *dev = &epf_mhi->epf->dev;
DECLARE_COMPLETION_ONSTACK(complete);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config config = {};
dma_cookie_t cookie;
dma_addr_t dst_addr;
int ret;
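/*
 * For small transfers the DMA setup cost likely outweighs any gain, so
 * fall back to a CPU copy through an iATU mapping (the write path below
 * applies the same cutoff).
 */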
if (size < SZ_4K)
return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_DEV_TO_MEM;
config.src_addr = from;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
dev_err(dev, "Failed to configure DMA channel\n");
goto err_unlock;
}
dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
ret = dma_mapping_error(dma_dev, dst_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
ret = -EIO;
goto err_unmap;
}
desc->callback = pci_epf_mhi_dma_callback;
desc->callback_param = &complete;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(dev, "Failed to do DMA submit\n");
goto err_unmap;
}
dma_async_issue_pending(chan);
ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
if (!ret) {
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
} else {
/* wait_for_completion_timeout() returns remaining jiffies on success */
ret = 0;
}
err_unmap:
dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
return ret;
}
static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
u64 to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct dma_chan *chan = epf_mhi->dma_chan_tx;
struct device *dev = &epf_mhi->epf->dev;
DECLARE_COMPLETION_ONSTACK(complete);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config config = {};
dma_cookie_t cookie;
dma_addr_t src_addr;
int ret;
if (size < SZ_4K)
return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_MEM_TO_DEV;
config.dst_addr = to;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
dev_err(dev, "Failed to configure DMA channel\n");
goto err_unlock;
}
src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
ret = dma_mapping_error(dma_dev, src_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
ret = -EIO;
goto err_unmap;
}
desc->callback = pci_epf_mhi_dma_callback;
desc->callback_param = &complete;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(dev, "Failed to do DMA submit\n");
goto err_unmap;
}
dma_async_issue_pending(chan);
ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
if (!ret) {
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
} else {
/* wait_for_completion_timeout() returns remaining jiffies on success */
ret = 0;
}
err_unmap:
dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
return ret;
}
struct epf_dma_filter {
struct device *dev;
u32 dma_mask;
};
static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
struct epf_dma_filter *filter = node;
struct dma_slave_caps caps;
memset(&caps, 0, sizeof(caps));
dma_get_slave_caps(chan, &caps);
return chan->device->dev == filter->dev && filter->dma_mask &
caps.directions;
}
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct device *dev = &epf_mhi->epf->dev;
struct epf_dma_filter filter;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
filter.dev = dma_dev;
filter.dma_mask = BIT(DMA_MEM_TO_DEV);
epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
dev_err(dev, "Failed to request tx channel\n");
return -ENODEV;
}
filter.dma_mask = BIT(DMA_DEV_TO_MEM);
epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
dev_err(dev, "Failed to request rx channel\n");
dma_release_channel(epf_mhi->dma_chan_tx);
epf_mhi->dma_chan_tx = NULL;
return -ENODEV;
}
return 0;
}
static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
dma_release_channel(epf_mhi->dma_chan_tx);
dma_release_channel(epf_mhi->dma_chan_rx);
epf_mhi->dma_chan_tx = NULL;
epf_mhi->dma_chan_rx = NULL;
}
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
int ret;
epf_bar->phys_addr = epf_mhi->mmio_phys;
epf_bar->size = epf_mhi->mmio_size;
epf_bar->barno = info->bar_num;
epf_bar->flags = info->epf_flags;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "Failed to set BAR: %d\n", ret);
return ret;
}
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
order_base_2(info->msi_count));
if (ret) {
dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
return ret;
}
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
epf->header);
if (ret) {
dev_err(dev, "Failed to set Configuration header: %d\n", ret);
return ret;
}
epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epf_mhi->epc_features)
return -ENODATA;
return 0;
}
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
int ret;
if (info->flags & MHI_EPF_USE_DMA) {
ret = pci_epf_mhi_dma_init(epf_mhi);
if (ret) {
dev_err(dev, "Failed to initialize DMA: %d\n", ret);
return ret;
}
}
mhi_cntrl->mmio = epf_mhi->mmio;
mhi_cntrl->irq = epf_mhi->irq;
mhi_cntrl->mru = info->mru;
/* Assign the struct dev of PCI EP as MHI controller device */
mhi_cntrl->cntrl_dev = epc->dev.parent;
mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
if (info->flags & MHI_EPF_USE_DMA) {
mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
} else {
mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
}
/* Register the MHI EP controller */
ret = mhi_ep_register_controller(mhi_cntrl, info->config);
if (ret) {
dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
return ret;
}
return 0;
}
static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
return 0;
}
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
struct device *dev = &epf->dev;
int ret;
/*
* Power up the MHI EP stack if link is up and stack is in power down
* state.
*/
if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
ret = mhi_ep_power_up(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
}
return 0;
}
static int pci_epf_mhi_bind(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
struct platform_device *pdev = to_platform_device(epc->dev.parent);
struct resource *res;
int ret;
/* Get MMIO base address from Endpoint controller */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
if (!res)
return -ENODEV;
epf_mhi->mmio_phys = res->start;
epf_mhi->mmio_size = resource_size(res);
epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
if (!epf_mhi->mmio)
return -ENOMEM;
ret = platform_get_irq_byname(pdev, "doorbell");
if (ret < 0) {
iounmap(epf_mhi->mmio);
return ret;
}
epf_mhi->irq = ret;
return 0;
}
static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
struct pci_epc *epc = epf->epc;
/*
* Forcefully power down the MHI EP stack. The only way to bring the MHI
* EP stack back to a working state after a subsequent bind is to get BME
* from the host.
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
iounmap(epf_mhi->mmio);
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}
static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
.core_init = pci_epf_mhi_core_init,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
.bme = pci_epf_mhi_bme,
};
static int pci_epf_mhi_probe(struct pci_epf *epf,
const struct pci_epf_device_id *id)
{
struct pci_epf_mhi_ep_info *info =
(struct pci_epf_mhi_ep_info *)id->driver_data;
struct pci_epf_mhi *epf_mhi;
struct device *dev = &epf->dev;
epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
if (!epf_mhi)
return -ENOMEM;
epf->header = info->epf_header;
epf_mhi->info = info;
epf_mhi->epf = epf;
epf->event_ops = &pci_epf_mhi_event_ops;
mutex_init(&epf_mhi->lock);
epf_set_drvdata(epf, epf_mhi);
return 0;
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
{ .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
{ .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};
static struct pci_epf_ops pci_epf_mhi_ops = {
.unbind = pci_epf_mhi_unbind,
.bind = pci_epf_mhi_bind,
};
static struct pci_epf_driver pci_epf_mhi_driver = {
.driver.name = "pci_epf_mhi",
.probe = pci_epf_mhi_probe,
.id_table = pci_epf_mhi_ids,
.ops = &pci_epf_mhi_ops,
.owner = THIS_MODULE,
};
static int __init pci_epf_mhi_init(void)
{
return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);
static void __exit pci_epf_mhi_exit(void)
{
pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);
MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_LICENSE("GPL");
/* linux-master: drivers/pci/endpoint/functions/pci-epf-mhi.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Endpoint Function Driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
* Author: Kishon Vijay Abraham I <[email protected]>
*/
/*
* The PCI NTB function driver configures the SoC with multiple PCIe Endpoint
* (EP) controller instances (see diagram below) in such a way that
* transactions from one EP controller are routed to the other EP controller.
* Once PCI NTB function driver configures the SoC with multiple EP instances,
* HOST1 and HOST2 can communicate with each other using SoC as a bridge.
*
* +-------------+ +-------------+
* | | | |
* | HOST1 | | HOST2 |
* | | | |
* +------^------+ +------^------+
* | |
* | |
* +---------|-------------------------------------------------|---------+
* | +------v------+ +------v------+ |
* | | | | | |
* | | EP | | EP | |
* | | CONTROLLER1 | | CONTROLLER2 | |
* | | <-----------------------------------> | |
* | | | | | |
* | | | | | |
* | | | SoC With Multiple EP Instances | | |
* | | | (Configured using NTB Function) | | |
* | +-------------+ +-------------+ |
* +---------------------------------------------------------------------+
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
static struct workqueue_struct *kpcintb_workqueue;
#define COMMAND_CONFIGURE_DOORBELL 1
#define COMMAND_TEARDOWN_DOORBELL 2
#define COMMAND_CONFIGURE_MW 3
#define COMMAND_TEARDOWN_MW 4
#define COMMAND_LINK_UP 5
#define COMMAND_LINK_DOWN 6
#define COMMAND_STATUS_OK 1
#define COMMAND_STATUS_ERROR 2
#define LINK_STATUS_UP BIT(0)
#define SPAD_COUNT 64
#define DB_COUNT 4
#define NTB_MW_OFFSET 2
#define DB_COUNT_MASK GENMASK(15, 0)
#define MSIX_ENABLE BIT(16)
#define MAX_DB_COUNT 32
#define MAX_MW 4
enum epf_ntb_bar {
BAR_CONFIG,
BAR_PEER_SPAD,
BAR_DB_MW1,
BAR_MW2,
BAR_MW3,
BAR_MW4,
};
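/*
 * BAR usage on each endpoint controller instance: BAR_CONFIG holds the
 * epf_ntb_ctrl block (with the local scratchpads at ctrl->spad_offset),
 * BAR_PEER_SPAD exposes the peer's scratchpads, BAR_DB_MW1 combines the
 * doorbell region with memory window 1, and BAR_MW2..BAR_MW4 map the
 * remaining memory windows.
 */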
struct epf_ntb {
u32 num_mws;
u32 db_count;
u32 spad_count;
struct pci_epf *epf;
u64 mws_size[MAX_MW];
struct config_group group;
struct epf_ntb_epc *epc[2];
};
#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
struct epf_ntb_epc {
u8 func_no;
u8 vfunc_no;
bool linkup;
bool is_msix;
int msix_bar;
u32 spad_size;
struct pci_epc *epc;
struct epf_ntb *epf_ntb;
void __iomem *mw_addr[6];
size_t msix_table_offset;
struct epf_ntb_ctrl *reg;
struct pci_epf_bar *epf_bar;
enum pci_barno epf_ntb_bar[6];
struct delayed_work cmd_handler;
enum pci_epc_interface_type type;
const struct pci_epc_features *epc_features;
};
struct epf_ntb_ctrl {
u32 command;
u32 argument;
u16 command_status;
u16 link_status;
u32 topology;
u64 addr;
u64 size;
u32 num_mws;
u32 mw1_offset;
u32 spad_offset;
u32 spad_count;
u32 db_entry_size;
u32 db_data[MAX_DB_COUNT];
u32 db_offset[MAX_DB_COUNT];
} __packed;
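/*
 * Control region exposed to the host through BAR_CONFIG. The layout is
 * mirrored by the host-side NTB driver (ntb_hw_epf), so it is __packed
 * and must not be reordered.
 */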
static struct pci_epf_header epf_ntb_header = {
.vendorid = PCI_ANY_ID,
.deviceid = PCI_ANY_ID,
.baseclass_code = PCI_BASE_CLASS_MEMORY,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
/**
* epf_ntb_link_up() - Raise link_up interrupt to both the hosts
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @link_up: true or false indicating Link is UP or Down
*
* Once the NTB function in HOST1 and the NTB function in HOST2 invoke
* ntb_link_enable(), this NTB function driver will trigger a link event to
* the NTB client in both the hosts.
*/
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
enum pci_epc_interface_type type;
enum pci_epc_irq_type irq_type;
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
struct pci_epc *epc;
u8 func_no, vfunc_no;
bool is_msix;
int ret;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
is_msix = ntb_epc->is_msix;
ctrl = ntb_epc->reg;
if (link_up)
ctrl->link_status |= LINK_STATUS_UP;
else
ctrl->link_status &= ~LINK_STATUS_UP;
irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to raise Link Up IRQ\n",
pci_epc_interface_string(type));
return ret;
}
}
return 0;
}
/**
* epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
* to access the memory window of the other host
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
* @mw: Index of the memory window (either 0, 1, 2 or 3)
*
* +-----------------+ +---->+----------------+-----------+-----------------+
* | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
* +-----------------+ | +----------------+ +-----------------+
* | BAR1 | | | Doorbell 2 +---------+ | |
* +-----------------+----+ +----------------+ | | |
* | BAR2 | | Doorbell 3 +-------+ | +-----------------+
* +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
* | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
* +-----------------+ | |----------------+ | | | |
* | BAR4 | | | | | | +-----------------+
* +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
* | BAR5 | | | | | | +-----------------+
* +-----------------+ +---->-----------------+ | | | |
* EP CONTROLLER 1 | | | | +-----------------+
* | | | +---->+ MSI|X ADDRESS 4 |
* +----------------+ | +-----------------+
* (A) EP CONTROLLER 2 | | |
* (OB SPACE) | | |
* +-------> MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
* This function performs stage (B) in the above diagram (see MW1), i.e., maps
* the OB address space of the memory window to the PCI address space.
*
* This operation requires 3 parameters
* 1) Address in the outbound address space
* 2) Address in the PCI Address space
* 3) Size of the address region to be mapped
*
* The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
* stored in epf_bar corresponding to BAR_DB_MW1 for MW1 and BAR_MW2, BAR_MW3
* BAR_MW4 for rest of the BARs of epf_ntb_epc that is connected to HOST1. This
* is populated in epf_ntb_alloc_peer_mem() in this driver.
*
* The address and size of the PCI address region that has to be mapped would
* be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
* connected to HOST2.
*
* Please note Memory window1 (MW1) and Doorbell registers together will be
* mapped to a single BAR (BAR2) above for 32-bit BARs. The exact BAR that's
* used for Memory window (MW) can be obtained from epf_ntb_bar[BAR_DB_MW1],
* epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3] and epf_ntb_bar[BAR_MW4].
*/
static int epf_ntb_configure_mw(struct epf_ntb *ntb,
enum pci_epc_interface_type type, u32 mw)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
struct pci_epc *epc;
u64 addr, size;
int ret = 0;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
ctrl = ntb_epc->reg;
addr = ctrl->addr;
size = ctrl->size;
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
if (size > ntb->mws_size[mw]) {
dev_err(&epc->dev,
"%s intf: MW: %d Req Sz:%llxx > Supported Sz:%llx\n",
pci_epc_interface_string(type), mw, size,
ntb->mws_size[mw]);
ret = -EINVAL;
goto err_invalid_size;
}
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&epc->dev,
"%s intf: Failed to map memory window %d address\n",
pci_epc_interface_string(type), mw);
err_invalid_size:
return ret;
}
/**
* epf_ntb_teardown_mw() - Teardown the configured OB ATU
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
* @mw: Index of the memory window (either 0, 1, 2 or 3)
*
* Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
* pci_epc_unmap_addr()
*/
static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
enum pci_epc_interface_type type, u32 mw)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
struct pci_epc *epc;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
ctrl = ntb_epc->reg;
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
* epf_ntb_configure_msi() - Map OB address space to MSI address
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
* @db_count: Number of doorbell interrupts to map
*
*+-----------------+ +----->+----------------+-----------+-----------------+
*| BAR0 | | | Doorbell 1 +---+-------> MSI ADDRESS |
*+-----------------+ | +----------------+ | +-----------------+
*| BAR1 | | | Doorbell 2 +---+ | |
*+-----------------+----+ +----------------+ | | |
*| BAR2 | | Doorbell 3 +---+ | |
*+-----------------+----+ +----------------+ | | |
*| BAR3 | | | Doorbell 4 +---+ | |
*+-----------------+ | |----------------+ | |
*| BAR4 | | | | | |
*+-----------------+ | | MW1 | | |
*| BAR5 | | | | | |
*+-----------------+ +----->-----------------+ | |
* EP CONTROLLER 1 | | | |
* | | | |
* +----------------+ +-----------------+
* (A) EP CONTROLLER 2 | |
* (OB SPACE) | |
* | MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
*
* This function performs stage (B) in the above diagram (see Doorbell 1,
* Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
* corresponding to the doorbell to the MSI address in PCI address space.
*
* This operation requires 3 parameters
* 1) Address reserved for doorbell in the outbound address space
* 2) MSI address in the PCI address space
* 3) Number of MSI interrupts to be configured
*
* The address in the outbound address space (for the Doorbell) is stored in
* epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
* HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
* with address for MW1.
*
* pci_epc_map_msi_irq() takes the MSI address from MSI capability register
* and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI
* address.
*
* epf_ntb_configure_msi() also stores the MSI data to raise each interrupt
* in db_data of the peer's control region. This helps the peer to raise
* doorbell of the other host by writing db_data to the BAR corresponding to
* BAR_DB_MW1.
*/
static int epf_ntb_configure_msi(struct epf_ntb *ntb,
enum pci_epc_interface_type type, u16 db_count)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
u32 db_entry_size, db_data, db_offset;
struct pci_epf_bar *peer_epf_bar;
struct epf_ntb_ctrl *peer_ctrl;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
struct pci_epc *epc;
int ret, i;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
peer_ctrl = peer_ntb_epc->reg;
db_entry_size = peer_ctrl->db_entry_size;
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
db_entry_size, &db_data, &db_offset);
if (ret) {
dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
pci_epc_interface_string(type));
return ret;
}
for (i = 0; i < db_count; i++) {
peer_ctrl->db_data[i] = db_data | i;
peer_ctrl->db_offset[i] = db_offset;
}
return 0;
}
/**
* epf_ntb_configure_msix() - Map OB address space to MSI-X address
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
* @db_count: Number of doorbell interrupts to map
*
*+-----------------+ +----->+----------------+-----------+-----------------+
*| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
*+-----------------+ | +----------------+ +-----------------+
*| BAR1 | | | Doorbell 2 +---------+ | |
*+-----------------+----+ +----------------+ | | |
*| BAR2 | | Doorbell 3 +-------+ | +-----------------+
*+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
*| BAR3 | | | Doorbell 4 +-----+ | +-----------------+
*+-----------------+ | |----------------+ | | | |
*| BAR4 | | | | | | +-----------------+
*+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3||
*| BAR5 | | | | | +-----------------+
*+-----------------+ +----->-----------------+ | | |
* EP CONTROLLER 1 | | | +-----------------+
* | | +---->+ MSI-X ADDRESS 4 |
* +----------------+ +-----------------+
* (A) EP CONTROLLER 2 | |
* (OB SPACE) | |
* | MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
* This function performs stage (B) in the above diagram (see Doorbell 1,
* Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
* corresponding to the doorbell to the MSI-X address in PCI address space.
*
* This operation requires 3 parameters
* 1) Address reserved for doorbell in the outbound address space
* 2) MSI-X address in the PCIe Address space
* 3) Number of MSI-X interrupts that has to be configured
*
* The address in the outbound address space (for the Doorbell) is stored in
* epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
* HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
* with address for MW1.
*
 * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and the
 * doorbell count is in ctrl->argument of the epf_ntb_epc that is connected
 * to HOST2. The MSI-X table is memory mapped to ntb_epc->msix_bar at the
 * offset given by ntb_epc->msix_table_offset; from there
 * epf_ntb_configure_msix() gets the MSI-X address and data.
*
* epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt
* in db_data of the peer's control region. This helps the peer to raise
* doorbell of the other host by writing db_data to the BAR corresponding to
* BAR_DB_MW1.
*/
static int epf_ntb_configure_msix(struct epf_ntb *ntb,
enum pci_epc_interface_type type,
u16 db_count)
{
const struct pci_epc_features *epc_features;
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *peer_epf_bar, *epf_bar;
struct pci_epf_msix_tbl *msix_tbl;
struct epf_ntb_ctrl *peer_ctrl;
u32 db_entry_size, msg_data;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
struct pci_epc *epc;
size_t align;
u64 msg_addr;
int ret, i;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar];
msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
peer_ctrl = peer_ntb_epc->reg;
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
db_entry_size = peer_ctrl->db_entry_size;
for (i = 0; i < db_count; i++) {
msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
msg_data = msix_tbl[i].msg_data;
ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
db_entry_size);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to configure MSI-X IRQ\n",
pci_epc_interface_string(type));
return ret;
}
phys_addr = phys_addr + db_entry_size;
peer_ctrl->db_data[i] = msg_data;
peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1);
}
ntb_epc->is_msix = true;
return 0;
}
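/*
 * Illustrative sketch, not part of this driver: unlike MSI, each MSI-X
 * doorbell entry above is mapped to its own message address, spaced
 * db_entry_size apart within the doorbell BAR, with db_offset[i] holding
 * the sub-alignment remainder of that entry's address. A host-side write
 * of the form ('db_base' being a hypothetical ioremap() of BAR_DB_MW1):
 *
 *	writel(peer_ctrl->db_data[i],
 *	       db_base + i * peer_ctrl->db_entry_size + peer_ctrl->db_offset[i]);
 *
 * therefore hits the OB window mapped to MSI-X entry 'i' and raises that
 * vector on the other host.
 */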
/**
* epf_ntb_configure_db() - Configure the Outbound Address Space for one host
* to ring the doorbell of other host
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
 * @db_count: Number of doorbells that have to be configured
* @msix: Indicates whether MSI-X or MSI should be used
*
* Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi() required for
* one HOST to ring the doorbell of other HOST.
*/
static int epf_ntb_configure_db(struct epf_ntb *ntb,
enum pci_epc_interface_type type,
u16 db_count, bool msix)
{
struct epf_ntb_epc *ntb_epc;
struct pci_epc *epc;
int ret;
if (db_count > MAX_DB_COUNT)
return -EINVAL;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
if (msix)
ret = epf_ntb_configure_msix(ntb, type, db_count);
else
ret = epf_ntb_configure_msi(ntb, type, db_count);
if (ret)
dev_err(&epc->dev, "%s intf: Failed to configure DB\n",
pci_epc_interface_string(type));
return ret;
}
/**
* epf_ntb_teardown_db() - Unmap address in OB address space to MSI/MSI-X
* address
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Invoke pci_epc_unmap_addr() to unmap OB address to MSI/MSI-X address.
*/
static void
epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
struct pci_epc *epc;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
* epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
* @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY)
*
 * Workqueue function that gets invoked for each of the two epf_ntb_epc
 * periodically (once every 5ms) to see if it has received any commands
 * from the NTB host. The host can send commands to configure doorbell or
 * configure memory window or to update link status.
*/
static void epf_ntb_cmd_handler(struct work_struct *work)
{
enum pci_epc_interface_type type;
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
u32 command, argument;
struct epf_ntb *ntb;
struct device *dev;
u16 db_count;
bool is_msix;
int ret;
ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
ctrl = ntb_epc->reg;
command = ctrl->command;
if (!command)
goto reset_handler;
argument = ctrl->argument;
ctrl->command = 0;
ctrl->argument = 0;
ctrl = ntb_epc->reg;
type = ntb_epc->type;
ntb = ntb_epc->epf_ntb;
dev = &ntb->epf->dev;
switch (command) {
case COMMAND_CONFIGURE_DOORBELL:
db_count = argument & DB_COUNT_MASK;
is_msix = argument & MSIX_ENABLE;
ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_TEARDOWN_DOORBELL:
epf_ntb_teardown_db(ntb, type);
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_CONFIGURE_MW:
ret = epf_ntb_configure_mw(ntb, type, argument);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_TEARDOWN_MW:
epf_ntb_teardown_mw(ntb, type, argument);
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_LINK_UP:
ntb_epc->linkup = true;
if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
ntb->epc[SECONDARY_INTERFACE]->linkup) {
ret = epf_ntb_link_up(ntb, true);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
goto reset_handler;
}
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_LINK_DOWN:
ntb_epc->linkup = false;
ret = epf_ntb_link_up(ntb, false);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
break;
default:
dev_err(dev, "%s intf UNKNOWN command: %d\n",
pci_epc_interface_string(type), command);
break;
}
reset_handler:
queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
msecs_to_jiffies(5));
}
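/*
 * Illustrative sketch, not part of this driver: a host issues a command by
 * writing the argument first and the command word last, then polling
 * command_status. For example, to configure 8 MSI-X doorbells ('ctrl' being
 * a hypothetical host-side mapping of the config region):
 *
 *	writel(8 | MSIX_ENABLE, &ctrl->argument);
 *	writel(COMMAND_CONFIGURE_DOORBELL, &ctrl->command);
 *	while (readw(&ctrl->command_status) != COMMAND_STATUS_OK)
 *		usleep_range(100, 200);
 *
 * Writing the command word last matters: the handler above treats any
 * non-zero 'command' as "argument is already valid".
 */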
/**
* epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
 * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
 *   outbound address.
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
*+-----------------+----+ +------------------+<-------+-----------------+
*| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
*+-----------------+ +-->+------------------+<-------+-----------------+
*| BAR2 | Local Memory | BAR2 |
*+-----------------+ +-----------------+
*| BAR3 | | BAR3 |
*+-----------------+ +-----------------+
*| BAR4 | | BAR4 |
*+-----------------+ +-----------------+
*| BAR5 | | BAR5 |
*+-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
* Clear BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
* region. While BAR1 is the default peer scratchpad BAR, an NTB could have
* other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
* This function can get the exact BAR used for peer scratchpad from
* epf_ntb_bar[BAR_PEER_SPAD].
*
* Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
* gets the address of peer scratchpad from
* peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
*/
static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct pci_epc *epc;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
* epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
*+-----------------+----+ +------------------+<-------+-----------------+
*| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
*+-----------------+ +-->+------------------+<-------+-----------------+
*| BAR2 | Local Memory | BAR2 |
*+-----------------+ +-----------------+
*| BAR3 | | BAR3 |
*+-----------------+ +-----------------+
*| BAR4 | | BAR4 |
*+-----------------+ +-----------------+
*| BAR5 | | BAR5 |
*+-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
* Set BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
* region. While BAR1 is the default peer scratchpad BAR, an NTB could have
* other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
* This function can get the exact BAR used for peer scratchpad from
* epf_ntb_bar[BAR_PEER_SPAD].
*
* Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
* gets the address of peer scratchpad from
* peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
*/
static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *peer_epf_bar, *epf_bar;
enum pci_barno peer_barno, barno;
u32 peer_spad_offset;
u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
int ret;
dev = &ntb->epf->dev;
peer_ntb_epc = ntb->epc[!type];
peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
ntb_epc = ntb->epc[type];
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
peer_spad_offset = peer_ntb_epc->reg->spad_offset;
epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
epf_bar->size = peer_ntb_epc->spad_size;
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
pci_epc_interface_string(type));
return ret;
}
return 0;
}
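/*
 * Illustrative sketch, not part of this driver: once this BAR is set, the
 * host on this interface reaches the other host's scratchpads with plain
 * MMIO. Assuming 'spad_base' is a hypothetical ioremap() of the BAR
 * corresponding to BAR_PEER_SPAD:
 *
 *	writel(val, spad_base + idx * 4);	// peer scratchpad write
 *	val = readl(spad_base + idx * 4);	// peer scratchpad read
 *
 * No endpoint command is involved; the BAR points straight at the peer's
 * self scratchpad memory.
 */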
/**
* epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
 * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
 *   outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
* +-----------------+----+ +------------------+<-------+-----------------+
* | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
* +-----------------+ +-->+------------------+<-------+-----------------+
* | BAR2 | Local Memory | BAR2 |
* +-----------------+ +-----------------+
* | BAR3 | | BAR3 |
* +-----------------+ +-----------------+
* | BAR4 | | BAR4 |
* +-----------------+ +-----------------+
* | BAR5 | | BAR5 |
* +-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
* Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
* self scratchpad region (removes inbound ATU configuration). While BAR0 is
* the default self scratchpad BAR, an NTB could have other BARs for self
* scratchpad (because of reserved BARs). This function can get the exact BAR
* used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
*
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note HOST2's
 * peer scratchpad is HOST1's self scratchpad.
*/
static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct pci_epc *epc;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
* epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
 * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
 *   outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
* +-----------------+----+ +------------------+<-------+-----------------+
* | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
* +-----------------+ +-->+------------------+<-------+-----------------+
* | BAR2 | Local Memory | BAR2 |
* +-----------------+ +-----------------+
* | BAR3 | | BAR3 |
* +-----------------+ +-----------------+
* | BAR4 | | BAR4 |
* +-----------------+ +-----------------+
* | BAR5 | | BAR5 |
* +-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
* Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
* self scratchpad region. While BAR0 is the default self scratchpad BAR, an
* NTB could have other BARs for self scratchpad (because of reserved BARs).
* This function can get the exact BAR used for self scratchpad from
* epf_ntb_bar[BAR_CONFIG].
*
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note HOST2's
 * peer scratchpad is HOST1's self scratchpad.
*/
static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct epf_ntb *ntb;
struct pci_epc *epc;
struct device *dev;
int ret;
ntb = ntb_epc->epf_ntb;
dev = &ntb->epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
pci_epc_interface_string(ntb_epc->type));
return ret;
}
return 0;
}
/**
* epf_ntb_config_spad_bar_free() - Free the physical memory associated with
* config + scratchpad region
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
* +-----------------+----+ +------------------+<-------+-----------------+
* | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
* +-----------------+ +-->+------------------+<-------+-----------------+
* | BAR2 | Local Memory | BAR2 |
* +-----------------+ +-----------------+
* | BAR3 | | BAR3 |
* +-----------------+ +-----------------+
* | BAR4 | | BAR4 |
* +-----------------+ +-----------------+
* | BAR5 | | BAR5 |
* +-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
 * Free the Local Memory mentioned in the above diagram. After invoking this
 * function, neither the config + self scratchpad region of HOST1 nor the
 * peer scratchpad region of HOST2 should be accessed.
*/
static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
struct epf_ntb_epc *ntb_epc;
enum pci_barno barno;
struct pci_epf *epf;
epf = ntb->epf;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
if (ntb_epc->reg)
pci_epf_free_space(epf, ntb_epc->reg, barno, type);
}
}
/**
* epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
* region
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
* +-----------------+----+ +------------------+<-------+-----------------+
* | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
* +-----------------+ +-->+------------------+<-------+-----------------+
* | BAR2 | Local Memory | BAR2 |
* +-----------------+ +-----------------+
* | BAR3 | | BAR3 |
* +-----------------+ +-----------------+
* | BAR4 | | BAR4 |
* +-----------------+ +-----------------+
* | BAR5 | | BAR5 |
* +-----------------+ +-----------------+
* EP CONTROLLER 1 EP CONTROLLER 2
*
* Allocate the Local Memory mentioned in the above diagram. The size of
* CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
* is obtained from "spad-count" configfs entry.
*
 * The sizes of both the config region and the scratchpad region have to be
 * aligned, since the scratchpad region will also be mapped as the PEER
 * SCRATCHPAD of the other host using a separate BAR.
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
const struct pci_epc_features *peer_epc_features, *epc_features;
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
size_t msix_table_size, pba_size, align;
enum pci_barno peer_barno, barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
u64 size, peer_size;
struct pci_epf *epf;
struct device *dev;
bool msix_capable;
u32 spad_count;
void *base;
epf = ntb->epf;
dev = &epf->dev;
ntb_epc = ntb->epc[type];
epc_features = ntb_epc->epc_features;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
size = epc_features->bar_fixed_size[barno];
align = epc_features->align;
peer_ntb_epc = ntb->epc[!type];
peer_epc_features = peer_ntb_epc->epc_features;
peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
peer_size = peer_epc_features->bar_fixed_size[peer_barno];
	/* An unaligned fixed BAR size means epc_features is populated incorrectly */
	if (!IS_ALIGNED(size, align))
return -EINVAL;
spad_count = ntb->spad_count;
ctrl_size = sizeof(struct epf_ntb_ctrl);
spad_size = spad_count * 4;
msix_capable = epc_features->msix_capable;
if (msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count;
ctrl_size = ALIGN(ctrl_size, 8);
ntb_epc->msix_table_offset = ctrl_size;
ntb_epc->msix_bar = barno;
/* Align to QWORD or 8 Bytes */
pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8);
ctrl_size = ctrl_size + msix_table_size + pba_size;
}
if (!align) {
ctrl_size = roundup_pow_of_two(ctrl_size);
spad_size = roundup_pow_of_two(spad_size);
} else {
ctrl_size = ALIGN(ctrl_size, align);
spad_size = ALIGN(spad_size, align);
}
if (peer_size) {
if (peer_size < spad_size)
spad_count = peer_size / 4;
spad_size = peer_size;
}
/*
* In order to make sure SPAD offset is aligned to its size,
* expand control region size to the size of SPAD if SPAD size
* is greater than control region size.
*/
if (spad_size > ctrl_size)
ctrl_size = spad_size;
if (!size)
size = ctrl_size + spad_size;
else if (size < ctrl_size + spad_size)
return -EINVAL;
base = pci_epf_alloc_space(epf, size, barno, align, type);
if (!base) {
dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
pci_epc_interface_string(type));
return -ENOMEM;
}
ntb_epc->reg = base;
ctrl = ntb_epc->reg;
ctrl->spad_offset = ctrl_size;
ctrl->spad_count = spad_count;
ctrl->num_mws = ntb->num_mws;
ctrl->db_entry_size = align ? align : 4;
ntb_epc->spad_size = spad_size;
return 0;
}
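/*
 * Worked example with assumed values: spad_count = 64, db_count = 4,
 * MSI-X capable, align = 0 and no fixed BAR size. ctrl_size grows from
 * sizeof(struct epf_ntb_ctrl) by the MSI-X table (4 * PCI_MSIX_ENTRY_SIZE)
 * and the PBA, then is rounded up to a power of two; spad_size = 64 * 4 =
 * 256. The allocation is ctrl_size + spad_size with ctrl->spad_offset =
 * ctrl_size, so the scratchpads start immediately after the (possibly
 * expanded) control region.
 */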
/**
* epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config +
* scratchpad region for each of PRIMARY and SECONDARY interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
 * Wrapper for epf_ntb_config_spad_bar_alloc() which allocates memory for
 * the config + scratchpad region of each interface in turn.
*/
static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
struct device *dev;
int ret;
dev = &ntb->epf->dev;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ret = epf_ntb_config_spad_bar_alloc(ntb, type);
if (ret) {
dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
pci_epc_interface_string(type));
return ret;
}
}
return 0;
}
/**
 * epf_ntb_free_peer_mem() - Free memory allocated in peer's outbound address
 *   space
 * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
 *   outbound address regions
*
* +-----------------+ +---->+----------------+-----------+-----------------+
* | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
* +-----------------+ | +----------------+ +-----------------+
* | BAR1 | | | Doorbell 2 +---------+ | |
* +-----------------+----+ +----------------+ | | |
* | BAR2 | | Doorbell 3 +-------+ | +-----------------+
* +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
* | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
* +-----------------+ | |----------------+ | | | |
* | BAR4 | | | | | | +-----------------+
* +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
* | BAR5 | | | | | | +-----------------+
* +-----------------+ +---->-----------------+ | | | |
* EP CONTROLLER 1 | | | | +-----------------+
* | | | +---->+ MSI|X ADDRESS 4 |
* +----------------+ | +-----------------+
* (A) EP CONTROLLER 2 | | |
* (OB SPACE) | | |
* +-------> MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
* Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram.
* It'll free Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3,
* MW4).
*/
static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
void __iomem *mw_addr;
phys_addr_t phys_addr;
enum epf_ntb_bar bar;
enum pci_barno barno;
struct pci_epc *epc;
size_t size;
epc = ntb_epc->epc;
	for (bar = BAR_DB_MW1; bar <= BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
mw_addr = ntb_epc->mw_addr[barno];
epf_bar = &ntb_epc->epf_bar[barno];
phys_addr = epf_bar->phys_addr;
size = epf_bar->size;
if (mw_addr) {
pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
ntb_epc->mw_addr[barno] = NULL;
}
}
}
/**
* epf_ntb_db_mw_bar_clear() - Clear doorbell and memory BAR
 * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
 *   outbound address
*
* +-----------------+ +---->+----------------+-----------+-----------------+
* | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
* +-----------------+ | +----------------+ +-----------------+
* | BAR1 | | | Doorbell 2 +---------+ | |
* +-----------------+----+ +----------------+ | | |
* | BAR2 | | Doorbell 3 +-------+ | +-----------------+
* +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
* | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
* +-----------------+ | |----------------+ | | | |
* | BAR4 | | | | | | +-----------------+
* +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
* | BAR5 | | | | | | +-----------------+
* +-----------------+ +---->-----------------+ | | | |
* EP CONTROLLER 1 | | | | +-----------------+
* | | | +---->+ MSI|X ADDRESS 4 |
* +----------------+ | +-----------------+
* (A) EP CONTROLLER 2 | | |
* (OB SPACE) | | |
* +-------> MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
 * Clear doorbell and memory BARs (remove inbound ATU configuration). In the
 * above diagram it clears BAR2 to BAR5 of EP CONTROLLER 1 (Doorbell BAR,
 * MW1 BAR, MW2 BAR, MW3 BAR and MW4 BAR).
*/
static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum epf_ntb_bar bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct pci_epc *epc;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
	for (bar = BAR_DB_MW1; bar <= BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
}
/**
 * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory BAR and free memory
 * allocated in peer's outbound address space
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Wrapper for epf_ntb_db_mw_bar_clear() to clear HOST1's BAR and
* epf_ntb_free_peer_mem() which frees up HOST2 outbound memory.
*/
static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
ntb_epc = ntb->epc[type];
peer_ntb_epc = ntb->epc[!type];
epf_ntb_db_mw_bar_clear(ntb_epc);
epf_ntb_free_peer_mem(peer_ntb_epc);
}
/**
* epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Configure MSI/MSI-X capability for each interface with number of
* interrupts equal to "db_count" configfs entry.
*/
static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
const struct pci_epc_features *epc_features;
bool msix_capable, msi_capable;
struct epf_ntb_epc *ntb_epc;
u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
u32 db_count;
int ret;
ntb_epc = ntb->epc[type];
dev = &ntb->epf->dev;
epc_features = ntb_epc->epc_features;
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
if (!(msix_capable || msi_capable)) {
dev_err(dev, "MSI or MSI-X is required for doorbell\n");
return -EINVAL;
}
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
return -EINVAL;
}
ntb->db_count = db_count;
epc = ntb_epc->epc;
if (msi_capable) {
ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
if (ret) {
dev_err(dev, "%s intf: MSI configuration failed\n",
pci_epc_interface_string(type));
return ret;
}
}
if (msix_capable) {
ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
ntb_epc->msix_bar,
ntb_epc->msix_table_offset);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
}
}
return 0;
}
/**
* epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
* @dev: The PCI device.
 * @ntb_epc: EPC associated with one of the HOSTs whose BAR holds the peer's
 *   outbound address
 * @bar: BAR of @ntb_epc for which memory has to be allocated (could be
 *   BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4)
* @peer_ntb_epc: EPC associated with HOST whose outbound address space is
* used by @ntb_epc
 * @size: Size of the address region that has to be allocated in peer's OB SPACE
*
*
* +-----------------+ +---->+----------------+-----------+-----------------+
* | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
* +-----------------+ | +----------------+ +-----------------+
* | BAR1 | | | Doorbell 2 +---------+ | |
* +-----------------+----+ +----------------+ | | |
* | BAR2 | | Doorbell 3 +-------+ | +-----------------+
* +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
* | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
* +-----------------+ | |----------------+ | | | |
* | BAR4 | | | | | | +-----------------+
* +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
* | BAR5 | | | | | | +-----------------+
* +-----------------+ +---->-----------------+ | | | |
* EP CONTROLLER 1 | | | | +-----------------+
* | | | +---->+ MSI|X ADDRESS 4 |
* +----------------+ | +-----------------+
* (A) EP CONTROLLER 2 | | |
* (OB SPACE) | | |
* +-------> MW1 |
* | |
* | |
* (B) +-----------------+
* | |
* | |
* | |
* | |
* | |
* +-----------------+
* PCI Address Space
* (Managed by HOST2)
*
 * Allocate memory in the OB space of EP CONTROLLER 2 in the above diagram:
 * for Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4 and MW1 (and MW2, MW3,
 * MW4).
*/
static int epf_ntb_alloc_peer_mem(struct device *dev,
struct epf_ntb_epc *ntb_epc,
enum epf_ntb_bar bar,
struct epf_ntb_epc *peer_ntb_epc,
size_t size)
{
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct pci_epc *peer_epc;
phys_addr_t phys_addr;
void __iomem *mw_addr;
enum pci_barno barno;
size_t align;
epc_features = ntb_epc->epc_features;
align = epc_features->align;
if (size < 128)
size = 128;
if (align)
size = ALIGN(size, align);
else
size = roundup_pow_of_two(size);
peer_epc = peer_ntb_epc->epc;
mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
if (!mw_addr) {
dev_err(dev, "%s intf: Failed to allocate OB address\n",
pci_epc_interface_string(peer_ntb_epc->type));
return -ENOMEM;
}
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
ntb_epc->mw_addr[barno] = mw_addr;
epf_bar->phys_addr = phys_addr;
epf_bar->size = size;
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
return 0;
}
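/*
 * Worked example with assumed values: a 16-byte doorbell region on a
 * controller with no alignment constraint is first bumped to the 128-byte
 * minimum and then rounded up to a power of two, so pci_epc_mem_alloc_addr()
 * is asked for 128 bytes; with align = 4096 the same request becomes 4096
 * bytes, matching the controller's ATU granularity.
 */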
/**
* epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates
* memory in OB address space of HOST2 and configures BAR of HOST1
*/
static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
const struct pci_epc_features *epc_features;
struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
struct pci_epf_bar *epf_bar;
struct epf_ntb_ctrl *ctrl;
u32 num_mws, db_count;
enum epf_ntb_bar bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
size_t align;
int ret, i;
u64 size;
ntb_epc = ntb->epc[type];
peer_ntb_epc = ntb->epc[!type];
dev = &ntb->epf->dev;
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
num_mws = ntb->num_mws;
db_count = ntb->db_count;
for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
if (bar == BAR_DB_MW1) {
align = align ? align : 4;
size = db_count * align;
size = ALIGN(size, ntb->mws_size[i]);
ctrl = ntb_epc->reg;
ctrl->mw1_offset = size;
size += ntb->mws_size[i];
} else {
size = ntb->mws_size[i];
}
ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
peer_ntb_epc, size);
if (ret) {
dev_err(dev, "%s intf: DoorBell mem alloc failed\n",
pci_epc_interface_string(type));
goto err_alloc_peer_mem;
}
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: DoorBell BAR set failed\n",
pci_epc_interface_string(type));
goto err_alloc_peer_mem;
}
}
return 0;
err_alloc_peer_mem:
epf_ntb_db_mw_bar_cleanup(ntb, type);
return ret;
}
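/*
 * Worked example with assumed values: db_count = 4, align = 4 and
 * mws_size[0] = SZ_1M. The shared BAR_DB_MW1 region starts with 16 bytes of
 * doorbell space, padded so that ctrl->mw1_offset is aligned to SZ_1M,
 * followed by the 1 MiB memory window: a 2 MiB allocation in which the
 * doorbells and MW1 share one BAR without overlapping.
 */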
/**
* epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
 * Unbind the NTB function device from the EPC and relinquish the reference
 * to pci_epc for each of the interfaces.
*/
static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
struct pci_epc *epc;
struct pci_epf *epf;
if (type < 0)
return;
epf = ntb->epf;
ntb_epc = ntb->epc[type];
if (!ntb_epc)
return;
epc = ntb_epc->epc;
pci_epc_remove_epf(epc, epf, type);
pci_epc_put(epc);
}
/**
* epf_ntb_epc_destroy() - Cleanup NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
* Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
*/
static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
epf_ntb_epc_destroy_interface(ntb, type);
}
/**
* epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @epc: struct pci_epc to which a particular NTB interface should be associated
* @type: PRIMARY interface or SECONDARY interface
*
* Allocate memory for NTB EPC interface and initialize it.
*/
static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
struct pci_epc *epc,
enum pci_epc_interface_type type)
{
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct epf_ntb_epc *ntb_epc;
u8 func_no, vfunc_no;
struct pci_epf *epf;
struct device *dev;
dev = &ntb->epf->dev;
ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
if (!ntb_epc)
return -ENOMEM;
epf = ntb->epf;
vfunc_no = epf->vfunc_no;
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
epf_bar = epf->bar;
} else {
func_no = epf->sec_epc_func_no;
epf_bar = epf->sec_epc_bar;
}
ntb_epc->linkup = false;
ntb_epc->epc = epc;
ntb_epc->func_no = func_no;
ntb_epc->vfunc_no = vfunc_no;
ntb_epc->type = type;
ntb_epc->epf_bar = epf_bar;
ntb_epc->epf_ntb = ntb;
epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!epc_features)
return -EINVAL;
ntb_epc->epc_features = epc_features;
ntb->epc[type] = ntb_epc;
return 0;
}
/**
* epf_ntb_epc_create() - Create and initialize NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
 * Get a reference to the EPC device and bind the NTB function device to that
 * EPC for each of the interfaces. It is also a wrapper to
 * epf_ntb_epc_create_interface() to allocate memory for the NTB EPC
 * interface and initialize it.
*/
static int epf_ntb_epc_create(struct epf_ntb *ntb)
{
struct pci_epf *epf;
struct device *dev;
int ret;
epf = ntb->epf;
dev = &epf->dev;
ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE);
if (ret) {
dev_err(dev, "PRIMARY intf: Fail to create NTB EPC\n");
return ret;
}
ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
SECONDARY_INTERFACE);
if (ret) {
dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n");
goto err_epc_create;
}
return 0;
err_epc_create:
epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
return ret;
}
/**
 * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of
 *   the NTB constructs (scratchpad region, doorbell, memory window)
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD,
* BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4.
*/
static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
const struct pci_epc_features *epc_features;
struct epf_ntb_epc *ntb_epc;
enum pci_barno barno;
enum epf_ntb_bar bar;
struct device *dev;
u32 num_mws;
int i;
barno = BAR_0;
ntb_epc = ntb->epc[type];
num_mws = ntb->num_mws;
dev = &ntb->epf->dev;
epc_features = ntb_epc->epc_features;
/* These are required BARs which are mandatory for NTB functionality */
for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
barno = pci_epc_get_next_free_bar(epc_features, barno);
if (barno < 0) {
dev_err(dev, "%s intf: Fail to get NTB function BAR\n",
pci_epc_interface_string(type));
return barno;
}
ntb_epc->epf_ntb_bar[bar] = barno;
}
/* These are optional BARs which don't impact NTB functionality */
for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
barno = pci_epc_get_next_free_bar(epc_features, barno);
if (barno < 0) {
ntb->num_mws = i;
dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
}
ntb_epc->epf_ntb_bar[bar] = barno;
}
return 0;
}
/**
 * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
 *   constructs (scratchpad region, doorbell, memory window)
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
* Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs
* to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
* BAR_MW3 and BAR_MW4 for all the interfaces.
*/
static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
struct device *dev;
int ret;
dev = &ntb->epf->dev;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ret = epf_ntb_init_epc_bar_interface(ntb, type);
if (ret) {
dev_err(dev, "Fail to init EPC bar for %s interface\n",
pci_epc_interface_string(type));
return ret;
}
}
return 0;
}
/**
* epf_ntb_epc_init_interface() - Initialize NTB interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
 * Wrapper to initialize a particular EPC interface and start the workqueue
 * to check for commands from the host. This function will write to the
 * EP controller HW for configuring it.
*/
static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
int ret;
ntb_epc = ntb->epc[type];
epf = ntb->epf;
dev = &epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
vfunc_no = ntb_epc->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
if (ret) {
dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
pci_epc_interface_string(type));
return ret;
}
ret = epf_ntb_peer_spad_bar_set(ntb, type);
if (ret) {
dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
pci_epc_interface_string(type));
goto err_peer_spad_bar_init;
}
ret = epf_ntb_configure_interrupt(ntb, type);
if (ret) {
dev_err(dev, "%s intf: Interrupt configuration failed\n",
pci_epc_interface_string(type));
goto err_peer_spad_bar_init;
}
ret = epf_ntb_db_mw_bar_init(ntb, type);
if (ret) {
dev_err(dev, "%s intf: DB/MW BAR init failed\n",
pci_epc_interface_string(type));
goto err_db_mw_bar_init;
}
if (vfunc_no <= 1) {
ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
if (ret) {
dev_err(dev, "%s intf: Configuration header write failed\n",
pci_epc_interface_string(type));
goto err_write_header;
}
}
INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
return 0;
err_write_header:
epf_ntb_db_mw_bar_cleanup(ntb, type);
err_db_mw_bar_init:
epf_ntb_peer_spad_bar_clear(ntb->epc[type]);
err_peer_spad_bar_init:
epf_ntb_config_sspad_bar_clear(ntb->epc[type]);
return ret;
}
/**
* epf_ntb_epc_cleanup_interface() - Cleanup NTB interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
* Wrapper to cleanup a particular NTB interface.
*/
static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
if (type < 0)
return;
ntb_epc = ntb->epc[type];
cancel_delayed_work(&ntb_epc->cmd_handler);
epf_ntb_db_mw_bar_cleanup(ntb, type);
epf_ntb_peer_spad_bar_clear(ntb_epc);
epf_ntb_config_sspad_bar_clear(ntb_epc);
}
/**
* epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
* Wrapper to cleanup all NTB interfaces.
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
epf_ntb_epc_cleanup_interface(ntb, type);
}
/**
* epf_ntb_epc_init() - Initialize all NTB interfaces
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
*
 * Wrapper to initialize all NTB interfaces and start the workqueue
 * to check for commands from the hosts.
*/
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
enum pci_epc_interface_type type;
struct device *dev;
int ret;
dev = &ntb->epf->dev;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ret = epf_ntb_epc_init_interface(ntb, type);
if (ret) {
dev_err(dev, "%s intf: Failed to initialize\n",
pci_epc_interface_string(type));
goto err_init_type;
}
}
return 0;
err_init_type:
epf_ntb_epc_cleanup_interface(ntb, type - 1);
return ret;
}
/**
* epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
* @epf: NTB endpoint function device
*
 * Initialize both the endpoint controllers associated with the NTB function
 * device. Invoked when a primary interface or secondary interface is bound
 * to the EPC device. This function will succeed only when EPC is bound to
 * both the interfaces.
*/
static int epf_ntb_bind(struct pci_epf *epf)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
int ret;
if (!epf->epc) {
dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
return 0;
}
if (!epf->sec_epc) {
dev_dbg(dev, "SECONDARY EPC interface not yet bound\n");
return 0;
}
ret = epf_ntb_epc_create(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
return ret;
}
ret = epf_ntb_init_epc_bar(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
goto err_bar_init;
}
ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
if (ret) {
dev_err(dev, "Failed to allocate BAR memory\n");
goto err_bar_alloc;
}
ret = epf_ntb_epc_init(ntb);
if (ret) {
dev_err(dev, "Failed to initialize EPC\n");
goto err_bar_alloc;
}
epf_set_drvdata(epf, ntb);
return 0;
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
err_bar_init:
epf_ntb_epc_destroy(ntb);
return ret;
}
/**
* epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
* @epf: NTB endpoint function device
*
 * Clean up the initialization done in epf_ntb_bind()
*/
static void epf_ntb_unbind(struct pci_epf *epf)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
epf_ntb_epc_cleanup(ntb);
epf_ntb_config_spad_bar_free(ntb);
epf_ntb_epc_destroy(ntb);
}
#define EPF_NTB_R(_name) \
static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
char *page) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
\
return sysfs_emit(page, "%d\n", ntb->_name); \
}
#define EPF_NTB_W(_name) \
static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
u32 val; \
\
if (kstrtou32(page, 0, &val) < 0) \
return -EINVAL; \
\
ntb->_name = val; \
\
return len; \
}
#define EPF_NTB_MW_R(_name) \
static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
char *page) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
int win_no; \
\
sscanf(#_name, "mw%d", &win_no); \
\
return sysfs_emit(page, "%lld\n", ntb->mws_size[win_no - 1]); \
}
#define EPF_NTB_MW_W(_name) \
static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
struct device *dev = &ntb->epf->dev; \
int win_no; \
u64 val; \
\
if (kstrtou64(page, 0, &val) < 0) \
return -EINVAL; \
\
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
\
if (ntb->num_mws < win_no) { \
dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
return -EINVAL; \
} \
\
ntb->mws_size[win_no - 1] = val; \
\
return len; \
}
static ssize_t epf_ntb_num_mws_store(struct config_item *item,
const char *page, size_t len)
{
struct config_group *group = to_config_group(item);
struct epf_ntb *ntb = to_epf_ntb(group);
u32 val;
if (kstrtou32(page, 0, &val) < 0)
return -EINVAL;
if (val > MAX_MW)
return -EINVAL;
ntb->num_mws = val;
return len;
}
EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_spad_count,
&epf_ntb_attr_db_count,
&epf_ntb_attr_num_mws,
&epf_ntb_attr_mw1,
&epf_ntb_attr_mw2,
&epf_ntb_attr_mw3,
&epf_ntb_attr_mw4,
NULL,
};
static const struct config_item_type ntb_group_type = {
.ct_attrs = epf_ntb_attrs,
.ct_owner = THIS_MODULE,
};
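/*
 * Illustrative configfs usage; the paths follow the conventional PCI
 * endpoint configfs layout and are shown for orientation only:
 *
 *	# cd /sys/kernel/config/pci_ep/
 *	# echo 4 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/db_count
 *	# echo 64 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/spad_count
 *	# echo 2 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/num_mws
 *	# echo 0x100000 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/mw1
 *
 * Each write lands in the EPF_NTB_W()/EPF_NTB_MW_W() stores above before
 * the function is bound to the endpoint controllers.
 */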
/**
* epf_ntb_add_cfs() - Add configfs directory specific to NTB
* @epf: NTB endpoint function device
* @group: A pointer to the config_group structure referencing a group of
* config_items of a specific type that belong to a specific sub-system.
*
 * Add configfs directory specific to NTB. This directory will hold
 * NTB specific properties like db_count, spad_count, num_mws, etc.
*/
static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
struct config_group *group)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
struct config_group *ntb_group = &ntb->group;
struct device *dev = &epf->dev;
config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
return ntb_group;
}
/**
* epf_ntb_probe() - Probe NTB function driver
* @epf: NTB endpoint function device
* @id: NTB endpoint function device ID
*
 * Probe NTB function driver when the endpoint function bus detects an NTB
 * endpoint function.
*/
static int epf_ntb_probe(struct pci_epf *epf,
const struct pci_epf_device_id *id)
{
struct epf_ntb *ntb;
struct device *dev;
dev = &epf->dev;
ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
if (!ntb)
return -ENOMEM;
epf->header = &epf_ntb_header;
ntb->epf = epf;
epf_set_drvdata(epf, ntb);
return 0;
}
static struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
};
static const struct pci_epf_device_id epf_ntb_ids[] = {
{
.name = "pci_epf_ntb",
},
{},
};
static struct pci_epf_driver epf_ntb_driver = {
.driver.name = "pci_epf_ntb",
.probe = epf_ntb_probe,
.id_table = epf_ntb_ids,
.ops = &epf_ntb_ops,
.owner = THIS_MODULE,
};
static int __init epf_ntb_init(void)
{
int ret;
kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
WQ_HIGHPRI, 0);
ret = pci_epf_register_driver(&epf_ntb_driver);
if (ret) {
destroy_workqueue(kpcintb_workqueue);
pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
return ret;
}
return 0;
}
module_init(epf_ntb_init);
static void __exit epf_ntb_exit(void)
{
pci_epf_unregister_driver(&epf_ntb_driver);
destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);
MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pci/endpoint/functions/pci-epf-ntb.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Endpoint Function Driver to implement Non-Transparent Bridge functionality
 * between PCI RC and EP
*
* Copyright (C) 2020 Texas Instruments
* Copyright (C) 2022 NXP
*
* Based on pci-epf-ntb.c
* Author: Frank Li <[email protected]>
* Author: Kishon Vijay Abraham I <[email protected]>
*/
/*
* +------------+ +---------------------------------------+
* | | | |
* +------------+ | +--------------+
* | NTB | | | NTB |
* | NetDev | | | NetDev |
* +------------+ | +--------------+
* | NTB | | | NTB |
* | Transfer | | | Transfer |
* +------------+ | +--------------+
* | | | | |
* | PCI NTB | | | |
* | EPF | | | |
* | Driver | | | PCI Virtual |
* | | +---------------+ | NTB Driver |
* | | | PCI EP NTB |<------>| |
* | | | FN Driver | | |
* +------------+ +---------------+ +--------------+
* | | | | | |
* | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
* | | PCI | | | Bus |
* +------------+ +---------------+--------+--------------+
* PCIe Root Port PCI EP
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/ntb.h>
static struct workqueue_struct *kpcintb_workqueue;
#define COMMAND_CONFIGURE_DOORBELL 1
#define COMMAND_TEARDOWN_DOORBELL 2
#define COMMAND_CONFIGURE_MW 3
#define COMMAND_TEARDOWN_MW 4
#define COMMAND_LINK_UP 5
#define COMMAND_LINK_DOWN 6
#define COMMAND_STATUS_OK 1
#define COMMAND_STATUS_ERROR 2
#define LINK_STATUS_UP BIT(0)
#define SPAD_COUNT 64
#define DB_COUNT 4
#define NTB_MW_OFFSET 2
#define DB_COUNT_MASK GENMASK(15, 0)
#define MSIX_ENABLE BIT(16)
#define MAX_DB_COUNT 32
#define MAX_MW 4
enum epf_ntb_bar {
BAR_CONFIG,
BAR_DB,
BAR_MW0,
BAR_MW1,
BAR_MW2,
};
/*
* +--------------------------------------------------+ Base
* | |
* | |
* | |
* | Common Control Register |
* | |
* | |
* | |
* +-----------------------+--------------------------+ Base+spad_offset
* | | |
* | Peer Spad Space | Spad Space |
* | | |
* | | |
* +-----------------------+--------------------------+ Base+spad_offset
* | | | +spad_count * 4
* | | |
* | Spad Space | Peer Spad Space |
* | | |
* +-----------------------+--------------------------+
* Virtual PCI PCIe Endpoint
* NTB Driver NTB Driver
*/
struct epf_ntb_ctrl {
u32 command;
u32 argument;
u16 command_status;
u16 link_status;
u32 topology;
u64 addr;
u64 size;
u32 num_mws;
u32 reserved;
u32 spad_offset;
u32 spad_count;
u32 db_entry_size;
u32 db_data[MAX_DB_COUNT];
u32 db_offset[MAX_DB_COUNT];
} __packed;
struct epf_ntb {
struct ntb_dev ntb;
struct pci_epf *epf;
struct config_group group;
u32 num_mws;
u32 db_count;
u32 spad_count;
u64 mws_size[MAX_MW];
u64 db;
u32 vbus_number;
u16 vntb_pid;
u16 vntb_vid;
bool linkup;
u32 spad_size;
enum pci_barno epf_ntb_bar[6];
struct epf_ntb_ctrl *reg;
u32 *epf_db;
phys_addr_t vpci_mw_phy[MAX_MW];
void __iomem *vpci_mw_addr[MAX_MW];
struct delayed_work cmd_handler;
};
#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
static struct pci_epf_header epf_ntb_header = {
.vendorid = PCI_ANY_ID,
.deviceid = PCI_ANY_ID,
.baseclass_code = PCI_BASE_CLASS_MEMORY,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
/**
* epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
* @ntb: NTB device that facilitates communication between HOST and VHOST
* @link_up: true or false indicating Link is UP or Down
*
 * Once the NTB function in the HOST invokes ntb_link_enable(), this NTB
 * function driver will trigger a link event to the VHOST.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
if (link_up)
ntb->reg->link_status |= LINK_STATUS_UP;
else
ntb->reg->link_status &= ~LINK_STATUS_UP;
ntb_link_event(&ntb->ntb);
return 0;
}
/**
* epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
* to access the memory window of HOST
* @ntb: NTB device that facilitates communication between HOST and VHOST
* @mw: Index of the memory window (either 0, 1, 2 or 3)
*
* EP Outbound Window
* +--------+ +-----------+
* | | | |
* | | | |
* | | | |
* | | | |
* | | +-----------+
* | Virtual| | Memory Win|
* | NTB | -----------> | |
* | Driver | | |
* | | +-----------+
* | | | |
* | | | |
* +--------+ +-----------+
* VHOST PCI EP
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
{
phys_addr_t phys_addr;
u8 func_no, vfunc_no;
u64 addr, size;
int ret = 0;
phys_addr = ntb->vpci_mw_phy[mw];
addr = ntb->reg->addr;
size = ntb->reg->size;
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&ntb->epf->epc->dev,
"Failed to map memory window %d address\n", mw);
return ret;
}
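/*
 * Illustrative sketch, not part of this driver: after this mapping, MMIO
 * accesses by the virtual NTB driver through the local window mapping are
 * forwarded by the outbound ATU to HOST memory at reg->addr, e.g.
 *
 *	writel(val, ntb->vpci_mw_addr[mw] + offset);
 *
 * where 'offset' (a hypothetical byte offset within the window) must stay
 * below reg->size.
 */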
/**
* epf_ntb_teardown_mw() - Teardown the configured OB ATU
* @ntb: NTB device that facilitates communication between HOST and VHOST
* @mw: Index of the memory window (either 0, 1, 2 or 3)
*
* Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
* pci_epc_unmap_addr()
*/
static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
{
pci_epc_unmap_addr(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
ntb->vpci_mw_phy[mw]);
}
/**
* epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
 * @work: work_struct for the epf_ntb device
 *
 * Workqueue function that gets invoked periodically (once every 5ms) to see
 * if the endpoint has received any doorbell writes or commands from the NTB
 * HOST. The HOST can send commands to configure doorbell or configure memory
 * window or to update link status.
*/
static void epf_ntb_cmd_handler(struct work_struct *work)
{
struct epf_ntb_ctrl *ctrl;
u32 command, argument;
struct epf_ntb *ntb;
struct device *dev;
int ret;
int i;
ntb = container_of(work, struct epf_ntb, cmd_handler.work);
for (i = 1; i < ntb->db_count; i++) {
if (ntb->epf_db[i]) {
ntb->db |= 1 << (i - 1);
ntb_db_event(&ntb->ntb, i);
ntb->epf_db[i] = 0;
}
}
ctrl = ntb->reg;
command = ctrl->command;
if (!command)
goto reset_handler;
argument = ctrl->argument;
ctrl->command = 0;
ctrl->argument = 0;
ctrl = ntb->reg;
dev = &ntb->epf->dev;
switch (command) {
case COMMAND_CONFIGURE_DOORBELL:
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_TEARDOWN_DOORBELL:
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_CONFIGURE_MW:
ret = epf_ntb_configure_mw(ntb, argument);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_TEARDOWN_MW:
epf_ntb_teardown_mw(ntb, argument);
ctrl->command_status = COMMAND_STATUS_OK;
break;
case COMMAND_LINK_UP:
ntb->linkup = true;
ret = epf_ntb_link_up(ntb, true);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
goto reset_handler;
case COMMAND_LINK_DOWN:
ntb->linkup = false;
ret = epf_ntb_link_up(ntb, false);
if (ret < 0)
ctrl->command_status = COMMAND_STATUS_ERROR;
else
ctrl->command_status = COMMAND_STATUS_OK;
break;
default:
dev_err(dev, "UNKNOWN command: %d\n", command);
break;
}
reset_handler:
queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
msecs_to_jiffies(5));
}
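/*
 * Illustrative sketch, not part of this driver: the HOST rings doorbell 'i'
 * by writing the advertised data word into the doorbell BAR at the entry's
 * offset; with the db_data/db_offset values this driver publishes that is
 * roughly ('db_base' being a hypothetical host-side ioremap() of BAR_DB):
 *
 *	writel(i + 1, db_base + i * sizeof(u32));
 *
 * The poll loop above then sees epf_db[i] != 0, forwards the event with
 * ntb_db_event() and clears the slot.
 */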
/**
* epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
*
 * Clear BAR0 of the EP CONTROLLER which contains the VHOST's config and
 * self scratchpad region (removes inbound ATU configuration). While BAR0 is
* the default self scratchpad BAR, an NTB could have other BARs for self
* scratchpad (because of reserved BARs). This function can get the exact BAR
* used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
*
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note VHOST's
 * peer scratchpad is HOST's self scratchpad.
*
* Returns: void
*/
static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
barno = ntb->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb->epf->bar[barno];
pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
}
/**
* epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Map BAR0 of EP CONTROLLER which contains the VHOST's config and
* self scratchpad region.
*
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
u8 func_no, vfunc_no;
struct device *dev;
int ret;
dev = &ntb->epf->dev;
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
barno = ntb->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb->epf->bar[barno];
ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n");
return ret;
}
return 0;
}
/**
* epf_ntb_config_spad_bar_free() - Free the physical memory associated with
* config + scratchpad region
* @ntb: NTB device that facilitates communication between HOST and VHOST
*/
static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
{
enum pci_barno barno;
barno = ntb->epf_ntb_bar[BAR_CONFIG];
pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
}
/**
* epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
* region
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
 * Allocate the local memory for the config + scratchpad region shown in the
 * layout diagram above. The size of the CONFIG REGION is sizeof(struct
 * epf_ntb_ctrl) and the size of the SCRATCHPAD REGION is obtained from the
 * "spad-count" configfs entry.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
void *base;
int i;
const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
size = epc_features->bar_fixed_size[barno];
align = epc_features->align;
	if (!IS_ALIGNED(size, align))
return -EINVAL;
spad_count = ntb->spad_count;
ctrl_size = sizeof(struct epf_ntb_ctrl);
spad_size = 2 * spad_count * sizeof(u32);
if (!align) {
ctrl_size = roundup_pow_of_two(ctrl_size);
spad_size = roundup_pow_of_two(spad_size);
} else {
ctrl_size = ALIGN(ctrl_size, align);
spad_size = ALIGN(spad_size, align);
}
if (!size)
size = ctrl_size + spad_size;
else if (size < ctrl_size + spad_size)
return -EINVAL;
base = pci_epf_alloc_space(epf, size, barno, align, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
}
ntb->reg = base;
ctrl = ntb->reg;
ctrl->spad_offset = ctrl_size;
ctrl->spad_count = spad_count;
ctrl->num_mws = ntb->num_mws;
ntb->spad_size = spad_size;
ctrl->db_entry_size = sizeof(u32);
for (i = 0; i < ntb->db_count; i++) {
ntb->reg->db_data[i] = 1 + i;
ntb->reg->db_offset[i] = 0;
}
return 0;
}
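/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * layout produced by epf_ntb_config_spad_bar_alloc() above, assuming a
 * hypothetical spad_count = 4 and align = 0 (power-of-two rounding):
 *
 *	ctrl_size = roundup_pow_of_two(sizeof(struct epf_ntb_ctrl));
 *	spad_size = roundup_pow_of_two(2 * 4 * sizeof(u32));	(= 32)
 *
 *	+-----------------------+ offset 0
 *	| struct epf_ntb_ctrl   |  config region, ctrl_size bytes
 *	+-----------------------+ offset ctrl->spad_offset (== ctrl_size)
 *	| 2 * spad_count u32s   |  scratchpad region, one half per side
 *	+-----------------------+
 */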
/**
* epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Configure MSI/MSI-X capability for each interface with number of
* interrupts equal to "db_count" configfs entry.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
struct device *dev;
u32 db_count;
int ret;
dev = &ntb->epf->dev;
epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
if (!(epc_features->msix_capable || epc_features->msi_capable)) {
dev_err(dev, "MSI or MSI-X is required for doorbell\n");
return -EINVAL;
}
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
return -EINVAL;
}
ntb->db_count = db_count;
if (epc_features->msi_capable) {
ret = pci_epc_set_msi(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
16);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
}
}
return 0;
}
/**
* epf_ntb_db_bar_init() - Configure Doorbell window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
u32 align;
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
void __iomem *mw_addr;
enum pci_barno barno;
size_t size = sizeof(u32) * ntb->db_count;
epc_features = pci_epc_get_features(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no);
align = epc_features->align;
if (size < 128)
size = 128;
if (align)
size = ALIGN(size, align);
else
size = roundup_pow_of_two(size);
barno = ntb->epf_ntb_bar[BAR_DB];
mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
if (!mw_addr) {
dev_err(dev, "Failed to allocate OB address\n");
return -ENOMEM;
}
ntb->epf_db = mw_addr;
epf_bar = &ntb->epf->bar[barno];
ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "Doorbell BAR set failed\n");
goto err_alloc_peer_mem;
}
return ret;
err_alloc_peer_mem:
pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
	return ret;
}
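/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * doorbell BAR sizing done above, with hypothetical values. For
 * db_count = 5 and no alignment constraint from the EPC:
 *
 *	size = sizeof(u32) * 5;			(20 bytes)
 *	if (size < 128) size = 128;		(minimum BAR size)
 *	size = roundup_pow_of_two(size);	(stays 128)
 *
 * With align = 4096 the ALIGN() branch is taken instead and the BAR
 * grows to 4096 bytes.
 */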
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
/**
* epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
* allocated in peer's outbound address space
* @ntb: NTB device that facilitates communication between HOST and VHOST
*/
static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
enum pci_barno barno;
barno = ntb->epf_ntb_bar[BAR_DB];
pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
&ntb->epf->bar[barno]);
}
/**
* epf_ntb_mw_bar_init() - Configure Memory window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
{
int ret = 0;
int i;
u64 size;
enum pci_barno barno;
struct device *dev = &ntb->epf->dev;
for (i = 0; i < ntb->num_mws; i++) {
size = ntb->mws_size[i];
barno = ntb->epf_ntb_bar[BAR_MW0 + i];
ntb->epf->bar[barno].barno = barno;
ntb->epf->bar[barno].size = size;
ntb->epf->bar[barno].addr = NULL;
ntb->epf->bar[barno].phys_addr = 0;
ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
PCI_BASE_ADDRESS_MEM_TYPE_64 :
PCI_BASE_ADDRESS_MEM_TYPE_32;
ret = pci_epc_set_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
&ntb->epf->bar[barno]);
if (ret) {
dev_err(dev, "MW set failed\n");
goto err_alloc_mem;
}
/* Allocate EPC outbound memory windows to vpci vntb device */
ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
&ntb->vpci_mw_phy[i],
size);
if (!ntb->vpci_mw_addr[i]) {
ret = -ENOMEM;
dev_err(dev, "Failed to allocate source address\n");
goto err_set_bar;
}
}
return ret;
err_set_bar:
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
&ntb->epf->bar[barno]);
err_alloc_mem:
epf_ntb_mw_bar_clear(ntb, i);
return ret;
}
/**
* epf_ntb_mw_bar_clear() - Clear Memory window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
 * @num_mws: the number of Memory window BARs that are to be cleared
*/
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{
enum pci_barno barno;
int i;
for (i = 0; i < num_mws; i++) {
barno = ntb->epf_ntb_bar[BAR_MW0 + i];
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
&ntb->epf->bar[barno]);
pci_epc_mem_free_addr(ntb->epf->epc,
ntb->vpci_mw_phy[i],
ntb->vpci_mw_addr[i],
ntb->mws_size[i]);
}
}
/**
* epf_ntb_epc_destroy() - Cleanup NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
 * Remove the NTB endpoint function from the EPC and release the EPC.
*/
static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
{
pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
pci_epc_put(ntb->epf->epc);
}
/**
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
enum pci_barno barno;
enum epf_ntb_bar bar;
struct device *dev;
u32 num_mws;
int i;
barno = BAR_0;
num_mws = ntb->num_mws;
dev = &ntb->epf->dev;
epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
/* These are required BARs which are mandatory for NTB functionality */
for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
barno = pci_epc_get_next_free_bar(epc_features, barno);
if (barno < 0) {
dev_err(dev, "Fail to get NTB function BAR\n");
return barno;
}
ntb->epf_ntb_bar[bar] = barno;
}
/* These are optional BARs which don't impact NTB functionality */
for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0) {
			ntb->num_mws = i;
			dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
			break;
		}
ntb->epf_ntb_bar[bar] = barno;
}
return 0;
}
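/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * possible assignment computed by epf_ntb_init_epc_bar() on a controller
 * where all BARs are free and num_mws = 2 (hypothetical):
 *
 *	epf_ntb_bar[BAR_CONFIG] = BAR_0;	(config + self scratchpad)
 *	epf_ntb_bar[BAR_DB]     = BAR_1;	(doorbell)
 *	epf_ntb_bar[BAR_MW0]    = BAR_2;	(mandatory memory window)
 *	epf_ntb_bar[BAR_MW1]    = BAR_3;	(optional memory window)
 *
 * If no free BAR is left for an optional window, num_mws is trimmed and
 * only the windows found so far are exposed.
 */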
/**
* epf_ntb_epc_init() - Initialize NTB interface
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Wrapper to initialize a particular EPC interface and start the workqueue
* to check for commands from HOST. This function will write to the
* EP controller HW for configuring it.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
int ret;
epf = ntb->epf;
dev = &epf->dev;
epc = epf->epc;
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb);
if (ret) {
dev_err(dev, "Config/self SPAD BAR init failed");
return ret;
}
ret = epf_ntb_configure_interrupt(ntb);
if (ret) {
dev_err(dev, "Interrupt configuration failed\n");
goto err_config_interrupt;
}
ret = epf_ntb_db_bar_init(ntb);
if (ret) {
dev_err(dev, "DB BAR init failed\n");
goto err_db_bar_init;
}
ret = epf_ntb_mw_bar_init(ntb);
if (ret) {
dev_err(dev, "MW BAR init failed\n");
goto err_mw_bar_init;
}
if (vfunc_no <= 1) {
ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
if (ret) {
dev_err(dev, "Configuration header write failed\n");
goto err_write_header;
}
}
INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
return 0;
err_write_header:
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
err_mw_bar_init:
epf_ntb_db_bar_clear(ntb);
err_db_bar_init:
err_config_interrupt:
epf_ntb_config_sspad_bar_clear(ntb);
return ret;
}
/**
* epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
* @ntb: NTB device that facilitates communication between HOST and VHOST
*
* Wrapper to cleanup all NTB interfaces.
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
}
#define EPF_NTB_R(_name) \
static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
char *page) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
\
return sprintf(page, "%d\n", ntb->_name); \
}
#define EPF_NTB_W(_name) \
static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
u32 val; \
int ret; \
\
ret = kstrtou32(page, 0, &val); \
if (ret) \
return ret; \
\
ntb->_name = val; \
\
return len; \
}
#define EPF_NTB_MW_R(_name) \
static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
char *page) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
struct device *dev = &ntb->epf->dev; \
int win_no; \
\
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
\
if (win_no <= 0 || win_no > ntb->num_mws) { \
dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
return -EINVAL; \
} \
\
return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
}
#define EPF_NTB_MW_W(_name) \
static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
struct device *dev = &ntb->epf->dev; \
int win_no; \
u64 val; \
int ret; \
\
ret = kstrtou64(page, 0, &val); \
if (ret) \
return ret; \
\
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
\
if (win_no <= 0 || win_no > ntb->num_mws) { \
dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
return -EINVAL; \
} \
\
ntb->mws_size[win_no - 1] = val; \
\
return len; \
}
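/*
 * Illustrative sketch (editor's example, not part of the driver): what
 * EPF_NTB_R(spad_count) expands to, to make the attribute wiring below
 * easier to follow:
 *
 *	static ssize_t epf_ntb_spad_count_show(struct config_item *item,
 *					       char *page)
 *	{
 *		struct config_group *group = to_config_group(item);
 *		struct epf_ntb *ntb = to_epf_ntb(group);
 *
 *		return sprintf(page, "%d\n", ntb->spad_count);
 *	}
 *
 * The MW variants additionally recover the window index from the
 * attribute name via sscanf(#_name, "mw%d", &win_no).
 */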
static ssize_t epf_ntb_num_mws_store(struct config_item *item,
const char *page, size_t len)
{
struct config_group *group = to_config_group(item);
struct epf_ntb *ntb = to_epf_ntb(group);
u32 val;
int ret;
ret = kstrtou32(page, 0, &val);
if (ret)
return ret;
if (val > MAX_MW)
return -EINVAL;
ntb->num_mws = val;
return len;
}
EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_R(vbus_number)
EPF_NTB_W(vbus_number)
EPF_NTB_R(vntb_pid)
EPF_NTB_W(vntb_pid)
EPF_NTB_R(vntb_vid)
EPF_NTB_W(vntb_vid)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_spad_count,
&epf_ntb_attr_db_count,
&epf_ntb_attr_num_mws,
&epf_ntb_attr_mw1,
&epf_ntb_attr_mw2,
&epf_ntb_attr_mw3,
&epf_ntb_attr_mw4,
&epf_ntb_attr_vbus_number,
&epf_ntb_attr_vntb_pid,
&epf_ntb_attr_vntb_vid,
NULL,
};
static const struct config_item_type ntb_group_type = {
.ct_attrs = epf_ntb_attrs,
.ct_owner = THIS_MODULE,
};
/**
* epf_ntb_add_cfs() - Add configfs directory specific to NTB
* @epf: NTB endpoint function device
* @group: A pointer to the config_group structure referencing a group of
* config_items of a specific type that belong to a specific sub-system.
*
* Add configfs directory specific to NTB. This directory will hold
 * NTB specific properties like db_count, spad_count, num_mws, etc.
*
* Returns: Pointer to config_group
*/
static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
struct config_group *group)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
struct config_group *ntb_group = &ntb->group;
struct device *dev = &epf->dev;
config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
return ntb_group;
}
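/*
 * Illustrative usage (editor's example with assumed configfs paths, not
 * part of the driver): once a function instance is created, the
 * attributes registered above can be set from user space before binding
 * the function to a controller:
 *
 *	# cd /sys/kernel/config/pci_ep/functions/pci_epf_vntb/func1
 *	# echo 8 > spad_count
 *	# echo 4 > db_count
 *	# echo 1 > num_mws
 *	# echo 0x100000 > mw1
 *	# echo 0xffff > vntb_vid
 *	# echo 0xffff > vntb_pid
 */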
/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/
static u32 pci_space[] = {
0xffffffff, /* Device ID, Vendor ID */
0, /* Status, Command */
0xffffffff, /* Base Class, Subclass, Prog Intf, Revision ID */
0x40, /* BIST, Header Type, Latency Timer, Cache Line Size */
0, /* BAR 0 */
0, /* BAR 1 */
0, /* BAR 2 */
0, /* BAR 3 */
0, /* BAR 4 */
0, /* BAR 5 */
0, /* Cardbus CIS Pointer */
0, /* Subsystem ID, Subsystem Vendor ID */
0, /* ROM Base Address */
0, /* Reserved, Capabilities Pointer */
0, /* Reserved */
0, /* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
if (devfn == 0) {
memcpy(val, ((u8 *)pci_space) + where, size);
return PCIBIOS_SUCCESSFUL;
}
return PCIBIOS_DEVICE_NOT_FOUND;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
return 0;
}
static struct pci_ops vpci_ops = {
.read = pci_read,
.write = pci_write,
};
static int vpci_scan_bus(void *sysdata)
{
struct pci_bus *vpci_bus;
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
	if (!vpci_bus) {
		pr_err("create pci bus failed\n");
		return -EINVAL;
	}
pci_bus_add_devices(vpci_bus);
return 0;
}
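/*
 * Illustrative sketch (editor's example, not part of the driver): how
 * pci_read() above serves a config access from the static pci_space[]
 * table. Reading the 16-bit vendor ID of devfn 0 copies straight out of
 * the table, which epf_ntb_bind() patches with the configured
 * vntb_vid/vntb_pid:
 *
 *	u32 id = 0;
 *
 *	pci_read(bus, 0, PCI_VENDOR_ID, 2, &id);
 *	(id now holds ntb->vntb_vid once pci_space[0] has been updated)
 */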
/*==================== Virtual PCIe NTB driver ==========================*/
static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
struct epf_ntb *ndev = ntb_ndev(ntb);
return ndev->num_mws;
}
static int vntb_epf_spad_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->spad_count;
}
static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->num_mws;
}
static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
}
static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
return 0;
}
static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
int ret;
struct device *dev;
dev = &ntb->ntb.dev;
barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
epf_bar = &ntb->epf->bar[barno];
epf_bar->phys_addr = addr;
epf_bar->barno = barno;
epf_bar->size = size;
ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
if (ret) {
dev_err(dev, "failure set mw trans\n");
return ret;
}
return 0;
}
static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
return 0;
}
static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
phys_addr_t *base, resource_size_t *size)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
if (base)
*base = ntb->vpci_mw_phy[idx];
if (size)
*size = ntb->mws_size[idx];
return 0;
}
static int vntb_epf_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed,
enum ntb_width max_width)
{
return 0;
}
static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
u32 val;
void __iomem *base = (void __iomem *)ntb->reg;
val = readl(base + off + ct + idx * sizeof(u32));
return val;
}
static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
struct epf_ntb_ctrl *ctrl = ntb->reg;
int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
void __iomem *base = (void __iomem *)ntb->reg;
writel(val, base + off + ct + idx * sizeof(u32));
return 0;
}
static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
struct epf_ntb_ctrl *ctrl = ntb->reg;
int off = ctrl->spad_offset;
void __iomem *base = (void __iomem *)ntb->reg;
u32 val;
val = readl(base + off + idx * sizeof(u32));
return val;
}
static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
struct epf_ntb_ctrl *ctrl = ntb->reg;
int off = ctrl->spad_offset;
void __iomem *base = (void __iomem *)ntb->reg;
writel(val, base + off + idx * sizeof(u32));
return 0;
}
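/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * scratchpad addressing used by the four accessors above. Within the
 * config + scratchpad region, the peer SPADs start at spad_offset and
 * the self SPADs follow after spad_count entries:
 *
 *	base + spad_offset + idx * sizeof(u32)		peer SPAD[idx]
 *	base + spad_offset + ct + idx * sizeof(u32)	self SPAD[idx]
 *
 * where ct = spad_count * sizeof(u32).
 */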
static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
{
	u32 interrupt_num = ffs(db_bits);	/* ffs() is already 1-based, matching MSI numbering */
struct epf_ntb *ntb = ntb_ndev(ndev);
u8 func_no, vfunc_no;
int ret;
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
ret = pci_epc_raise_irq(ntb->epf->epc,
func_no,
vfunc_no,
PCI_EPC_IRQ_MSI,
				interrupt_num);
if (ret)
dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
return ret;
}
static u64 vntb_epf_db_read(struct ntb_dev *ndev)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
return ntb->db;
}
static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
if (addr_align)
*addr_align = SZ_4K;
if (size_align)
*size_align = 1;
if (size_max)
*size_max = ntb->mws_size[idx];
return 0;
}
static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
enum ntb_speed *speed,
enum ntb_width *width)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
return ntb->reg->link_status;
}
static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
{
return 0;
}
static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
ntb->db &= ~db_bits;
return 0;
}
static int vntb_epf_link_disable(struct ntb_dev *ntb)
{
return 0;
}
static const struct ntb_dev_ops vntb_epf_ops = {
.mw_count = vntb_epf_mw_count,
.spad_count = vntb_epf_spad_count,
.peer_mw_count = vntb_epf_peer_mw_count,
.db_valid_mask = vntb_epf_db_valid_mask,
.db_set_mask = vntb_epf_db_set_mask,
.mw_set_trans = vntb_epf_mw_set_trans,
.mw_clear_trans = vntb_epf_mw_clear_trans,
.peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
.link_enable = vntb_epf_link_enable,
.spad_read = vntb_epf_spad_read,
.spad_write = vntb_epf_spad_write,
.peer_spad_read = vntb_epf_peer_spad_read,
.peer_spad_write = vntb_epf_peer_spad_write,
.peer_db_set = vntb_epf_peer_db_set,
.db_read = vntb_epf_db_read,
.mw_get_align = vntb_epf_mw_get_align,
.link_is_up = vntb_epf_link_is_up,
.db_clear_mask = vntb_epf_db_clear_mask,
.db_clear = vntb_epf_db_clear,
.link_disable = vntb_epf_link_disable,
};
static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
struct device *dev = &pdev->dev;
ndev->ntb.pdev = pdev;
ndev->ntb.topo = NTB_TOPO_NONE;
ndev->ntb.ops = &vntb_epf_ops;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
return -EINVAL;
}
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
goto err_register_dev;
}
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
return 0;
err_register_dev:
put_device(&ndev->ntb.dev);
return -EINVAL;
}
static struct pci_device_id pci_vntb_table[] = {
{
PCI_DEVICE(0xffff, 0xffff),
},
{},
};
static struct pci_driver vntb_pci_driver = {
.name = "pci-vntb",
.id_table = pci_vntb_table,
.probe = pci_vntb_probe,
};
/* ============ PCIe EPF Driver Bind ====================*/
/**
* epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
* @epf: NTB endpoint function device
*
 * Initialize the endpoint controller associated with the NTB function
 * device. Invoked when the primary interface is bound to the EPC device.
 * This function returns early (successfully) until the EPC interface has
 * been bound.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_bind(struct pci_epf *epf)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
int ret;
if (!epf->epc) {
dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
return 0;
}
ret = epf_ntb_init_epc_bar(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
goto err_bar_init;
}
ret = epf_ntb_config_spad_bar_alloc(ntb);
if (ret) {
dev_err(dev, "Failed to allocate BAR memory\n");
goto err_bar_alloc;
}
ret = epf_ntb_epc_init(ntb);
if (ret) {
dev_err(dev, "Failed to initialize EPC\n");
goto err_bar_alloc;
}
epf_set_drvdata(epf, ntb);
pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
pci_vntb_table[0].vendor = ntb->vntb_vid;
pci_vntb_table[0].device = ntb->vntb_pid;
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
goto err_bar_alloc;
}
vpci_scan_bus(ntb);
return 0;
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
err_bar_init:
epf_ntb_epc_destroy(ntb);
return ret;
}
/**
* epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
* @epf: NTB endpoint function device
*
* Cleanup the initialization from epf_ntb_bind()
*/
static void epf_ntb_unbind(struct pci_epf *epf)
{
struct epf_ntb *ntb = epf_get_drvdata(epf);
epf_ntb_epc_cleanup(ntb);
epf_ntb_config_spad_bar_free(ntb);
epf_ntb_epc_destroy(ntb);
pci_unregister_driver(&vntb_pci_driver);
}
/* EPF driver ops */
static struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
};
/**
* epf_ntb_probe() - Probe NTB function driver
* @epf: NTB endpoint function device
* @id: NTB endpoint function device ID
*
 * Probe NTB function driver when the endpoint function bus detects an NTB
* endpoint function.
*
* Returns: Zero for success, or an error code in case of failure
*/
static int epf_ntb_probe(struct pci_epf *epf,
const struct pci_epf_device_id *id)
{
struct epf_ntb *ntb;
struct device *dev;
dev = &epf->dev;
ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
if (!ntb)
return -ENOMEM;
epf->header = &epf_ntb_header;
ntb->epf = epf;
ntb->vbus_number = 0xff;
epf_set_drvdata(epf, ntb);
dev_info(dev, "pci-ep epf driver loaded\n");
return 0;
}
static const struct pci_epf_device_id epf_ntb_ids[] = {
{
.name = "pci_epf_vntb",
},
{},
};
static struct pci_epf_driver epf_ntb_driver = {
.driver.name = "pci_epf_vntb",
.probe = epf_ntb_probe,
.id_table = epf_ntb_ids,
.ops = &epf_ntb_ops,
.owner = THIS_MODULE,
};
static int __init epf_ntb_init(void)
{
int ret;
	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
					    WQ_HIGHPRI, 0);
	if (!kpcintb_workqueue) {
		pr_err("Failed to allocate the kpcintb work queue\n");
		return -ENOMEM;
	}
ret = pci_epf_register_driver(&epf_ntb_driver);
if (ret) {
destroy_workqueue(kpcintb_workqueue);
pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
return ret;
}
return 0;
}
module_init(epf_ntb_init);
static void __exit epf_ntb_exit(void)
{
pci_epf_unregister_driver(&epf_ntb_driver);
destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);
MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Frank Li <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/pci/endpoint/functions/pci-epf-vntb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microsemi Switchtec(tm) PCIe Management Driver
* Copyright (c) 2017, Microsemi Corporation
*/
#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
"Enable the use of the DMA MRPC feature");
static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
MRPC_IDLE = 0,
MRPC_QUEUED,
MRPC_RUNNING,
MRPC_DONE,
MRPC_IO_ERROR,
};
struct switchtec_user {
struct switchtec_dev *stdev;
enum mrpc_state state;
wait_queue_head_t cmd_comp;
struct kref kref;
struct list_head list;
bool cmd_done;
u32 cmd;
u32 status;
u32 return_code;
size_t data_len;
size_t read_len;
unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
int event_cnt;
};
/*
* The MMIO reads to the device_id register should always return the device ID
* of the device, otherwise the firmware is probably stuck or unreachable
* due to a firmware reset which clears PCI state including the BARs and Memory
* Space Enable bits.
*/
static int is_firmware_running(struct switchtec_dev *stdev)
{
u32 device = ioread32(&stdev->mmio_sys_info->device_id);
return stdev->pdev->device == device;
}
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
struct switchtec_user *stuser;
stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
if (!stuser)
return ERR_PTR(-ENOMEM);
get_device(&stdev->dev);
stuser->stdev = stdev;
kref_init(&stuser->kref);
INIT_LIST_HEAD(&stuser->list);
init_waitqueue_head(&stuser->cmd_comp);
stuser->event_cnt = atomic_read(&stdev->event_cnt);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
return stuser;
}
static void stuser_free(struct kref *kref)
{
struct switchtec_user *stuser;
stuser = container_of(kref, struct switchtec_user, kref);
dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
put_device(&stuser->stdev->dev);
kfree(stuser);
}
static void stuser_put(struct switchtec_user *stuser)
{
kref_put(&stuser->kref, stuser_free);
}
static void stuser_set_state(struct switchtec_user *stuser,
enum mrpc_state state)
{
/* requires the mrpc_mutex to already be held when called */
static const char * const state_names[] = {
[MRPC_IDLE] = "IDLE",
[MRPC_QUEUED] = "QUEUED",
[MRPC_RUNNING] = "RUNNING",
[MRPC_DONE] = "DONE",
[MRPC_IO_ERROR] = "IO_ERROR",
};
stuser->state = state;
	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s\n",
stuser, state_names[state]);
}
static void mrpc_complete_cmd(struct switchtec_dev *stdev);
static void flush_wc_buf(struct switchtec_dev *stdev)
{
struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	/*
	 * Read the odb (outbound doorbell) register to flush the write
	 * combining buffer: it is handled by low-latency hardware and a
	 * read has no side effects.
	 */
mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
SWITCHTEC_NTB_REG_DBMSG_OFFSET;
ioread32(&mmio_dbmsg->odb);
}
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (stdev->mrpc_busy)
return;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
if (stdev->dma_mrpc) {
stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
}
stuser_set_state(stuser, MRPC_RUNNING);
stdev->mrpc_busy = 1;
memcpy_toio(&stdev->mmio_mrpc->input_data,
stuser->data, stuser->data_len);
flush_wc_buf(stdev);
iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
}
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_dev *stdev = stuser->stdev;
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
stuser->cmd_done = false;
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
return 0;
}
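/*
 * Illustrative sketch (editor's summary, not part of the driver): the
 * MRPC lifecycle a command moves through via the helpers in this file:
 *
 *	MRPC_IDLE --write()--> MRPC_QUEUED --mrpc_cmd_submit()-->
 *	MRPC_RUNNING --completion IRQ or timeout poll-->
 *	MRPC_DONE (or MRPC_IO_ERROR) --read()--> MRPC_IDLE
 */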
static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
struct switchtec_user, list);
stuser->cmd_done = true;
wake_up_interruptible(&stuser->cmd_comp);
list_del_init(&stuser->list);
stuser_put(stuser);
stdev->mrpc_busy = 0;
mrpc_cmd_submit(stdev);
}
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
if (stdev->dma_mrpc)
stuser->status = stdev->dma_mrpc->status;
else
stuser->status = ioread32(&stdev->mmio_mrpc->status);
if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
return;
stuser_set_state(stuser, MRPC_DONE);
stuser->return_code = 0;
if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
goto out;
if (stdev->dma_mrpc)
stuser->return_code = stdev->dma_mrpc->rtn_code;
else
stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
if (stuser->return_code != 0)
goto out;
if (stdev->dma_mrpc)
memcpy(stuser->data, &stdev->dma_mrpc->data,
stuser->read_len);
else
memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
stuser->read_len);
out:
mrpc_cleanup_cmd(stdev);
}
static void mrpc_event_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
stdev = container_of(work, struct switchtec_dev, mrpc_work);
dev_dbg(&stdev->dev, "%s\n", __func__);
mutex_lock(&stdev->mrpc_mutex);
cancel_delayed_work(&stdev->mrpc_timeout);
mrpc_complete_cmd(stdev);
mutex_unlock(&stdev->mrpc_mutex);
}
static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next,
struct switchtec_user, list);
stuser_set_state(stuser, MRPC_IO_ERROR);
mrpc_cleanup_cmd(stdev);
}
static void mrpc_timeout_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
u32 status;
stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
dev_dbg(&stdev->dev, "%s\n", __func__);
mutex_lock(&stdev->mrpc_mutex);
if (!is_firmware_running(stdev)) {
mrpc_error_complete_cmd(stdev);
goto out;
}
if (stdev->dma_mrpc)
status = stdev->dma_mrpc->status;
else
status = ioread32(&stdev->mmio_mrpc->status);
if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
goto out;
}
mrpc_complete_cmd(stdev);
out:
mutex_unlock(&stdev->mrpc_mutex);
}
static ssize_t device_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
u32 ver;
ver = ioread32(&stdev->mmio_sys_info->device_version);
return sysfs_emit(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);
static ssize_t fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
u32 ver;
ver = ioread32(&stdev->mmio_sys_info->firmware_version);
return sysfs_emit(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
int i;
memcpy_fromio(buf, attr, len);
buf[len] = '\n';
buf[len + 1] = 0;
for (i = len - 1; i > 0; i--) {
if (buf[i] != ' ')
break;
buf[i] = '\n';
buf[i + 1] = 0;
}
return strlen(buf);
}
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct switchtec_dev *stdev = to_stdev(dev); \
struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
if (stdev->gen == SWITCHTEC_GEN3) \
return io_string_show(buf, &si->gen3.field, \
sizeof(si->gen3.field)); \
else if (stdev->gen >= SWITCHTEC_GEN4) \
return io_string_show(buf, &si->gen4.field, \
sizeof(si->gen4.field)); \
else \
return -EOPNOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)
DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
static ssize_t component_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
/* component_vendor field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
return sysfs_emit(buf, "none\n");
return io_string_show(buf, &si->gen3.component_vendor,
sizeof(si->gen3.component_vendor));
}
static DEVICE_ATTR_RO(component_vendor);
static ssize_t component_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
/* component_id field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
return sysfs_emit(buf, "none\n");
return sysfs_emit(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);
static ssize_t component_revision_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
/* component_revision field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
return sysfs_emit(buf, "255\n");
return sysfs_emit(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);
static ssize_t partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
return sysfs_emit(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);
static ssize_t partition_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
return sysfs_emit(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);
static struct attribute *switchtec_device_attrs[] = {
&dev_attr_device_version.attr,
&dev_attr_fw_version.attr,
&dev_attr_vendor_id.attr,
&dev_attr_product_id.attr,
&dev_attr_product_revision.attr,
&dev_attr_component_vendor.attr,
&dev_attr_component_id.attr,
&dev_attr_component_revision.attr,
&dev_attr_partition.attr,
&dev_attr_partition_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(switchtec_device);
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
struct switchtec_dev *stdev;
struct switchtec_user *stuser;
stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
stuser = stuser_create(stdev);
if (IS_ERR(stuser))
return PTR_ERR(stuser);
filp->private_data = stuser;
stream_open(inode, filp);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
return 0;
}
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
struct switchtec_user *stuser = filp->private_data;
stuser_put(stuser);
return 0;
}
static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
if (mutex_lock_interruptible(&stdev->mrpc_mutex))
return -EINTR;
if (!stdev->alive) {
mutex_unlock(&stdev->mrpc_mutex);
return -ENODEV;
}
return 0;
}
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
size_t size, loff_t *off)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
int rc;
if (size < sizeof(stuser->cmd) ||
size > sizeof(stuser->cmd) + sizeof(stuser->data))
return -EINVAL;
stuser->data_len = size - sizeof(stuser->cmd);
rc = lock_mutex_and_test_alive(stdev);
if (rc)
return rc;
if (stuser->state != MRPC_IDLE) {
rc = -EBADE;
goto out;
}
rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
if (rc) {
rc = -EFAULT;
goto out;
}
if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
(MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
!capable(CAP_SYS_ADMIN)) {
rc = -EPERM;
goto out;
}
data += sizeof(stuser->cmd);
rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
if (rc) {
rc = -EFAULT;
goto out;
}
rc = mrpc_queue_cmd(stuser);
out:
mutex_unlock(&stdev->mrpc_mutex);
if (rc)
return rc;
return size;
}
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
size_t size, loff_t *off)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
int rc;
if (size < sizeof(stuser->cmd) ||
size > sizeof(stuser->cmd) + sizeof(stuser->data))
return -EINVAL;
rc = lock_mutex_and_test_alive(stdev);
if (rc)
return rc;
if (stuser->state == MRPC_IDLE) {
mutex_unlock(&stdev->mrpc_mutex);
return -EBADE;
}
stuser->read_len = size - sizeof(stuser->return_code);
mutex_unlock(&stdev->mrpc_mutex);
if (filp->f_flags & O_NONBLOCK) {
if (!stuser->cmd_done)
return -EAGAIN;
} else {
rc = wait_event_interruptible(stuser->cmd_comp,
stuser->cmd_done);
if (rc < 0)
return rc;
}
rc = lock_mutex_and_test_alive(stdev);
if (rc)
return rc;
if (stuser->state == MRPC_IO_ERROR) {
mutex_unlock(&stdev->mrpc_mutex);
return -EIO;
}
if (stuser->state != MRPC_DONE) {
mutex_unlock(&stdev->mrpc_mutex);
return -EBADE;
}
rc = copy_to_user(data, &stuser->return_code,
sizeof(stuser->return_code));
if (rc) {
mutex_unlock(&stdev->mrpc_mutex);
return -EFAULT;
}
data += sizeof(stuser->return_code);
rc = copy_to_user(data, &stuser->data,
size - sizeof(stuser->return_code));
if (rc) {
mutex_unlock(&stdev->mrpc_mutex);
return -EFAULT;
}
stuser_set_state(stuser, MRPC_IDLE);
mutex_unlock(&stdev->mrpc_mutex);
if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
stuser->status == SWITCHTEC_MRPC_STATUS_ERROR)
return size;
else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
return -ENXIO;
else
return -EBADMSG;
}
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
__poll_t ret = 0;
poll_wait(filp, &stuser->cmd_comp, wait);
poll_wait(filp, &stdev->event_wq, wait);
if (lock_mutex_and_test_alive(stdev))
return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;
mutex_unlock(&stdev->mrpc_mutex);
if (stuser->cmd_done)
ret |= EPOLLIN | EPOLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
ret |= EPOLLPRI | EPOLLRDBAND;
return ret;
}
static int ioctl_flash_info(struct switchtec_dev *stdev,
struct switchtec_ioctl_flash_info __user *uinfo)
{
struct switchtec_ioctl_flash_info info = {0};
struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
if (stdev->gen == SWITCHTEC_GEN3) {
info.flash_length = ioread32(&fi->gen3.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
} else if (stdev->gen >= SWITCHTEC_GEN4) {
info.flash_length = ioread32(&fi->gen4.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
} else {
return -EOPNOTSUPP;
}
if (copy_to_user(uinfo, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
struct partition_info __iomem *pi)
{
info->address = ioread32(&pi->address);
info->length = ioread32(&pi->length);
}
static int flash_part_info_gen3(struct switchtec_dev *stdev,
struct switchtec_ioctl_flash_part_info *info)
{
struct flash_info_regs_gen3 __iomem *fi =
&stdev->mmio_flash_info->gen3;
struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
u32 active_addr = -1;
switch (info->flash_partition) {
case SWITCHTEC_IOCTL_PART_CFG0:
active_addr = ioread32(&fi->active_cfg);
set_fw_info_part(info, &fi->cfg0);
if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_CFG1:
active_addr = ioread32(&fi->active_cfg);
set_fw_info_part(info, &fi->cfg1);
if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG0:
active_addr = ioread32(&fi->active_img);
set_fw_info_part(info, &fi->img0);
if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG1:
active_addr = ioread32(&fi->active_img);
set_fw_info_part(info, &fi->img1);
if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_NVLOG:
set_fw_info_part(info, &fi->nvlog);
break;
case SWITCHTEC_IOCTL_PART_VENDOR0:
set_fw_info_part(info, &fi->vendor[0]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR1:
set_fw_info_part(info, &fi->vendor[1]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR2:
set_fw_info_part(info, &fi->vendor[2]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR3:
set_fw_info_part(info, &fi->vendor[3]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR4:
set_fw_info_part(info, &fi->vendor[4]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR5:
set_fw_info_part(info, &fi->vendor[5]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR6:
set_fw_info_part(info, &fi->vendor[6]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR7:
set_fw_info_part(info, &fi->vendor[7]);
break;
default:
return -EINVAL;
}
if (info->address == active_addr)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
return 0;
}
static int flash_part_info_gen4(struct switchtec_dev *stdev,
struct switchtec_ioctl_flash_part_info *info)
{
struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
struct active_partition_info_gen4 __iomem *af = &fi->active_flag;
switch (info->flash_partition) {
case SWITCHTEC_IOCTL_PART_MAP_0:
set_fw_info_part(info, &fi->map0);
break;
case SWITCHTEC_IOCTL_PART_MAP_1:
set_fw_info_part(info, &fi->map1);
break;
case SWITCHTEC_IOCTL_PART_KEY_0:
set_fw_info_part(info, &fi->key0);
if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_KEY_1:
set_fw_info_part(info, &fi->key1);
if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_BL2_0:
set_fw_info_part(info, &fi->bl2_0);
if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_BL2_1:
set_fw_info_part(info, &fi->bl2_1);
if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_CFG0:
set_fw_info_part(info, &fi->cfg0);
if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_CFG1:
set_fw_info_part(info, &fi->cfg1);
if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG0:
set_fw_info_part(info, &fi->img0);
if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG1:
set_fw_info_part(info, &fi->img1);
if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_NVLOG:
set_fw_info_part(info, &fi->nvlog);
break;
case SWITCHTEC_IOCTL_PART_VENDOR0:
set_fw_info_part(info, &fi->vendor[0]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR1:
set_fw_info_part(info, &fi->vendor[1]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR2:
set_fw_info_part(info, &fi->vendor[2]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR3:
set_fw_info_part(info, &fi->vendor[3]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR4:
set_fw_info_part(info, &fi->vendor[4]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR5:
set_fw_info_part(info, &fi->vendor[5]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR6:
set_fw_info_part(info, &fi->vendor[6]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR7:
set_fw_info_part(info, &fi->vendor[7]);
break;
default:
return -EINVAL;
}
return 0;
}
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
struct switchtec_ioctl_flash_part_info __user *uinfo)
{
int ret;
struct switchtec_ioctl_flash_part_info info = {0};
if (copy_from_user(&info, uinfo, sizeof(info)))
return -EFAULT;
if (stdev->gen == SWITCHTEC_GEN3) {
ret = flash_part_info_gen3(stdev, &info);
if (ret)
return ret;
} else if (stdev->gen >= SWITCHTEC_GEN4) {
ret = flash_part_info_gen4(stdev, &info);
if (ret)
return ret;
} else {
return -EOPNOTSUPP;
}
if (copy_to_user(uinfo, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int ioctl_event_summary(struct switchtec_dev *stdev,
struct switchtec_user *stuser,
struct switchtec_ioctl_event_summary __user *usum,
size_t size)
{
struct switchtec_ioctl_event_summary *s;
int i;
u32 reg;
int ret = 0;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
s->global = ioread32(&stdev->mmio_sw_event->global_summary);
s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
s->part[i] = reg;
}
for (i = 0; i < stdev->pff_csr_count; i++) {
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
s->pff[i] = reg;
}
if (copy_to_user(usum, s, size)) {
ret = -EFAULT;
goto error_case;
}
stuser->event_cnt = atomic_read(&stdev->event_cnt);
error_case:
kfree(s);
return ret;
}
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
size_t offset, int index)
{
return (void __iomem *)stdev->mmio_sw_event + offset;
}
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
size_t offset, int index)
{
return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
size_t offset, int index)
{
return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
static const struct event_reg {
size_t offset;
u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
size_t offset, int index);
} event_regs[] = {
EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
twi_mrpc_comp_async_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
cli_mrpc_comp_async_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
intercomm_notify_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
int event_id, int index)
{
size_t off;
if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
return (u32 __iomem *)ERR_PTR(-EINVAL);
off = event_regs[event_id].offset;
if (event_regs[event_id].map_reg == part_ev_reg) {
if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
index = stdev->partition;
else if (index < 0 || index >= stdev->partition_count)
return (u32 __iomem *)ERR_PTR(-EINVAL);
} else if (event_regs[event_id].map_reg == pff_ev_reg) {
if (index < 0 || index >= stdev->pff_csr_count)
return (u32 __iomem *)ERR_PTR(-EINVAL);
}
return event_regs[event_id].map_reg(stdev, off, index);
}
static int event_ctl(struct switchtec_dev *stdev,
struct switchtec_ioctl_event_ctl *ctl)
{
int i;
u32 __iomem *reg;
u32 hdr;
reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
if (IS_ERR(reg))
return PTR_ERR(reg);
hdr = ioread32(reg);
if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
ctl->data[i] = ioread32(®[i + 1]);
ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
ctl->count = (hdr >> 5) & 0xFF;
if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
hdr &= ~SWITCHTEC_EVENT_CLEAR;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
hdr |= SWITCHTEC_EVENT_EN_IRQ;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
hdr |= SWITCHTEC_EVENT_EN_LOG;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
hdr &= ~SWITCHTEC_EVENT_EN_LOG;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
hdr |= SWITCHTEC_EVENT_EN_CLI;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
hdr &= ~SWITCHTEC_EVENT_EN_CLI;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
hdr |= SWITCHTEC_EVENT_FATAL;
if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
hdr &= ~SWITCHTEC_EVENT_FATAL;
if (ctl->flags)
iowrite32(hdr, reg);
ctl->flags = 0;
if (hdr & SWITCHTEC_EVENT_EN_IRQ)
ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
if (hdr & SWITCHTEC_EVENT_EN_LOG)
ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
if (hdr & SWITCHTEC_EVENT_EN_CLI)
ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
if (hdr & SWITCHTEC_EVENT_FATAL)
ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
return 0;
}
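/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * flag round-trip performed by event_ctl() above. Caller-supplied EN/DIS
 * request flags are applied to the event header, then ctl->flags is
 * rewritten to report the resulting hardware state (hypothetical values):
 *
 *	ctl.flags = SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
 *	event_ctl(stdev, &ctl);
 *	(ctl.flags now holds only the EN_* bits actually set in the
 *	 header, e.g. EN_POLL | EN_LOG)
 */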
static int ioctl_event_ctl(struct switchtec_dev *stdev,
struct switchtec_ioctl_event_ctl __user *uctl)
{
int ret;
int nr_idxs;
unsigned int event_flags;
struct switchtec_ioctl_event_ctl ctl;
if (copy_from_user(&ctl, uctl, sizeof(ctl)))
return -EFAULT;
if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
return -EINVAL;
if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
return -EINVAL;
if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
if (event_regs[ctl.event_id].map_reg == global_ev_reg)
nr_idxs = 1;
else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
nr_idxs = stdev->partition_count;
else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
nr_idxs = stdev->pff_csr_count;
else
return -EINVAL;
event_flags = ctl.flags;
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
ctl.flags = event_flags;
ret = event_ctl(stdev, &ctl);
if (ret < 0 && ret != -EOPNOTSUPP)
return ret;
}
} else {
ret = event_ctl(stdev, &ctl);
if (ret < 0)
return ret;
}
if (copy_to_user(uctl, &ctl, sizeof(ctl)))
return -EFAULT;
return 0;
}
static int ioctl_pff_to_port(struct switchtec_dev *stdev,
struct switchtec_ioctl_pff_port __user *up)
{
int i, part;
u32 reg;
struct part_cfg_regs __iomem *pcfg;
struct switchtec_ioctl_pff_port p;
if (copy_from_user(&p, up, sizeof(p)))
return -EFAULT;
p.port = -1;
for (part = 0; part < stdev->partition_count; part++) {
pcfg = &stdev->mmio_part_cfg_all[part];
p.partition = part;
reg = ioread32(&pcfg->usp_pff_inst_id);
if (reg == p.pff) {
p.port = 0;
break;
}
reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
if (reg == p.pff) {
p.port = SWITCHTEC_IOCTL_PFF_VEP;
break;
}
for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
if (reg != p.pff)
continue;
p.port = i + 1;
break;
}
if (p.port != -1)
break;
}
if (copy_to_user(up, &p, sizeof(p)))
return -EFAULT;
return 0;
}
static int ioctl_port_to_pff(struct switchtec_dev *stdev,
struct switchtec_ioctl_pff_port __user *up)
{
struct switchtec_ioctl_pff_port p;
struct part_cfg_regs __iomem *pcfg;
if (copy_from_user(&p, up, sizeof(p)))
return -EFAULT;
if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
pcfg = stdev->mmio_part_cfg;
else if (p.partition < stdev->partition_count)
pcfg = &stdev->mmio_part_cfg_all[p.partition];
else
return -EINVAL;
switch (p.port) {
case 0:
p.pff = ioread32(&pcfg->usp_pff_inst_id);
break;
case SWITCHTEC_IOCTL_PFF_VEP:
p.pff = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
break;
default:
if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
return -EINVAL;
p.port = array_index_nospec(p.port,
ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
break;
}
if (copy_to_user(up, &p, sizeof(p)))
return -EFAULT;
return 0;
}
static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
int rc;
void __user *argp = (void __user *)arg;
rc = lock_mutex_and_test_alive(stdev);
if (rc)
return rc;
switch (cmd) {
case SWITCHTEC_IOCTL_FLASH_INFO:
rc = ioctl_flash_info(stdev, argp);
break;
case SWITCHTEC_IOCTL_FLASH_PART_INFO:
rc = ioctl_flash_part_info(stdev, argp);
break;
case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
rc = ioctl_event_summary(stdev, stuser, argp,
sizeof(struct switchtec_ioctl_event_summary_legacy));
break;
case SWITCHTEC_IOCTL_EVENT_CTL:
rc = ioctl_event_ctl(stdev, argp);
break;
case SWITCHTEC_IOCTL_PFF_TO_PORT:
rc = ioctl_pff_to_port(stdev, argp);
break;
case SWITCHTEC_IOCTL_PORT_TO_PFF:
rc = ioctl_port_to_pff(stdev, argp);
break;
case SWITCHTEC_IOCTL_EVENT_SUMMARY:
rc = ioctl_event_summary(stdev, stuser, argp,
sizeof(struct switchtec_ioctl_event_summary));
break;
default:
rc = -ENOTTY;
break;
}
mutex_unlock(&stdev->mrpc_mutex);
return rc;
}
static const struct file_operations switchtec_fops = {
.owner = THIS_MODULE,
.open = switchtec_dev_open,
.release = switchtec_dev_release,
.write = switchtec_dev_write,
.read = switchtec_dev_read,
.poll = switchtec_dev_poll,
.unlocked_ioctl = switchtec_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
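/*
 * Illustrative user-space usage (editor's example with an assumed device
 * node, not part of the driver): an MRPC command is issued by writing the
 * 32-bit command number followed by its payload, then reading back the
 * 32-bit return code and any output data through the file operations
 * above:
 *
 *	int fd = open("/dev/switchtec0", O_RDWR);
 *	struct { uint32_t cmd; uint8_t data[4]; } in = { .cmd = my_cmd };
 *	uint32_t out[2];
 *
 *	write(fd, &in, sizeof(in));	(queues the MRPC command)
 *	read(fd, out, sizeof(out));	(blocks until MRPC_DONE;
 *					 out[0] is the return code)
 */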
static void link_event_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
stdev = container_of(work, struct switchtec_dev, link_event_work);
if (stdev->link_notifier)
stdev->link_notifier(stdev);
}
static void check_link_state_events(struct switchtec_dev *stdev)
{
int idx;
u32 reg;
int count;
int occurred = 0;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
count = (reg >> 5) & 0xFF;
if (count != stdev->link_event_count[idx]) {
occurred = 1;
stdev->link_event_count[idx] = count;
}
}
if (occurred)
schedule_work(&stdev->link_event_work);
}
static void enable_link_state_events(struct switchtec_dev *stdev)
{
int idx;
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_pff_csr[idx].link_state_hdr);
}
}
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
flush_wc_buf(stdev);
iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
if (stdev->dma_mrpc) {
iowrite32(0, &stdev->mmio_mrpc->dma_en);
flush_wc_buf(stdev);
writeq(0, &stdev->mmio_mrpc->dma_addr);
dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
}
kfree(stdev);
}
static void stdev_kill(struct switchtec_dev *stdev)
{
struct switchtec_user *stuser, *tmpuser;
pci_clear_master(stdev->pdev);
cancel_delayed_work_sync(&stdev->mrpc_timeout);
/* Mark the hardware as unavailable and complete all completions */
mutex_lock(&stdev->mrpc_mutex);
stdev->alive = false;
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
stuser->cmd_done = true;
wake_up_interruptible(&stuser->cmd_comp);
list_del_init(&stuser->list);
stuser_put(stuser);
}
mutex_unlock(&stdev->mrpc_mutex);
/* Wake up any users waiting on event_wq */
wake_up_interruptible(&stdev->event_wq);
}
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
struct switchtec_dev *stdev;
int minor;
struct device *dev;
struct cdev *cdev;
int rc;
stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!stdev)
return ERR_PTR(-ENOMEM);
stdev->alive = true;
stdev->pdev = pdev;
INIT_LIST_HEAD(&stdev->mrpc_queue);
mutex_init(&stdev->mrpc_mutex);
stdev->mrpc_busy = 0;
INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
INIT_WORK(&stdev->link_event_work, link_event_work);
init_waitqueue_head(&stdev->event_wq);
atomic_set(&stdev->event_cnt, 0);
dev = &stdev->dev;
device_initialize(dev);
dev->class = switchtec_class;
dev->parent = &pdev->dev;
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
if (minor < 0) {
rc = minor;
goto err_put;
}
dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
dev_set_name(dev, "switchtec%d", minor);
cdev = &stdev->cdev;
cdev_init(cdev, &switchtec_fops);
cdev->owner = THIS_MODULE;
return stdev;
err_put:
put_device(&stdev->dev);
return ERR_PTR(rc);
}
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
size_t off = event_regs[eid].offset;
u32 __iomem *hdr_reg;
u32 hdr;
hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
hdr = ioread32(hdr_reg);
if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
return 0;
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
return 1;
}
static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
int idx;
int count = 0;
if (event_regs[eid].map_reg == part_ev_reg) {
for (idx = 0; idx < stdev->partition_count; idx++)
count += mask_event(stdev, eid, idx);
} else if (event_regs[eid].map_reg == pff_ev_reg) {
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
if (!stdev->pff_local[idx])
continue;
count += mask_event(stdev, eid, idx);
}
} else {
count += mask_event(stdev, eid, 0);
}
return count;
}
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
struct switchtec_dev *stdev = dev;
u32 reg;
irqreturn_t ret = IRQ_NONE;
int eid, event_count = 0;
reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
if (reg & SWITCHTEC_EVENT_OCCURRED) {
dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
ret = IRQ_HANDLED;
schedule_work(&stdev->mrpc_work);
iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
}
check_link_state_events(stdev);
for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
continue;
event_count += mask_all_events(stdev, eid);
}
if (event_count) {
atomic_inc(&stdev->event_cnt);
wake_up_interruptible(&stdev->event_wq);
dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
event_count);
return IRQ_HANDLED;
}
return ret;
}
static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
struct switchtec_dev *stdev = dev;
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
schedule_work(&stdev->mrpc_work);
return IRQ_HANDLED;
}
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
int nvecs;
int event_irq;
int dma_mrpc_irq;
int rc;
if (nirqs < 4)
nirqs = 4;
nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
PCI_IRQ_MSIX | PCI_IRQ_MSI |
PCI_IRQ_VIRTUAL);
if (nvecs < 0)
return nvecs;
event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
if (event_irq < 0 || event_irq >= nvecs)
return -EFAULT;
event_irq = pci_irq_vector(stdev->pdev, event_irq);
if (event_irq < 0)
return event_irq;
rc = devm_request_irq(&stdev->pdev->dev, event_irq,
switchtec_event_isr, 0,
KBUILD_MODNAME, stdev);
if (rc)
return rc;
if (!stdev->dma_mrpc)
return rc;
dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
return -EFAULT;
dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
if (dma_mrpc_irq < 0)
return dma_mrpc_irq;
rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
switchtec_dma_mrpc_isr, 0,
KBUILD_MODNAME, stdev);
return rc;
}
static void init_pff(struct switchtec_dev *stdev)
{
int i;
u32 reg;
struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
}
stdev->pff_csr_count = i;
reg = ioread32(&pcfg->usp_pff_inst_id);
if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
}
}
static int switchtec_init_pci(struct switchtec_dev *stdev,
struct pci_dev *pdev)
{
int rc;
void __iomem *map;
unsigned long res_start, res_len;
u32 __iomem *part_id;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;
pci_set_master(pdev);
res_start = pci_resource_start(pdev, 0);
res_len = pci_resource_len(pdev, 0);
if (!devm_request_mem_region(&pdev->dev, res_start,
res_len, KBUILD_MODNAME))
return -EBUSY;
stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
SWITCHTEC_GAS_TOP_CFG_OFFSET);
if (!stdev->mmio_mrpc)
return -ENOMEM;
map = devm_ioremap(&pdev->dev,
res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
if (!map)
return -ENOMEM;
stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
if (stdev->gen == SWITCHTEC_GEN3)
part_id = &stdev->mmio_sys_info->gen3.partition_id;
else if (stdev->gen >= SWITCHTEC_GEN4)
part_id = &stdev->mmio_sys_info->gen4.partition_id;
else
return -EOPNOTSUPP;
stdev->partition = ioread8(part_id);
stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
if (stdev->partition_count < 1)
stdev->partition_count = 1;
init_pff(stdev);
pci_set_drvdata(pdev, stdev);
if (!use_dma_mrpc)
return 0;
if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
return 0;
stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
sizeof(*stdev->dma_mrpc),
&stdev->dma_mrpc_dma_addr,
GFP_KERNEL);
if (stdev->dma_mrpc == NULL)
return -ENOMEM;
return 0;
}
static int switchtec_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct switchtec_dev *stdev;
int rc;
if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev);
if (IS_ERR(stdev))
return PTR_ERR(stdev);
stdev->gen = id->driver_data;
rc = switchtec_init_pci(stdev, pdev);
if (rc)
goto err_put;
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
goto err_put;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
enable_link_state_events(stdev);
if (stdev->dma_mrpc)
enable_dma_mrpc(stdev);
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
goto err_devadd;
dev_info(&stdev->dev, "Management device registered.\n");
return 0;
err_devadd:
stdev_kill(stdev);
err_put:
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
return rc;
}
static void switchtec_pci_remove(struct pci_dev *pdev)
{
struct switchtec_dev *stdev = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, NULL);
cdev_device_del(&stdev->cdev, &stdev->dev);
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
put_device(&stdev->dev);
}
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
{ \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
.driver_data = gen, \
}, \
{ \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
.driver_data = gen, \
}
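/*
 * Each supported part is matched twice: once with the memory controller
 * class used by the management endpoint, and once with the bridge class
 * the device advertises when its NTB function is active (probe requests
 * ntb_hw_switchtec for the latter case).
 */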
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), /* PFX 48xG3 */
SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), /* PFX 64xG3 */
SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), /* PFX 80xG3 */
SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), /* PFX 96xG3 */
SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), /* PSX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), /* PSX 32xG3 */
SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), /* PSX 48xG3 */
SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), /* PSX 64xG3 */
SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), /* PSX 80xG3 */
SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), /* PSX 96xG3 */
SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), /* PAX 24XG3 */
SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), /* PAX 32XG3 */
SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), /* PAX 48XG3 */
SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), /* PAX 64XG3 */
SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), /* PAX 80XG3 */
SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), /* PAX 96XG3 */
SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), /* PFXL 24XG3 */
SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), /* PFXL 32XG3 */
SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), /* PFXL 48XG3 */
SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), /* PFXL 64XG3 */
SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), /* PFXL 80XG3 */
SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), /* PFXL 96XG3 */
SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), /* PFXI 24XG3 */
SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), /* PFXI 32XG3 */
SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), /* PFXI 48XG3 */
SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), /* PFXI 64XG3 */
SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), /* PFXI 80XG3 */
SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), /* PFXI 96XG3 */
SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), /* PFX 100XG4 */
SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), /* PFX 84XG4 */
SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), /* PFX 68XG4 */
SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), /* PFX 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), /* PFX 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), /* PFX 28XG4 */
SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), /* PSX 100XG4 */
SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), /* PSX 84XG4 */
SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), /* PSX 68XG4 */
SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), /* PSX 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), /* PSX 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), /* PSX 28XG4 */
SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), /* PAX 100XG4 */
SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), /* PAX 84XG4 */
SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), /* PAX 68XG4 */
SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), /* PAX 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), /* PAX 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), /* PAX 28XG4 */
SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), /* PFXA 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), /* PFXA 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), /* PFXA 28XG4 */
SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), /* PSXA 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), /* PSXA 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), /* PSXA 28XG4 */
SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), /* PAXA 52XG4 */
SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), /* PAXA 36XG4 */
SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), /* PAXA 28XG4 */
SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5), /* PFX 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5), /* PFX 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5), /* PFX 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5), /* PFX 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5), /* PFX 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5), /* PFX 28XG5 */
SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5), /* PSX 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5), /* PSX 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5), /* PSX 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5), /* PSX 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5), /* PSX 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5), /* PSX 28XG5 */
SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5), /* PAX 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5), /* PAX 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5), /* PAX 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5), /* PAX 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5), /* PAX 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5), /* PAX 28XG5 */
SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5), /* PFXA 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5), /* PFXA 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5), /* PFXA 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5), /* PFXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5), /* PFXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5), /* PFXA 28XG5 */
SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5), /* PSXA 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5), /* PSXA 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5), /* PSXA 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5), /* PSXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5), /* PSXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5), /* PSXA 28XG5 */
SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5), /* PAXA 100XG5 */
SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5), /* PAXA 84XG5 */
SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5), /* PAXA 68XG5 */
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
static struct pci_driver switchtec_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = switchtec_pci_tbl,
.probe = switchtec_pci_probe,
.remove = switchtec_pci_remove,
};
static int __init switchtec_init(void)
{
int rc;
rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
"switchtec");
if (rc)
return rc;
switchtec_class = class_create("switchtec");
if (IS_ERR(switchtec_class)) {
rc = PTR_ERR(switchtec_class);
goto err_create_class;
}
rc = pci_register_driver(&switchtec_pci_driver);
if (rc)
goto err_pci_register;
pr_info(KBUILD_MODNAME ": loaded.\n");
return 0;
err_pci_register:
class_destroy(switchtec_class);
err_create_class:
unregister_chrdev_region(switchtec_devt, max_devices);
return rc;
}
module_init(switchtec_init);
static void __exit switchtec_exit(void)
{
pci_unregister_driver(&switchtec_pci_driver);
class_destroy(switchtec_class);
unregister_chrdev_region(switchtec_devt, max_devices);
ida_destroy(&switchtec_minor_ida);
pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);
| linux-master | drivers/pci/switch/switchtec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe AER software error injection support.
*
* Debugging PCIe AER code is quite difficult because it is hard to
 * trigger various real hardware errors. Software-based error
 * injection can fake almost all kinds of errors with the help of a
 * user space helper tool aer-inject, which can be obtained from:
* https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
*
* Copyright 2009 Intel Corporation.
* Huang Ying <[email protected]>
*/
#define dev_fmt(fmt) "aer_inject: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include "portdrv.h"
/* Override the existing corrected and uncorrected error masks */
static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
struct aer_error_inj {
u8 bus;
u8 dev;
u8 fn;
u32 uncor_status;
u32 cor_status;
u32 header_log0;
u32 header_log1;
u32 header_log2;
u32 header_log3;
u32 domain;
};
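/*
 * Illustrative user-space sketch (not part of this driver): the canonical
 * injector is the aer-inject tool referenced above, but the write() ABI
 * can be exercised directly. The values below are made-up examples and
 * error handling is elided:
 *
 *	struct aer_error_inj einj = {
 *		.bus = 1,
 *		.cor_status = PCI_ERR_COR_RCVR,	// fake a Receiver Error
 *	};
 *	int fd = open("/dev/aer_inject", O_WRONLY);
 *	write(fd, &einj, sizeof(einj));
 */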
struct aer_error {
struct list_head list;
u32 domain;
unsigned int bus;
unsigned int devfn;
int pos_cap_err;
u32 uncor_status;
u32 cor_status;
u32 header_log0;
u32 header_log1;
u32 header_log2;
u32 header_log3;
u32 root_status;
u32 source_id;
};
struct pci_bus_ops {
struct list_head list;
struct pci_bus *bus;
struct pci_ops *ops;
};
static LIST_HEAD(einjected);
static LIST_HEAD(pci_bus_ops_list);
/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);
static void aer_error_init(struct aer_error *err, u32 domain,
unsigned int bus, unsigned int devfn,
int pos_cap_err)
{
INIT_LIST_HEAD(&err->list);
err->domain = domain;
err->bus = bus;
err->devfn = devfn;
err->pos_cap_err = pos_cap_err;
}
/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
unsigned int devfn)
{
struct aer_error *err;
list_for_each_entry(err, &einjected, list) {
if (domain == err->domain &&
bus == err->bus &&
devfn == err->devfn)
return err;
}
return NULL;
}
/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
int domain = pci_domain_nr(dev->bus);
if (domain < 0)
return NULL;
return __find_aer_error(domain, dev->bus->number, dev->devfn);
}
/* inject_lock must be held before calling */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
{
struct pci_bus_ops *bus_ops;
list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
if (bus_ops->bus == bus)
return bus_ops->ops;
}
return NULL;
}
static struct pci_bus_ops *pci_bus_ops_pop(void)
{
unsigned long flags;
struct pci_bus_ops *bus_ops;
spin_lock_irqsave(&inject_lock, flags);
bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
struct pci_bus_ops, list);
if (bus_ops)
list_del(&bus_ops->list);
spin_unlock_irqrestore(&inject_lock, flags);
return bus_ops;
}
static u32 *find_pci_config_dword(struct aer_error *err, int where,
int *prw1cs)
{
int rw1cs = 0;
u32 *target = NULL;
if (err->pos_cap_err == -1)
return NULL;
switch (where - err->pos_cap_err) {
case PCI_ERR_UNCOR_STATUS:
target = &err->uncor_status;
rw1cs = 1;
break;
case PCI_ERR_COR_STATUS:
target = &err->cor_status;
rw1cs = 1;
break;
case PCI_ERR_HEADER_LOG:
target = &err->header_log0;
break;
case PCI_ERR_HEADER_LOG+4:
target = &err->header_log1;
break;
case PCI_ERR_HEADER_LOG+8:
target = &err->header_log2;
break;
case PCI_ERR_HEADER_LOG+12:
target = &err->header_log3;
break;
case PCI_ERR_ROOT_STATUS:
target = &err->root_status;
rw1cs = 1;
break;
case PCI_ERR_ROOT_ERR_SRC:
target = &err->source_id;
break;
}
if (prw1cs)
*prw1cs = rw1cs;
return target;
}
static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct pci_ops *ops, *my_ops;
int rv;
ops = __find_pci_bus_ops(bus);
if (!ops)
return -1;
my_ops = bus->ops;
bus->ops = ops;
rv = ops->read(bus, devfn, where, size, val);
bus->ops = my_ops;
return rv;
}
static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct pci_ops *ops, *my_ops;
int rv;
ops = __find_pci_bus_ops(bus);
if (!ops)
return -1;
my_ops = bus->ops;
bus->ops = ops;
rv = ops->write(bus, devfn, where, size, val);
bus->ops = my_ops;
return rv;
}
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
int domain;
int rv;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
domain = pci_domain_nr(bus);
if (domain < 0)
goto out;
err = __find_aer_error(domain, bus->number, devfn);
if (!err)
goto out;
sim = find_pci_config_dword(err, where, NULL);
if (sim) {
*val = *sim;
spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
out:
rv = aer_inj_read(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
int rw1cs;
int domain;
int rv;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
domain = pci_domain_nr(bus);
if (domain < 0)
goto out;
err = __find_aer_error(domain, bus->number, devfn);
if (!err)
goto out;
sim = find_pci_config_dword(err, where, &rw1cs);
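	/*
	 * For RW1CS registers, writing 1 to a set bit clears it. The XOR
	 * below matches that for the usual read-status/write-it-back
	 * pattern, although unlike real RW1CS hardware it would also set
	 * a bit that was previously clear.
	 */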
if (sim) {
if (rw1cs)
*sim ^= val;
else
*sim = val;
spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
out:
rv = aer_inj_write(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
static struct pci_ops aer_inj_pci_ops = {
.read = aer_inj_read_config,
.write = aer_inj_write_config,
};
static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
struct pci_bus *bus,
struct pci_ops *ops)
{
INIT_LIST_HEAD(&bus_ops->list);
bus_ops->bus = bus;
bus_ops->ops = ops;
}
static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
struct pci_ops *ops;
struct pci_bus_ops *bus_ops;
unsigned long flags;
bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
spin_lock_irqsave(&inject_lock, flags);
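	/*
	 * If this bus already uses the injection ops, keep the previously
	 * saved entry; bus_ops is left non-NULL so the kfree() below
	 * releases the unused allocation.
	 */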
if (ops == &aer_inj_pci_ops)
goto out;
pci_bus_ops_init(bus_ops, bus, ops);
list_add(&bus_ops->list, &pci_bus_ops_list);
bus_ops = NULL;
out:
spin_unlock_irqrestore(&inject_lock, flags);
kfree(bus_ops);
return 0;
}
static int aer_inject(struct aer_error_inj *einj)
{
struct aer_error *err, *rperr;
struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
struct pci_dev *dev, *rpdev;
struct pcie_device *edev;
struct device *device;
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
int ret = 0;
dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
if (!dev)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
/* If Root Port not found, try to find an RCEC */
if (!rpdev)
rpdev = dev->rcec;
if (!rpdev) {
pci_err(dev, "Neither Root Port nor RCEC found\n");
ret = -ENODEV;
goto out_put;
}
pos_cap_err = dev->aer_cap;
if (!pos_cap_err) {
pci_err(dev, "Device doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
&uncor_mask);
rp_pos_cap_err = rpdev->aer_cap;
if (!rp_pos_cap_err) {
pci_err(rpdev, "Root port doesn't support AER\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
if (!err_alloc) {
ret = -ENOMEM;
goto out_put;
}
rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
if (!rperr_alloc) {
ret = -ENOMEM;
goto out_put;
}
	if (aer_mask_override) {
		/* Unmask only the error bits being injected */
		cor_mask_orig = cor_mask;
		cor_mask &= ~einj->cor_status;
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
				       cor_mask);
		uncor_mask_orig = uncor_mask;
		uncor_mask &= ~einj->uncor_status;
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
				       uncor_mask);
	}
spin_lock_irqsave(&inject_lock, flags);
err = __find_aer_error_by_dev(dev);
if (!err) {
err = err_alloc;
err_alloc = NULL;
aer_error_init(err, einj->domain, einj->bus, devfn,
pos_cap_err);
list_add(&err->list, &einjected);
}
err->uncor_status |= einj->uncor_status;
err->cor_status |= einj->cor_status;
err->header_log0 = einj->header_log0;
err->header_log1 = einj->header_log1;
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
if (!aer_mask_override && einj->cor_status &&
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
pci_warn(dev, "The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
rperr = __find_aer_error_by_dev(rpdev);
if (!rperr) {
rperr = rperr_alloc;
rperr_alloc = NULL;
aer_error_init(rperr, pci_domain_nr(rpdev->bus),
rpdev->bus->number, rpdev->devfn,
rp_pos_cap_err);
list_add(&rperr->list, &einjected);
}
if (einj->cor_status) {
if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
else
rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
rperr->source_id &= 0xffff0000;
rperr->source_id |= (einj->bus << 8) | devfn;
}
if (einj->uncor_status) {
if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
if (sever & einj->uncor_status) {
rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
} else
rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
rperr->source_id &= 0x0000ffff;
rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
}
spin_unlock_irqrestore(&inject_lock, flags);
if (aer_mask_override) {
pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
cor_mask_orig);
pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
uncor_mask_orig);
}
ret = pci_bus_set_aer_ops(dev->bus);
if (ret)
goto out_put;
ret = pci_bus_set_aer_ops(rpdev->bus);
if (ret)
goto out_put;
device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
if (device) {
edev = to_pcie_device(device);
if (!get_service_data(edev)) {
pci_warn(edev->port, "AER service is not initialized\n");
ret = -EPROTONOSUPPORT;
goto out_put;
}
pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
ret = irq_inject_interrupt(edev->irq);
} else {
pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
}
out_put:
kfree(err_alloc);
kfree(rperr_alloc);
pci_dev_put(dev);
return ret;
}
static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
size_t usize, loff_t *off)
{
struct aer_error_inj einj;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (usize < offsetof(struct aer_error_inj, domain) ||
usize > sizeof(einj))
return -EINVAL;
memset(&einj, 0, sizeof(einj));
if (copy_from_user(&einj, ubuf, usize))
return -EFAULT;
ret = aer_inject(&einj);
return ret ? ret : usize;
}
static const struct file_operations aer_inject_fops = {
.write = aer_inject_write,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice aer_inject_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "aer_inject",
.fops = &aer_inject_fops,
};
static int __init aer_inject_init(void)
{
return misc_register(&aer_inject_device);
}
static void __exit aer_inject_exit(void)
{
struct aer_error *err, *err_next;
unsigned long flags;
struct pci_bus_ops *bus_ops;
misc_deregister(&aer_inject_device);
while ((bus_ops = pci_bus_ops_pop())) {
pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
kfree(bus_ops);
}
spin_lock_irqsave(&inject_lock, flags);
list_for_each_entry_safe(err, err_next, &einjected, list) {
list_del(&err->list);
kfree(err);
}
spin_unlock_irqrestore(&inject_lock, flags);
}
module_init(aer_inject_init);
module_exit(aer_inject_exit);
MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");
| linux-master | drivers/pci/pcie/aer_inject.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Root Complex Event Collector Support
*
* Authors:
* Sean V Kelley <[email protected]>
* Qiuxu Zhuo <[email protected]>
*
* Copyright (C) 2020 Intel Corp.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include "../pci.h"
struct walk_rcec_data {
struct pci_dev *rcec;
int (*user_callback)(struct pci_dev *dev, void *data);
void *user_data;
};
static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
{
unsigned long bitmap = rcec->rcec_ea->bitmap;
unsigned int devn;
/* An RCiEP found on a different bus in range */
if (rcec->bus->number != rciep->bus->number)
return true;
/* Same bus, so check bitmap */
for_each_set_bit(devn, &bitmap, 32)
if (devn == PCI_SLOT(rciep->devfn))
return true;
return false;
}
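/*
 * Illustrative example: with rcec->rcec_ea->bitmap == 0x5, the RCiEPs at
 * device numbers 0 and 2 on the RCEC's own bus are associated with it.
 */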
static int link_rcec_helper(struct pci_dev *dev, void *data)
{
struct walk_rcec_data *rcec_data = data;
struct pci_dev *rcec = rcec_data->rcec;
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) &&
rcec_assoc_rciep(rcec, dev)) {
dev->rcec = rcec;
pci_dbg(dev, "PME & error events signaled via %s\n",
pci_name(rcec));
}
return 0;
}
static int walk_rcec_helper(struct pci_dev *dev, void *data)
{
struct walk_rcec_data *rcec_data = data;
struct pci_dev *rcec = rcec_data->rcec;
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) &&
rcec_assoc_rciep(rcec, dev))
rcec_data->user_callback(dev, rcec_data->user_data);
return 0;
}
static void walk_rcec(int (*cb)(struct pci_dev *dev, void *data),
void *userdata)
{
struct walk_rcec_data *rcec_data = userdata;
struct pci_dev *rcec = rcec_data->rcec;
u8 nextbusn, lastbusn;
struct pci_bus *bus;
unsigned int bnr;
if (!rcec->rcec_ea)
return;
/* Walk own bus for bitmap based association */
pci_walk_bus(rcec->bus, cb, rcec_data);
nextbusn = rcec->rcec_ea->nextbusn;
lastbusn = rcec->rcec_ea->lastbusn;
/* All RCiEP devices are on the same bus as the RCEC */
if (nextbusn == 0xff && lastbusn == 0x00)
return;
for (bnr = nextbusn; bnr <= lastbusn; bnr++) {
/* No association indicated (PCIe 5.0-1, 7.9.10.3) */
if (bnr == rcec->bus->number)
continue;
bus = pci_find_bus(pci_domain_nr(rcec->bus), bnr);
if (!bus)
continue;
/* Find RCiEP devices on the given bus ranges */
pci_walk_bus(bus, cb, rcec_data);
}
}
/**
* pcie_link_rcec - Link RCiEP devices associated with RCEC.
* @rcec: RCEC whose RCiEP devices should be linked.
*
* Link the given RCEC to each RCiEP device found.
*/
void pcie_link_rcec(struct pci_dev *rcec)
{
struct walk_rcec_data rcec_data;
if (!rcec->rcec_ea)
return;
rcec_data.rcec = rcec;
rcec_data.user_callback = NULL;
rcec_data.user_data = NULL;
walk_rcec(link_rcec_helper, &rcec_data);
}
/**
* pcie_walk_rcec - Walk RCiEP devices associating with RCEC and call callback.
* @rcec: RCEC whose RCiEP devices should be walked
* @cb: Callback to be called for each RCiEP device found
* @userdata: Arbitrary pointer to be passed to callback
*
* Walk the given RCEC. Call the callback on each RCiEP found.
*
* If @cb returns anything other than 0, break out.
*/
void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct walk_rcec_data rcec_data;
if (!rcec->rcec_ea)
return;
rcec_data.rcec = rcec;
rcec_data.user_callback = cb;
rcec_data.user_data = userdata;
walk_rcec(walk_rcec_helper, &rcec_data);
}
void pci_rcec_init(struct pci_dev *dev)
{
struct rcec_ea *rcec_ea;
u32 rcec, hdr, busn;
u8 ver;
/* Only for Root Complex Event Collectors */
if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_EC)
return;
rcec = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_RCEC);
if (!rcec)
return;
rcec_ea = kzalloc(sizeof(*rcec_ea), GFP_KERNEL);
if (!rcec_ea)
return;
pci_read_config_dword(dev, rcec + PCI_RCEC_RCIEP_BITMAP,
&rcec_ea->bitmap);
/* Check whether RCEC BUSN register is present */
pci_read_config_dword(dev, rcec, &hdr);
ver = PCI_EXT_CAP_VER(hdr);
if (ver >= PCI_RCEC_BUSN_REG_VER) {
pci_read_config_dword(dev, rcec + PCI_RCEC_BUSN, &busn);
rcec_ea->nextbusn = PCI_RCEC_BUSN_NEXT(busn);
rcec_ea->lastbusn = PCI_RCEC_BUSN_LAST(busn);
} else {
/* Avoid later ver check by setting nextbusn */
rcec_ea->nextbusn = 0xff;
rcec_ea->lastbusn = 0x00;
}
dev->rcec_ea = rcec_ea;
}
void pci_rcec_exit(struct pci_dev *dev)
{
kfree(dev->rcec_ea);
dev->rcec_ea = NULL;
}
| linux-master | drivers/pci/pcie/rcec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI Express Downstream Port Containment services driver
* Author: Keith Busch <[email protected]>
*
* Copyright (C) 2016 Intel Corp.
*/
#define dev_fmt(fmt) "DPC: " fmt
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "portdrv.h"
#include "../pci.h"
static const char * const rp_pio_error_string[] = {
"Configuration Request received UR Completion", /* Bit Position 0 */
"Configuration Request received CA Completion", /* Bit Position 1 */
"Configuration Request Completion Timeout", /* Bit Position 2 */
NULL,
NULL,
NULL,
NULL,
NULL,
"I/O Request received UR Completion", /* Bit Position 8 */
"I/O Request received CA Completion", /* Bit Position 9 */
"I/O Request Completion Timeout", /* Bit Position 10 */
NULL,
NULL,
NULL,
NULL,
NULL,
"Memory Request received UR Completion", /* Bit Position 16 */
"Memory Request received CA Completion", /* Bit Position 17 */
"Memory Request Completion Timeout", /* Bit Position 18 */
};
void pci_save_dpc_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}
void pci_restore_dpc_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}
static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);
#ifdef CONFIG_HOTPLUG_PCI_PCIE
static bool dpc_completed(struct pci_dev *pdev)
{
u16 status;
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
return false;
if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
return false;
return true;
}
/**
* pci_dpc_recovered - whether DPC triggered and has recovered successfully
* @pdev: PCI device
*
* Return true if DPC was triggered for @pdev and has recovered successfully.
* Wait for recovery if it hasn't completed yet. Called from the PCIe hotplug
* driver to recognize and ignore Link Down/Up events caused by DPC.
*/
bool pci_dpc_recovered(struct pci_dev *pdev)
{
struct pci_host_bridge *host;
if (!pdev->dpc_cap)
return false;
/*
* Synchronization between hotplug and DPC is not supported
* if DPC is owned by firmware and EDR is not enabled.
*/
host = pci_find_host_bridge(pdev->bus);
if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
return false;
/*
* Need a timeout in case DPC never completes due to failure of
* dpc_wait_rp_inactive(). The spec doesn't mandate a time limit,
* but reports indicate that DPC completes within 4 seconds.
*/
wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
msecs_to_jiffies(4000));
return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
unsigned long timeout = jiffies + HZ;
u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
while (status & PCI_EXP_DPC_RP_BUSY &&
!time_after(jiffies, timeout)) {
msleep(10);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
}
if (status & PCI_EXP_DPC_RP_BUSY) {
pci_warn(pdev, "root port still busy\n");
return -EBUSY;
}
return 0;
}
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
pci_ers_result_t ret;
u16 cap;
set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
cap = pdev->dpc_cap;
/*
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
if (!pcie_wait_for_link(pdev, false))
pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
ret = PCI_ERS_RESULT_DISCONNECT;
goto out;
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
if (pci_bridge_wait_for_secondary_bus(pdev, "DPC")) {
clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
ret = PCI_ERS_RESULT_DISCONNECT;
} else {
set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
ret = PCI_ERS_RESULT_RECOVERED;
}
out:
clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
wake_up_all(&dpc_completed_waitqueue);
return ret;
}
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
int i;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
sev, syserr, exc);
/* Get First Error Pointer */
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
first_error = (dpc_status & 0x1f00) >> 8;
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if ((status & ~mask) & (1 << i))
pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
if (pdev->dpc_rp_log_size < 4)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
&dw1);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
&dw2);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
&dw3);
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
dw0, dw1, dw2, dw3);
if (pdev->dpc_rp_log_size < 5)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
		/* Each TLP Prefix Log register is one dword further on */
		pci_read_config_dword(pdev,
			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4,
			&prefix);
		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
	}
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
struct aer_err_info *info)
{
int pos = dev->aer_cap;
u32 status, mask, sev;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
status &= ~mask;
if (!status)
return 0;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
status &= sev;
if (status)
info->severity = AER_FATAL;
else
info->severity = AER_NONFATAL;
return 1;
}
void dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
struct aer_err_info info;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
pci_warn(pdev, "%s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
(ext_reason == 0) ? "RP PIO error" :
(ext_reason == 1) ? "software trigger" :
"reserved error");
/* show RP PIO error detail information */
if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(pdev);
else if (reason == 0 &&
dpc_get_aer_uncorrect_severity(pdev, &info) &&
aer_get_device_error_info(pdev, &info)) {
aer_print_error(pdev, &info);
pci_aer_clear_nonfatal_status(pdev);
pci_aer_clear_fatal_status(pdev);
}
}
static irqreturn_t dpc_handler(int irq, void *context)
{
struct pci_dev *pdev = context;
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
return IRQ_HANDLED;
}
static irqreturn_t dpc_irq(int irq, void *context)
{
struct pci_dev *pdev = context;
u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
return IRQ_NONE;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_INTERRUPT);
if (status & PCI_EXP_DPC_STATUS_TRIGGER)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
void pci_dpc_init(struct pci_dev *pdev)
{
u16 cap;
pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
if (!pdev->dpc_cap)
return;
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
return;
pdev->dpc_rp_extensions = true;
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
pdev->dpc_rp_log_size =
(cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
}
}
}
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
u16 ctl, cap;
if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
dpc_handler, IRQF_SHARED,
"pcie-dpc", pdev);
if (status) {
pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
status);
return status;
}
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
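	/*
	 * The 0xfff4 mask clears the 2-bit DPC Trigger Enable field
	 * (bits 1:0) and DPC Interrupt Enable (bit 3) before arming DPC
	 * for ERR_FATAL with interrupts enabled (field positions per the
	 * PCIe DPC Control register layout).
	 */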
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
}
static void dpc_remove(struct pcie_device *dev)
{
struct pci_dev *pdev = dev->port;
u16 ctl;
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}
static struct pcie_port_service_driver dpcdriver = {
.name = "dpc",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
};
int __init pcie_dpc_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
| linux-master | drivers/pci/pcie/dpc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI Express Precision Time Measurement
* Copyright (c) 2016, Intel Corporation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "../pci.h"
/*
* If the next upstream device supports PTM, return it; otherwise return
* NULL. PTM Messages are local, so both link partners must support it.
*/
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
struct pci_dev *ups = pci_upstream_bridge(dev);
/*
* Switch Downstream Ports are not permitted to have a PTM
* capability; their PTM behavior is controlled by the Upstream
* Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
* Switch Downstream Port, look up one more level.
*/
if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
ups = pci_upstream_bridge(ups);
if (ups && ups->ptm_cap)
return ups;
return NULL;
}
/*
* Find the PTM Capability (if present) and extract the information we need
* to use it.
*/
void pci_ptm_init(struct pci_dev *dev)
{
u16 ptm;
u32 cap;
struct pci_dev *ups;
if (!pci_is_pcie(dev))
return;
ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
if (!ptm)
return;
dev->ptm_cap = ptm;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
/*
* Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
* furthest upstream Time Source as the PTM Root. For Endpoints,
* "the Effective Granularity is the maximum Local Clock Granularity
* reported by the PTM Root and all intervening PTM Time Sources."
*/
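	/*
	 * Worked example (illustrative numbers): if the PTM Root reports
	 * 16ns and an intervening Switch reports 32ns, an Endpoint below
	 * them ends up with an Effective Granularity of 32ns; any device
	 * reporting 0 (unknown) forces it to 0.
	 */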
ups = pci_upstream_ptm(dev);
if (ups) {
if (ups->ptm_granularity == 0)
dev->ptm_granularity = 0;
else if (ups->ptm_granularity > dev->ptm_granularity)
dev->ptm_granularity = ups->ptm_granularity;
} else if (cap & PCI_PTM_CAP_ROOT) {
dev->ptm_root = 1;
} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
/*
* Per sec 7.9.15.3, this should be the Local Clock
* Granularity of the associated Time Source. But it
* doesn't say how to find that Time Source.
*/
dev->ptm_granularity = 0;
}
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
pci_enable_ptm(dev, NULL);
}
void pci_save_ptm_state(struct pci_dev *dev)
{
u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
if (!ptm)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
if (!save_state)
return;
cap = (u32 *)&save_state->cap.data[0];
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}
void pci_restore_ptm_state(struct pci_dev *dev)
{
u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
if (!ptm)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
if (!save_state)
return;
cap = (u32 *)&save_state->cap.data[0];
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}
/* Enable PTM in the Control register if possible */
static int __pci_enable_ptm(struct pci_dev *dev)
{
u16 ptm = dev->ptm_cap;
struct pci_dev *ups;
u32 ctrl;
if (!ptm)
return -EINVAL;
/*
* A device uses local PTM Messages to request time information
* from a PTM Root that's farther upstream. Every device along the
* path must support PTM and have it enabled so it can handle the
* messages. Therefore, if this device is not a PTM Root, the
* upstream link partner must have PTM enabled before we can enable
* PTM.
*/
if (!dev->ptm_root) {
ups = pci_upstream_ptm(dev);
if (!ups || !ups->ptm_enabled)
return -EINVAL;
}
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
ctrl |= PCI_PTM_CTRL_ENABLE;
ctrl &= ~PCI_PTM_GRANULARITY_MASK;
ctrl |= dev->ptm_granularity << 8;
if (dev->ptm_root)
ctrl |= PCI_PTM_CTRL_ROOT;
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
return 0;
}
/**
* pci_enable_ptm() - Enable Precision Time Measurement
* @dev: PCI device
* @granularity: pointer to return granularity
*
* Enable Precision Time Measurement for @dev. If successful and
* @granularity is non-NULL, return the Effective Granularity.
*
* Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
* is not a PTM Root and lacks an upstream path of PTM-enabled devices.
*/
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
int rc;
char clock_desc[8];
rc = __pci_enable_ptm(dev);
if (rc)
return rc;
dev->ptm_enabled = 1;
if (granularity)
*granularity = dev->ptm_granularity;
switch (dev->ptm_granularity) {
case 0:
snprintf(clock_desc, sizeof(clock_desc), "unknown");
break;
case 255:
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
pci_info(dev, "PTM enabled%s, %s granularity\n",
dev->ptm_root ? " (root)" : "", clock_desc);
return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
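/*
 * Illustrative caller sketch (names are placeholders): an endpoint driver
 * would typically enable PTM from its probe() path.
 *
 *	u8 gran;
 *
 *	if (!pci_enable_ptm(pdev, &gran))
 *		dev_info(&pdev->dev, "PTM granularity: %uns\n", gran);
 *
 * Note that gran == 0 means the granularity is unknown and 255 means it
 * is greater than 254ns (see the switch in pci_enable_ptm() above).
 */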
static void __pci_disable_ptm(struct pci_dev *dev)
{
u16 ptm = dev->ptm_cap;
u32 ctrl;
if (!ptm)
return;
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
}
/**
* pci_disable_ptm() - Disable Precision Time Measurement
* @dev: PCI device
*
* Disable Precision Time Measurement for @dev.
*/
void pci_disable_ptm(struct pci_dev *dev)
{
if (dev->ptm_enabled) {
__pci_disable_ptm(dev);
dev->ptm_enabled = 0;
}
}
EXPORT_SYMBOL(pci_disable_ptm);
/*
* Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
* resume if necessary.
*/
void pci_suspend_ptm(struct pci_dev *dev)
{
if (dev->ptm_enabled)
__pci_disable_ptm(dev);
}
/* If PTM was enabled before suspend, re-enable it when resuming */
void pci_resume_ptm(struct pci_dev *dev)
{
if (dev->ptm_enabled)
__pci_enable_ptm(dev);
}
bool pcie_ptm_enabled(struct pci_dev *dev)
{
if (!dev)
return false;
return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
| linux-master | drivers/pci/pcie/ptm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe Native PME support
*
* Copyright (C) 2007 - 2009 Intel Corp
* Copyright (C) 2007 - 2009 Shaohua Li <[email protected]>
* Copyright (C) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#define dev_fmt(fmt) "PME: " fmt
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include "../pci.h"
#include "portdrv.h"
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
* wake-up from system sleep states.
*/
bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
if (!strncmp(str, "nomsi", 5))
pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
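/* Example: booting with "pcie_pme=nomsi" forces INTx-based PME signaling. */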
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
struct work_struct work;
bool noirq; /* If set, keep the PME interrupt disabled. */
};
/**
* pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
if (enable)
pcie_capability_set_word(dev, PCI_EXP_RTCTL,
PCI_EXP_RTCTL_PMEIE);
else
pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
PCI_EXP_RTCTL_PMEIE);
}
/**
* pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
* @bus: PCI bus to scan.
*
 * Scan the given PCI bus and all buses under it for devices asserting PME#.
*/
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
bool ret = false;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
if (dev->pme_poll)
dev->pme_poll = false;
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
ret = true;
}
if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
ret = true;
}
return ret;
}
/**
* pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
* @bus: Secondary bus of the bridge.
* @devfn: Device/function number to check.
*
* PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
 * PCIe PME message. In that case the bridge should use the Requester ID
* of device/function number 0 on its secondary bus.
*/
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
struct pci_dev *dev;
bool found = false;
if (devfn)
return false;
dev = pci_dev_get(bus->self);
if (!dev)
return false;
if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
down_read(&pci_bus_sem);
if (pcie_pme_walk_bus(bus))
found = true;
up_read(&pci_bus_sem);
}
pci_dev_put(dev);
return found;
}
/**
* pcie_pme_handle_request - Find device that generated PME and handle it.
* @port: Root port or event collector that generated the PME interrupt.
* @req_id: PCIe Requester ID of the device that generated the PME.
*/
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
u8 busnr = req_id >> 8, devfn = req_id & 0xff;
struct pci_bus *bus;
struct pci_dev *dev;
bool found = false;
/* First, check if the PME is from the root port itself. */
if (port->devfn == devfn && port->bus->number == busnr) {
if (port->pme_poll)
port->pme_poll = false;
if (pci_check_pme_status(port)) {
pm_request_resume(&port->dev);
found = true;
} else {
/*
* Apparently, the root port generated the PME on behalf
* of a non-PCIe device downstream. If this is done by
* a root port, the Requester ID field in its status
* register may contain either the root port's, or the
* source device's information (PCI Express Base
* Specification, Rev. 2.0, Section 6.1.9).
*/
down_read(&pci_bus_sem);
found = pcie_pme_walk_bus(port->subordinate);
up_read(&pci_bus_sem);
}
goto out;
}
/* Second, find the bus the source device is on. */
bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
if (!bus)
goto out;
/* Next, check if the PME is from a PCIe-PCI bridge. */
found = pcie_pme_from_pci_bridge(bus, devfn);
if (found)
goto out;
/* Finally, try to find the PME source on the bus. */
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_get(dev);
if (dev->devfn == devfn) {
found = true;
break;
}
pci_dev_put(dev);
}
up_read(&pci_bus_sem);
if (found) {
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found) {
if (dev->pme_poll)
dev->pme_poll = false;
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
}
pci_dev_put(dev);
} else if (devfn) {
/*
* The device is not there, but we can still try to recover by
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
out:
if (!found)
pci_info(port, "Spurious native interrupt!\n");
}
/**
* pcie_pme_work_fn - Work handler for PCIe PME interrupt.
* @work: Work structure giving access to service data.
*/
static void pcie_pme_work_fn(struct work_struct *work)
{
struct pcie_pme_service_data *data =
container_of(work, struct pcie_pme_service_data, work);
struct pci_dev *port = data->srv->port;
u32 rtsta;
spin_lock_irq(&data->lock);
for (;;) {
if (data->noirq)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (PCI_POSSIBLE_ERROR(rtsta))
break;
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
* pending PMEs, the status will be set again.
*/
pcie_clear_root_pme_status(port);
spin_unlock_irq(&data->lock);
pcie_pme_handle_request(port, rtsta & 0xffff);
spin_lock_irq(&data->lock);
continue;
}
/* No need to loop if there are no more PMEs pending. */
if (!(rtsta & PCI_EXP_RTSTA_PENDING))
break;
spin_unlock_irq(&data->lock);
cpu_relax();
spin_lock_irq(&data->lock);
}
if (!data->noirq)
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
}
/**
* pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
* @irq: Interrupt vector.
* @context: Interrupt context pointer.
*/
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
u32 rtsta;
unsigned long flags;
port = ((struct pcie_device *)context)->port;
data = get_service_data((struct pcie_device *)context);
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
pcie_pme_interrupt_enable(port, false);
spin_unlock_irqrestore(&data->lock, flags);
/* We don't use pm_wq, because it's freezable. */
schedule_work(&data->work);
return IRQ_HANDLED;
}
/**
* pcie_pme_can_wakeup - Set the wakeup capability flag.
* @dev: PCI device to handle.
* @ign: Ignored.
*/
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
device_set_wakeup_capable(&dev->dev, true);
return 0;
}
/**
* pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
* @port: PCIe root port or event collector to handle.
*
 * For each device below the given root port, including the port itself (or for
 * each root complex integrated endpoint if @port is a root complex event
 * collector), set the flag indicating that it can signal run-time wake-up
 * events.
*/
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_can_wakeup(port, NULL);
if (pci_pcie_type(port) == PCI_EXP_TYPE_RC_EC)
pcie_walk_rcec(port, pcie_pme_can_wakeup, NULL);
else if (port->subordinate)
pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}
/**
* pcie_pme_probe - Initialize PCIe PME service for given root port.
* @srv: PCIe service to initialize.
*/
static int pcie_pme_probe(struct pcie_device *srv)
{
struct pci_dev *port = srv->port;
struct pcie_pme_service_data *data;
int type = pci_pcie_type(port);
int ret;
/* Limit to Root Ports or Root Complex Event Collectors */
if (type != PCI_EXP_TYPE_RC_EC &&
type != PCI_EXP_TYPE_ROOT_PORT)
return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->lock);
INIT_WORK(&data->work, pcie_pme_work_fn);
data->srv = srv;
set_service_data(srv, data);
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
return ret;
}
pci_info(port, "Signaling with IRQ %d\n", srv->irq);
pcie_pme_mark_devices(port);
pcie_pme_interrupt_enable(port, true);
return 0;
}
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
{
struct pci_dev *dev;
if (!bus)
return false;
list_for_each_entry(dev, &bus->devices, bus_list)
if (device_may_wakeup(&dev->dev)
|| pcie_pme_check_wakeup(dev->subordinate))
return true;
return false;
}
static void pcie_pme_disable_interrupt(struct pci_dev *port,
struct pcie_pme_service_data *data)
{
spin_lock_irq(&data->lock);
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
data->noirq = true;
spin_unlock_irq(&data->lock);
}
/**
* pcie_pme_suspend - Suspend PCIe PME service device.
* @srv: PCIe service device to suspend.
*/
static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
bool wakeup;
int ret;
if (device_may_wakeup(&port->dev)) {
wakeup = true;
} else {
down_read(&pci_bus_sem);
wakeup = pcie_pme_check_wakeup(port->subordinate);
up_read(&pci_bus_sem);
}
if (wakeup) {
ret = enable_irq_wake(srv->irq);
if (!ret)
return 0;
}
pcie_pme_disable_interrupt(port, data);
synchronize_irq(srv->irq);
return 0;
}
/**
* pcie_pme_resume - Resume PCIe PME service device.
* @srv: PCIe service device to resume.
*/
static int pcie_pme_resume(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
spin_lock_irq(&data->lock);
if (data->noirq) {
struct pci_dev *port = srv->port;
pcie_clear_root_pme_status(port);
pcie_pme_interrupt_enable(port, true);
data->noirq = false;
} else {
disable_irq_wake(srv->irq);
}
spin_unlock_irq(&data->lock);
return 0;
}
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
* @srv: PCIe service device to remove.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
pcie_pme_disable_interrupt(srv->port, data);
free_irq(srv->irq, srv);
cancel_work_sync(&data->work);
kfree(data);
}
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
/**
* pcie_pme_init - Register the PCIe PME service driver.
*/
int __init pcie_pme_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
| linux-master | drivers/pci/pcie/pme.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file implements error recovery as a core part of PCIe error
* reporting. When a PCIe error is delivered, an error message is
* collected and printed to the console; then an error recovery
* procedure is executed following the PCI error recovery rules.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen ([email protected])
* Zhang Yanmin ([email protected])
*/
#define dev_fmt(fmt) "AER: " fmt
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/aer.h>
#include "portdrv.h"
#include "../pci.h"
static pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
return PCI_ERS_RESULT_NO_AER_DRIVER;
if (new == PCI_ERS_RESULT_NONE)
return orig;
switch (orig) {
case PCI_ERS_RESULT_CAN_RECOVER:
case PCI_ERS_RESULT_RECOVERED:
orig = new;
break;
case PCI_ERS_RESULT_DISCONNECT:
if (new == PCI_ERS_RESULT_NEED_RESET)
orig = PCI_ERS_RESULT_NEED_RESET;
break;
default:
break;
}
return orig;
}
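/*
 * Illustrative note (not part of the driver): under the rules above the
 * most severe vote wins when results are merged, e.g.:
 *
 *   merge_result(PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET)
 *     == PCI_ERS_RESULT_NEED_RESET
 *   merge_result(PCI_ERS_RESULT_DISCONNECT, PCI_ERS_RESULT_RECOVERED)
 *     == PCI_ERS_RESULT_DISCONNECT
 *   merge_result(PCI_ERS_RESULT_RECOVERED, PCI_ERS_RESULT_NONE)
 *     == PCI_ERS_RESULT_RECOVERED
 */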
static int report_error_detected(struct pci_dev *dev,
pci_channel_state_t state,
enum pci_ers_result *result)
{
struct pci_driver *pdrv;
pci_ers_result_t vote;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
pdrv = dev->driver;
if (pci_dev_is_disconnected(dev)) {
vote = PCI_ERS_RESULT_DISCONNECT;
} else if (!pci_dev_set_io_state(dev, state)) {
pci_info(dev, "can't recover (state transition %u -> %u invalid)\n",
dev->error_state, state);
vote = PCI_ERS_RESULT_NONE;
} else if (!pdrv || !pdrv->err_handler ||
!pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
* error callbacks for *any* device in the subtree, and recovery
* exits in the disconnected error state.
*/
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
vote = PCI_ERS_RESULT_NO_AER_DRIVER;
pci_info(dev, "can't recover (no error_detected callback)\n");
} else {
vote = PCI_ERS_RESULT_NONE;
}
} else {
err_handler = pdrv->err_handler;
vote = err_handler->error_detected(dev, state);
}
pci_uevent_ers(dev, vote);
*result = merge_result(*result, vote);
device_unlock(&dev->dev);
return 0;
}
static int report_frozen_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_frozen, data);
}
static int report_normal_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_normal, data);
}
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pdrv ||
!pdrv->err_handler ||
!pdrv->err_handler->mmio_enabled)
goto out;
err_handler = pdrv->err_handler;
vote = err_handler->mmio_enabled(dev);
*result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
}
static int report_slot_reset(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pdrv ||
!pdrv->err_handler ||
!pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
vote = err_handler->slot_reset(dev);
*result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
}
static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
!pdrv ||
!pdrv->err_handler ||
!pdrv->err_handler->resume)
goto out;
err_handler = pdrv->err_handler;
err_handler->resume(dev);
out:
pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
device_unlock(&dev->dev);
return 0;
}
/**
* pci_walk_bridge - walk bridges potentially AER affected
* @bridge: bridge which may be a Port, an RCEC, or an RCiEP
* @cb: callback to be called for each device found
* @userdata: arbitrary pointer to be passed to callback
*
* If the device provided is a bridge, walk the subordinate bus, including
* any bridged devices on buses under this bus. Call the provided callback
* on each device found.
*
* If the device provided has no subordinate bus, e.g., an RCEC or RCiEP,
* call the callback on the device itself.
*/
static void pci_walk_bridge(struct pci_dev *bridge,
int (*cb)(struct pci_dev *, void *),
void *userdata)
{
if (bridge->subordinate)
pci_walk_bus(bridge->subordinate, cb, userdata);
else
cb(bridge, userdata);
}
pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_channel_state_t state,
pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev))
{
int type = pci_pcie_type(dev);
struct pci_dev *bridge;
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
/*
* If the error was detected by a Root Port, Downstream Port, RCEC,
* or RCiEP, recovery runs on the device itself. For Ports, that
* also includes any subordinate devices.
*
* If it was detected by another device (Endpoint, etc), recovery
* runs on the device and anything else under the same Port, i.e.,
* everything under "bridge".
*/
if (type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_DOWNSTREAM ||
type == PCI_EXP_TYPE_RC_EC ||
type == PCI_EXP_TYPE_RC_END)
bridge = dev;
else
bridge = pci_upstream_bridge(dev);
pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
pci_walk_bridge(bridge, report_frozen_detected, &status);
if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
pci_warn(bridge, "subordinate device reset failed\n");
goto failed;
}
} else {
pci_walk_bridge(bridge, report_normal_detected, &status);
}
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
pci_dbg(bridge, "broadcast mmio_enabled message\n");
pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
* functions to reset slot before calling
* drivers' slot_reset callbacks?
*/
status = PCI_ERS_RESULT_RECOVERED;
pci_dbg(bridge, "broadcast slot_reset message\n");
pci_walk_bridge(bridge, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
pci_dbg(bridge, "broadcast resume message\n");
pci_walk_bridge(bridge, report_resume, &status);
/*
* If we have native control of AER, clear error status in the device
* that detected the error. If the platform retained control of AER,
* it is responsible for clearing this status. In that case, the
* signaling device may not even be visible to the OS.
*/
if (host->native_aer || pcie_ports_native) {
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
}
pci_info(bridge, "device recovery successful\n");
return status;
failed:
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
pci_info(bridge, "device recovery failed\n");
return status;
}
| linux-master | drivers/pci/pcie/err.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implement the AER root port service driver. The driver registers an IRQ
* handler. When a root port triggers an AER interrupt, the IRQ handler
* collects root port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen ([email protected])
* Zhang Yanmin ([email protected])
*
* (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
* Andrew Patterson <[email protected]>
*/
#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt
#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <ras/ras_event.h>
#include "../pci.h"
#include "portdrv.h"
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
struct aer_err_source {
unsigned int status;
unsigned int id;
};
struct aer_rpc {
struct pci_dev *rpd; /* Root Port device */
DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
/* AER stats for the device */
struct aer_stats {
/*
* Fields for all AER-capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
* endpoint is causing problems, the AER counters may increment
* at its link partner (e.g. Root Port) because the errors will be
* "seen" by the link partner and not the problematic endpoint
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
/* Counters for different type of correctable errors */
u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
/* Counters for different type of fatal uncorrectable errors */
u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
/* Counters for different type of nonfatal uncorrectable errors */
u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
/* Total number of ERR_COR sent by this device */
u64 dev_total_cor_errs;
/* Total number of ERR_FATAL sent by this device */
u64 dev_total_fatal_errs;
/* Total number of ERR_NONFATAL sent by this device */
u64 dev_total_nonfatal_errs;
/*
* Fields for Root Ports and Root Complex Event Collectors only; these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
* messages received by the Root Port / Event Collector, INCLUDING the
* ones that are generated internally (by the Root Port itself).
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
PCI_ERR_UNC_COMP_ABORT| \
PCI_ERR_UNC_UNX_COMP| \
PCI_ERR_UNC_MALF_TLP)
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
#define ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \
PCI_ERR_ROOT_CMD_NONFATAL_EN| \
PCI_ERR_ROOT_CMD_FATAL_EN)
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \
PCI_ERR_ROOT_COR_RCV | \
PCI_ERR_ROOT_MULTI_COR_RCV | \
PCI_ERR_ROOT_MULTI_UNCOR_RCV)
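/*
 * Illustrative example (values assumed for clarity): the Root Error
 * Source Identification register packs the ERR_COR Requester ID in the
 * low 16 bits and the ERR_FATAL/NONFATAL Requester ID in the high 16
 * bits, so a raw value of 0x01100018 decodes as:
 *
 *   ERR_COR_ID(0x01100018)   == 0x0018 (bus 00, device 03, function 0)
 *   ERR_UNCOR_ID(0x01100018) == 0x0110 (bus 01, device 02, function 0)
 */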
static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
pcie_aer_disable = 1;
}
bool pci_aer_available(void)
{
return !pcie_aer_disable && pci_msi_enabled();
}
#ifdef CONFIG_PCIE_ECRC
#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
static int ecrc_policy = ECRC_POLICY_DEFAULT;
static const char * const ecrc_policy_str[] = {
[ECRC_POLICY_DEFAULT] = "bios",
[ECRC_POLICY_OFF] = "off",
[ECRC_POLICY_ON] = "on"
};
/**
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
int aer = dev->aer_cap;
u32 reg32;
if (!aer)
return -ENODEV;
pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
if (reg32 & PCI_ERR_CAP_ECRC_GENC)
reg32 |= PCI_ERR_CAP_ECRC_GENE;
if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
reg32 |= PCI_ERR_CAP_ECRC_CHKE;
pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
/**
* disable_ecrc_checking - disables PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
int aer = dev->aer_cap;
u32 reg32;
if (!aer)
return -ENODEV;
pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
/**
* pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
* @dev: the PCI device
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
if (!pcie_aer_is_native(dev))
return;
switch (ecrc_policy) {
case ECRC_POLICY_DEFAULT:
return;
case ECRC_POLICY_OFF:
disable_ecrc_checking(dev);
break;
case ECRC_POLICY_ON:
enable_ecrc_checking(dev);
break;
default:
return;
}
}
/**
* pcie_ecrc_get_policy - parse kernel command-line ecrc option
* @str: ECRC policy from kernel command line to use
*/
void pcie_ecrc_get_policy(char *str)
{
int i;
i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str);
if (i < 0)
return;
ecrc_policy = i;
}
#endif /* CONFIG_PCIE_ECRC */
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
int pcie_aer_is_native(struct pci_dev *dev)
{
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
if (!dev->aer_cap)
return 0;
return pcie_ports_native || host->native_aer;
}
EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
int rc;
if (!pcie_aer_is_native(dev))
return -EIO;
rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
return pcibios_err_to_errno(rc);
}
int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
int aer = dev->aer_cap;
u32 status, sev;
if (!pcie_aer_is_native(dev))
return -EIO;
/* Clear status bits for ERR_NONFATAL errors only */
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= ~sev;
if (status)
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
int aer = dev->aer_cap;
u32 status, sev;
if (!pcie_aer_is_native(dev))
return;
/* Clear status bits for ERR_FATAL errors only */
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= sev;
if (status)
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
}
/**
* pci_aer_raw_clear_status - Clear AER error registers.
* @dev: the PCI device
*
* Clear AER error status registers unconditionally, regardless of
* whether they're owned by firmware or the OS.
*
* Returns 0 on success, or negative on failure.
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
int aer = dev->aer_cap;
u32 status;
int port_type;
if (!aer)
return -EIO;
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
port_type == PCI_EXP_TYPE_RC_EC) {
pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
}
pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, status);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
int pci_aer_clear_status(struct pci_dev *dev)
{
if (!pcie_aer_is_native(dev))
return -EIO;
return pci_aer_raw_clear_status(dev);
}
void pci_save_aer_state(struct pci_dev *dev)
{
int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
if (!save_state)
return;
cap = &save_state->cap.data[0];
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, cap++);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, cap++);
pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, cap++);
pci_read_config_dword(dev, aer + PCI_ERR_CAP, cap++);
if (pcie_cap_has_rtctl(dev))
pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, cap++);
}
void pci_restore_aer_state(struct pci_dev *dev)
{
int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
if (!save_state)
return;
cap = &save_state->cap.data[0];
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, *cap++);
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, *cap++);
pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, *cap++);
pci_write_config_dword(dev, aer + PCI_ERR_CAP, *cap++);
if (pcie_cap_has_rtctl(dev))
pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, *cap++);
}
void pci_aer_init(struct pci_dev *dev)
{
int n;
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!dev->aer_cap)
return;
dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
* PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
* Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
* 7.8.4).
*/
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_aer_clear_status(dev);
if (pci_aer_available())
pci_enable_pcie_error_reporting(dev);
pcie_set_ecrc_checking(dev);
}
void pci_aer_exit(struct pci_dev *dev)
{
kfree(dev->aer_stats);
dev->aer_stats = NULL;
}
#define AER_AGENT_RECEIVER 0
#define AER_AGENT_REQUESTER 1
#define AER_AGENT_COMPLETER 2
#define AER_AGENT_TRANSMITTER 3
#define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
#define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
0 : PCI_ERR_UNC_COMP_ABORT)
#define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
#define AER_GET_AGENT(t, e) \
((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
AER_AGENT_RECEIVER)
#define AER_PHYSICAL_LAYER_ERROR 0
#define AER_DATA_LINK_LAYER_ERROR 1
#define AER_TRANSACTION_LAYER_ERROR 2
#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
PCI_ERR_COR_RCVR : 0)
#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
(PCI_ERR_COR_BAD_TLP| \
PCI_ERR_COR_BAD_DLLP| \
PCI_ERR_COR_REP_ROLL| \
PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
#define AER_GET_LAYER_ERROR(t, e) \
((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
AER_TRANSACTION_LAYER_ERROR)
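/*
 * Worked example (illustrative): for a correctable Bad TLP error, i.e.
 * t == AER_CORRECTABLE and e == PCI_ERR_COR_BAD_TLP:
 *
 *   AER_GET_LAYER_ERROR(t, e) == AER_DATA_LINK_LAYER_ERROR, because
 *   PCI_ERR_COR_BAD_TLP is in AER_DATA_LINK_LAYER_ERROR_MASK(t);
 *
 *   AER_GET_AGENT(t, e) == AER_AGENT_RECEIVER, because none of the
 *   completer/requester/transmitter masks match that bit.
 */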
/*
* AER error strings
*/
static const char *aer_error_severity_string[] = {
"Uncorrected (Non-Fatal)",
"Uncorrected (Fatal)",
"Corrected"
};
static const char *aer_error_layer[] = {
"Physical Layer",
"Data Link Layer",
"Transaction Layer"
};
static const char *aer_correctable_error_string[] = {
"RxErr", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
"BadTLP", /* Bit Position 6 */
"BadDLLP", /* Bit Position 7 */
"Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
"Timeout", /* Bit Position 12 */
"NonFatalErr", /* Bit Position 13 */
"CorrIntErr", /* Bit Position 14 */
"HeaderOF", /* Bit Position 15 */
NULL, /* Bit Position 16 */
NULL, /* Bit Position 17 */
NULL, /* Bit Position 18 */
NULL, /* Bit Position 19 */
NULL, /* Bit Position 20 */
NULL, /* Bit Position 21 */
NULL, /* Bit Position 22 */
NULL, /* Bit Position 23 */
NULL, /* Bit Position 24 */
NULL, /* Bit Position 25 */
NULL, /* Bit Position 26 */
NULL, /* Bit Position 27 */
NULL, /* Bit Position 28 */
NULL, /* Bit Position 29 */
NULL, /* Bit Position 30 */
NULL, /* Bit Position 31 */
};
static const char *aer_uncorrectable_error_string[] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
NULL,
"DLP", /* Bit Position 4 */
"SDES", /* Bit Position 5 */
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
"TLP", /* Bit Position 12 */
"FCP", /* Bit Position 13 */
"CmpltTO", /* Bit Position 14 */
"CmpltAbrt", /* Bit Position 15 */
"UnxCmplt", /* Bit Position 16 */
"RxOF", /* Bit Position 17 */
"MalfTLP", /* Bit Position 18 */
"ECRC", /* Bit Position 19 */
"UnsupReq", /* Bit Position 20 */
"ACSViol", /* Bit Position 21 */
"UncorrIntErr", /* Bit Position 22 */
"BlockedTLP", /* Bit Position 23 */
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
"PoisonTLPBlocked", /* Bit Position 26 */
NULL, /* Bit Position 27 */
NULL, /* Bit Position 28 */
NULL, /* Bit Position 29 */
NULL, /* Bit Position 30 */
NULL, /* Bit Position 31 */
};
static const char *aer_agent_string[] = {
"Receiver ID",
"Requester ID",
"Completer ID",
"Transmitter ID"
};
#define aer_stats_dev_attr(name, stats_array, strings_array, \
total_string, total_field) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
unsigned int i; \
struct pci_dev *pdev = to_pci_dev(dev); \
u64 *stats = pdev->aer_stats->stats_array; \
size_t len = 0; \
\
for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
stats[i]); \
else if (stats[i]) \
len += sysfs_emit_at(buf, len, \
#stats_array "_bit[%d] %llu\n",\
i, stats[i]); \
} \
len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
pdev->aer_stats->total_field); \
return len; \
} \
static DEVICE_ATTR_RO(name)
aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
aer_correctable_error_string, "ERR_COR",
dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
aer_uncorrectable_error_string, "ERR_FATAL",
dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
aer_uncorrectable_error_string, "ERR_NONFATAL",
dev_total_nonfatal_errs);
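/*
 * Example sysfs output (hypothetical counts, for illustration only):
 * reading aer_dev_correctable for a device that has logged two Receiver
 * Errors and one Bad TLP would look like:
 *
 *   RxErr 2
 *   BadTLP 1
 *   BadDLLP 0
 *   Rollover 0
 *   Timeout 0
 *   NonFatalErr 0
 *   CorrIntErr 0
 *   HeaderOF 0
 *   TOTAL_ERR_COR 3
 */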
#define aer_stats_rootport_attr(name, field) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
} \
static DEVICE_ATTR_RO(name)
aer_stats_rootport_attr(aer_rootport_total_err_cor,
rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
rootport_total_nonfatal_errs);
static struct attribute *aer_stats_attrs[] __ro_after_init = {
&dev_attr_aer_dev_correctable.attr,
&dev_attr_aer_dev_fatal.attr,
&dev_attr_aer_dev_nonfatal.attr,
&dev_attr_aer_rootport_total_err_cor.attr,
&dev_attr_aer_rootport_total_err_fatal.attr,
&dev_attr_aer_rootport_total_err_nonfatal.attr,
NULL
};
static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
if (!pdev->aer_stats)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
a == &dev_attr_aer_rootport_total_err_fatal.attr ||
a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
(pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC)))
return 0;
return a->mode;
}
const struct attribute_group aer_stats_attr_group = {
.attrs = aer_stats_attrs,
.is_visible = aer_stats_attrs_are_visible,
};
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
unsigned long status = info->status & ~info->mask;
int i, max = -1;
u64 *counter = NULL;
struct aer_stats *aer_stats = pdev->aer_stats;
if (!aer_stats)
return;
switch (info->severity) {
case AER_CORRECTABLE:
aer_stats->dev_total_cor_errs++;
counter = &aer_stats->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
aer_stats->dev_total_nonfatal_errs++;
counter = &aer_stats->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
aer_stats->dev_total_fatal_errs++;
counter = &aer_stats->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
for_each_set_bit(i, &status, max)
counter[i]++;
}
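/*
 * Worked example (illustrative): for info->severity == AER_CORRECTABLE
 * with status bit 6 (Bad TLP) set and not masked, the code above bumps
 * aer_stats->dev_total_cor_errs and dev_cor_errs[6], which the
 * aer_dev_correctable sysfs attribute later reports as "BadTLP".
 */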
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
struct aer_stats *aer_stats = pdev->aer_stats;
if (!aer_stats)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
aer_stats->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
aer_stats->rootport_total_fatal_errs++;
else
aer_stats->rootport_total_nonfatal_errs++;
}
}
static void __print_tlp_header(struct pci_dev *dev,
struct aer_header_log_regs *t)
{
pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
t->dw0, t->dw1, t->dw2, t->dw3);
}
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
const char **strings;
unsigned long status = info->status & ~info->mask;
const char *level, *errmsg;
int i;
if (info->severity == AER_CORRECTABLE) {
strings = aer_correctable_error_string;
level = KERN_WARNING;
} else {
strings = aer_uncorrectable_error_string;
level = KERN_ERR;
}
for_each_set_bit(i, &status, 32) {
errmsg = strings[i];
if (!errmsg)
errmsg = "Unknown Error Bit";
pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
pci_dev_aer_stats_incr(dev, info);
}
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int layer, agent;
int id = pci_dev_id(dev);
const char *level;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
aer_error_severity_string[info->severity]);
goto out;
}
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
if (info->tlp_header_valid)
__print_tlp_header(dev, &info->tlp);
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
pci_err(dev, " Error of this Agent is reported first\n");
trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
info->severity, info->tlp_header_valid, &info->tlp);
}
static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
u8 bus = info->id >> 8;
u8 devfn = info->id & 0xff;
pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
info->multi_error_valid ? "Multiple " : "",
aer_error_severity_string[info->severity],
pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
int cper_severity_to_aer(int cper_severity)
{
switch (cper_severity) {
case CPER_SEV_RECOVERABLE:
return AER_NONFATAL;
case CPER_SEV_FATAL:
return AER_FATAL;
default:
return AER_CORRECTABLE;
}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
struct aer_err_info info;
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
memset(&info, 0, sizeof(info));
info.severity = aer_severity;
info.status = status;
info.mask = mask;
info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
__aer_print_error(dev, &info);
pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
pci_err(dev, "aer_uncor_severity: 0x%08x\n",
aer->uncor_severity);
if (tlp_header_valid)
__print_tlp_header(dev, &aer->header_log);
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
}
#endif
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
* @dev: pointer to pci_dev to be added
*/
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
e_info->error_dev_num++;
return 0;
}
return -ENOSPC;
}
/**
* is_error_source - check whether the device is source of reported error
* @dev: pointer to pci_dev to be checked
* @e_info: pointer to reported error info
*/
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
int aer = dev->aer_cap;
u32 status, mask;
u16 reg16;
/*
* When the bus ID is 0, it might be a bad ID
* reported by the Root Port.
*/
if ((PCI_BUS_NUM(e_info->id) != 0) &&
!(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
/* Device ID match? */
if (e_info->id == pci_dev_id(dev))
return true;
/* Without a multiple-error indication, a failed ID match is decisive */
if (!e_info->multi_error_valid)
return false;
}
/*
* When any of the following hold:
* 1) the bus ID is 0 (some ports might lose the bus ID of the
* error source);
* 2) the bus flag PCI_BUS_FLAGS_NO_AERSID is set;
* 3) there are multiple errors and the prior ID comparison fails;
* check the AER status registers to find a possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
return false;
/* Check if AER is enabled */
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
if (!aer)
return false;
/* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
} else {
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
}
if (status & ~mask)
return true;
return false;
}
static int find_device_iter(struct pci_dev *dev, void *data)
{
struct aer_err_info *e_info = (struct aer_err_info *)data;
if (is_error_source(dev, e_info)) {
/* List this device */
if (add_error_device(e_info, dev)) {
/* We cannot handle more... Stop iteration */
/* TODO: Should print error message here? */
return 1;
}
/* If there is only a single error, stop iteration */
if (!e_info->multi_error_valid)
return 1;
}
return 0;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
* @e_info: detailed error information, including the error source ID
*
* Return true if found.
*
* Invoked from the deferred handler when an error is detected at the Root Port.
* The caller must properly set id, severity, and multi_error_valid in the
* struct aer_err_info pointed to by @e_info. This function fills in
* e_info->error_dev_num and e_info->dev[] based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
/* Must reset in this function */
e_info->error_dev_num = 0;
/* Is the Root Port itself the agent that sent the error message? */
result = find_device_iter(dev, e_info);
if (result)
return true;
if (pci_pcie_type(parent) == PCI_EXP_TYPE_RC_EC)
pcie_walk_rcec(parent, find_device_iter, e_info);
else
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
pci_info(parent, "can't find device of ID%04x\n", e_info->id);
return false;
}
return true;
}
/**
* handle_error_source - handle and log an error reported by a source device
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error is detected by the Root Port.
*/
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
int aer = dev->aer_cap;
if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
if (aer)
pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
if (pcie_aer_is_native(dev)) {
struct pci_driver *pdrv = dev->driver;
if (pdrv && pdrv->err_handler &&
pdrv->err_handler->cor_error_detected)
pdrv->err_handler->cor_error_detected(dev);
pcie_clear_device_status(dev);
}
} else if (info->severity == AER_NONFATAL)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
pci_dev_put(dev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
#define AER_RECOVER_RING_SIZE 16
struct aer_recover_entry {
u8 bus;
u8 devfn;
u16 domain;
int severity;
struct aer_capability_regs *regs;
};
static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
AER_RECOVER_RING_SIZE);
static void aer_recover_work_func(struct work_struct *work)
{
struct aer_recover_entry entry;
struct pci_dev *pdev;
while (kfifo_get(&aer_recover_ring, &entry)) {
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
entry.domain, entry.bus,
PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
continue;
}
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
pcie_do_recovery(pdev, pci_channel_io_normal,
aer_root_reset);
else if (entry.severity == AER_FATAL)
pcie_do_recovery(pdev, pci_channel_io_frozen,
aer_root_reset);
pci_dev_put(pdev);
}
}
/*
* Mutual exclusion for writers of aer_recover_ring. The reader side
* needs no lock: there is only one reader, and no lock is required
* between the single reader and the writers.
*/
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
int severity, struct aer_capability_regs *aer_regs)
{
struct aer_recover_entry entry = {
.bus = bus,
.devfn = devfn,
.domain = domain,
.severity = severity,
.regs = aer_regs,
};
if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
&aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
pr_err("buffer overflow in recovery for %04x:%02x:%02x.%x\n",
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
/**
* aer_get_device_error_info - read error status from dev and store it to info
* @dev: pointer to the device expected to have an error record
* @info: pointer to structure to store the error record
*
* Return 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
u32 temp;
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
/* The device might not support AER */
if (!aer)
return 0;
if (info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS,
&info->status);
pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
} else if (type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC ||
type == PCI_EXP_TYPE_DOWNSTREAM ||
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
&info->status);
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
/* Get First Error Pointer */
pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
info->first_error = PCI_ERR_CAP_FEP(temp);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
pci_read_config_dword(dev,
aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
pci_read_config_dword(dev,
aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
pci_read_config_dword(dev,
aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
pci_read_config_dword(dev,
aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
}
}
return 1;
}
static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
/* Report all errors before handling them, so records aren't lost to resets etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (aer_get_device_error_info(e_info->dev[i], e_info))
handle_error_source(e_info->dev[i], e_info);
}
}
/**
* aer_isr_one_error - consume an error detected by root port
* @rpc: pointer to the root port which holds an error
* @e_src: pointer to an error source
*/
static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
struct pci_dev *pdev = rpc->rpd;
struct aer_err_info e_info;
pci_rootport_aer_stats_incr(pdev, e_src);
/*
* Both a correctable error and an uncorrectable error may be
* logged at once. Report the correctable error first.
*/
if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
e_info.id = ERR_COR_ID(e_src->id);
e_info.severity = AER_CORRECTABLE;
if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
e_info.multi_error_valid = 1;
else
e_info.multi_error_valid = 0;
aer_print_port_info(pdev, &e_info);
if (find_source_device(pdev, &e_info))
aer_process_err_devices(&e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
e_info.id = ERR_UNCOR_ID(e_src->id);
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
e_info.severity = AER_FATAL;
else
e_info.severity = AER_NONFATAL;
if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
e_info.multi_error_valid = 1;
else
e_info.multi_error_valid = 0;
aer_print_port_info(pdev, &e_info);
if (find_source_device(pdev, &e_info))
aer_process_err_devices(&e_info);
}
}
/**
* aer_isr - consume errors detected by root port
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
* Invoked, in IRQ thread context, when the Root Port records a newly detected error.
*/
static irqreturn_t aer_isr(int irq, void *context)
{
struct pcie_device *dev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(dev);
struct aer_err_source e_src;
if (kfifo_is_empty(&rpc->aer_fifo))
return IRQ_NONE;
while (kfifo_get(&rpc->aer_fifo, &e_src))
aer_isr_one_error(rpc, &e_src);
return IRQ_HANDLED;
}
/**
* aer_irq - Root Port's ISR
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
* Invoked when Root Port detects AER messages.
*/
static irqreturn_t aer_irq(int irq, void *context)
{
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
struct pci_dev *rp = rpc->rpd;
int aer = rp->aer_cap;
struct aer_err_source e_src = {};
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
if (!(e_src.status & AER_ERR_STATUS_MASK))
return IRQ_NONE;
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
pci_write_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, e_src.status);
if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
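/*
 * Descriptive note (not functional): aer_irq() runs in hard IRQ context
 * and only reads/clears the Root Error Status and queues the source via
 * kfifo_put() (safe without a lock for a single producer and consumer),
 * then returns IRQ_WAKE_THREAD so aer_isr() can drain the FIFO in the
 * IRQ thread, where walking the bus and invoking driver error handlers
 * is safe. The two are paired via devm_request_threaded_irq() in
 * aer_probe() below.
 */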
/**
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
* Invoked when PCIe bus loads AER service driver.
*/
static void aer_enable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
int aer = pdev->aer_cap;
u16 reg16;
u32 reg32;
/* Clear PCIe Capability's Device Status */
pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
/* Disable system error generation in response to error messages */
pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
SYSTEM_ERROR_INTR_ON_MESG_MASK);
/* Clear error status */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
pci_read_config_dword(pdev, aer + PCI_ERR_COR_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_COR_STATUS, reg32);
pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
/* Enable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
}
/**
* aer_disable_rootport - disable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
* Invoked when PCIe bus unloads AER service driver.
*/
static void aer_disable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
int aer = pdev->aer_cap;
u32 reg32;
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
/* Clear Root's error status reg */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
}
/**
* aer_remove - clean up resources
* @dev: pointer to the pcie_dev data structure
*
* Invoked when PCI Express bus unloads or AER probe fails.
*/
static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
aer_disable_rootport(rpc);
}
/**
* aer_probe - initialize resources
* @dev: pointer to the pcie_dev data structure
*
* Invoked when PCI Express bus loads AER service driver.
*/
static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
AER_MAX_TYPEOF_COR_ERRS);
BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
AER_MAX_TYPEOF_UNCOR_ERRS);
/* Limit to Root Ports or Root Complex Event Collectors */
if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
(pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
return -ENODEV;
rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc)
return -ENOMEM;
rpc->rpd = port;
INIT_KFIFO(rpc->aer_fifo);
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
IRQF_SHARED, "aerdrv", dev);
if (status) {
pci_err(port, "request AER IRQ %d failed\n", dev->irq);
return status;
}
aer_enable_rootport(rpc);
pci_info(port, "enabled with IRQ %d\n", dev->irq);
return 0;
}
/**
* aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
* @dev: pointer to Root Port, RCEC, or RCiEP
*
* Invoked by Port Bus driver when performing reset.
*/
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
struct pci_dev *root;
int aer;
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 reg32;
int rc;
/*
* Only Root Ports and RCECs have AER Root Command and Root Status
* registers. If "dev" is an RCiEP, the relevant registers are in
* the RCEC.
*/
if (type == PCI_EXP_TYPE_RC_END)
root = dev->rcec;
else
root = pcie_find_root_port(dev);
/*
* If the platform retained control of AER, an RCiEP may not have
* an RCEC visible to us, so dev->rcec ("root") may be NULL. In
* that case, firmware is responsible for these registers.
*/
aer = root ? root->aer_cap : 0;
if ((host->native_aer || pcie_ports_native) && aer) {
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
}
if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
if (!rc)
pci_info(dev, "has been reset\n");
else
pci_info(dev, "not reset (no FLR support: %d)\n", rc);
} else {
rc = pci_bus_error_reset(dev);
pci_info(dev, "%s Port link has been reset (%d)\n",
pci_is_root_bus(dev->bus) ? "Root" : "Downstream", rc);
}
if ((host->native_aer || pcie_ports_native) && aer) {
/* Clear Root Error Status */
pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32);
/* Enable Root Port's interrupt in response to error messages */
pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
}
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
.remove = aer_remove,
};
/**
* pcie_aer_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
*/
int __init pcie_aer_init(void)
{
if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
| linux-master | drivers/pci/pcie/aer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Enable PCIe link L0s/L1 state and Clock Power Management
*
* Copyright (C) 2007 Intel
* Copyright (C) Zhang Yanmin ([email protected])
* Copyright (C) Shaohua Li ([email protected])
*/
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include "../pci.h"
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
/* Note: these are not register definitions */
#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
#define ASPM_STATE_L1 (4) /* L1 state */
#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
ASPM_STATE_L1SS)
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
struct pci_dev *downstream; /* Downstream component, function 0 */
struct pcie_link_state *root; /* pointer to the root port link */
struct pcie_link_state *parent; /* pointer to the parent Link state */
struct list_head sibling; /* node in link_list */
/* ASPM state */
u32 aspm_support:7; /* Supported ASPM state */
u32 aspm_enabled:7; /* Enabled ASPM state */
u32 aspm_capable:7; /* Capable ASPM state with latency */
u32 aspm_default:7; /* Default ASPM state by BIOS */
u32 aspm_disable:7; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
};
static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
#define POLICY_DEFAULT 0 /* BIOS default setting */
#define POLICY_PERFORMANCE 1 /* high performance */
#define POLICY_POWERSAVE 2 /* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif
static const char *policy_str[] = {
[POLICY_DEFAULT] = "default",
[POLICY_PERFORMANCE] = "performance",
[POLICY_POWERSAVE] = "powersave",
[POLICY_POWER_SUPERSAVE] = "powersupersave"
};
/*
* The L1 PM Substates capability is only implemented in function 0 of a
* multi-function device.
*/
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
struct pci_dev *child;
list_for_each_entry(child, &linkbus->devices, bus_list)
if (PCI_FUNC(child->devfn) == 0)
return child;
return NULL;
}
static int policy_to_aspm_state(struct pcie_link_state *link)
{
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
return (ASPM_STATE_L0S | ASPM_STATE_L1);
case POLICY_POWER_SUPERSAVE:
/* Enable Everything */
return ASPM_STATE_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
return 0;
}
static int policy_to_clkpm_state(struct pcie_link_state *link)
{
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
case POLICY_POWER_SUPERSAVE:
/* Enable Clock PM */
return 1;
case POLICY_DEFAULT:
return link->clkpm_default;
}
return 0;
}
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN,
val);
link->clkpm_enabled = !!enable;
}
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/*
* Don't enable Clock PM if the link is not Clock PM capable
* or Clock PM is disabled
*/
if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Nothing to do if the requested state equals the current state */
if (link->clkpm_enabled == enable)
return;
pcie_set_clkpm_nocheck(link, enable);
}
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
/*
* pcie_aspm_configure_common_clock: check if the two ends of a link
* could use a common clock. If they can, configure them to do so,
* which reduces the ASPM state exit latency.
*/
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
* All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
*/
child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
BUG_ON(!pci_is_pcie(child));
/* Check downstream component if bit Slot Clock Configuration is 1 */
pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Port might be already in common clock mode */
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
bool consistent = true;
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL,
&reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
consistent = false;
break;
}
}
if (consistent)
return;
pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
}
ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CCC, ccc);
}
/* Configure upstream component */
pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CCC, ccc);
if (pcie_retrain_link(link->pdev, true)) {
/* Training failed. Restore common clock configurations */
pci_err(parent, "ASPM: Could not configure common clock\n");
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CCC,
child_old_ccc[PCI_FUNC(child->devfn)]);
pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CCC, parent_old_ccc);
}
}
/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 lnkcap)
{
u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
if (encoding == 0x7)
return (5 * 1000); /* > 4us */
return (64 << encoding);
}
/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
if (encoding == 0x7)
return -1U;
return (64 << encoding);
}
/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
if (encoding == 0x7)
return (65 * 1000); /* > 64us */
return (1000 << encoding);
}
/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
if (encoding == 0x7)
return -1U;
return (1000 << encoding);
}
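/*
 * Worked example (illustrative): an L0s exit latency encoding of 3
 * yields 64 << 3 == 512 ns, and an L1 exit latency encoding of 2 yields
 * 1000 << 2 == 4000 ns (4 us). The 0x7 encoding is special-cased: it
 * means "more than 4 us" (L0s) or "more than 64 us" (L1) for exit
 * latency, and "no limit" (-1U) for acceptable latency.
 */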
/* Convert L1SS T_pwr encoding to usec */
static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
switch (scale) {
case 0:
return val * 2;
case 1:
return val * 10;
case 2:
return val * 100;
}
pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
return 0;
}
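/*
 * Worked example (illustrative): a T_PwrOn scale of 1 (10 us units)
 * with a value of 5 gives calc_l12_pwron() == 50 us.
 */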
/*
* Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
* register. Ports enter L1.2 when the most recent LTR value is greater
* than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
* don't enter L1.2 too aggressively.
*
* See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
*/
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
u64 threshold_ns = (u64) threshold_us * 1000;
/*
* LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
* value of 0x3ff.
*/
if (threshold_ns <= 0x3ff * 1) {
*scale = 0; /* Value times 1ns */
*value = threshold_ns;
} else if (threshold_ns <= 0x3ff * 32) {
*scale = 1; /* Value times 32ns */
*value = roundup(threshold_ns, 32) / 32;
} else if (threshold_ns <= 0x3ff * 1024) {
*scale = 2; /* Value times 1024ns */
*value = roundup(threshold_ns, 1024) / 1024;
} else if (threshold_ns <= 0x3ff * 32768) {
*scale = 3; /* Value times 32768ns */
*value = roundup(threshold_ns, 32768) / 32768;
} else if (threshold_ns <= 0x3ff * 1048576) {
*scale = 4; /* Value times 1048576ns */
*value = roundup(threshold_ns, 1048576) / 1048576;
} else if (threshold_ns <= 0x3ff * (u64) 33554432) {
*scale = 5; /* Value times 33554432ns */
*value = roundup(threshold_ns, 33554432) / 33554432;
} else {
*scale = 5;
*value = 0x3ff; /* Max representable value */
}
}
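/*
 * Worked example (illustrative): threshold_us = 55 gives threshold_ns =
 * 55000, which exceeds 0x3ff * 32 but fits within 0x3ff * 1024, so we
 * pick scale 2 and value roundup(55000, 1024) / 1024 = 54, i.e. a
 * programmed threshold of 54 * 1024 = 55296 ns >= the requested 55 us.
 */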
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
u32 latency, encoding, lnkcap_up, lnkcap_dw;
u32 l1_switch_latency = 0, latency_up_l0s;
u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
u32 acceptable_l0s, acceptable_l1;
struct pcie_link_state *link;
/* Device not in D0 doesn't need latency check */
if ((endpoint->current_state != PCI_D0) &&
(endpoint->current_state != PCI_UNKNOWN))
return;
link = endpoint->bus->self->link_state;
/* Calculate endpoint L0s acceptable latency */
encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
acceptable_l0s = calc_l0s_acceptable(encoding);
/* Calculate endpoint L1 acceptable latency */
encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
acceptable_l1 = calc_l1_acceptable(encoding);
while (link) {
struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
/* Read direction exit latencies */
pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
&lnkcap_up);
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
&lnkcap_dw);
latency_up_l0s = calc_l0s_latency(lnkcap_up);
latency_up_l1 = calc_l1_latency(lnkcap_up);
latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
latency_dw_l1 = calc_l1_latency(lnkcap_dw);
/* Check upstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
(latency_up_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_UP;
/* Check downstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
(latency_dw_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_DW;
/*
* Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1. The spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine the maximum latencies introduced by enabling
		 * L1 substates on the components, it is not clear how to do
		 * an L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
*/
latency = max_t(u32, latency_up_l1, latency_dw_l1);
if ((link->aspm_capable & ASPM_STATE_L1) &&
(latency + l1_switch_latency > acceptable_l1))
link->aspm_capable &= ~ASPM_STATE_L1;
l1_switch_latency += 1000;
link = link->parent;
}
}
static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
u32 clear, u32 set)
{
u32 val;
pci_read_config_dword(pdev, pos, &val);
val &= ~clear;
val |= set;
pci_write_config_dword(pdev, pos, val);
}
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 val1, val2, scale1, scale2;
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
u32 pl1_2_enables, cl1_2_enables;
/* Choose the greater of the two Port Common_Mode_Restore_Times */
val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
t_common_mode = max(val1, val2);
/* Choose the greater of the two Port T_POWER_ON times */
val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
if (calc_l12_pwron(parent, scale1, val1) >
calc_l12_pwron(child, scale2, val2)) {
ctl2 |= scale1 | (val1 << 3);
t_power_on = calc_l12_pwron(parent, scale1, val1);
} else {
ctl2 |= scale2 | (val2 << 3);
t_power_on = calc_l12_pwron(child, scale2, val2);
}
/*
* Set LTR_L1.2_THRESHOLD to the time required to transition the
* Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
* downstream devices report (via LTR) that they can tolerate at
* least that much latency.
*
* Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
* Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
* least 4us.
*/
l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
encode_l12_threshold(l1_2_threshold, &scale, &value);
ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
/* Some broken devices only support dword access to L1 SS */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
if (ctl1 == pctl1 && ctl1 == cctl1 &&
ctl2 == pctl2 && ctl2 == cctl2)
return;
/* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
if (pl1_2_enables || cl1_2_enables) {
pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1_2_MASK, 0);
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1_2_MASK, 0);
}
/* Program T_POWER_ON times in both ports */
pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
/* Program Common_Mode_Restore_Time in upstream device */
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
/* Program LTR_L1.2_THRESHOLD time in both ports */
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
if (pl1_2_enables || cl1_2_enables) {
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
pl1_2_enables);
pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
cl1_2_enables);
}
}
static void aspm_l1ss_init(struct pcie_link_state *link)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 parent_l1ss_cap, child_l1ss_cap;
u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
if (!parent->l1ss || !child->l1ss)
return;
/* Setup L1 substate */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
&parent_l1ss_cap);
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
&child_l1ss_cap);
if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
parent_l1ss_cap = 0;
if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
child_l1ss_cap = 0;
/*
* If we don't have LTR for the entire path from the Root Complex
* to this device, we can't use ASPM L1.2 because it relies on the
* LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
*/
if (!child->ltr_path)
child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
if (parent_l1ss_cap)
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
&parent_l1ss_ctl1);
if (child_l1ss_cap)
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
&child_l1ss_ctl1);
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
if (link->aspm_support & ASPM_STATE_L1_2_MASK)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
struct pci_bus *linkbus = parent->subordinate;
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
link->aspm_enabled = ASPM_STATE_ALL;
link->aspm_disable = ASPM_STATE_ALL;
return;
}
/*
	 * If ASPM is not supported, don't mess with the clocks and
	 * link; bail out now.
*/
pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
return;
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
/*
* Re-read upstream/downstream components' register state after
* clock configuration. L0s & L1 exit latencies in the otherwise
* read-only Link Capabilities may change depending on common clock
* configuration (PCIe r5.0, sec 7.5.3.6).
*/
pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
/*
* Setup L0s state
*
* Note that we must not enable L0s in either direction on a
* given link unless components on both sides of the link each
* support L0s.
*/
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
link->aspm_support |= ASPM_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_DW;
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
link->aspm_support |= ASPM_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
aspm_l1ss_init(link);
/* Save default state */
link->aspm_default = link->aspm_enabled;
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
continue;
pcie_aspm_check_latency(child);
}
}
/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
u32 val, enable_req;
struct pci_dev *child = link->downstream, *parent = link->pdev;
enable_req = (link->aspm_enabled ^ state) & state;
/*
* Here are the rules specified in the PCIe spec for enabling L1SS:
* - When enabling L1.x, enable bit at parent first, then at child
* - When disabling L1.x, disable bit at child first, then at parent
* - When enabling ASPM L1.x, need to disable L1
* (at child followed by parent).
* - The ASPM/PCIPM L1.2 must be disabled while programming timing
* parameters
*
* To keep it simple, disable all L1SS bits first, and later enable
* what is needed.
*/
/* Disable all L1 substates */
pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
/*
	 * If needed, disable L1 here; it gets re-enabled later
	 * in pcie_config_aspm_link().
*/
if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPM_L1, 0);
pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPM_L1, 0);
}
val = 0;
if (state & ASPM_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
if (state & ASPM_STATE_L1_2)
val |= PCI_L1SS_CTL1_ASPM_L1_2;
if (state & ASPM_STATE_L1_1_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_1;
if (state & ASPM_STATE_L1_2_PCIPM)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC, val);
}
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
u32 upstream = 0, dwstream = 0;
struct pci_dev *child = link->downstream, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/* Enable only the states that were not explicitly disabled */
state &= (link->aspm_capable & ~link->aspm_disable);
/* Can't enable any substates if L1 is not enabled */
if (!(state & ASPM_STATE_L1))
state &= ~ASPM_STATE_L1SS;
	/* Spec says both ports must be in D0 before enabling PCI PM substates */
if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
state &= ~ASPM_STATE_L1_SS_PCIPM;
state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
}
/* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
if (state & ASPM_STATE_L0S_UP)
dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L0S_DW)
upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L1) {
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
if (link->aspm_capable & ASPM_STATE_L1SS)
pcie_config_aspm_l1ss(link, state);
/*
	 * PCIe spec r2.0 suggests all functions should be configured
	 * with the same ASPM setting. ASPM L1 should be enabled in the
	 * upstream component first and then downstream, and vice
	 * versa for disabling ASPM L1. The spec doesn't mention L0s.
*/
if (state & ASPM_STATE_L1)
pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_config_aspm_dev(child, dwstream);
if (!(state & ASPM_STATE_L1))
pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
}
static void pcie_config_aspm_path(struct pcie_link_state *link)
{
while (link) {
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link = link->parent;
}
}
static void free_link_state(struct pcie_link_state *link)
{
link->pdev->link_state = NULL;
kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
struct pci_dev *child;
u32 reg32;
/*
	 * Some functions in a slot might not all be PCIe functions,
	 * which is very strange. Disable ASPM for the whole slot.
*/
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
if (!pci_is_pcie(child))
return -EINVAL;
/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device.
*/
if (aspm_disabled)
continue;
/*
		 * Disable ASPM for pre-1.1 PCIe devices. Following MS, we
		 * use the RBER bit to determine whether a function is a
		 * 1.1 version device.
*/
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
return -EINVAL;
}
}
return 0;
}
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
struct pcie_link_state *link;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return NULL;
INIT_LIST_HEAD(&link->sibling);
link->pdev = pdev;
link->downstream = pci_function_0(pdev->subordinate);
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
* hierarchies. Note that some PCIe host implementations omit
* the root ports entirely, in which case a downstream port on
* a switch may become the root of the link state chain for all
* its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
!pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
parent = pdev->bus->parent->self->link_state;
if (!parent) {
kfree(link);
return NULL;
}
link->parent = parent;
link->root = link->parent->root;
}
list_add(&link->sibling, &link_list);
pdev->link_state = link;
return link;
}
static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
{
struct pci_dev *child;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
}
/*
 * pcie_aspm_init_link_state: Initialize PCI Express link state.
 * It is called after the PCIe port and its child devices have been
 * scanned.
* @pdev: the root port or switch downstream port
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
if (!aspm_support_enabled)
return;
if (pdev->link_state)
return;
/*
* We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device is
	 * a downstream port.
*/
if (!pcie_downstream_port(pdev))
return;
/* VIA has a strange chipset, root port is under a bridge */
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
return;
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
mutex_lock(&aspm_lock);
link = alloc_pcie_link_state(pdev);
if (!link)
goto unlock;
/*
	 * Set up the initial ASPM state. Note that we need to configure
	 * upstream links as well, because their capable state can be
	 * updated through pcie_aspm_cap_init().
*/
pcie_aspm_cap_init(link, blacklist);
/* Setup initial Clock PM state */
pcie_clkpm_cap_init(link, blacklist);
/*
* At this stage drivers haven't had an opportunity to change the
* link policy setting. Enabling ASPM on broken hardware can cripple
* it even before the driver has had a chance to disable ASPM, so
* default to a safe level right now. If we're enabling ASPM beyond
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
if (aspm_policy != POLICY_POWERSAVE &&
aspm_policy != POLICY_POWER_SUPERSAVE) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
pcie_aspm_update_sysfs_visibility(pdev);
unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
}
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
struct pcie_link_state *link;
BUG_ON(root->parent);
list_for_each_entry(link, &link_list, sibling) {
if (link->root != root)
continue;
link->aspm_capable = link->aspm_support;
}
list_for_each_entry(link, &link_list, sibling) {
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
if (link->root != root)
continue;
list_for_each_entry(child, &linkbus->devices, bus_list) {
if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
(pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
continue;
pcie_aspm_check_latency(child);
}
}
}
/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
if (!parent || !parent->link_state)
return;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link = parent->link_state;
root = link->root;
parent_link = link->parent;
/*
* link->downstream is a pointer to the pci_dev of function 0. If
* we remove that function, the pci_dev is about to be deallocated,
* so we can't use link->downstream again. Free the link state to
* avoid this.
*
* If we're removing a non-0 function, it's possible we could
* retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
* programming the same ASPM Control value for all functions of
* multi-function devices, so disable ASPM for all of them.
*/
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
free_link_state(link);
/* Recheck latencies and configure upstream links */
if (parent_link) {
pcie_update_aspm_capable(root);
pcie_config_aspm_path(parent_link);
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
if (aspm_disabled || !link)
return;
if (aspm_policy != POLICY_POWERSAVE &&
aspm_policy != POLICY_POWER_SUPERSAVE)
return;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
struct pci_dev *bridge;
if (!pci_is_pcie(pdev))
return NULL;
bridge = pci_upstream_bridge(pdev);
if (!bridge || !pci_is_pcie(bridge))
return NULL;
return bridge->link_state;
}
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
if (!link)
return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
* systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
* the _OSC method), we can't honor that request. Windows has
* a similar mechanism using "PciASPMOptOut", which is also
* ignored in this situation.
*/
if (aspm_disabled) {
pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
return -EPERM;
}
if (sem)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
if (state & PCIE_LINK_STATE_L0S)
link->aspm_disable |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
link->aspm_disable |= ASPM_STATE_L1;
if (state & PCIE_LINK_STATE_L1_1)
link->aspm_disable |= ASPM_STATE_L1_1;
if (state & PCIE_LINK_STATE_L1_2)
link->aspm_disable |= ASPM_STATE_L1_2;
if (state & PCIE_LINK_STATE_L1_1_PCIPM)
link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
if (state & PCIE_LINK_STATE_L1_2_PCIPM)
link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
if (state & PCIE_LINK_STATE_CLKPM)
link->clkpm_disable = 1;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
return 0;
}
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
/**
* pci_disable_link_state - Disable device's link state, so the link will
* never enter specific states. Note that if the BIOS didn't grant ASPM
* control to the OS, this does nothing because we can't touch the LNKCTL
* register. Returns 0 or a negative errno.
*
* @pdev: PCI device
* @state: ASPM link state to disable
*/
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
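/*
 * Typical driver usage (a sketch, not taken from any specific driver):
 * a device that cannot tolerate L1 exit latency would call
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
 *
 * early in its probe routine.
 */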
/**
* pci_enable_link_state - Clear and set the default device link state so that
* the link may be allowed to enter the specified states. Note that if the
* BIOS didn't grant ASPM control to the OS, this does nothing because we can't
* touch the LNKCTL register. Also note that this does not enable states
* disabled by pci_disable_link_state(). Return 0 or a negative errno.
*
* @pdev: PCI device
* @state: Mask of ASPM link states to enable
*/
int pci_enable_link_state(struct pci_dev *pdev, int state)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
if (!link)
return -EINVAL;
/*
* A driver requested that ASPM be enabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
* systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
* the _OSC method), we can't honor that request.
*/
if (aspm_disabled) {
pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
return -EPERM;
}
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link->aspm_default = 0;
if (state & PCIE_LINK_STATE_L0S)
link->aspm_default |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
link->aspm_default |= ASPM_STATE_L1;
/* L1 PM substates require L1 */
if (state & PCIE_LINK_STATE_L1_1)
link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1;
if (state & PCIE_LINK_STATE_L1_2)
link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1;
if (state & PCIE_LINK_STATE_L1_1_PCIPM)
link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1;
if (state & PCIE_LINK_STATE_L1_2_PCIPM)
link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
return 0;
}
EXPORT_SYMBOL(pci_enable_link_state);
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
int i;
struct pcie_link_state *link;
if (aspm_disabled)
return -EPERM;
i = sysfs_match_string(policy_str, val);
if (i < 0)
return i;
if (i == aspm_policy)
return 0;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
list_for_each_entry(link, &link_list, sibling) {
pcie_config_aspm_link(link, policy_to_aspm_state(link));
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
return 0;
}
static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (i == aspm_policy)
cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
else
cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
cnt += sprintf(buffer + cnt, "\n");
return cnt;
}
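/*
 * Reading the "policy" parameter then yields the available policies
 * with the active one bracketed, e.g. something like
 * "default performance [powersave] powersupersave" (exact names come
 * from policy_str[]).
 */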
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
NULL, 0644);
/**
* pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
* @pdev: Target device.
*
* Relies on the upstream bridge's link_state being valid. The link_state
* is deallocated only when the last child of the bridge (i.e., @pdev or a
* sibling) is removed, and the caller should be holding a reference to
* @pdev, so this should be safe.
*/
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
if (!link)
return false;
return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
static ssize_t aspm_attr_show_common(struct device *dev,
struct device_attribute *attr,
char *buf, u8 state)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}
static ssize_t aspm_attr_store_common(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len, u8 state)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
bool state_enable;
if (kstrtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
if (state_enable) {
link->aspm_disable &= ~state;
/* need to enable L1 for substates */
if (state & ASPM_STATE_L1SS)
link->aspm_disable &= ~ASPM_STATE_L1;
} else {
link->aspm_disable |= state;
}
pcie_config_aspm_link(link, policy_to_aspm_state(link));
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
return len;
}
#define ASPM_ATTR(_f, _s) \
static ssize_t _f##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
\
static ssize_t _f##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t len) \
{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
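/*
 * For example, ASPM_ATTR(l0s_aspm, L0S) below expands to
 * l0s_aspm_show() and l0s_aspm_store(), both operating on
 * ASPM_STATE_L0S.
 */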
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
ASPM_ATTR(l1_1_aspm, L1_1)
ASPM_ATTR(l1_2_aspm, L1_2)
ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
static ssize_t clkpm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
}
static ssize_t clkpm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
bool state_enable;
if (kstrtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link->clkpm_disable = !state_enable;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
return len;
}
static DEVICE_ATTR_RW(clkpm);
static DEVICE_ATTR_RW(l0s_aspm);
static DEVICE_ATTR_RW(l1_aspm);
static DEVICE_ATTR_RW(l1_1_aspm);
static DEVICE_ATTR_RW(l1_2_aspm);
static DEVICE_ATTR_RW(l1_1_pcipm);
static DEVICE_ATTR_RW(l1_2_pcipm);
static struct attribute *aspm_ctrl_attrs[] = {
&dev_attr_clkpm.attr,
&dev_attr_l0s_aspm.attr,
&dev_attr_l1_aspm.attr,
&dev_attr_l1_1_aspm.attr,
&dev_attr_l1_2_aspm.attr,
&dev_attr_l1_1_pcipm.attr,
&dev_attr_l1_2_pcipm.attr,
NULL
};
static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
static const u8 aspm_state_map[] = {
ASPM_STATE_L0S,
ASPM_STATE_L1,
ASPM_STATE_L1_1,
ASPM_STATE_L1_2,
ASPM_STATE_L1_1_PCIPM,
ASPM_STATE_L1_2_PCIPM,
};
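	/*
	 * Attribute index n follows aspm_ctrl_attrs[]: index 0 is
	 * clkpm; indices 1..6 map to aspm_state_map[n - 1] above.
	 */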
if (aspm_disabled || !link)
return 0;
if (n == 0)
return link->clkpm_capable ? a->mode : 0;
return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}
const struct attribute_group aspm_ctrl_attr_group = {
.name = "link",
.attrs = aspm_ctrl_attrs,
.is_visible = aspm_ctrl_attrs_are_visible,
};
static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
aspm_force = 1;
printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
}
return 1;
}
__setup("pcie_aspm=", pcie_aspm_disable);
void pcie_no_aspm(void)
{
/*
* Disabling ASPM is intended to prevent the kernel from modifying
* existing hardware state, not to clear existing state. To that end:
* (a) set policy to POLICY_DEFAULT in order to avoid changing state
* (b) prevent userspace from changing policy
*/
if (!aspm_force) {
aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
}
}
bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
| linux-master | drivers/pci/pcie/aspm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI Error Disconnect Recover support
* Author: Kuppuswamy Sathyanarayanan <[email protected]>
*
* Copyright (C) 2020 Intel Corp.
*/
#define dev_fmt(fmt) "EDR: " fmt
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include "portdrv.h"
#include "../pci.h"
#define EDR_PORT_DPC_ENABLE_DSM 0x0C
#define EDR_PORT_LOCATE_DSM 0x0D
#define EDR_OST_SUCCESS 0x80
#define EDR_OST_FAILED 0x81
/*
 * _DSM wrapper function to enable DPC
 * @pdev : PCI device structure
 *
 * Returns 0 on success or errno on failure.
*/
static int acpi_enable_dpc(struct pci_dev *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
union acpi_object *obj, argv4, req;
int status = 0;
/*
* Behavior when calling unsupported _DSM functions is undefined,
* so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
req.type = ACPI_TYPE_INTEGER;
req.integer.value = 1;
argv4.type = ACPI_TYPE_PACKAGE;
argv4.package.count = 1;
argv4.package.elements = &req;
/*
* Per Downstream Port Containment Related Enhancements ECN to PCI
* Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
* optional. Return success if it's not implemented.
*/
obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
if (obj->type != ACPI_TYPE_INTEGER) {
pci_err(pdev, FW_BUG "Enable DPC _DSM returned non integer\n");
status = -EIO;
}
if (obj->integer.value != 1) {
pci_err(pdev, "Enable DPC _DSM failed to enable DPC\n");
status = -EIO;
}
ACPI_FREE(obj);
return status;
}
/*
* _DSM wrapper function to locate DPC port
* @pdev : Device which received EDR event
*
* Returns pci_dev or NULL. Caller is responsible for dropping a reference
* on the returned pci_dev with pci_dev_put().
*/
static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
union acpi_object *obj;
u16 port;
/*
* Behavior when calling unsupported _DSM functions is undefined,
	 * so check whether EDR_PORT_LOCATE_DSM is supported.
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
return pci_dev_get(pdev);
obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
EDR_PORT_LOCATE_DSM, NULL);
if (!obj)
return pci_dev_get(pdev);
if (obj->type != ACPI_TYPE_INTEGER) {
ACPI_FREE(obj);
pci_err(pdev, FW_BUG "Locate Port _DSM returned non integer\n");
return NULL;
}
/*
	 * Firmware returns DPC port BDF details in the following format:
* 15:8 = bus
* 7:3 = device
* 2:0 = function
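	 *
	 * For example, a returned value of 0x1510 decodes to bus 0x15,
	 * device 2, function 0 (devfn 0x10).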
*/
port = obj->integer.value;
ACPI_FREE(obj);
return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
PCI_BUS_NUM(port), port & 0xff);
}
/*
* _OST wrapper function to let firmware know the status of EDR event
* @pdev : Device used to send _OST
* @edev : Device which experienced EDR event
* @status : Status of EDR event
*/
static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev,
u16 status)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
u32 ost_status;
pci_dbg(pdev, "Status for %s: %#x\n", pci_name(edev), status);
ost_status = PCI_DEVID(edev->bus->number, edev->devfn) << 16;
ost_status |= status;
status = acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_DISCONNECT_RECOVER,
ost_status, NULL);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void edr_handle_event(acpi_handle handle, u32 event, void *data)
{
struct pci_dev *pdev = data, *edev;
pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
u16 status;
if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
return;
/*
* pdev is a Root Port or Downstream Port that is still present and
* has triggered a containment event, e.g., DPC, so its child
* devices have been disconnected (ACPI r6.5, sec 5.6.6).
*/
pci_info(pdev, "EDR event received\n");
/*
* Locate the port that experienced the containment event. pdev
* may be that port or a parent of it (PCI Firmware r3.3, sec
* 4.6.13).
*/
edev = acpi_dpc_port_get(pdev);
if (!edev) {
pci_err(pdev, "Firmware failed to locate DPC port\n");
return;
}
pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev));
/* If port does not support DPC, just send the OST */
if (!edev->dpc_cap) {
pci_err(edev, FW_BUG "This device doesn't support DPC\n");
goto send_ost;
}
/* Check if there is a valid DPC trigger */
pci_read_config_word(edev, edev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
pci_err(edev, "Invalid DPC trigger %#010x\n", status);
goto send_ost;
}
dpc_process_error(edev);
pci_aer_raw_clear_status(edev);
/*
* Irrespective of whether the DPC event is triggered by ERR_FATAL
* or ERR_NONFATAL, since the link is already down, use the FATAL
* error recovery path for both cases.
*/
estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link);
send_ost:
/*
* If recovery is successful, send _OST(0xF, BDF << 16 | 0x80)
* to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81).
*/
if (estate == PCI_ERS_RESULT_RECOVERED) {
pci_dbg(edev, "DPC port successfully recovered\n");
pcie_clear_device_status(edev);
acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
} else {
pci_dbg(edev, "DPC port recovery failed\n");
acpi_send_edr_status(pdev, edev, EDR_OST_FAILED);
}
pci_dev_put(edev);
}
void pci_acpi_add_edr_notifier(struct pci_dev *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
acpi_status status;
if (!adev) {
pci_dbg(pdev, "No valid ACPI node, skipping EDR init\n");
return;
}
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
edr_handle_event, pdev);
if (ACPI_FAILURE(status)) {
pci_err(pdev, "Failed to install notify handler\n");
return;
}
if (acpi_enable_dpc(pdev))
acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
edr_handle_event);
else
pci_dbg(pdev, "Notify handler installed\n");
}
void pci_acpi_remove_edr_notifier(struct pci_dev *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
if (!adev)
return;
acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
edr_handle_event);
pci_dbg(pdev, "Notify handler removed\n");
}
| linux-master | drivers/pci/pcie/edr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Purpose: PCI Express Port Bus Driver
*
* Copyright (C) 2004 Intel
* Copyright (C) Tom Long Nguyen ([email protected])
*/
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/aer.h>
#include "../pci.h"
#include "portdrv.h"
/*
* The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
* be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI
* supports a maximum of 32 vectors per function.
*/
#define PCIE_PORT_MAX_MSI_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
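/*
 * For example, a Root Port has PCIe type PCI_EXP_TYPE_ROOT_PORT (4),
 * so get_descriptor_id() reduces to the service mask itself; only
 * other port types contribute a non-zero upper byte.
 */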
struct portdrv_service_data {
struct pcie_port_service_driver *drv;
struct device *dev;
u32 service;
};
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
*
* Invoked automatically when device is being removed in response to
* device_unregister(dev). Release all resources being claimed.
*/
static void release_pcie_device(struct device *dev)
{
kfree(to_pcie_device(dev));
}
/*
* Fill in *pme, *aer, *dpc with the relevant Interrupt Message Numbers if
* services are enabled in "mask". Return the number of MSI/MSI-X vectors
* required to accommodate the largest Message Number.
*/
static int pcie_message_numbers(struct pci_dev *dev, int mask,
u32 *pme, u32 *aer, u32 *dpc)
{
u32 nvec = 0, pos;
u16 reg16;
/*
* The Interrupt Message Number indicates which vector is used, i.e.,
* the MSI-X table entry or the MSI offset between the base Message
* Data and the generated interrupt message. See PCIe r3.1, sec
* 7.8.2, 7.10.10, 7.31.2.
*/
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
PCIE_PORT_SERVICE_BWNOTIF)) {
		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
nvec = *pme + 1;
}
#ifdef CONFIG_PCIEAER
if (mask & PCIE_PORT_SERVICE_AER) {
u32 reg32;
pos = dev->aer_cap;
if (pos) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
					      &reg32);
*aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
nvec = max(nvec, *aer + 1);
}
}
#endif
if (mask & PCIE_PORT_SERVICE_DPC) {
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
if (pos) {
pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
					     &reg16);
*dpc = reg16 & PCI_EXP_DPC_IRQ;
nvec = max(nvec, *dpc + 1);
}
}
return nvec;
}
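/*
 * For example (hypothetical numbers), if the PME Interrupt Message
 * Number is 0 and the AER one is 5, nvec ends up as
 * max(0 + 1, 5 + 1) = 6, so vectors 0..5 must be allocated.
 */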
/**
* pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
* for given port
* @dev: PCI Express port to handle
* @irqs: Array of interrupt vectors to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: 0 on success, error code on failure
*/
static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
{
int nr_entries, nvec, pcie_irq;
u32 pme = 0, aer = 0, dpc = 0;
/* Allocate the maximum possible number of MSI/MSI-X vectors */
nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
/* See how many and which Interrupt Message Numbers we actually use */
nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
if (nvec > nr_entries) {
pci_free_irq_vectors(dev);
return -EIO;
}
/*
* If we allocated more than we need, free them and reallocate fewer.
*
* Reallocating may change the specific vectors we get, so
* pci_irq_vector() must be done *after* the reallocation.
*
* If we're using MSI, hardware is *allowed* to change the Interrupt
* Message Numbers when we free and reallocate the vectors, but we
* assume it won't because we allocate enough vectors for the
* biggest Message Number we found.
*/
if (nvec != nr_entries) {
pci_free_irq_vectors(dev);
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
/* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
PCIE_PORT_SERVICE_BWNOTIF)) {
pcie_irq = pci_irq_vector(dev, pme);
irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
}
if (mask & PCIE_PORT_SERVICE_AER)
irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);
if (mask & PCIE_PORT_SERVICE_DPC)
irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);
return 0;
}
/**
* pcie_init_service_irqs - initialize irqs for PCI Express port services
* @dev: PCI Express port to handle
* @irqs: Array of irqs to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: Interrupt mode associated with the port
*/
static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = -1;
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
* interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
goto legacy_irq;
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
legacy_irq:
/* fall back to legacy IRQ */
ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
if (ret < 0)
return -ENODEV;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = pci_irq_vector(dev, 0);
return 0;
}
/**
* get_port_device_capability - discover capabilities of a PCI Express port
* @dev: PCI Express port to examine
*
* The capabilities are read from the port's PCI Express configuration registers
* as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and
* 7.9 - 7.11.
*
* Return value: Bitmask of discovered port capabilities
*/
static int get_port_device_capability(struct pci_dev *dev)
{
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
if (dev->is_hotplug_bridge &&
(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
(pcie_ports_native || host->native_pcie_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
/*
* Disable hot-plug interrupts in case they have been enabled
* by the BIOS and the hot-plug service driver is not loaded.
*/
pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
#ifdef CONFIG_PCIEAER
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) &&
dev->aer_cap && pci_aer_available() &&
(pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
#endif
/* Root Ports and Root Complex Event Collectors may generate PMEs */
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) &&
(pcie_ports_native || host->native_pme)) {
services |= PCIE_PORT_SERVICE_PME;
/*
* Disable PME interrupt on this port in case it's been enabled
* by the BIOS (the PME service driver will enable it when
* necessary).
*/
pcie_pme_interrupt_enable(dev, false);
}
/*
* With dpc-native, allow Linux to use DPC even if it doesn't have
* permission to use AER.
*/
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
pci_aer_available() &&
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
if (linkcap & PCI_EXP_LNKCAP_LBNC)
services |= PCIE_PORT_SERVICE_BWNOTIF;
}
return services;
}
/**
* pcie_device_init - allocate and initialize PCI Express port service device
* @pdev: PCI Express port to associate the service device with
* @service: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
*/
static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
{
int retval;
struct pcie_device *pcie;
struct device *device;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->port = pdev;
pcie->irq = irq;
pcie->service = service;
/* Initialize generic device interface */
device = &pcie->device;
device->bus = &pcie_port_bus_type;
device->release = release_pcie_device; /* callback to free pcie dev */
dev_set_name(device, "%s:pcie%03x",
pci_name(pdev),
get_descriptor_id(pci_pcie_type(pdev), service));
device->parent = &pdev->dev;
device_enable_async_suspend(device);
retval = device_register(device);
if (retval) {
put_device(device);
return retval;
}
pm_runtime_no_callbacks(device);
return 0;
}
/**
* pcie_port_device_register - register PCI Express port
* @dev: PCI Express port to register
*
* Allocate the port extension structure and register services associated with
* the port.
*/
static int pcie_port_device_register(struct pci_dev *dev)
{
int status, capabilities, i, nr_service;
int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
/* Enable PCI Express port device */
status = pci_enable_device(dev);
if (status)
return status;
/* Get and check PCI Express port services */
capabilities = get_port_device_capability(dev);
if (!capabilities)
return 0;
pci_set_master(dev);
/*
* Initialize service irqs. Don't use service devices that
* require interrupts if there is no way to generate them.
* However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
* that can be used in the absence of irqs. Allow them to determine
* if that is to be used.
*/
status = pcie_init_service_irqs(dev, irqs, capabilities);
if (status) {
capabilities &= PCIE_PORT_SERVICE_HP;
if (!capabilities)
goto error_disable;
}
/* Allocate child services if any */
status = -ENODEV;
nr_service = 0;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
int service = 1 << i;
if (!(capabilities & service))
continue;
if (!pcie_device_init(dev, service, irqs[i]))
nr_service++;
}
if (!nr_service)
goto error_cleanup_irqs;
return 0;
error_cleanup_irqs:
pci_free_irq_vectors(dev);
error_disable:
pci_disable_device(dev);
return status;
}
typedef int (*pcie_callback_t)(struct pcie_device *);
static int pcie_port_device_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
size_t offset = *(size_t *)data;
pcie_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
cb = *(pcie_callback_t *)((void *)service_driver + offset);
if (cb)
return cb(to_pcie_device(dev));
}
return 0;
}
#ifdef CONFIG_PM
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
static int pcie_port_device_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, suspend);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
static int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
* pcie_port_device_resume - resume port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
static int pcie_port_device_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
* pcie_port_device_runtime_suspend - runtime suspend port services
* @dev: PCI Express port to handle
*/
static int pcie_port_device_runtime_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
* pcie_port_device_runtime_resume - runtime resume port services
* @dev: PCI Express port to handle
*/
static int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
return device_for_each_child(dev, &off, pcie_port_device_iter);
}
#endif /* PM */
static int remove_iter(struct device *dev, void *data)
{
if (dev->bus == &pcie_port_bus_type)
device_unregister(dev);
return 0;
}
static int find_service_iter(struct device *device, void *data)
{
struct pcie_port_service_driver *service_driver;
struct portdrv_service_data *pdrvs;
u32 service;
pdrvs = (struct portdrv_service_data *) data;
service = pdrvs->service;
if (device->bus == &pcie_port_bus_type && device->driver) {
service_driver = to_service_driver(device->driver);
if (service_driver->service == service) {
pdrvs->drv = service_driver;
pdrvs->dev = device;
return 1;
}
}
return 0;
}
/**
* pcie_port_find_device - find the struct device
* @dev: PCI Express port the service is associated with
* @service: For the service to find
*
* Find the struct device associated with given service on a pci_dev
*/
struct device *pcie_port_find_device(struct pci_dev *dev,
u32 service)
{
struct device *device;
struct portdrv_service_data pdrvs;
pdrvs.dev = NULL;
pdrvs.service = service;
device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
device = pdrvs.dev;
return device;
}
EXPORT_SYMBOL_GPL(pcie_port_find_device);
/**
* pcie_port_device_remove - unregister PCI Express port service devices
* @dev: PCI Express port the service devices to unregister are associated with
*
* Remove PCI Express port service devices associated with given port and
* disable MSI-X or MSI for the port.
*/
static void pcie_port_device_remove(struct pci_dev *dev)
{
device_for_each_child(&dev->dev, NULL, remove_iter);
pci_free_irq_vectors(dev);
}
/**
* pcie_port_probe_service - probe driver for given PCI Express port service
* @dev: PCI Express port service device to probe against
*
* If PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* whenever match is found between the driver and a port service device.
*/
static int pcie_port_probe_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
int status;
if (!dev || !dev->driver)
return -ENODEV;
driver = to_service_driver(dev->driver);
if (!driver || !driver->probe)
return -ENODEV;
pciedev = to_pcie_device(dev);
status = driver->probe(pciedev);
if (status)
return status;
get_device(dev);
return 0;
}
/**
* pcie_port_remove_service - detach driver from given PCI Express port service
* @dev: PCI Express port service device to handle
*
* If PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* when device_unregister() is called for the port service device associated
* with the driver.
*/
static int pcie_port_remove_service(struct device *dev)
{
struct pcie_device *pciedev;
struct pcie_port_service_driver *driver;
if (!dev || !dev->driver)
return 0;
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
driver->remove(pciedev);
put_device(dev);
}
return 0;
}
/**
* pcie_port_shutdown_service - shut down given PCI Express port service
* @dev: PCI Express port service device to handle
*
* If PCI Express port service driver is registered with
* pcie_port_service_register(), this function will be called by the driver core
* when device_shutdown() is called for the port service device associated
* with the driver.
*/
static void pcie_port_shutdown_service(struct device *dev) {}
/**
* pcie_port_service_register - register PCI Express port service driver
* @new: PCI Express port service driver to register
*/
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
if (pcie_ports_disabled)
return -ENODEV;
new->driver.name = new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
new->driver.remove = pcie_port_remove_service;
new->driver.shutdown = pcie_port_shutdown_service;
return driver_register(&new->driver);
}
/**
* pcie_port_service_unregister - unregister PCI Express port service driver
* @drv: PCI Express port service driver to unregister
*/
void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
driver_unregister(&drv->driver);
}
/* If this switch is set, PCIe port native services should not be enabled. */
bool pcie_ports_disabled;
/*
* If the user specified "pcie_ports=native", use the PCIe services regardless
* of whether the platform has given us permission. On ACPI systems, this
* means we ignore _OSC.
*/
bool pcie_ports_native;
/*
* If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe
* service even if the platform hasn't given us permission.
*/
bool pcie_ports_dpc_native;
static int __init pcie_port_setup(char *str)
{
if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
else if (!strncmp(str, "native", 6))
pcie_ports_native = true;
else if (!strncmp(str, "dpc-native", 10))
pcie_ports_dpc_native = true;
return 1;
}
__setup("pcie_ports=", pcie_port_setup);
/* global data */
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
if (!to_pci_dev(dev)->bridge_d3)
return -EBUSY;
return pcie_port_device_runtime_suspend(dev);
}
static int pcie_port_runtime_idle(struct device *dev)
{
/*
* Assume the PCI core has set bridge_d3 whenever it thinks the port
* should be good to go to D3. Everything else, including moving
* the port to D3, is handled by the PCI core.
*/
return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
}
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume_noirq = pcie_port_device_resume_noirq,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_device_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
#else /* !PM */
#define PCIE_PORTDRV_PM_OPS NULL
#endif /* !PM */
/*
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
*
 * If detected, invokes pcie_port_device_register() for this port
 * device.
 */
static int pcie_portdrv_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int type = pci_pcie_type(dev);
int status;
if (!pci_is_pcie(dev) ||
((type != PCI_EXP_TYPE_ROOT_PORT) &&
(type != PCI_EXP_TYPE_UPSTREAM) &&
(type != PCI_EXP_TYPE_DOWNSTREAM) &&
(type != PCI_EXP_TYPE_RC_EC)))
return -ENODEV;
if (type == PCI_EXP_TYPE_RC_EC)
pcie_link_rcec(dev);
status = pcie_port_device_register(dev);
if (status)
return status;
pci_save_state(dev);
dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE |
DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
* config space accesses from userspace (lspci) will not
* cause the port to repeatedly suspend and resume.
*/
pm_runtime_set_autosuspend_delay(&dev->dev, 100);
pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_mark_last_busy(&dev->dev);
pm_runtime_put_autosuspend(&dev->dev);
pm_runtime_allow(&dev->dev);
}
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
}
pcie_port_device_remove(dev);
pci_disable_device(dev);
}
static void pcie_portdrv_shutdown(struct pci_dev *dev)
{
if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
}
pcie_port_device_remove(dev);
}
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
pci_channel_state_t error)
{
if (error == pci_channel_io_frozen)
return PCI_ERS_RESULT_NEED_RESET;
return PCI_ERS_RESULT_CAN_RECOVER;
}
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, slot_reset);
device_for_each_child(&dev->dev, &off, pcie_port_device_iter);
pci_restore_state(dev);
pci_save_state(dev);
return PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
return PCI_ERS_RESULT_RECOVERED;
}
/*
* LINUX Device Driver Model
*/
static const struct pci_device_id port_pci_ids[] = {
/* handle any PCI-Express port */
{ PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) },
/* subtractive decode PCI-to-PCI bridge, class type is 060401h */
{ PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
/* handle any Root Complex Event Collector */
{ PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ },
};
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
};
static struct pci_driver pcie_portdriver = {
.name = "pcieport",
.id_table = &port_pci_ids[0],
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
.shutdown = pcie_portdrv_shutdown,
.err_handler = &pcie_portdrv_err_handler,
.driver_managed_dma = true,
.driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
{
pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
d->ident);
pcie_pme_disable_msi();
return 0;
}
static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
/*
* Boxes that should not use MSI for PCIe PME signaling.
*/
{
.callback = dmi_pcie_pme_disable_msi,
.ident = "MSI Wind U-100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
},
},
{}
};
static void __init pcie_init_services(void)
{
pcie_aer_init();
pcie_pme_init();
pcie_dpc_init();
pcie_hp_init();
}
static int __init pcie_portdrv_init(void)
{
if (pcie_ports_disabled)
return -EACCES;
pcie_init_services();
dmi_check_system(pcie_portdrv_dmi_table);
return pci_register_driver(&pcie_portdriver);
}
device_initcall(pcie_portdrv_init);
| linux-master | drivers/pci/pcie/portdrv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI watchdog table parsing support.
*
* Copyright (C) 2016, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: watchdog: " fmt
#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "internal.h"
#ifdef CONFIG_RTC_MC146818_LIB
#include <linux/mc146818rtc.h>
/*
* On several systems the WDAT table accesses RTC SRAM to store
* persistent information. This does not work well with the Linux RTC
* driver, so on those systems we skip the WDAT driver and prefer
* iTCO_wdt instead.
*
* See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
*/
static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
{
const struct acpi_wdat_entry *entries;
int i;
entries = (struct acpi_wdat_entry *)(wdat + 1);
for (i = 0; i < wdat->entries; i++) {
const struct acpi_generic_address *gas;
gas = &entries[i].register_region;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
switch (gas->address) {
case RTC_PORT(0):
case RTC_PORT(1):
case RTC_PORT(2):
case RTC_PORT(3):
return true;
}
}
}
return false;
}
#else
static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
{
return false;
}
#endif
static bool acpi_no_watchdog;
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
if (acpi_disabled || acpi_no_watchdog)
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
(struct acpi_table_header **)&wdat);
if (ACPI_FAILURE(status)) {
/* It is fine if there is no WDAT */
return NULL;
}
if (acpi_watchdog_uses_rtc(wdat)) {
acpi_put_table((struct acpi_table_header *)wdat);
pr_info("Skipping WDAT on this system because it uses RTC SRAM\n");
return NULL;
}
return wdat;
}
/**
 * acpi_has_watchdog - Check whether the ACPI watchdog should be preferred
 *
 * Returns true if this system should prefer the ACPI-based watchdog over
 * the native one (they typically drive the same hardware).
 */
bool acpi_has_watchdog(void)
{
return !!acpi_watchdog_get_wdat();
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
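/*
 * Editor's sketch (not part of the original file): a native watchdog
 * driver would typically use acpi_has_watchdog() to defer to the
 * WDAT-based wdat_wdt driver. The probe function below is hypothetical.
 */
static int __maybe_unused example_native_wdt_probe(struct platform_device *pdev)
{
	/* Prefer the firmware-described ACPI watchdog when present. */
	if (acpi_has_watchdog())
		return -ENODEV;

	/* ... native watchdog setup would continue here ... */
	return 0;
}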
/* ACPI watchdog can be disabled on boot command line */
static int __init disable_acpi_watchdog(char *str)
{
acpi_no_watchdog = true;
return 1;
}
__setup("acpi_no_watchdog", disable_acpi_watchdog);
void __init acpi_watchdog_init(void)
{
const struct acpi_wdat_entry *entries;
const struct acpi_table_wdat *wdat;
struct list_head resource_list;
struct resource_entry *rentry;
struct platform_device *pdev;
struct resource *resources;
size_t nresources = 0;
int i;
wdat = acpi_watchdog_get_wdat();
if (!wdat) {
/* It is fine if there is no WDAT */
return;
}
/* Watchdog disabled by BIOS */
if (!(wdat->flags & ACPI_WDAT_ENABLED))
goto fail_put_wdat;
/* Skip legacy PCI WDT devices */
if (wdat->pci_segment != 0xff || wdat->pci_bus != 0xff ||
wdat->pci_device != 0xff || wdat->pci_function != 0xff)
goto fail_put_wdat;
INIT_LIST_HEAD(&resource_list);
entries = (struct acpi_wdat_entry *)(wdat + 1);
for (i = 0; i < wdat->entries; i++) {
const struct acpi_generic_address *gas;
struct resource_entry *rentry;
struct resource res = {};
bool found;
gas = &entries[i].register_region;
res.start = gas->address;
res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
goto fail_free_resource_list;
}
found = false;
resource_list_for_each_entry(rentry, &resource_list) {
if (rentry->res->flags == res.flags &&
resource_union(rentry->res, &res, rentry->res)) {
found = true;
break;
}
}
if (!found) {
rentry = resource_list_create_entry(NULL, 0);
if (!rentry)
goto fail_free_resource_list;
*rentry->res = res;
resource_list_add_tail(rentry, &resource_list);
nresources++;
}
}
resources = kcalloc(nresources, sizeof(*resources), GFP_KERNEL);
if (!resources)
goto fail_free_resource_list;
i = 0;
resource_list_for_each_entry(rentry, &resource_list)
resources[i++] = *rentry->res;
pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
resources, nresources);
if (IS_ERR(pdev))
pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
kfree(resources);
fail_free_resource_list:
resource_list_free(&resource_list);
fail_put_wdat:
acpi_put_table((struct acpi_table_header *)wdat);
}
| linux-master | drivers/acpi/acpi_watchdog.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IOAPIC/IOxAPIC/IOSAPIC driver
*
* Copyright (C) 2009 Fujitsu Limited.
* (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2014 Intel Corporation
*
* Based on original drivers/pci/ioapic.c
* Yinghai Lu <[email protected]>
* Jiang Liu <[email protected]>
*/
/*
* This driver manages I/O APICs added by hotplug after boot.
* We try to claim all I/O APIC devices, but those present at boot were
* registered when we parsed the ACPI MADT.
*/
#define pr_fmt(fmt) "ACPI: IOAPIC: " fmt
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <acpi/acpi.h>
#include "internal.h"
struct acpi_pci_ioapic {
acpi_handle root_handle;
acpi_handle handle;
u32 gsi_base;
struct resource res;
struct pci_dev *pdev;
struct list_head list;
};
static LIST_HEAD(ioapic_list);
static DEFINE_MUTEX(ioapic_list_lock);
static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
{
struct resource *res = data;
struct resource_win win;
/*
* We might assign this to 'res' later; make sure all pointers are
* cleared before the resource is added to the global list.
*/
memset(&win, 0, sizeof(win));
res->flags = 0;
if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
return AE_OK;
if (!acpi_dev_resource_memory(acpi_res, res)) {
if (acpi_dev_resource_address_space(acpi_res, &win) ||
acpi_dev_resource_ext_address_space(acpi_res, &win))
*res = win.res;
}
if ((res->flags & IORESOURCE_PREFETCH) ||
(res->flags & IORESOURCE_DISABLED))
res->flags = 0;
return AE_CTRL_TERMINATE;
}
static bool acpi_is_ioapic(acpi_handle handle, char **type)
{
acpi_status status;
struct acpi_device_info *info;
char *hid = NULL;
bool match = false;
if (!acpi_has_method(handle, "_GSB"))
return false;
status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status)) {
if (info->valid & ACPI_VALID_HID)
hid = info->hardware_id.string;
if (hid) {
if (strcmp(hid, "ACPI0009") == 0) {
*type = "IOxAPIC";
match = true;
} else if (strcmp(hid, "ACPI000A") == 0) {
*type = "IOAPIC";
match = true;
}
}
kfree(info);
}
return match;
}
static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
acpi_status status;
unsigned long long gsi_base;
struct acpi_pci_ioapic *ioapic;
struct pci_dev *dev = NULL;
struct resource *res = NULL, *pci_res = NULL, *crs_res;
char *type = NULL;
if (!acpi_is_ioapic(handle, &type))
return AE_OK;
mutex_lock(&ioapic_list_lock);
list_for_each_entry(ioapic, &ioapic_list, list)
if (ioapic->handle == handle) {
mutex_unlock(&ioapic_list_lock);
return AE_OK;
}
status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
goto exit;
}
ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
if (!ioapic) {
pr_err("cannot allocate memory for new IOAPIC\n");
goto exit;
} else {
ioapic->root_handle = (acpi_handle)context;
ioapic->handle = handle;
ioapic->gsi_base = (u32)gsi_base;
INIT_LIST_HEAD(&ioapic->list);
}
if (acpi_ioapic_registered(handle, (u32)gsi_base))
goto done;
dev = acpi_get_pci_dev(handle);
if (dev && pci_resource_len(dev, 0)) {
if (pci_enable_device(dev) < 0)
goto exit_put;
pci_set_master(dev);
if (pci_request_region(dev, 0, type))
goto exit_disable;
pci_res = &dev->resource[0];
ioapic->pdev = dev;
} else {
pci_dev_put(dev);
dev = NULL;
}
crs_res = &ioapic->res;
acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, crs_res);
crs_res->name = type;
/* setup_res() leaves flags at 0 when no usable resource was found */
if (crs_res->flags == 0) {
acpi_handle_warn(handle, "failed to get resource\n");
goto exit_release;
}
crs_res->flags |= IORESOURCE_BUSY;
if (insert_resource(&iomem_resource, crs_res)) {
acpi_handle_warn(handle, "failed to insert resource\n");
goto exit_release;
}
/* try pci resource first, then "_CRS" resource */
res = pci_res;
if (!res || !res->flags)
res = crs_res;
if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
acpi_handle_warn(handle, "failed to register IOAPIC\n");
goto exit_release;
}
done:
list_add(&ioapic->list, &ioapic_list);
mutex_unlock(&ioapic_list_lock);
if (dev)
dev_info(&dev->dev, "%s at %pR, GSI %u\n",
type, res, (u32)gsi_base);
else
acpi_handle_info(handle, "%s at %pR, GSI %u\n",
type, res, (u32)gsi_base);
return AE_OK;
exit_release:
if (dev)
pci_release_region(dev, 0);
if (ioapic->res.flags && ioapic->res.parent)
release_resource(&ioapic->res);
exit_disable:
if (dev)
pci_disable_device(dev);
exit_put:
pci_dev_put(dev);
kfree(ioapic);
exit:
mutex_unlock(&ioapic_list_lock);
*(acpi_status *)rv = AE_ERROR;
return AE_OK;
}
int acpi_ioapic_add(acpi_handle root_handle)
{
acpi_status status, retval = AE_OK;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root_handle,
UINT_MAX, handle_ioapic_add, NULL,
root_handle, (void **)&retval);
return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
}
void pci_ioapic_remove(struct acpi_pci_root *root)
{
struct acpi_pci_ioapic *ioapic, *tmp;
mutex_lock(&ioapic_list_lock);
list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
if (root->device->handle != ioapic->root_handle)
continue;
if (ioapic->pdev) {
pci_release_region(ioapic->pdev, 0);
pci_disable_device(ioapic->pdev);
pci_dev_put(ioapic->pdev);
}
}
mutex_unlock(&ioapic_list_lock);
}
int acpi_ioapic_remove(struct acpi_pci_root *root)
{
int retval = 0;
struct acpi_pci_ioapic *ioapic, *tmp;
mutex_lock(&ioapic_list_lock);
list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
if (root->device->handle != ioapic->root_handle)
continue;
if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
retval = -EBUSY;
if (ioapic->res.flags && ioapic->res.parent)
release_resource(&ioapic->res);
list_del(&ioapic->list);
kfree(ioapic);
}
mutex_unlock(&ioapic_list_lock);
return retval;
}
| linux-master | drivers/acpi/ioapic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* custom_method.c - debugfs interface for customizing ACPI control method
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/security.h>
#include "internal.h"
MODULE_LICENSE("GPL");
static struct dentry *cm_dentry;
/* /sys/kernel/debug/acpi/custom_method */
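/*
 * Typical usage from userspace (see
 * Documentation/firmware-guide/acpi/method-customizing.rst):
 *
 *   # cat Method.aml > /sys/kernel/debug/acpi/custom_method
 *
 * where Method.aml is a compiled AML blob starting with a full
 * struct acpi_table_header; cm_write() below sizes its buffer from
 * that header's length field.
 */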
static ssize_t cm_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
int ret;
ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
if (ret)
return ret;
if (!(*ppos)) {
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
return -EINVAL;
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
/* free any buffer left over from a previous partial write */
kfree(buf);
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (buf == NULL)
return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes)) {
kfree(buf);
buf = NULL;
return -EINVAL;
}
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
uncopied_bytes -= count;
*ppos += count;
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
return count;
}
static const struct file_operations cm_fops = {
.write = cm_write,
.llseek = default_llseek,
};
static int __init acpi_custom_method_init(void)
{
cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_debugfs_dir, NULL, &cm_fops);
return 0;
}
static void __exit acpi_custom_method_exit(void)
{
debugfs_remove(cm_dentry);
}
module_init(acpi_custom_method_init);
module_exit(acpi_custom_method_exit);
| linux-master | drivers/acpi/custom_method.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SMBus driver for ACPI Embedded Controller (v0.1)
*
* Copyright (c) 2007 Alexey Starikovskiy
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
struct acpi_smb_hc {
struct acpi_ec *ec;
struct mutex lock;
wait_queue_head_t wait;
u8 offset;
u8 query_bit;
smbus_alarm_callback callback;
void *context;
bool done;
};
static int acpi_smbus_hc_add(struct acpi_device *device);
static void acpi_smbus_hc_remove(struct acpi_device *device);
static const struct acpi_device_id sbs_device_ids[] = {
{"ACPI0001", 0},
{"ACPI0005", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
static struct acpi_driver acpi_smb_hc_driver = {
.name = "smbus_hc",
.class = ACPI_SMB_HC_CLASS,
.ids = sbs_device_ids,
.ops = {
.add = acpi_smbus_hc_add,
.remove = acpi_smbus_hc_remove,
},
};
union acpi_smb_status {
u8 raw;
struct {
u8 status:5;
u8 reserved:1;
u8 alarm:1;
u8 done:1;
} fields;
};
enum acpi_smb_status_codes {
SMBUS_OK = 0,
SMBUS_UNKNOWN_FAILURE = 0x07,
SMBUS_DEVICE_ADDRESS_NACK = 0x10,
SMBUS_DEVICE_ERROR = 0x11,
SMBUS_DEVICE_COMMAND_ACCESS_DENIED = 0x12,
SMBUS_UNKNOWN_ERROR = 0x13,
SMBUS_DEVICE_ACCESS_DENIED = 0x17,
SMBUS_TIMEOUT = 0x18,
SMBUS_HOST_UNSUPPORTED_PROTOCOL = 0x19,
SMBUS_BUSY = 0x1a,
SMBUS_PEC_ERROR = 0x1f,
};
enum acpi_smb_offset {
ACPI_SMB_PROTOCOL = 0, /* protocol, PEC */
ACPI_SMB_STATUS = 1, /* status */
ACPI_SMB_ADDRESS = 2, /* address */
ACPI_SMB_COMMAND = 3, /* command */
ACPI_SMB_DATA = 4, /* 32 data registers */
ACPI_SMB_BLOCK_COUNT = 0x24, /* number of data bytes */
ACPI_SMB_ALARM_ADDRESS = 0x25, /* alarm address */
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
};
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
{
return ec_read(hc->offset + address, data);
}
static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
{
return ec_write(hc->offset + address, data);
}
static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
{
if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
return 0;
return -ETIME;
}
static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
u8 address, u8 command, u8 *data, u8 length)
{
int ret = -EFAULT, i;
u8 temp, sz = 0;
if (!hc) {
pr_err("host controller is not configured\n");
return ret;
}
mutex_lock(&hc->lock);
hc->done = false;
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
goto end;
if (temp) {
ret = -EBUSY;
goto end;
}
smb_hc_write(hc, ACPI_SMB_COMMAND, command);
if (!(protocol & 0x01)) {
smb_hc_write(hc, ACPI_SMB_BLOCK_COUNT, length);
for (i = 0; i < length; ++i)
smb_hc_write(hc, ACPI_SMB_DATA + i, data[i]);
}
smb_hc_write(hc, ACPI_SMB_ADDRESS, address << 1);
smb_hc_write(hc, ACPI_SMB_PROTOCOL, protocol);
/*
* Wait for completion. Save the status code, data size,
* and data into the return package (if required by the protocol).
*/
ret = wait_transaction_complete(hc, 1000);
if (ret || !(protocol & 0x01))
goto end;
switch (protocol) {
case SMBUS_RECEIVE_BYTE:
case SMBUS_READ_BYTE:
sz = 1;
break;
case SMBUS_READ_WORD:
sz = 2;
break;
case SMBUS_READ_BLOCK:
if (smb_hc_read(hc, ACPI_SMB_BLOCK_COUNT, &sz)) {
ret = -EFAULT;
goto end;
}
sz &= 0x1f;
break;
}
for (i = 0; i < sz; ++i)
smb_hc_read(hc, ACPI_SMB_DATA + i, &data[i]);
end:
mutex_unlock(&hc->lock);
return ret;
}
int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 *data)
{
return acpi_smbus_transaction(hc, protocol, address, command, data, 0);
}
EXPORT_SYMBOL_GPL(acpi_smbus_read);
int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
u8 command, u8 *data, u8 length)
{
return acpi_smbus_transaction(hc, protocol, address, command, data, length);
}
EXPORT_SYMBOL_GPL(acpi_smbus_write);
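/*
 * Editor's sketch (hypothetical helper, not part of the original
 * driver): a client such as the ACPI SBS battery driver reads a 16-bit
 * register from a device on the EC-based SMBus roughly like this;
 * SMBUS_READ_WORD and ACPI_SBS_BATTERY come from sbshc.h.
 */
static int __maybe_unused example_read_battery_word(struct acpi_smb_hc *hc,
						    u8 command, u16 *val)
{
	u8 data[2];
	int ret;

	ret = acpi_smbus_read(hc, SMBUS_READ_WORD, ACPI_SBS_BATTERY,
			      command, data);
	if (ret)
		return ret;

	/* SMBus words are little-endian. */
	*val = data[0] | (data[1] << 8);
	return 0;
}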
int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
smbus_alarm_callback callback, void *context)
{
mutex_lock(&hc->lock);
hc->callback = callback;
hc->context = context;
mutex_unlock(&hc->lock);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_smbus_register_callback);
int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
{
mutex_lock(&hc->lock);
hc->callback = NULL;
hc->context = NULL;
mutex_unlock(&hc->lock);
acpi_os_wait_events_complete();
return 0;
}
EXPORT_SYMBOL_GPL(acpi_smbus_unregister_callback);
static inline void acpi_smbus_callback(void *context)
{
struct acpi_smb_hc *hc = context;
if (hc->callback)
hc->callback(hc->context);
}
static int smbus_alarm(void *context)
{
struct acpi_smb_hc *hc = context;
union acpi_smb_status status;
u8 address;
if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
return 0;
/* Check if it is only a completion notify */
if (status.fields.done && status.fields.status == SMBUS_OK) {
hc->done = true;
wake_up(&hc->wait);
}
if (!status.fields.alarm)
return 0;
mutex_lock(&hc->lock);
smb_hc_read(hc, ACPI_SMB_ALARM_ADDRESS, &address);
status.fields.alarm = 0;
smb_hc_write(hc, ACPI_SMB_STATUS, status.raw);
/* We are only interested in events coming from known devices */
switch (address >> 1) {
case ACPI_SBS_CHARGER:
case ACPI_SBS_MANAGER:
case ACPI_SBS_BATTERY:
acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_smbus_callback, hc);
}
mutex_unlock(&hc->lock);
return 0;
}
typedef int (*acpi_ec_query_func) (void *data);
extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
unsigned long long val;
struct acpi_smb_hc *hc;
if (!device)
return -EINVAL;
status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
pr_err("error obtaining _EC.\n");
return -EIO;
}
strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
if (!hc)
return -ENOMEM;
mutex_init(&hc->lock);
init_waitqueue_head(&hc->wait);
hc->ec = acpi_driver_data(acpi_dev_parent(device));
hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff;
device->driver_data = hc;
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
hc->offset, hc->query_bit);
return 0;
}
extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
static void acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
if (!device)
return;
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
acpi_os_wait_events_complete();
kfree(hc);
device->driver_data = NULL;
}
module_acpi_driver(acpi_smb_hc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Starikovskiy");
MODULE_DESCRIPTION("ACPI SMBus HC driver");
| linux-master | drivers/acpi/sbshc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* osi.c - _OSI implementation
*
* Copyright (C) 2016 Intel Corporation
* Author: Lv Zheng <[email protected]>
*/
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/apple.h>
#include "internal.h"
#define OSI_STRING_LENGTH_MAX 64
#define OSI_STRING_ENTRIES_MAX 16
struct acpi_osi_entry {
char string[OSI_STRING_LENGTH_MAX];
bool enable;
};
static struct acpi_osi_config {
u8 default_disabling;
unsigned int linux_enable:1;
unsigned int linux_dmi:1;
unsigned int linux_cmdline:1;
unsigned int darwin_enable:1;
unsigned int darwin_dmi:1;
unsigned int darwin_cmdline:1;
} osi_config;
static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
{"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
if (!strcmp("Linux", interface)) {
pr_notice_once(FW_BUG
"BIOS _OSI(Linux) query %s%s\n",
osi_config.linux_enable ? "honored" : "ignored",
osi_config.linux_cmdline ? " via cmdline" :
osi_config.linux_dmi ? " via DMI" : "");
}
if (!strcmp("Darwin", interface)) {
pr_notice_once(
"BIOS _OSI(Darwin) query %s%s\n",
osi_config.darwin_enable ? "honored" : "ignored",
osi_config.darwin_cmdline ? " via cmdline" :
osi_config.darwin_dmi ? " via DMI" : "");
}
return supported;
}
void __init acpi_osi_setup(char *str)
{
struct acpi_osi_entry *osi;
bool enable = true;
int i;
if (!acpi_gbl_create_osi_method)
return;
if (str == NULL || *str == '\0') {
pr_info("_OSI method disabled\n");
acpi_gbl_create_osi_method = FALSE;
return;
}
if (*str == '!') {
str++;
if (*str == '\0') {
/* Do not override acpi_osi=!* */
if (!osi_config.default_disabling)
osi_config.default_disabling =
ACPI_DISABLE_ALL_VENDOR_STRINGS;
return;
} else if (*str == '*') {
osi_config.default_disabling = ACPI_DISABLE_ALL_STRINGS;
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
osi = &osi_setup_entries[i];
osi->enable = false;
}
return;
} else if (*str == '!') {
osi_config.default_disabling = 0;
return;
}
enable = false;
}
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
osi = &osi_setup_entries[i];
if (!strcmp(osi->string, str)) {
osi->enable = enable;
break;
} else if (osi->string[0] == '\0') {
osi->enable = enable;
strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
break;
}
}
}
static void __init __acpi_osi_setup_darwin(bool enable)
{
osi_config.darwin_enable = !!enable;
if (enable) {
acpi_osi_setup("!");
acpi_osi_setup("Darwin");
} else {
acpi_osi_setup("!!");
acpi_osi_setup("!Darwin");
}
}
static void __init acpi_osi_setup_darwin(bool enable)
{
/* Override acpi_osi_dmi_blacklisted() */
osi_config.darwin_dmi = 0;
osi_config.darwin_cmdline = 1;
__acpi_osi_setup_darwin(enable);
}
/*
* The story of _OSI(Linux)
*
* From pre-history through Linux-2.6.22, Linux responded TRUE upon a BIOS
* OSI(Linux) query.
*
* Unfortunately, reference BIOS writers got wind of this and put
* OSI(Linux) in their example code, quickly exposing this string as
* ill-conceived and opening the door to an un-bounded number of BIOS
* incompatibilities.
*
* For example, OSI(Linux) was used on resume to re-POST a video card on
* one system, because Linux at that time could not do a speedy restore in
* its native driver. But then upon gaining quick native restore
* capability, Linux has no way to tell the BIOS to skip the time-consuming
* POST -- putting Linux at a permanent performance disadvantage. On
* another system, the BIOS writer used OSI(Linux) to infer native OS
* support for IPMI! On other systems, OSI(Linux) simply got in the way of
* Linux claiming to be compatible with other operating systems, exposing
* BIOS issues such as skipped device initialization.
*
* So "Linux" turned out to be a really poor chose of OSI string, and from
* Linux-2.6.23 onward we respond FALSE.
*
* BIOS writers should NOT query _OSI(Linux) on future systems. Linux will
* complain on the console when it sees it, and return FALSE. To get Linux
* to return TRUE for your system will require a kernel source update to
* add a DMI entry, or boot with "acpi_osi=Linux"
*/
static void __init __acpi_osi_setup_linux(bool enable)
{
osi_config.linux_enable = !!enable;
if (enable)
acpi_osi_setup("Linux");
else
acpi_osi_setup("!Linux");
}
static void __init acpi_osi_setup_linux(bool enable)
{
/* Override acpi_osi_dmi_blacklisted() */
osi_config.linux_dmi = 0;
osi_config.linux_cmdline = 1;
__acpi_osi_setup_linux(enable);
}
/*
* Modify the list of "OS Interfaces" reported to BIOS via _OSI
*
* empty string disables _OSI
* string starting with '!' disables that string
* otherwise string is added to list, augmenting built-in strings
*/
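/*
 * For example (per Documentation/admin-guide/kernel-parameters.txt):
 *
 *   acpi_osi=			disable _OSI entirely
 *   acpi_osi="Windows 2009"	add a string to the list
 *   acpi_osi="!Windows 2012"	disable a specific string
 *   acpi_osi=!*		disable all strings
 *   acpi_osi=!			disable all built-in OS vendor strings
 */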
static void __init acpi_osi_setup_late(void)
{
struct acpi_osi_entry *osi;
char *str;
int i;
acpi_status status;
if (osi_config.default_disabling) {
status = acpi_update_interfaces(osi_config.default_disabling);
if (ACPI_SUCCESS(status))
pr_info("Disabled all _OSI OS vendors%s\n",
osi_config.default_disabling ==
ACPI_DISABLE_ALL_STRINGS ?
" and feature groups" : "");
}
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
osi = &osi_setup_entries[i];
str = osi->string;
if (*str == '\0')
break;
if (osi->enable) {
status = acpi_install_interface(str);
if (ACPI_SUCCESS(status))
pr_info("Added _OSI(%s)\n", str);
} else {
status = acpi_remove_interface(str);
if (ACPI_SUCCESS(status))
pr_info("Deleted _OSI(%s)\n", str);
}
}
}
static int __init osi_setup(char *str)
{
if (str && !strcmp("Linux", str))
acpi_osi_setup_linux(true);
else if (str && !strcmp("!Linux", str))
acpi_osi_setup_linux(false);
else if (str && !strcmp("Darwin", str))
acpi_osi_setup_darwin(true);
else if (str && !strcmp("!Darwin", str))
acpi_osi_setup_darwin(false);
else
acpi_osi_setup(str);
return 1;
}
__setup("acpi_osi=", osi_setup);
bool acpi_osi_is_win8(void)
{
return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);
static void __init acpi_osi_dmi_darwin(void)
{
pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n");
osi_config.darwin_dmi = 1;
__acpi_osi_setup_darwin(true);
}
static void __init acpi_osi_dmi_linux(bool enable,
const struct dmi_system_id *d)
{
pr_notice("DMI detected to setup _OSI(\"Linux\"): %s\n", d->ident);
osi_config.linux_dmi = 1;
__acpi_osi_setup_linux(enable);
}
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
acpi_osi_dmi_linux(true, d);
return 0;
}
static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
{
pr_notice("DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2006");
acpi_osi_setup("!Windows 2006 SP1");
acpi_osi_setup("!Windows 2006 SP2");
return 0;
}
static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
{
pr_notice("DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2009");
return 0;
}
static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
{
pr_notice("DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2012");
return 0;
}
/*
* Linux default _OSI response behavior is determined by this DMI table.
*
* Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
* by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
*/
static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
{
.callback = dmi_disable_osi_vista,
.ident = "Fujitsu Siemens",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
},
},
{
/*
* The MSI GX723 DSDT contains an NVIF method that needs to be
* handled by the Nvidia driver (e.g. nouveau) when the user presses
* the brightness hotkey. nouveau does not do that job yet, which
* results in an infinite while loop in the DSDT when the hotkey is
* pressed. Add the MSI GX723's DMI information to this table to work
* around the issue.
* Will remove MSI GX723 from the table after nouveau grows support.
*/
.callback = dmi_disable_osi_vista,
.ident = "MSI GX723",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Sony VGN-NS10J_S",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Sony VGN-SR290J",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "VGN-NS50B_L",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "VGN-SR19XN",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Toshiba Satellite L355",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
},
},
{
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Toshiba P305D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "Toshiba NB100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
},
},
/*
* The wireless hotkey does not work on those machines when
* returning true for _OSI("Windows 2012")
*/
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 7737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 7537",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 5437",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 3437",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Vostro 3446",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Vostro 3546",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
},
},
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
* Linux ignores it, except for the machines enumerated below.
*/
/*
* Without this the EeePC exports a non-working WMI interface; with
* it, the working "good old" eeepc_laptop interface is exported,
* fixing both brightness control and rfkill.
*/
{
.callback = dmi_enable_osi_linux,
.ident = "Asus EEE PC 1015PX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
{}
};
static __init void acpi_osi_dmi_blacklisted(void)
{
dmi_check_system(acpi_osi_dmi_table);
/* Enable _OSI("Darwin") for Apple platforms. */
if (x86_apple_machine)
acpi_osi_dmi_darwin();
}
int __init early_acpi_osi_init(void)
{
acpi_osi_dmi_blacklisted();
return 0;
}
int __init acpi_osi_init(void)
{
acpi_install_interface_handler(acpi_osi_handler);
acpi_osi_setup_late();
return 0;
}
| linux-master | drivers/acpi/osi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BGRT boot graphic support
* Authors: Matthew Garrett, Josh Triplett <[email protected]>
* Copyright 2012 Red Hat, Inc <[email protected]>
* Copyright 2012 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/efi-bgrt.h>
static void *bgrt_image;
static struct kobject *bgrt_kobj;
#define BGRT_SHOW(_name, _member) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return sysfs_emit(buf, "%d\n", bgrt_tab._member); \
} \
static struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
BGRT_SHOW(version, version);
BGRT_SHOW(status, status);
BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
memcpy(buf, attr->private + off, count);
return count;
}
static BIN_ATTR_RO(image, 0); /* size gets filled in later */
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
&bgrt_attr_status.attr,
&bgrt_attr_type.attr,
&bgrt_attr_xoffset.attr,
&bgrt_attr_yoffset.attr,
NULL,
};
static struct bin_attribute *bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
static const struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
.bin_attrs = bgrt_bin_attributes,
};
int __init acpi_parse_bgrt(struct acpi_table_header *table)
{
efi_bgrt_init(table);
return 0;
}
static int __init bgrt_init(void)
{
int ret;
if (!bgrt_tab.image_address)
return -ENODEV;
bgrt_image = memremap(bgrt_tab.image_address, bgrt_image_size,
MEMREMAP_WB);
if (!bgrt_image) {
pr_notice("Ignoring BGRT: failed to map image memory\n");
return -ENOMEM;
}
bin_attr_image.private = bgrt_image;
bin_attr_image.size = bgrt_image_size;
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
if (!bgrt_kobj) {
ret = -EINVAL;
goto out_memmap;
}
ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
if (ret)
goto out_kobject;
return 0;
out_kobject:
kobject_put(bgrt_kobj);
out_memmap:
memunmap(bgrt_image);
return ret;
}
device_initcall(bgrt_init);
| linux-master | drivers/acpi/bgrt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_tables.c - ACPI Boot-Time Table Parsing
*
* Copyright (C) 2001 Paul Diefenbaugh <[email protected]>
*/
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <linux/security.h>
#include <linux/kmemleak.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif
#define ACPI_MAX_TABLES 128
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
static int acpi_apic_instance __initdata_or_acpilib;
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
ACPI_SUBTABLE_CEDT,
};
struct acpi_subtable_entry {
union acpi_subtable_headers *hdr;
enum acpi_subtable_type type;
};
/*
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
*/
static bool acpi_verify_table_checksum __initdata_or_acpilib = false;
void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
if (!header)
return;
switch (header->type) {
case ACPI_MADT_TYPE_LOCAL_APIC:
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
(p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
case ACPI_MADT_TYPE_LOCAL_X2APIC:
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
(p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
case ACPI_MADT_TYPE_IO_APIC:
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
pr_debug("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
p->id, p->address, p->global_irq_base);
}
break;
case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
{
struct acpi_madt_interrupt_override *p =
(struct acpi_madt_interrupt_override *)header;
pr_info("INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
p->bus, p->source_irq, p->global_irq,
mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
if (p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
pr_info("INT_SRC_OVR unexpected reserved flags: 0x%x\n",
p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
}
break;
case ACPI_MADT_TYPE_NMI_SOURCE:
{
struct acpi_madt_nmi_source *p =
(struct acpi_madt_nmi_source *)header;
pr_info("NMI_SRC (%s %s global_irq %d)\n",
mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p->global_irq);
}
break;
case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
{
struct acpi_madt_local_apic_nmi *p =
(struct acpi_madt_local_apic_nmi *)header;
pr_info("LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
p->processor_id,
mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p->lint);
}
break;
case ACPI_MADT_TYPE_LOCAL_X2APIC_NMI:
{
u16 polarity, trigger;
struct acpi_madt_local_x2apic_nmi *p =
(struct acpi_madt_local_x2apic_nmi *)header;
polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
pr_info("X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
p->uid,
mps_inti_flags_polarity[polarity],
mps_inti_flags_trigger[trigger],
p->lint);
}
break;
case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE:
{
struct acpi_madt_local_apic_override *p =
(struct acpi_madt_local_apic_override *)header;
pr_info("LAPIC_ADDR_OVR (address[0x%llx])\n",
p->address);
}
break;
case ACPI_MADT_TYPE_IO_SAPIC:
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
pr_debug("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
p->id, (void *)(unsigned long)p->address,
p->global_irq_base);
}
break;
case ACPI_MADT_TYPE_LOCAL_SAPIC:
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
(p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
case ACPI_MADT_TYPE_INTERRUPT_SOURCE:
{
struct acpi_madt_interrupt_source *p =
(struct acpi_madt_interrupt_source *)header;
pr_info("PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p->type, p->id, p->eid, p->io_sapic_vector,
p->global_irq);
}
break;
case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
{
struct acpi_madt_generic_interrupt *p =
(struct acpi_madt_generic_interrupt *)header;
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
(p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
{
struct acpi_madt_generic_distributor *p =
(struct acpi_madt_generic_distributor *)header;
pr_debug("GIC Distributor (gic_id[0x%04x] address[%llx] gsi_base[%d])\n",
p->gic_id, p->base_address,
p->global_irq_base);
}
break;
case ACPI_MADT_TYPE_CORE_PIC:
{
struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header;
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
(p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
case ACPI_MADT_TYPE_RINTC:
{
struct acpi_madt_rintc *p = (struct acpi_madt_rintc *)header;
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
(p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
default:
pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
header->type);
break;
}
}
static unsigned long __init_or_acpilib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
case ACPI_SUBTABLE_COMMON:
return entry->hdr->common.type;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.type;
case ACPI_SUBTABLE_PRMT:
return 0;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.type;
}
return 0;
}
static unsigned long __init_or_acpilib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
case ACPI_SUBTABLE_COMMON:
return entry->hdr->common.length;
case ACPI_SUBTABLE_HMAT:
return entry->hdr->hmat.length;
case ACPI_SUBTABLE_PRMT:
return entry->hdr->prmt.length;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.length;
}
return 0;
}
static unsigned long __init_or_acpilib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
case ACPI_SUBTABLE_COMMON:
return sizeof(entry->hdr->common);
case ACPI_SUBTABLE_HMAT:
return sizeof(entry->hdr->hmat);
case ACPI_SUBTABLE_PRMT:
return sizeof(entry->hdr->prmt);
case ACPI_SUBTABLE_CEDT:
return sizeof(entry->hdr->cedt);
}
return 0;
}
static enum acpi_subtable_type __init_or_acpilib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
return ACPI_SUBTABLE_PRMT;
if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
return ACPI_SUBTABLE_CEDT;
return ACPI_SUBTABLE_COMMON;
}
static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
{
return proc->handler || proc->handler_arg;
}
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
{
if (proc->handler)
return proc->handler(hdr, end);
if (proc->handler_arg)
return proc->handler_arg(hdr, proc->arg, end);
return -EINVAL;
}
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
* @id: table id (for debugging purposes)
* @table_size: size of the root table
* @table_header: pointer to the start of the table
* @proc: array of acpi_subtable_proc structs containing the entry ids
*	and their associated handlers
* @proc_num: number of entries in @proc
* @max_entries: maximum number of entries to process (0 means no limit)
*
* For each element of @proc, find the subtables matching proc->id and run
* proc->handler on them. The assumption is that there is only a single
* handler for any particular entry id.
*
* The table_size is not the size of the complete ACPI table (the length
* field in the header struct), but only the size of the root table; i.e.,
* the offset from the very first byte of the complete ACPI table, to the
* first byte of the very first subtable.
*
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
static int __init_or_acpilib acpi_parse_entries_array(
char *id, unsigned long table_size,
struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
struct acpi_subtable_entry entry;
unsigned long table_end, subtable_len, entry_len;
int count = 0;
int errs = 0;
int i;
table_end = (unsigned long)table_header + table_header->length;
/* Parse all entries looking for a match. */
entry.type = acpi_get_subtable_type(id);
entry.hdr = (union acpi_subtable_headers *)
((unsigned long)table_header + table_size);
subtable_len = acpi_get_subtable_header_length(&entry);
while (((unsigned long)entry.hdr) + subtable_len < table_end) {
if (max_entries && count >= max_entries)
break;
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
if (!has_handler(&proc[i]) ||
(!errs &&
call_handler(&proc[i], entry.hdr, table_end))) {
errs++;
continue;
}
proc[i].count++;
break;
}
if (i != proc_num)
count++;
/*
* If entry->length is 0, break from this loop to avoid
* infinite loop.
*/
entry_len = acpi_get_entry_length(&entry);
if (entry_len == 0) {
pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
return -EINVAL;
}
entry.hdr = (union acpi_subtable_headers *)
((unsigned long)entry.hdr + entry_len);
}
if (max_entries && count > max_entries) {
pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
id, proc->id, count);
}
return errs ? -EINVAL : count;
}
int __init_or_acpilib acpi_table_parse_entries_array(
char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
int count;
u32 instance = 0;
if (acpi_disabled)
return -ENODEV;
if (!id)
return -EINVAL;
if (!table_size)
return -EINVAL;
if (!strncmp(id, ACPI_SIG_MADT, 4))
instance = acpi_apic_instance;
acpi_get_table(id, instance, &table_header);
if (!table_header) {
pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
acpi_put_table(table_header);
return count;
}
static int __init_or_acpilib __acpi_table_parse_entries(
char *id, unsigned long table_size, int entry_id,
acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg,
void *arg, unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
.handler_arg = handler_arg,
.arg = arg,
};
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
max_entries);
}
int __init_or_acpilib
acpi_table_parse_cedt(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg, void *arg)
{
return __acpi_table_parse_entries(ACPI_SIG_CEDT,
sizeof(struct acpi_table_cedt), id,
NULL, handler_arg, arg, 0);
}
EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries)
{
return __acpi_table_parse_entries(id, table_size, entry_id, handler,
NULL, NULL, max_entries);
}
int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
sizeof(struct acpi_table_madt), id,
handler, max_entries);
}
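/*
 * Editor's sketch (hypothetical handler): callers supply an
 * acpi_tbl_entry_handler that is invoked once per matching subtable,
 * e.g. to walk all local APIC entries:
 */
static int __init __maybe_unused
example_parse_lapic(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	struct acpi_madt_local_apic *p = (struct acpi_madt_local_apic *)header;

	if (BAD_MADT_ENTRY(p, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
	return 0;
}
/* ... acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, example_parse_lapic, 0); ... */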
/**
* acpi_table_parse - find table with @id, run @handler on it
* @id: table id to find
* @handler: handler to run
*
* Scan the ACPI System Description Tables for a table matching @id and
* run @handler on it.
*
* Return 0 if table found, -errno if not.
*/
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
if (acpi_disabled)
return -ENODEV;
if (!id || !handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
acpi_get_table(id, acpi_apic_instance, &table);
else
acpi_get_table(id, 0, &table);
if (table) {
handler(table);
acpi_put_table(table);
return 0;
} else
return -ENODEV;
}
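/*
 * Whole-table consumers pass an acpi_tbl_table_handler instead; for
 * instance, x86 boot code registers the BGRT parser seen elsewhere in
 * this tree with:
 *
 *	acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
 */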
/*
* The BIOS is supposed to supply a single APIC/MADT,
* but some report two. Provide a knob to use either.
* (don't you wish instance 0 and 1 were not the same?)
*/
static void __init check_multiple_madt(void)
{
struct acpi_table_header *table = NULL;
acpi_get_table(ACPI_SIG_MADT, 2, &table);
if (table) {
pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
acpi_apic_instance);
pr_warn("If \"acpi_apic_instance=%d\" works better, "
"notify [email protected]\n",
acpi_apic_instance ? 0 : 2);
acpi_put_table(table);
} else
acpi_apic_instance = 0;
return;
}
static void acpi_table_taint(struct acpi_table_header *table)
{
pr_warn("Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
table->signature, table->oem_table_id);
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
#ifdef CONFIG_ACPI_TABLE_UPGRADE
static u64 acpi_tables_addr;
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
u8 sum = 0;
u8 *end = buffer + length;
while (buffer < end)
sum = (u8) (sum + *(buffer++));
return sum;
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
ACPI_SIG_ASF, ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR,
ACPI_SIG_HPET, ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG,
ACPI_SIG_MCHI, ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI,
ACPI_SIG_TCPA, ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT,
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
ACPI_SIG_NBFT };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
#define NR_ACPI_INITRD_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[NR_ACPI_INITRD_TABLES];
static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
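/*
 * Upgraded tables are packed into an uncompressed cpio archive that is
 * prepended to the regular initrd (see
 * Documentation/admin-guide/acpi/initrd_table_override.rst), e.g.:
 *
 *   mkdir -p kernel/firmware/acpi
 *   cp SSDT.aml kernel/firmware/acpi/
 *   find kernel | cpio -H newc --create > instrumented_initrd
 *   cat instrumented_initrd /boot/initrd > /boot/instrumented_initrd
 */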
void __init acpi_table_upgrade(void)
{
void *data;
size_t size;
int sig, no, table_nr = 0, total_offset = 0;
long offset = 0;
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
if (IS_ENABLED(CONFIG_ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD)) {
data = __initramfs_start;
size = __initramfs_size;
} else {
data = (void *)initrd_start;
size = initrd_end - initrd_start;
}
if (data == NULL || size == 0)
return;
for (no = 0; no < NR_ACPI_INITRD_TABLES; no++) {
file = find_cpio_data(cpio_path, data, size, &offset);
if (!file.data)
break;
data += offset;
size -= offset;
if (file.size < sizeof(struct acpi_table_header)) {
pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
cpio_path, file.name);
continue;
}
table = file.data;
for (sig = 0; sig < ARRAY_SIZE(table_sigs); sig++)
if (!memcmp(table->signature, table_sigs[sig], 4))
break;
if (sig >= ARRAY_SIZE(table_sigs)) {
pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
cpio_path, file.name);
continue;
}
if (file.size != table->length) {
pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
cpio_path, file.name);
continue;
}
if (acpi_table_checksum(file.data, table->length)) {
pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
cpio_path, file.name);
continue;
}
pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
table->signature, cpio_path, file.name, table->length);
all_tables_size += table->length;
acpi_initrd_files[table_nr].data = file.data;
acpi_initrd_files[table_nr].size = file.size;
table_nr++;
}
if (table_nr == 0)
return;
if (security_locked_down(LOCKDOWN_ACPI_TABLES)) {
pr_notice("kernel is locked down, ignoring table override\n");
return;
}
acpi_tables_addr =
memblock_phys_alloc_range(all_tables_size, PAGE_SIZE,
0, ACPI_TABLE_UPGRADE_MAX_PHYS);
if (!acpi_tables_addr) {
WARN_ON(1);
return;
}
/*
* Calling e820_add_reserve() alone does not work: the tables become
* invalid (the memory gets reused) later. memblock_reserve() works as
* expected and the tables won't get modified, but on x86 it is not
* enough, because ioremap (used by acpi_os_map_memory) will later
* complain that the pages to be mapped are not marked "reserved".
* Using both memblock_reserve and e820__range_add (via
* arch_reserve_mem_area) works fine.
*/
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
kmemleak_ignore_phys(acpi_tables_addr);
/*
* early_ioremap only can remap 256k one time. If we map all
* tables one time, we will hit the limit. Need to map chunks
* one by one during copying the same as that in relocate_initrd().
*/
for (no = 0; no < table_nr; no++) {
unsigned char *src_p = acpi_initrd_files[no].data;
phys_addr_t size = acpi_initrd_files[no].size;
phys_addr_t dest_addr = acpi_tables_addr + total_offset;
phys_addr_t slop, clen;
char *dest_p;
total_offset += size;
while (size) {
slop = dest_addr & ~PAGE_MASK;
clen = size;
if (clen > MAP_CHUNK_SIZE - slop)
clen = MAP_CHUNK_SIZE - slop;
dest_p = early_memremap(dest_addr & PAGE_MASK,
clen + slop);
memcpy(dest_p + slop, src_p, clen);
early_memunmap(dest_p, clen + slop);
src_p += clen;
dest_addr += clen;
size -= clen;
}
}
}
static acpi_status
acpi_table_initrd_override(struct acpi_table_header *existing_table,
acpi_physical_address *address, u32 *length)
{
int table_offset = 0;
int table_index = 0;
struct acpi_table_header *table;
u32 table_length;
*length = 0;
*address = 0;
if (!acpi_tables_addr)
return AE_OK;
while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
ACPI_HEADER_SIZE);
if (table_offset + table->length > all_tables_size) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
WARN_ON(1);
return AE_OK;
}
table_length = table->length;
/* Only override tables matched */
if (memcmp(existing_table->signature, table->signature, 4) ||
memcmp(table->oem_id, existing_table->oem_id,
ACPI_OEM_ID_SIZE) ||
memcmp(table->oem_table_id, existing_table->oem_table_id,
ACPI_OEM_TABLE_ID_SIZE)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
/*
* Mark the table to avoid being used in
* acpi_table_initrd_scan() and check the revision.
*/
if (test_and_set_bit(table_index, acpi_initrd_installed) ||
existing_table->oem_revision >= table->oem_revision) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
*length = table_length;
*address = acpi_tables_addr + table_offset;
pr_info("Table Upgrade: override [%4.4s-%6.6s-%8.8s]\n",
table->signature, table->oem_id,
table->oem_table_id);
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
break;
next_table:
table_offset += table_length;
table_index++;
}
return AE_OK;
}
static void __init acpi_table_initrd_scan(void)
{
int table_offset = 0;
int table_index = 0;
u32 table_length;
struct acpi_table_header *table;
if (!acpi_tables_addr)
return;
while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
ACPI_HEADER_SIZE);
if (table_offset + table->length > all_tables_size) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
WARN_ON(1);
return;
}
table_length = table->length;
/* Skip RSDT/XSDT which should only be used for override */
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) ||
ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
/*
* Mark the table so that it is not used again by
* acpi_table_initrd_override(); this cannot actually happen here,
* because override is disabled in acpi_install_physical_table().
*/
if (test_and_set_bit(table_index, acpi_initrd_installed)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
pr_info("Table Upgrade: install [%4.4s-%6.6s-%8.8s]\n",
table->signature, table->oem_id,
table->oem_table_id);
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
acpi_install_physical_table(acpi_tables_addr + table_offset);
next_table:
table_offset += table_length;
table_index++;
}
}
#else
static acpi_status
acpi_table_initrd_override(struct acpi_table_header *existing_table,
acpi_physical_address *address,
u32 *table_length)
{
*table_length = 0;
*address = 0;
return AE_OK;
}
static void __init acpi_table_initrd_scan(void)
{
}
#endif /* CONFIG_ACPI_TABLE_UPGRADE */
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
acpi_physical_address *address,
u32 *table_length)
{
return acpi_table_initrd_override(existing_table, address,
table_length);
}
#ifdef CONFIG_ACPI_CUSTOM_DSDT
static void *amlcode __attribute__ ((weakref("AmlCode")));
static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
#endif
acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table)
{
if (!existing_table || !new_table)
return AE_BAD_PARAMETER;
*new_table = NULL;
#ifdef CONFIG_ACPI_CUSTOM_DSDT
if (!strncmp(existing_table->signature, "DSDT", 4)) {
*new_table = (struct acpi_table_header *)&amlcode;
if (!(*new_table))
*new_table = (struct acpi_table_header *)&dsdt_amlcode;
}
#endif
if (*new_table != NULL)
acpi_table_taint(existing_table);
return AE_OK;
}
/*
* acpi_locate_initial_tables()
*
* Get the RSDP, then find and checksum all the ACPI tables.
*
* result: initial_tables[] is initialized, and points to
* a list of ACPI tables.
*/
int __init acpi_locate_initial_tables(void)
{
acpi_status status;
if (acpi_verify_table_checksum) {
pr_info("Early table checksum verification enabled\n");
acpi_gbl_enable_table_validation = TRUE;
} else {
pr_info("Early table checksum verification disabled\n");
acpi_gbl_enable_table_validation = FALSE;
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
void __init acpi_reserve_initial_tables(void)
{
int i;
for (i = 0; i < ACPI_MAX_TABLES; i++) {
struct acpi_table_desc *table_desc = &initial_tables[i];
u64 start = table_desc->address;
u64 size = table_desc->length;
if (!start || !size)
break;
pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
table_desc->signature.ascii, start, start + size - 1);
memblock_reserve(start, size);
}
}
void __init acpi_table_init_complete(void)
{
acpi_table_initrd_scan();
check_multiple_madt();
}
int __init acpi_table_init(void)
{
int ret;
ret = acpi_locate_initial_tables();
if (ret)
return ret;
acpi_table_init_complete();
return 0;
}
static int __init acpi_parse_apic_instance(char *str)
{
if (!str)
return -EINVAL;
if (kstrtoint(str, 0, &acpi_apic_instance))
return -EINVAL;
pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
return 0;
}
early_param("acpi_apic_instance", acpi_parse_apic_instance);
static int __init acpi_force_table_verification_setup(char *s)
{
acpi_verify_table_checksum = true;
return 0;
}
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
static int __init acpi_force_32bit_fadt_addr(char *s)
{
pr_info("Forcing 32 Bit FADT addresses\n");
acpi_gbl_use32_bit_fadt_addresses = TRUE;
return 0;
}
early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
| linux-master | drivers/acpi/tables.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005 Intel Corporation
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
*
* Alex Chiang <[email protected]>
* - Unified x86/ia64 implementations
*
* I/O APIC hotplug support
* Yinghai Lu <[email protected]>
* Jiang Liu <[email protected]>
*/
#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
static struct acpi_table_madt *get_madt_table(void)
{
static struct acpi_table_madt *madt;
static int read_madt;
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
(struct acpi_table_header **)&madt)))
madt = NULL;
read_madt++;
}
return madt;
}
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (lapic->processor_id != acpi_id)
return -EINVAL;
*apic_id = lapic->id;
return 0;
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
*apic_id = apic->local_apic_id;
return 0;
}
return -EINVAL;
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration) {
if ((entry->length < 16) || (lsapic->uid != acpi_id))
return -EINVAL;
} else if (lsapic->processor_id != acpi_id)
return -EINVAL;
*apic_id = (lsapic->id << 8) | lsapic->eid;
return 0;
}
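/*
 * For example, an LSAPIC entry with id 0x12 and eid 0x34 yields the
 * physical CPU id (0x12 << 8) | 0x34 == 0x1234.
 */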
/*
* Retrieve the ARM CPU physical identifier (MPIDR)
*/
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
{
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
if (!(gicc->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/*
 * device_declaration means a Device object in the DSDT. In the GIC
 * interrupt model, logical processors are required to have a Processor
 * Device object in the DSDT, so check device_declaration here.
 */
if (device_declaration && (gicc->uid == acpi_id)) {
*mpidr = gicc->arm_mpidr;
return 0;
}
return -EINVAL;
}
/*
* Retrieve the RISC-V hartid for the processor
*/
static int map_rintc_hartid(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id,
phys_cpuid_t *hartid)
{
struct acpi_madt_rintc *rintc =
container_of(entry, struct acpi_madt_rintc, header);
if (!(rintc->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/*
 * device_declaration means a Device object in the DSDT. On RISC-V,
 * logical processors are required to have a Processor Device object in
 * the DSDT, so check device_declaration here.
 */
if (device_declaration && rintc->uid == acpi_id) {
*hartid = rintc->hart_id;
return 0;
}
return -EINVAL;
}
/*
* Retrieve LoongArch CPU physical id
*/
static int map_core_pic_id(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, phys_cpuid_t *phys_id)
{
struct acpi_madt_core_pic *core_pic =
container_of(entry, struct acpi_madt_core_pic, header);
if (!(core_pic->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/*
 * device_declaration means a Device object in the DSDT. On LoongArch
 * systems, the logical processor's acpi_id must be supplied via the _UID
 * property in the DSDT, so check device_declaration here.
 */
if (device_declaration && (core_pic->processor_id == acpi_id)) {
*phys_id = core_pic->core_id;
return 0;
}
return -EINVAL;
}
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
if (!madt)
return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
/* Parse all entries looking for a match. */
entry += sizeof(struct acpi_table_madt);
while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_RINTC) {
if (!map_rintc_hartid(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_CORE_PIC) {
if (!map_core_pic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
return phys_id;
}
phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
{
struct acpi_table_madt *madt = NULL;
phys_cpuid_t rv;
acpi_get_table(ACPI_SIG_MADT, 0,
(struct acpi_table_header **)&madt);
if (!madt)
return PHYS_CPUID_INVALID;
rv = map_madt_entry(madt, 1, acpi_id);
acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
if (!buffer.length || !buffer.pointer)
goto exit;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(struct acpi_subtable_header)) {
goto exit;
}
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
map_gicc_mpidr(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_CORE_PIC)
map_core_pic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
phys_cpuid_t phys_id;
phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
EXPORT_SYMBOL_GPL(acpi_get_phys_id);
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
if (invalid_phys_cpuid(phys_id)) {
/*
 * On a UP system there may be no _MAT or MADT table, in which case
 * phys_id above is always PHYS_CPUID_INVALID.
 *
 * However, the BIOS may define multiple CPU handles even for a UP
 * processor. For example,
 *
 * Scope (_PR)
 * {
 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
 * }
 *
 * Ignore phys_id and return 0 for the processor handle with ACPI ID 0
 * if nr_cpu_ids is 1, which should be the case when no SMP tables are
 * found. Return -EINVAL for any other CPU's handle.
 */
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return -EINVAL;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In a UP kernel, only processor 0 is valid */
if (phys_id == 0)
return phys_id;
#endif
return -ENODEV;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
phys_cpuid_t phys_id;
phys_id = acpi_get_phys_id(handle, type, acpi_id);
return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
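/*
 * Usage sketch (hypothetical caller; the handle and IDs are made up):
 * resolve a Processor Device object declared with _UID == 4 to its
 * logical CPU number. "type" is 1 for Device objects and 0 for legacy
 * Processor statements.
 *
 *	int cpu = acpi_get_cpuid(handle, 1, 4);
 *
 *	if (cpu >= 0)
 *		pr_info("ACPI processor 4 is logical CPU %d\n", cpu);
 */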
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
u64 *phys_addr, int *ioapic_id)
{
struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
if (ioapic->global_irq_base != gsi_base)
return 0;
*phys_addr = ioapic->address;
*ioapic_id = ioapic->id;
return 1;
}
static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
{
struct acpi_subtable_header *hdr;
unsigned long madt_end, entry;
struct acpi_table_madt *madt;
int apic_id = -1;
madt = get_madt_table();
if (!madt)
return apic_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
/* Parse all entries looking for a match. */
entry += sizeof(struct acpi_table_madt);
while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
hdr = (struct acpi_subtable_header *)entry;
if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
break;
else
entry += hdr->length;
}
return apic_id;
}
static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
u64 *phys_addr)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_subtable_header *header;
union acpi_object *obj;
int apic_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
if (!buffer.length || !buffer.pointer)
goto exit;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(struct acpi_subtable_header))
goto exit;
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_IO_APIC)
get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
exit:
kfree(buffer.pointer);
return apic_id;
}
/**
* acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
* @handle: ACPI object for IOAPIC device
* @gsi_base: GSI base to match with
* @phys_addr: Pointer to store physical address of matching IOAPIC record
*
* Walk resources returned by the ACPI _MAT method, then the ACPI MADT table,
* to search for an ACPI IOAPIC record matching @gsi_base.
* Return the IOAPIC id and store the physical address in @phys_addr if a
* match is found, otherwise return <0.
*/
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
{
int apic_id;
apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
if (apic_id == -1)
apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
return apic_id;
}
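/*
 * Usage sketch (hypothetical hotplug path): look up the IOAPIC serving
 * GSI base 24 and retrieve its MMIO address.
 *
 *	u64 phys_addr;
 *	int id = acpi_get_ioapic_id(handle, 24, &phys_addr);
 *
 *	if (id >= 0)
 *		pr_info("IOAPIC %d at %#llx\n", id, phys_addr);
 */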
#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
| linux-master | drivers/acpi/processor_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* wakeup.c - support wakeup devices
* Copyright (C) 2004 Li Shaohua <[email protected]>
*/
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "internal.h"
#include "sleep.h"
struct acpi_wakeup_handler {
struct list_head list_node;
bool (*wakeup)(void *context);
void *context;
};
static LIST_HEAD(acpi_wakeup_handler_head);
static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
/*
 * We do not take acpi_device_lock in this file because doing so causes an
 * oops during suspend/resume, and the lock is not really required: this
 * code runs while the system is in an S-state, when no device hotplug can
 * occur.
 */
/**
* acpi_enable_wakeup_devices - Enable wake-up device GPEs.
* @sleep_state: ACPI system sleep state.
*
* Enable wakeup device power of devices with the state.enable flag set and set
* the wakeup enable mask bits in the GPE registers that correspond to wakeup
* devices.
*/
void acpi_enable_wakeup_devices(u8 sleep_state)
{
struct acpi_device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
if (device_may_wakeup(&dev->dev))
acpi_enable_wakeup_device_power(dev, sleep_state);
/* The wake-up power should have been enabled already. */
acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_ENABLE);
}
}
/**
* acpi_disable_wakeup_devices - Disable devices' wakeup capability.
* @sleep_state: ACPI system sleep state.
*/
void acpi_disable_wakeup_devices(u8 sleep_state)
{
struct acpi_device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_DISABLE);
if (device_may_wakeup(&dev->dev))
acpi_disable_wakeup_device_power(dev);
}
}
int __init acpi_wakeup_device_init(void)
{
struct acpi_device *dev, *tmp;
mutex_lock(&acpi_device_lock);
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (device_can_wakeup(&dev->dev)) {
/* Button GPEs are supposed to be always enabled. */
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number);
device_set_wakeup_enable(&dev->dev, true);
}
}
mutex_unlock(&acpi_device_lock);
return 0;
}
/**
* acpi_register_wakeup_handler - Register wakeup handler
* @wake_irq: The IRQ through which the device may receive wakeups
* @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
* @context: Context to pass to the handler when calling it
*
* Drivers which may share an IRQ with the SCI can use this to register
* a handler which returns true when the device they are managing wants
* to trigger a wakeup.
*/
int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
void *context)
{
struct acpi_wakeup_handler *handler;
/*
* If the device is not sharing its IRQ with the SCI, there is no
* need to register the handler.
*/
if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
return 0;
handler = kmalloc(sizeof(*handler), GFP_KERNEL);
if (!handler)
return -ENOMEM;
handler->wakeup = wakeup;
handler->context = context;
mutex_lock(&acpi_wakeup_handler_mutex);
list_add(&handler->list_node, &acpi_wakeup_handler_head);
mutex_unlock(&acpi_wakeup_handler_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
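/*
 * Registration sketch (hypothetical driver; the "my_dev" names are made
 * up): a device sharing its interrupt line with the SCI reports whether
 * it wants to wake the system.
 *
 *	static bool my_dev_check_wakeup(void *context)
 *	{
 *		struct my_dev *dev = context;
 *
 *		return my_dev_irq_pending(dev);
 *	}
 *
 *	ret = acpi_register_wakeup_handler(dev->irq, my_dev_check_wakeup, dev);
 */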
/**
* acpi_unregister_wakeup_handler - Unregister wakeup handler
* @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
* @context: Context passed to acpi_register_wakeup_handler()
*/
void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
void *context)
{
struct acpi_wakeup_handler *handler;
mutex_lock(&acpi_wakeup_handler_mutex);
list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
if (handler->wakeup == wakeup && handler->context == context) {
list_del(&handler->list_node);
kfree(handler);
break;
}
}
mutex_unlock(&acpi_wakeup_handler_mutex);
}
EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
bool acpi_check_wakeup_handlers(void)
{
struct acpi_wakeup_handler *handler;
/* No need to lock, nothing else is running when we're called. */
list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
if (handler->wakeup(handler->context))
return true;
}
return false;
}
| linux-master | drivers/acpi/wakeup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Address translation interface via ACPI DSM.
* Copyright (C) 2018 Intel Corporation
*
* Specification for this interface is available at:
*
* https://cdrdv2.intel.com/v1/dl/getContent/603354
*/
#include <linux/acpi.h>
#include <linux/adxl.h>
#define ADXL_REVISION 0x1
#define ADXL_IDX_GET_ADDR_PARAMS 0x1
#define ADXL_IDX_FORWARD_TRANSLATE 0x2
#define ACPI_ADXL_PATH "\\_SB.ADXL"
/*
* The specification doesn't provide a limit on how many
* components are in a memory address. But since we allocate
* memory based on the number the BIOS tells us, we should
* defend against insane values.
*/
#define ADXL_MAX_COMPONENTS 500
#undef pr_fmt
#define pr_fmt(fmt) "ADXL: " fmt
static acpi_handle handle;
static union acpi_object *params;
static const guid_t adxl_guid =
GUID_INIT(0xAA3C050A, 0x7EA4, 0x4C1F,
0xAF, 0xDA, 0x12, 0x67, 0xDF, 0xD3, 0xD4, 0x8D);
static int adxl_count;
static char **adxl_component_names;
static union acpi_object *adxl_dsm(int cmd, union acpi_object argv[])
{
union acpi_object *obj, *o;
obj = acpi_evaluate_dsm_typed(handle, &adxl_guid, ADXL_REVISION,
cmd, argv, ACPI_TYPE_PACKAGE);
if (!obj) {
pr_info("DSM call failed for cmd=%d\n", cmd);
return NULL;
}
if (obj->package.count != 2) {
pr_info("Bad pkg count %d\n", obj->package.count);
goto err;
}
o = obj->package.elements;
if (o->type != ACPI_TYPE_INTEGER) {
pr_info("Bad 1st element type %d\n", o->type);
goto err;
}
if (o->integer.value) {
pr_info("Bad ret val %llu\n", o->integer.value);
goto err;
}
o = obj->package.elements + 1;
if (o->type != ACPI_TYPE_PACKAGE) {
pr_info("Bad 2nd element type %d\n", o->type);
goto err;
}
return obj;
err:
ACPI_FREE(obj);
return NULL;
}
/**
* adxl_get_component_names - get list of memory component names
* Returns NULL terminated list of string names
*
* Give the caller a pointer to the list of memory component names,
* e.g. { "SystemAddress", "ProcessorSocketId", "ChannelId", ... NULL }.
* The caller should count the strings in order to size the buffer it
* passes to adxl_decode().
*/
const char * const *adxl_get_component_names(void)
{
return (const char * const *)adxl_component_names;
}
EXPORT_SYMBOL_GPL(adxl_get_component_names);
/**
* adxl_decode - ask BIOS to decode a system address to memory address
* @addr: the address to decode
* @component_values: pointer to array of values for each component
* Returns 0 on success, negative error code otherwise
*
* The index of each value returned in the array matches the index of
* each component name returned by adxl_get_component_names().
* Components that are not defined for this address translation (e.g.
* mirror channel number for a non-mirrored address) are set to ~0ull.
*/
int adxl_decode(u64 addr, u64 component_values[])
{
union acpi_object argv4[2], *results, *r;
int i, cnt;
if (!adxl_component_names)
return -EOPNOTSUPP;
argv4[0].type = ACPI_TYPE_PACKAGE;
argv4[0].package.count = 1;
argv4[0].package.elements = &argv4[1];
argv4[1].integer.type = ACPI_TYPE_INTEGER;
argv4[1].integer.value = addr;
results = adxl_dsm(ADXL_IDX_FORWARD_TRANSLATE, argv4);
if (!results)
return -EINVAL;
r = results->package.elements + 1;
cnt = r->package.count;
if (cnt != adxl_count) {
ACPI_FREE(results);
return -EINVAL;
}
r = r->package.elements;
for (i = 0; i < cnt; i++)
component_values[i] = r[i].integer.value;
ACPI_FREE(results);
return 0;
}
EXPORT_SYMBOL_GPL(adxl_decode);
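/*
 * Decode sketch (hypothetical caller, e.g. an EDAC driver): count the
 * NULL-terminated name list, allocate a matching value array, and decode
 * one system address. Assumes <linux/slab.h> for kcalloc()/kfree().
 *
 *	const char * const *names = adxl_get_component_names();
 *	u64 *values;
 *	int i, count = 0;
 *
 *	while (names && names[count])
 *		count++;
 *
 *	values = kcalloc(count, sizeof(*values), GFP_KERNEL);
 *	if (values && !adxl_decode(addr, values)) {
 *		for (i = 0; i < count; i++)
 *			pr_info("%s: 0x%llx\n", names[i], values[i]);
 *	}
 *	kfree(values);
 */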
static int __init adxl_init(void)
{
char *path = ACPI_ADXL_PATH;
union acpi_object *p;
acpi_status status;
int i;
status = acpi_get_handle(NULL, path, &handle);
if (ACPI_FAILURE(status)) {
pr_debug("No ACPI handle for path %s\n", path);
return -ENODEV;
}
if (!acpi_has_method(handle, "_DSM")) {
pr_info("No DSM method\n");
return -ENODEV;
}
if (!acpi_check_dsm(handle, &adxl_guid, ADXL_REVISION,
ADXL_IDX_GET_ADDR_PARAMS |
ADXL_IDX_FORWARD_TRANSLATE)) {
pr_info("DSM method does not support forward translate\n");
return -ENODEV;
}
params = adxl_dsm(ADXL_IDX_GET_ADDR_PARAMS, NULL);
if (!params) {
pr_info("Failed to get component names\n");
return -ENODEV;
}
p = params->package.elements + 1;
adxl_count = p->package.count;
if (adxl_count > ADXL_MAX_COMPONENTS) {
pr_info("Insane number of address component names %d\n", adxl_count);
ACPI_FREE(params);
return -ENODEV;
}
p = p->package.elements;
/*
* Allocate one extra for NULL termination.
*/
adxl_component_names = kcalloc(adxl_count + 1, sizeof(char *), GFP_KERNEL);
if (!adxl_component_names) {
ACPI_FREE(params);
return -ENOMEM;
}
for (i = 0; i < adxl_count; i++)
adxl_component_names[i] = p[i].string.pointer;
return 0;
}
subsys_initcall(adxl_init);
| linux-master | drivers/acpi/acpi_adxl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* fan_core.c - ACPI Fan core Driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include "fan.h"
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
/* thermal cooling device callbacks */
static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_fan *fan = acpi_driver_data(device);
if (fan->acpi4) {
if (fan->fif.fine_grain_ctrl)
*state = 100 / fan->fif.step_size;
else
*state = fan->fps_count - 1;
} else {
*state = 1;
}
return 0;
}
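/*
 * For example, with fine-grained control and a _FIF step size of 5 the
 * cooling device reports max_state = 100 / 5 = 20, and cooling state n
 * corresponds to a fan speed percentage of n * 5.
 */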
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
return -ENODEV;
}
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
ret = -EINVAL;
goto err;
}
fst->revision = obj->package.elements[0].integer.value;
fst->control = obj->package.elements[1].integer.value;
fst->speed = obj->package.elements[2].integer.value;
err:
kfree(obj);
return ret;
}
static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
{
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_fan_fst fst;
int status, i;
status = acpi_fan_get_fst(device, &fst);
if (status)
return status;
if (fan->fif.fine_grain_ctrl) {
/* Per the spec, this control value should match what was set via _FSL */
if (fst.control > 100) {
dev_dbg(&device->dev, "Invalid control value returned\n");
goto match_fps;
}
*state = (int) fst.control / fan->fif.step_size;
return 0;
}
match_fps:
for (i = 0; i < fan->fps_count; i++) {
if (fst.control == fan->fps[i].control)
break;
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
return -EINVAL;
}
*state = i;
return status;
}
static int fan_get_state(struct acpi_device *device, unsigned long *state)
{
int result;
int acpi_state = ACPI_STATE_D0;
result = acpi_device_update_power(device, &acpi_state);
if (result)
return result;
*state = acpi_state == ACPI_STATE_D3_COLD
|| acpi_state == ACPI_STATE_D3_HOT ?
0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1);
return 0;
}
static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
*state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_fan *fan = acpi_driver_data(device);
if (fan->acpi4)
return fan_get_state_acpi4(device, state);
else
return fan_get_state(device, state);
}
static int fan_set_state(struct acpi_device *device, unsigned long state)
{
if (state != 0 && state != 1)
return -EINVAL;
return acpi_device_set_power(device,
state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
}
static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state)
{
struct acpi_fan *fan = acpi_driver_data(device);
acpi_status status;
u64 value = state;
int max_state;
if (fan->fif.fine_grain_ctrl)
max_state = 100 / fan->fif.step_size;
else
max_state = fan->fps_count - 1;
if (state > max_state)
return -EINVAL;
if (fan->fif.fine_grain_ctrl) {
value *= fan->fif.step_size;
/* The spec only allows compensating for the last step */
if (value + fan->fif.step_size > 100)
value = 100;
} else {
value = fan->fps[state].control;
}
status = acpi_execute_simple_method(device->handle, "_FSL", value);
if (ACPI_FAILURE(status)) {
dev_dbg(&device->dev, "Failed to set state by _FSL\n");
return -ENODEV;
}
return 0;
}
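/*
 * Worked example of the last-step compensation above: with step_size = 7,
 * max_state = 100 / 7 = 14, so the top state computes value = 14 * 7 = 98;
 * since 98 + 7 > 100, the value is rounded up and _FSL is called with 100.
 */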
static int
fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_fan *fan = acpi_driver_data(device);
if (fan->acpi4)
return fan_set_state_acpi4(device, state);
else
return fan_set_state(device, state);
}
static const struct thermal_cooling_device_ops fan_cooling_ops = {
.get_max_state = fan_get_max_state,
.get_cur_state = fan_get_cur_state,
.set_cur_state = fan_set_cur_state,
};
/* --------------------------------------------------------------------------
* Driver Interface
* --------------------------------------------------------------------------
*/
static bool acpi_fan_is_acpi4(struct acpi_device *device)
{
return acpi_has_method(device->handle, "_FIF") &&
acpi_has_method(device->handle, "_FPS") &&
acpi_has_method(device->handle, "_FSL") &&
acpi_has_method(device->handle, "_FST");
}
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_buffer format = { sizeof("NNNN"), "NNNN" };
u64 fields[4];
struct acpi_buffer fif = { sizeof(fields), fields };
union acpi_object *obj;
acpi_status status;
status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE) {
dev_err(&device->dev, "Invalid _FIF data\n");
status = -EINVAL;
goto err;
}
status = acpi_extract_package(obj, &format, &fif);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FIF element\n");
status = -EINVAL;
goto err;
}
fan->fif.revision = fields[0];
fan->fif.fine_grain_ctrl = fields[1];
fan->fif.step_size = fields[2];
fan->fif.low_speed_notification = fields[3];
/* Guard against a buggy step size of 0 by using 1 instead */
if (!fan->fif.step_size)
fan->fif.step_size = 1;
/* Clamp the step size to 9; the spec only allows values 1-9 */
else if (fan->fif.step_size > 9)
fan->fif.step_size = 9;
err:
kfree(obj);
return status;
}
static int acpi_fan_speed_cmp(const void *a, const void *b)
{
const struct acpi_fan_fps *fps1 = a;
const struct acpi_fan_fps *fps2 = b;
return fps1->speed - fps2->speed;
}
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int i;
status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
dev_err(&device->dev, "Invalid _FPS data\n");
status = -EINVAL;
goto err;
}
fan->fps_count = obj->package.count - 1; /* minus revision field */
fan->fps = devm_kcalloc(&device->dev,
fan->fps_count, sizeof(struct acpi_fan_fps),
GFP_KERNEL);
if (!fan->fps) {
dev_err(&device->dev, "Not enough memory\n");
status = -ENOMEM;
goto err;
}
for (i = 0; i < fan->fps_count; i++) {
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
&fan->fps[i] };
status = acpi_extract_package(&obj->package.elements[i + 1],
&format, &fps);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FPS element\n");
goto err;
}
}
/* Sort the state array by fan speed, in increasing order */
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
err:
kfree(obj);
return status;
}
static int acpi_fan_probe(struct platform_device *pdev)
{
int result = 0;
struct thermal_cooling_device *cdev;
struct acpi_fan *fan;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
if (acpi_fan_is_acpi4(device)) {
result = acpi_fan_get_fif(device);
if (result)
return result;
result = acpi_fan_get_fps(device);
if (result)
return result;
result = acpi_fan_create_attributes(device);
if (result)
return result;
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
goto err_end;
}
}
if (!strncmp(pdev->name, "PNP0C0B", strlen("PNP0C0B")))
name = "Fan";
else
name = acpi_device_bid(device);
cdev = thermal_cooling_device_register(name, device,
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
goto err_end;
}
dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
fan->cdev = cdev;
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
if (result)
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
goto err_end;
}
return 0;
err_end:
if (fan->acpi4)
acpi_fan_delete_attributes(device);
return result;
}
static int acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
if (fan->acpi4) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
acpi_fan_delete_attributes(device);
}
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev)
{
struct acpi_fan *fan = dev_get_drvdata(dev);
if (fan->acpi4)
return 0;
acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0);
return 0;
}
static int acpi_fan_resume(struct device *dev)
{
int result;
struct acpi_fan *fan = dev_get_drvdata(dev);
if (fan->acpi4)
return 0;
result = acpi_device_update_power(ACPI_COMPANION(dev), NULL);
if (result)
dev_err(dev, "Error updating fan power state\n");
return result;
}
static const struct dev_pm_ops acpi_fan_pm = {
.resume = acpi_fan_resume,
.freeze = acpi_fan_suspend,
.thaw = acpi_fan_resume,
.restore = acpi_fan_resume,
};
#define FAN_PM_OPS_PTR (&acpi_fan_pm)
#else
#define FAN_PM_OPS_PTR NULL
#endif
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
.remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
.pm = FAN_PM_OPS_PTR,
},
};
module_platform_driver(acpi_fan_driver);
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/fan_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sysfs.c - ACPI sysfs interface to userspace.
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/moduleparam.h>
#include "internal.h"
#ifdef CONFIG_ACPI_DEBUG
/*
* ACPI debug sysfs I/F, including:
* /sys/module/acpi/parameters/debug_layer
* /sys/module/acpi/parameters/debug_level
* /sys/module/acpi/parameters/trace_method_name
* /sys/module/acpi/parameters/trace_state
* /sys/module/acpi/parameters/trace_debug_layer
* /sys/module/acpi/parameters/trace_debug_level
*/
struct acpi_dlayer {
const char *name;
unsigned long value;
};
struct acpi_dlevel {
const char *name;
unsigned long value;
};
#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
static const struct acpi_dlayer acpi_debug_layers[] = {
ACPI_DEBUG_INIT(ACPI_UTILITIES),
ACPI_DEBUG_INIT(ACPI_HARDWARE),
ACPI_DEBUG_INIT(ACPI_EVENTS),
ACPI_DEBUG_INIT(ACPI_TABLES),
ACPI_DEBUG_INIT(ACPI_NAMESPACE),
ACPI_DEBUG_INIT(ACPI_PARSER),
ACPI_DEBUG_INIT(ACPI_DISPATCHER),
ACPI_DEBUG_INIT(ACPI_EXECUTER),
ACPI_DEBUG_INIT(ACPI_RESOURCES),
ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
ACPI_DEBUG_INIT(ACPI_COMPILER),
ACPI_DEBUG_INIT(ACPI_TOOLS),
};
static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_INIT),
ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
ACPI_DEBUG_INIT(ACPI_LV_INFO),
ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_PARSE),
ACPI_DEBUG_INIT(ACPI_LV_LOAD),
ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
ACPI_DEBUG_INIT(ACPI_LV_EXEC),
ACPI_DEBUG_INIT(ACPI_LV_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
ACPI_DEBUG_INIT(ACPI_LV_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_VALUES),
ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
ACPI_DEBUG_INIT(ACPI_LV_THREADS),
ACPI_DEBUG_INIT(ACPI_LV_IO),
ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_layers[i].name,
acpi_debug_layers[i].value,
(acpi_dbg_layer & acpi_debug_layers[i].value)
? '*' : ' ');
}
result +=
sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
ACPI_ALL_DRIVERS,
(acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
== 0 ? ' ' : '-');
result +=
sprintf(buffer + result,
"--\ndebug_layer = 0x%08X ( * = enabled)\n",
acpi_dbg_layer);
return result;
}
static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_levels[i].name,
acpi_debug_levels[i].value,
(acpi_dbg_level & acpi_debug_levels[i].value)
? '*' : ' ');
}
result +=
sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
acpi_dbg_level);
return result;
}
static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
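/*
 * Runtime usage sketch (values are illustrative): enable ACPI_LV_INFO
 * output from the ACPI_EVENTS layer; both happen to be 0x4 in current
 * ACPICA headers:
 *
 *	# echo 0x4 > /sys/module/acpi/parameters/debug_layer
 *	# echo 0x4 > /sys/module/acpi/parameters/debug_level
 */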
static char trace_method_name[1024];
static int param_set_trace_method_name(const char *val,
const struct kernel_param *kp)
{
u32 saved_flags = 0;
bool is_abs_path = true;
if (*val != '\\')
is_abs_path = false;
if ((is_abs_path && strlen(val) > 1023) ||
(!is_abs_path && strlen(val) > 1022)) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
/*
* It's not safe to update acpi_gbl_trace_method_name without
* having the tracer stopped, so we save the original tracer
* state and disable it.
*/
saved_flags = acpi_gbl_trace_flags;
(void)acpi_debug_trace(NULL,
acpi_gbl_trace_dbg_level,
acpi_gbl_trace_dbg_layer,
0);
/* This is a hack. We can't kmalloc in early boot. */
if (is_abs_path)
strcpy(trace_method_name, val);
else {
trace_method_name[0] = '\\';
strcpy(trace_method_name+1, val);
}
/* Restore the original tracer state */
(void)acpi_debug_trace(trace_method_name,
acpi_gbl_trace_dbg_level,
acpi_gbl_trace_dbg_layer,
saved_flags);
return 0;
}
static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
}
static const struct kernel_param_ops param_ops_trace_method = {
.set = param_set_trace_method_name,
.get = param_get_trace_method_name,
};
static const struct kernel_param_ops param_ops_trace_attrib = {
.set = param_set_uint,
.get = param_get_uint,
};
module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
static int param_set_trace_state(const char *val,
const struct kernel_param *kp)
{
acpi_status status;
const char *method = trace_method_name;
u32 flags = 0;
/* So "xxx-once" comparison should go prior than "xxx" comparison */
#define acpi_compare_param(val, key) \
strncmp((val), (key), sizeof(key) - 1)
if (!acpi_compare_param(val, "enable")) {
method = NULL;
flags = ACPI_TRACE_ENABLED;
} else if (!acpi_compare_param(val, "disable"))
method = NULL;
else if (!acpi_compare_param(val, "method-once"))
flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
else if (!acpi_compare_param(val, "method"))
flags = ACPI_TRACE_ENABLED;
else if (!acpi_compare_param(val, "opcode-once"))
flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
else if (!acpi_compare_param(val, "opcode"))
flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
else
return -EINVAL;
status = acpi_debug_trace(method,
acpi_gbl_trace_dbg_level,
acpi_gbl_trace_dbg_layer,
flags);
if (ACPI_FAILURE(status))
return -EBUSY;
return 0;
}
static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable\n");
if (!acpi_gbl_trace_method_name)
return sprintf(buffer, "enable\n");
if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
return sprintf(buffer, "method-once\n");
else
return sprintf(buffer, "method\n");
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
/* /sys/module/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer,
const struct kernel_param *kp)
{
int result;
result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
return result;
}
module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
* /sys/firmware/acpi/tables/data/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;
#define ACPI_MAX_TABLE_INSTANCES 999
#define ACPI_INST_SIZE 4 /* including trailing 0 */
struct acpi_table_attr {
struct bin_attribute attr;
char name[ACPI_NAMESEG_SIZE];
int instance;
char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
struct list_head node;
};
struct acpi_data_attr {
struct bin_attribute attr;
u64 addr;
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
container_of(bin_attr, struct acpi_table_attr, attr);
struct acpi_table_header *table_header = NULL;
acpi_status status;
ssize_t rc;
status = acpi_get_table(table_attr->name, table_attr->instance,
&table_header);
if (ACPI_FAILURE(status))
return -ENODEV;
rc = memory_read_from_buffer(buf, count, &offset, table_header,
table_header->length);
acpi_put_table(table_header);
return rc;
}
static int acpi_table_attr_init(struct kobject *tables_obj,
struct acpi_table_attr *table_attr,
struct acpi_table_header *table_header)
{
struct acpi_table_header *header = NULL;
struct acpi_table_attr *attr = NULL;
char instance_str[ACPI_INST_SIZE];
sysfs_attr_init(&table_attr->attr.attr);
ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
list_for_each_entry(attr, &acpi_table_attr_list, node) {
if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
if (table_attr->instance < attr->instance)
table_attr->instance = attr->instance;
}
table_attr->instance++;
if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
pr_warn("%4.4s: too many table instances\n", table_attr->name);
return -ERANGE;
}
ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
!acpi_get_table
(table_header->signature, 2, &header))) {
snprintf(instance_str, sizeof(instance_str), "%u",
table_attr->instance);
strcat(table_attr->filename, instance_str);
}
table_attr->attr.size = table_header->length;
table_attr->attr.read = acpi_table_show;
table_attr->attr.attr.name = table_attr->filename;
table_attr->attr.attr.mode = 0400;
return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}
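/*
 * Naming example: if the firmware provides two SSDTs at scan time, the
 * code above exposes them as "SSDT1" and "SSDT2"; a lone SSDT is exposed
 * simply as "SSDT".
 */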
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
struct acpi_table_attr *table_attr;
switch (event) {
case ACPI_TABLE_EVENT_INSTALL:
table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
if (acpi_table_attr_init(dynamic_tables_kobj,
table_attr, table)) {
kfree(table_attr);
return AE_ERROR;
}
list_add_tail(&table_attr->node, &acpi_table_attr_list);
break;
case ACPI_TABLE_EVENT_LOAD:
case ACPI_TABLE_EVENT_UNLOAD:
case ACPI_TABLE_EVENT_UNINSTALL:
/*
 * We do not need to do anything here because the table is not
 * deleted from the global table list when it is unloaded.
 */
break;
default:
return AE_BAD_PARAMETER;
}
return AE_OK;
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
void __iomem *base;
ssize_t size;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
size = data_attr->attr.size;
if (offset < 0)
return -EINVAL;
if (offset >= size)
return 0;
if (count > size - offset)
count = size - offset;
base = acpi_os_map_iomem(data_attr->addr, size);
if (!base)
return -ENOMEM;
memcpy_fromio(buf, base + offset, count);
acpi_os_unmap_iomem(base, size);
return count;
}
static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
struct acpi_table_bert *bert = th;
if (bert->header.length < sizeof(struct acpi_table_bert) ||
bert->region_length < sizeof(struct acpi_hest_generic_status)) {
kfree(data_attr);
return -EINVAL;
}
data_attr->addr = bert->address;
data_attr->attr.size = bert->region_length;
data_attr->attr.attr.name = "BERT";
return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}
static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
{
struct acpi_table_ccel *ccel = th;
if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
!ccel->log_area_start_address || !ccel->log_area_minimum_length) {
kfree(data_attr);
return -EINVAL;
}
data_attr->addr = ccel->log_area_start_address;
data_attr->attr.size = ccel->log_area_minimum_length;
data_attr->attr.attr.name = "CCEL";
return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}
static struct acpi_data_obj {
char *name;
int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
{ ACPI_SIG_BERT, acpi_bert_data_init },
{ ACPI_SIG_CCEL, acpi_ccel_data_init },
};
#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
static int acpi_table_data_init(struct acpi_table_header *th)
{
struct acpi_data_attr *data_attr;
int i;
for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
if (!data_attr)
return -ENOMEM;
sysfs_attr_init(&data_attr->attr.attr);
data_attr->attr.read = acpi_data_show;
data_attr->attr.attr.mode = 0400;
return acpi_data_objs[i].fn(th, data_attr);
}
}
return 0;
}
static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
struct acpi_table_header *table_header = NULL;
int table_index;
acpi_status status;
int ret;
tables_kobj = kobject_create_and_add("tables", acpi_kobj);
if (!tables_kobj)
goto err;
tables_data_kobj = kobject_create_and_add("data", tables_kobj);
if (!tables_data_kobj)
goto err_tables_data;
dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
for (table_index = 0;; table_index++) {
status = acpi_get_table_by_index(table_index, &table_header);
if (status == AE_BAD_PARAMETER)
break;
if (ACPI_FAILURE(status))
continue;
table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
if (!table_attr)
return -ENOMEM;
ret = acpi_table_attr_init(tables_kobj,
table_attr, table_header);
if (ret) {
kfree(table_attr);
return ret;
}
list_add_tail(&table_attr->node, &acpi_table_attr_list);
acpi_table_data_init(table_header);
}
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(tables_data_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
return 0;
err_dynamic_tables:
kobject_put(tables_data_kobj);
err_tables_data:
kobject_put(tables_kobj);
err:
return -ENOMEM;
}
/*
* Detailed ACPI IRQ counters:
* /sys/firmware/acpi/interrupts/
*/
u32 acpi_irq_handled;
u32 acpi_irq_not_handled;
#define COUNT_GPE 0
#define COUNT_SCI 1 /* acpi_irq_handled */
#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
#define COUNT_ERROR 3 /* other */
#define NUM_COUNTERS_EXTRA 4
struct event_counter {
u32 count;
u32 flags;
};
static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;
static struct attribute_group interrupt_stats_attr_group = {
.name = "interrupts",
};
static struct kobj_attribute *counter_attrs;
static void delete_gpe_attr_array(void)
{
struct event_counter *tmp = all_counters;
all_counters = NULL;
kfree(tmp);
if (counter_attrs) {
int i;
for (i = 0; i < num_gpes; i++)
kfree(counter_attrs[i].attr.name);
kfree(counter_attrs);
}
kfree(all_attrs);
}
static void gpe_count(u32 gpe_number)
{
acpi_gpe_count++;
if (!all_counters)
return;
if (gpe_number < num_gpes)
all_counters[gpe_number].count++;
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
}
static void fixed_event_count(u32 event_number)
{
if (!all_counters)
return;
if (event_number < ACPI_NUM_FIXED_EVENTS)
all_counters[num_gpes + event_number].count++;
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
}
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
if (event_type == ACPI_EVENT_TYPE_GPE) {
gpe_count(event_number);
pr_debug("GPE event 0x%02x\n", event_number);
} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
fixed_event_count(event_number);
pr_debug("Fixed event 0x%02x\n", event_number);
} else {
pr_debug("Other event 0x%02x\n", event_number);
}
}
static int get_status(u32 index, acpi_event_status *ret,
acpi_handle *handle)
{
acpi_status status;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
return -EINVAL;
if (index < num_gpes) {
status = acpi_get_gpe_device(index, handle);
if (ACPI_FAILURE(status)) {
pr_warn("Invalid GPE 0x%x", index);
return -ENXIO;
}
status = acpi_get_gpe_status(*handle, index, ret);
} else {
status = acpi_get_event_status(index - num_gpes, ret);
}
if (ACPI_FAILURE(status))
return -EIO;
return 0;
}
static ssize_t counter_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int index = attr - counter_attrs;
int size;
acpi_handle handle;
acpi_event_status status;
int result = 0;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
acpi_irq_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
acpi_irq_not_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
acpi_gpe_count;
size = sprintf(buf, "%8u", all_counters[index].count);
/* "gpe_all" or "sci" */
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
goto end;
result = get_status(index, &status, &handle);
if (result)
goto end;
if (status & ACPI_EVENT_FLAG_ENABLE_SET)
size += sprintf(buf + size, " EN");
else
size += sprintf(buf + size, " ");
if (status & ACPI_EVENT_FLAG_STATUS_SET)
size += sprintf(buf + size, " STS");
else
size += sprintf(buf + size, " ");
if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
size += sprintf(buf + size, " invalid ");
else if (status & ACPI_EVENT_FLAG_ENABLED)
size += sprintf(buf + size, " enabled ");
else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
size += sprintf(buf + size, " wake_enabled");
else
size += sprintf(buf + size, " disabled ");
if (status & ACPI_EVENT_FLAG_MASKED)
size += sprintf(buf + size, " masked ");
else
size += sprintf(buf + size, " unmasked");
end:
size += sprintf(buf + size, "\n");
return result ? result : size;
}
/*
 * counter_set() sets the specified counter. Writing any value to the
 * total "sci" file clears all counters. The per-GPE and fixed-event
 * files accept commands such as "enable", "disable" and "clear" to
 * control the event from user space.
 */
static ssize_t counter_set(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf,
size_t size)
{
int index = attr - counter_attrs;
acpi_event_status status;
acpi_handle handle;
int result = 0;
unsigned long tmp;
if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
int i;
for (i = 0; i < num_counters; ++i)
all_counters[i].count = 0;
acpi_gpe_count = 0;
acpi_irq_handled = 0;
acpi_irq_not_handled = 0;
goto end;
}
/* show the event status for both GPEs and Fixed Events */
result = get_status(index, &status, &handle);
if (result)
goto end;
if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
pr_warn("Can not change Invalid GPE/Fixed Event status\n");
return -EINVAL;
}
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_gpe(handle, index);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_gpe(handle, index);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_STATUS_SET))
result = acpi_clear_gpe(handle, index);
else if (!strcmp(buf, "mask\n"))
result = acpi_mask_gpe(handle, index, TRUE);
else if (!strcmp(buf, "unmask\n"))
result = acpi_mask_gpe(handle, index, FALSE);
else if (!kstrtoul(buf, 0, &tmp))
all_counters[index].count = tmp;
else
result = -EINVAL;
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLE_SET))
result = acpi_disable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLE_SET))
result = acpi_enable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_STATUS_SET))
result = acpi_clear_event(event);
else if (!kstrtoul(buf, 0, &tmp))
all_counters[index].count = tmp;
else
result = -EINVAL;
} else
all_counters[index].count = strtoul(buf, NULL, 0);
if (ACPI_FAILURE(result))
result = -EINVAL;
end:
return result ? result : size;
}
/*
* A Quirk Mechanism for GPE Flooding Prevention:
*
* Quirks may be needed to prevent GPE flooding on a specific GPE. The
* flooding typically cannot be detected and automatically prevented by
* ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
* the AML tables. This normally indicates a feature gap in Linux, thus
* instead of providing endless quirk tables, we provide a boot parameter
* for those who want this quirk. For example, if the users want to prevent
* the GPE flooding for GPE 00, they need to specify the following boot
* parameter:
* acpi_mask_gpe=0x00
* Note, the parameter can be a list (see bitmap_parselist() for the details).
* The masking status can be modified by the following runtime controlling
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
#define ACPI_MASKABLE_GPE_MAX 0x100
static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
int ret;
u8 gpe;
ret = kstrtou8(val, 0, &gpe);
if (ret) {
ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
if (ret)
return ret;
} else
set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
void __init acpi_gpe_apply_masked_gpes(void)
{
acpi_handle handle;
acpi_status status;
u16 gpe;
for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
status = acpi_get_gpe_device(gpe, &handle);
if (ACPI_SUCCESS(status)) {
pr_info("Masking GPE 0x%x.\n", gpe);
(void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
void acpi_irq_stats_init(void)
{
acpi_status status;
int i;
if (all_counters)
return;
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
if (all_attrs == NULL)
return;
all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
if (all_counters == NULL)
goto fail;
status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
if (ACPI_FAILURE(status))
goto fail;
counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
for (i = 0; i < num_counters; ++i) {
char buffer[12];
char *name;
if (i < num_gpes)
sprintf(buffer, "gpe%02X", i);
else if (i == num_gpes + ACPI_EVENT_PMTIMER)
sprintf(buffer, "ff_pmtimer");
else if (i == num_gpes + ACPI_EVENT_GLOBAL)
sprintf(buffer, "ff_gbl_lock");
else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
sprintf(buffer, "ff_pwr_btn");
else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
sprintf(buffer, "ff_slp_btn");
else if (i == num_gpes + ACPI_EVENT_RTC)
sprintf(buffer, "ff_rt_clk");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
sprintf(buffer, "gpe_all");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
sprintf(buffer, "sci");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
sprintf(buffer, "sci_not");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
sprintf(buffer, "error");
else
sprintf(buffer, "bug%02X", i);
name = kstrdup(buffer, GFP_KERNEL);
if (name == NULL)
goto fail;
sysfs_attr_init(&counter_attrs[i].attr);
counter_attrs[i].attr.name = name;
counter_attrs[i].attr.mode = 0644;
counter_attrs[i].show = counter_show;
counter_attrs[i].store = counter_set;
all_attrs[i] = &counter_attrs[i].attr;
}
interrupt_stats_attr_group.attrs = all_attrs;
if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
return;
fail:
delete_gpe_attr_array();
}
static void __exit interrupt_stats_exit(void)
{
sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
delete_gpe_attr_array();
}
static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
return sprintf(buf, "%d\n", hotplug->enabled);
}
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t size)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
unsigned int val;
if (kstrtouint(buf, 10, &val) || val > 1)
return -EINVAL;
acpi_scan_hotplug_enabled(hotplug, val);
return size;
}
static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(hotplug_profile);
static const struct kobj_type acpi_hotplug_profile_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = hotplug_profile_groups,
};
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
const char *name)
{
int error;
if (!hotplug_kobj)
goto err_out;
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error) {
kobject_put(&hotplug->kobj);
goto err_out;
}
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;
err_out:
pr_err("Unable to add hotplug profile '%s'\n", name);
}
static ssize_t force_remove_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", 0);
}
static ssize_t force_remove_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t size)
{
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
if (val) {
pr_err("Enabling force_remove is not supported anymore. Please report to [email protected] if you depend on this functionality\n");
return -EINVAL;
}
return size;
}
static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
int __init acpi_sysfs_init(void)
{
int result;
result = acpi_tables_sysfs_init();
if (result)
return result;
hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
if (!hotplug_kobj)
return -ENOMEM;
result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
if (result)
return result;
result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
return result;
}
| linux-master | drivers/acpi/sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ec.c - ACPI Embedded Controller Driver (v3)
*
* Copyright (C) 2001-2015 Intel Corporation
* Author: 2014, 2015 Lv Zheng <[email protected]>
* 2006, 2007 Alexey Starikovskiy <[email protected]>
* 2006 Denis Sadykov <[email protected]>
* 2004 Luming Yu <[email protected]>
* 2001, 2002 Andy Grover <[email protected]>
* 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2008 Alexey Starikovskiy <[email protected]>
*/
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>
#include "internal.h"
#define ACPI_EC_CLASS "embedded_controller"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
/*
* The SCI_EVT clearing timing is not defined by the ACPI specification.
* This leads to lots of practical timing issues for the host EC driver.
* The following variations are defined (from the target EC firmware's
* perspective):
* STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
* target can clear SCI_EVT at any time so long as the host can see
* the indication by reading the status register (EC_SC). So the
* host should re-check SCI_EVT after the first time the SCI_EVT
* indication is seen, which is the same time the query request
* (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
* at any later time could indicate another event. Normally such
* kind of EC firmware has implemented an event queue and will
* return 0x00 to indicate "no outstanding event".
* QUERY: After seeing the query request (QR_EC) written to the command
* register (EC_CMD) by the host and having prepared the responding
* event value in the data register (EC_DATA), the target can safely
* clear SCI_EVT because the target can confirm that the current
* event is being handled by the host. The host then should check
* SCI_EVT right after reading the event response from the data
* register (EC_DATA).
* EVENT: After seeing the event response read from the data register
* (EC_DATA) by the host, the target can clear SCI_EVT. As the
* target requires time to notice the change in the data register
* (EC_DATA), the host may be required to wait additional guarding
* time before checking the SCI_EVT again. Such guarding may not be
* necessary if the host is notified via another IRQ.
*/
#define ACPI_EC_EVT_TIMING_STATUS 0x00
#define ACPI_EC_EVT_TIMING_QUERY 0x01
#define ACPI_EC_EVT_TIMING_EVENT 0x02
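/*
 * In other words, the timings above determine where the host must
 * re-check SCI_EVT before submitting another QR_EC:
 * STATUS - right after writing QR_EC to the command register (EC_CMD);
 * QUERY  - right after reading the event value from the data register
 *          (EC_DATA);
 * EVENT  - only after a guarding delay (or another IRQ) following the
 *          EC_DATA read; see ec_guard() and acpi_ec_guard_event() below.
 */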
/* EC commands */
enum ec_command {
ACPI_EC_COMMAND_READ = 0x80,
ACPI_EC_COMMAND_WRITE = 0x81,
ACPI_EC_BURST_ENABLE = 0x82,
ACPI_EC_BURST_DISABLE = 0x83,
ACPI_EC_COMMAND_QUERY = 0x84,
};
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL 550 /* Wait 550us between EC transaction polls */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
* when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
enum {
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
EC_FLAGS_EC_REG_CALLED, /* OpReg ACPI _REG method called */
EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
EC_FLAGS_EVENTS_MASKED, /* Events masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
/*
 * If the number of spurious interrupts seen during a single transaction
 * exceeds this threshold, assume a GPE storm and disable the GPE for
 * the rest of the transaction (falling back to polling).
*/
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of spurious GPEs tolerated before assuming a GPE storm");
static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
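/*
 * Illustration only: per the note above, these parameters live in the
 * acpi namespace, so they can be set on the kernel command line, e.g.
 * with hypothetical values:
 *   acpi.ec_delay=1000 acpi.ec_busy_polling=1 acpi.ec_no_wakeup=1
 */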
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
acpi_handle handle;
void *data;
u8 query_bit;
struct kref kref;
};
struct transaction {
const u8 *wdata;
u8 *rdata;
unsigned short irq_count;
u8 command;
u8 wi;
u8 ri;
u8 wlen;
u8 rlen;
u8 flags;
};
struct acpi_ec_query {
struct transaction transaction;
struct work_struct work;
struct acpi_ec_query_handler *handler;
struct acpi_ec *ec;
};
static int acpi_ec_submit_query(struct acpi_ec *ec);
static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);
static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
* Logging/Debugging
* -------------------------------------------------------------------------- */
/*
 * Separators used by developers to mark the boundaries of the EC
 * handling processes in the log output.
*/
#ifdef DEBUG
#define EC_DBG_SEP " "
#define EC_DBG_DRV "+++++"
#define EC_DBG_STM "====="
#define EC_DBG_REQ "*****"
#define EC_DBG_EVT "#####"
#else
#define EC_DBG_SEP ""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif
#define ec_log_raw(fmt, ...) \
pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_log_drv(fmt, ...) \
ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
/* --------------------------------------------------------------------------
* Device Flags
* -------------------------------------------------------------------------- */
static bool acpi_ec_started(struct acpi_ec *ec)
{
return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
!test_bit(EC_FLAGS_STOPPED, &ec->flags);
}
static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
/*
 * During the early stages (boot/resume), OSPM should not enable
 * event handling; only EC transactions are allowed to be
 * performed.
*/
if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
return false;
/*
 * However, disabling event handling during the late stage (suspend)
 * is experimental and is controlled by the "ec_freeze_events" boot
 * parameter:
* 1. true: The EC event handling is disabled before entering
* the noirq stage.
* 2. false: The EC event handling is automatically disabled as
* soon as the EC driver is stopped.
*/
if (ec_freeze_events)
return acpi_ec_started(ec);
else
return test_bit(EC_FLAGS_STARTED, &ec->flags);
}
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
return ec->reference_count == 1;
}
/* --------------------------------------------------------------------------
* EC Registers
* -------------------------------------------------------------------------- */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
ec_dbg_raw("EC_SC(R) = 0x%2.2x "
"SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
x,
!!(x & ACPI_EC_FLAG_SCI),
!!(x & ACPI_EC_FLAG_BURST),
!!(x & ACPI_EC_FLAG_CMD),
!!(x & ACPI_EC_FLAG_IBF),
!!(x & ACPI_EC_FLAG_OBF));
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
ec->timestamp = jiffies;
ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
outb(command, ec->command_addr);
ec->timestamp = jiffies;
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
outb(data, ec->data_addr);
ec->timestamp = jiffies;
}
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
switch (cmd) {
case 0x80:
return "RD_EC";
case 0x81:
return "WR_EC";
case 0x82:
return "BE_EC";
case 0x83:
return "BD_EC";
case 0x84:
return "QR_EC";
}
return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd) "UNDEF"
#endif
/* --------------------------------------------------------------------------
* GPE Registers
* -------------------------------------------------------------------------- */
static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{
acpi_event_status gpe_status = 0;
(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
}
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
if (open)
acpi_enable_gpe(NULL, ec->gpe);
else {
BUG_ON(ec->reference_count < 1);
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
}
if (acpi_ec_gpe_status_set(ec)) {
/*
* On some platforms, EN=1 writes cannot trigger GPE. So
 * software needs to manually trigger a pseudo GPE event on
* EN=1 writes.
*/
ec_dbg_raw("Polling quirk");
advance_transaction(ec, false);
}
}
static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
if (close)
acpi_disable_gpe(NULL, ec->gpe);
else {
BUG_ON(ec->reference_count < 1);
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
}
}
/* --------------------------------------------------------------------------
* Transaction Management
* -------------------------------------------------------------------------- */
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
ec->gpe >= 0 && ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
static void acpi_ec_complete_request(struct acpi_ec *ec)
{
bool flushed = false;
ec->reference_count--;
if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
ec->gpe >= 0 && ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
wake_up(&ec->wait);
}
static void acpi_ec_mask_events(struct acpi_ec *ec)
{
if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
if (ec->gpe >= 0)
acpi_ec_disable_gpe(ec, false);
else
disable_irq_nosync(ec->irq);
ec_dbg_drv("Polling enabled");
set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
}
}
static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
if (ec->gpe >= 0)
acpi_ec_enable_gpe(ec, false);
else
enable_irq(ec->irq);
ec_dbg_drv("Polling disabled");
}
}
/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is in progress
* @ec: the EC device
*
* This function must be used before taking a new action that should hold
* the reference count. If this function returns false, then the action
* must be discarded or it will prevent the flush operation from being
* completed.
*/
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
if (!acpi_ec_started(ec))
return false;
acpi_ec_submit_request(ec);
return true;
}
static void acpi_ec_submit_event(struct acpi_ec *ec)
{
/*
* It is safe to mask the events here, because acpi_ec_close_event()
* will run at least once after this.
*/
acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (ec->event_state != EC_EVENT_READY)
return;
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->event_state = EC_EVENT_IN_PROGRESS;
/*
* If events_to_process is greater than 0 at this point, the while ()
* loop in acpi_ec_event_handler() is still running and incrementing
* events_to_process will cause it to invoke acpi_ec_submit_query() once
* more, so it is not necessary to queue up the event work to start the
* same loop again.
*/
if (ec->events_to_process++ > 0)
return;
ec->events_in_progress++;
queue_work(ec_wq, &ec->work);
}
static void acpi_ec_complete_event(struct acpi_ec *ec)
{
if (ec->event_state == EC_EVENT_IN_PROGRESS)
ec->event_state = EC_EVENT_COMPLETE;
}
static void acpi_ec_close_event(struct acpi_ec *ec)
{
if (ec->event_state != EC_EVENT_READY)
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->event_state = EC_EVENT_READY;
acpi_ec_unmask_events(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
ec_log_drv("event unblocked");
/*
* Unconditionally invoke this once after enabling the event
* handling mechanism to detect the pending events.
*/
advance_transaction(ec, false);
}
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
ec_log_drv("event blocked");
}
/*
* Process _Q events that might have accumulated in the EC.
* Run with locked ec mutex.
*/
static void acpi_ec_clear(struct acpi_ec *ec)
{
int i;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
if (acpi_ec_submit_query(ec))
break;
}
if (unlikely(i == ACPI_EC_CLEAR_MAX))
pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
else
pr_info("%d stale EC events cleared\n", i);
}
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
acpi_ec_clear(ec);
}
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
flush_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
/*
 * When ec_freeze_events is true, we need to flush events at
 * the proper point, before entering the noirq stage.
*/
__acpi_ec_flush_work();
}
void acpi_ec_flush_work(void)
{
/* Without ec_wq there is nothing to flush. */
if (!ec_wq)
return;
__acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
unsigned long flags;
bool guarded;
spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
* evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
* acceptable period.
*
* The guarding period is applicable if the event state is not
* EC_EVENT_READY, but otherwise if the current transaction is of the
* ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
* and it should not be applied to let the transaction transition into
* the ACPI_EC_COMMAND_POLL state immediately.
*/
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
ec->curr->flags |= flag;
if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
return;
switch (ec_event_clearing) {
case ACPI_EC_EVT_TIMING_STATUS:
if (flag == ACPI_EC_COMMAND_POLL)
acpi_ec_close_event(ec);
return;
case ACPI_EC_EVT_TIMING_QUERY:
if (flag == ACPI_EC_COMMAND_COMPLETE)
acpi_ec_close_event(ec);
return;
case ACPI_EC_EVT_TIMING_EVENT:
if (flag == ACPI_EC_COMMAND_COMPLETE)
acpi_ec_complete_event(ec);
}
}
static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{
if (t->irq_count < ec_storm_threshold)
++t->irq_count;
/* Trigger if the threshold is 0 too. */
if (t->irq_count == ec_storm_threshold)
acpi_ec_mask_events(ec);
}
static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{
struct transaction *t = ec->curr;
bool wakeup = false;
u8 status;
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
status = acpi_ec_read_status(ec);
/*
 * If another IRQ or a guarded polling mode advancement is detected,
 * the next QR_EC submission is allowed.
*/
if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state == EC_EVENT_COMPLETE)
acpi_ec_close_event(ec);
if (!t)
goto out;
}
if (t->flags & ACPI_EC_COMMAND_POLL) {
if (t->wlen > t->wi) {
if (!(status & ACPI_EC_FLAG_IBF))
acpi_ec_write_data(ec, t->wdata[t->wi++]);
else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
acpi_ec_spurious_interrupt(ec, t);
} else if (t->rlen > t->ri) {
if (status & ACPI_EC_FLAG_OBF) {
t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
wakeup = true;
if (t->command == ACPI_EC_COMMAND_QUERY)
ec_dbg_evt("Command(%s) completed by hardware",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
}
} else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
acpi_ec_spurious_interrupt(ec, t);
}
} else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
wakeup = true;
}
} else if (!(status & ACPI_EC_FLAG_IBF)) {
acpi_ec_write_cmd(ec, t->command);
ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
}
out:
if (status & ACPI_EC_FLAG_SCI)
acpi_ec_submit_event(ec);
if (wakeup && interrupt)
wake_up(&ec->wait);
}
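/* Reset the per-transaction state before the command byte is (re)issued. */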
static void start_transaction(struct acpi_ec *ec)
{
ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
ec->curr->flags = 0;
}
static int ec_guard(struct acpi_ec *ec)
{
unsigned long guard = usecs_to_jiffies(ec->polling_guard);
unsigned long timeout = ec->timestamp + guard;
/* Ensure guarding period before polling EC status */
do {
if (ec->busy_polling) {
/* Perform busy polling */
if (ec_transaction_completed(ec))
return 0;
udelay(jiffies_to_usecs(guard));
} else {
/*
* Perform wait polling
 * 1. Wait for the transaction to be completed by the
* GPE handler after the transaction enters
* ACPI_EC_COMMAND_POLL state.
* 2. A special guarding logic is also required
* for event clearing mode "event" before the
* transaction enters ACPI_EC_COMMAND_POLL
* state.
*/
if (!ec_transaction_polled(ec) &&
!acpi_ec_guard_event(ec))
break;
if (wait_event_timeout(ec->wait,
ec_transaction_completed(ec),
guard))
return 0;
}
} while (time_before(jiffies, timeout));
return -ETIME;
}
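/*
 * Worked example for the guard above: with the default ec_polling_guard
 * of 550us and HZ=250, usecs_to_jiffies(550) rounds up to 1 jiffy (4ms),
 * so in wait polling mode each wait_event_timeout() call sleeps for up
 * to 4ms before the window ending at ec->timestamp + guard is
 * re-evaluated.
 */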
static int ec_poll(struct acpi_ec *ec)
{
unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
unsigned long delay = jiffies +
msecs_to_jiffies(ec_delay);
do {
if (!ec_guard(ec))
return 0;
spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
unsigned long tmp;
int ret = 0;
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
goto unlock;
}
ec_dbg_ref(ec, "Increase command");
/* following two actions should be kept atomic */
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
int status;
u32 glk;
if (!ec || !t || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
if (ec->global_lock) {
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
status = -ENODEV;
goto unlock;
}
}
status = acpi_ec_transaction_unlocked(ec, t);
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
mutex_unlock(&ec->mutex);
return status;
}
static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
u8 d;
struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
return acpi_ec_transaction(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
.wdata = NULL, .rdata = NULL,
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
acpi_ec_transaction(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
int result;
u8 d;
struct transaction t = {.command = ACPI_EC_COMMAND_READ,
.wdata = &address, .rdata = &d,
.wlen = 1, .rlen = 1};
result = acpi_ec_transaction(ec, &t);
*data = d;
return result;
}
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
.wdata = wdata, .rdata = NULL,
.wlen = 2, .rlen = 0};
return acpi_ec_transaction(ec, &t);
}
int ec_read(u8 addr, u8 *val)
{
int err;
u8 temp_data;
if (!first_ec)
return -ENODEV;
err = acpi_ec_read(first_ec, addr, &temp_data);
if (!err) {
*val = temp_data;
return 0;
}
return err;
}
EXPORT_SYMBOL(ec_read);
int ec_write(u8 addr, u8 val)
{
if (!first_ec)
return -ENODEV;
return acpi_ec_write(first_ec, addr, val);
}
EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 *wdata, unsigned wdata_len,
u8 *rdata, unsigned rdata_len)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
.wlen = wdata_len, .rlen = rdata_len};
if (!first_ec)
return -ENODEV;
return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);
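/*
 * Illustration only (not part of the driver): a minimal sketch of how
 * another driver might use the exported helpers above. The EC offset
 * 0xA0 is hypothetical; real offsets are defined by platform firmware.
 */
#if 0
static int example_toggle_ec_bit(void)
{
	u8 val;
	int ret;

	ret = ec_read(0xA0, &val);		/* hypothetical offset */
	if (ret)
		return ret;
	return ec_write(0xA0, val ^ BIT(0));	/* flip the low status bit */
}
#endif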
/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
if (!first_ec)
return NULL;
return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
if (!resuming) {
acpi_ec_submit_request(ec);
ec_dbg_ref(ec, "Increase driver");
}
ec_log_drv("EC started");
}
spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
unsigned long flags;
bool flushed;
spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease driver");
} else if (!ec_freeze_events)
__acpi_ec_disable_event(ec);
clear_bit(EC_FLAGS_STARTED, &ec->flags);
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
if (!ec)
return;
mutex_lock(&ec->mutex);
/* Prevent transactions from being carried out */
acpi_ec_stop(ec, true);
mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions(void)
{
/*
* Allow transactions to happen again (this function is called from
* atomic context during wakeup, so we don't need to acquire the mutex).
*/
if (first_ec)
acpi_ec_start(first_ec, true);
}
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
kref_get(&handler->kref);
mutex_unlock(&ec->mutex);
return handler;
}
}
mutex_unlock(&ec->mutex);
return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
{
struct acpi_ec_query_handler *handler =
container_of(kref, struct acpi_ec_query_handler, kref);
kfree(handler);
}
static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
kref_put(&handler->kref, acpi_ec_query_handler_release);
}
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data)
{
struct acpi_ec_query_handler *handler;
if (!handle && !func)
return -EINVAL;
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler)
return -ENOMEM;
handler->query_bit = query_bit;
handler->handle = handle;
handler->func = func;
handler->data = data;
mutex_lock(&ec->mutex);
kref_init(&handler->kref);
list_add(&handler->node, &ec->list);
mutex_unlock(&ec->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
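/*
 * Illustration only: a hypothetical consumer registering a callback for
 * EC query 0x2C on the first EC. The callback signature matches the
 * handler->func(handler->data) invocation in acpi_ec_event_processor()
 * below.
 */
#if 0
static int example_query_handler(void *data)
{
	pr_info("EC query 0x2C fired\n");
	return 0;
}

static int example_register_query_handler(void)
{
	if (!first_ec)
		return -ENODEV;
	return acpi_ec_add_query_handler(first_ec, 0x2C, NULL,
					 example_query_handler, NULL);
}
#endif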
static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
bool remove_all, u8 query_bit)
{
struct acpi_ec_query_handler *handler, *tmp;
LIST_HEAD(free_list);
mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
/*
* When remove_all is false, only remove custom query handlers
* which have handler->func set. This is done to preserve query
 * handlers discovered through ACPI, as they should continue handling
* EC queries.
*/
if (remove_all || (handler->func && handler->query_bit == query_bit)) {
list_del_init(&handler->node);
list_add(&handler->node, &free_list);
}
}
mutex_unlock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &free_list, node)
acpi_ec_put_query_handler(handler);
}
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
static void acpi_ec_event_processor(struct work_struct *work)
{
struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
struct acpi_ec_query_handler *handler = q->handler;
struct acpi_ec *ec = q->ec;
ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
}
static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
{
struct acpi_ec_query *q;
struct transaction *t;
q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
if (!q)
return NULL;
INIT_WORK(&q->work, acpi_ec_event_processor);
t = &q->transaction;
t->command = ACPI_EC_COMMAND_QUERY;
t->rdata = pval;
t->rlen = 1;
q->ec = ec;
return q;
}
static int acpi_ec_submit_query(struct acpi_ec *ec)
{
struct acpi_ec_query *q;
u8 value = 0;
int result;
q = acpi_ec_create_query(ec, &value);
if (!q)
return -ENOMEM;
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
result = acpi_ec_transaction(ec, &q->transaction);
if (result)
goto err_exit;
if (!value) {
result = -ENODATA;
goto err_exit;
}
q->handler = acpi_ec_get_query_handler_by_value(ec, value);
if (!q->handler) {
result = -ENODATA;
goto err_exit;
}
/*
 * It is reported that _Qxx methods are evaluated in parallel on Windows:
* https://bugzilla.kernel.org/show_bug.cgi?id=94411
*
* Put this log entry before queue_work() to make it appear in the log
* before any other messages emitted during workqueue handling.
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
spin_unlock_irq(&ec->lock);
return 0;
err_exit:
kfree(q);
return result;
}
static void acpi_ec_event_handler(struct work_struct *work)
{
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
ec_dbg_evt("Event started");
spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
/*
 * Before exiting, make sure that it will be possible to queue up the
* event handling work again regardless of whether or not the query
* queued up above is processed successfully.
*/
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
bool guard_timeout;
acpi_ec_complete_event(ec);
ec_dbg_evt("Event stopped");
spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
advance_transaction(ec, false);
} else {
acpi_ec_close_event(ec);
ec_dbg_evt("Event stopped");
}
ec->events_in_progress--;
spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
{
/*
* Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
* changes to always trigger a GPE interrupt.
*
* GPE STS is a W1C register, which means:
*
* 1. Software can clear it without worrying about clearing the other
* GPEs' STS bits when the hardware sets them in parallel.
*
* 2. As long as software can ensure only clearing it when it is set,
* hardware won't set it in parallel.
*/
if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
acpi_clear_gpe(NULL, ec->gpe);
advance_transaction(ec, true);
}
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, void *data)
{
acpi_ec_handle_interrupt(data);
return ACPI_INTERRUPT_HANDLED;
}
static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
acpi_ec_handle_interrupt(data);
return IRQ_HANDLED;
}
/* --------------------------------------------------------------------------
* Address Space Management
* -------------------------------------------------------------------------- */
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
struct acpi_ec *ec = handler_context;
int result = 0, i, bytes = bits / 8;
u8 *value = (u8 *)value64;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value)
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
case -ENODEV:
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
default:
return AE_OK;
}
}
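/*
 * For illustration: a 16-bit AML field read at EC offset 0x50 reaches the
 * handler above with bits == 16, so it is split into two byte-wide
 * transactions, acpi_ec_read(ec, 0x50, ...) followed by
 * acpi_ec_read(ec, 0x51, ...), bracketed by burst enable/disable.
 */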
/* --------------------------------------------------------------------------
* Driver Interface
* -------------------------------------------------------------------------- */
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);
static void acpi_ec_free(struct acpi_ec *ec)
{
if (first_ec == ec)
first_ec = NULL;
if (boot_ec == ec)
boot_ec = NULL;
kfree(ec);
}
static struct acpi_ec *acpi_ec_alloc(void)
{
struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
if (!ec)
return NULL;
mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
spin_lock_init(&ec->lock);
INIT_WORK(&ec->work, acpi_ec_event_handler);
ec->timestamp = jiffies;
ec->busy_polling = true;
ec->polling_guard = 0;
ec->gpe = -1;
ec->irq = -1;
return ec;
}
static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
void *context, void **return_value)
{
char node_name[5];
struct acpi_buffer buffer = { sizeof(node_name), node_name };
struct acpi_ec *ec = context;
int value = 0;
acpi_status status;
status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
return AE_OK;
}
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
acpi_status status;
unsigned long long tmp = 0;
struct acpi_ec *ec = context;
/* clear addr values, ec_parse_io_ports depends on them */
ec->command_addr = ec->data_addr = 0;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
ec_parse_io_ports, ec);
if (ACPI_FAILURE(status))
return status;
if (ec->data_addr == 0 || ec->command_addr == 0)
return AE_OK;
/* Get GPE bit assignment (EC events). */
/* TODO: Add support for _GPE returning a package */
status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
if (ACPI_SUCCESS(status))
ec->gpe = tmp;
/*
* Errors are non-fatal, allowing for ACPI Reduced Hardware
* platforms which use GpioInt instead of GPE.
*/
/* Use the global lock for all EC transactions? */
tmp = 0;
acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
ec->global_lock = tmp;
ec->handle = handle;
return AE_CTRL_TERMINATE;
}
static bool install_gpe_event_handler(struct acpi_ec *ec)
{
acpi_status status;
status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
ACPI_GPE_EDGE_TRIGGERED,
&acpi_ec_gpe_handler, ec);
if (ACPI_FAILURE(status))
return false;
if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true);
return true;
}
static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{
return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
"ACPI EC", ec) >= 0;
}
/**
* ec_install_handlers - Install service callbacks and register query methods.
* @ec: Target EC.
* @device: ACPI device object corresponding to @ec.
* @call_reg: If _REG should be called to notify OpRegion availability
*
* Install a handler for the EC address space type unless it has been installed
* already. If @device is not NULL, also look for EC query methods in the
* namespace and register them, and install an event (either GPE or GPIO IRQ)
* handler for the EC, if possible.
*
* Return:
* -ENODEV if the address space handler cannot be installed, which means
* "unable to handle transactions",
* -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
* or 0 (success) otherwise.
*/
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
bool call_reg)
{
acpi_status status;
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
acpi_ec_enter_noirq(ec);
status = acpi_install_address_space_handler_no_reg(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
NULL, ec);
if (ACPI_FAILURE(status)) {
acpi_ec_stop(ec, false);
return -ENODEV;
}
set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
ec->address_space_handler_holder = ec->handle;
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
if (!device)
return 0;
if (ec->gpe < 0) {
/* ACPI reduced hardware platforms use a GpioInt from _CRS. */
int irq = acpi_dev_gpio_irq_get(device, 0);
/*
* Bail out right away for deferred probing or complete the
* initialization regardless of any other errors.
*/
if (irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
else if (irq >= 0)
ec->irq = irq;
}
if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods,
NULL, ec, NULL);
set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
bool ready = false;
if (ec->gpe >= 0)
ready = install_gpe_event_handler(ec);
else if (ec->irq >= 0)
ready = install_gpio_irq_event_handler(ec);
if (ready) {
set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
acpi_ec_leave_noirq(ec);
}
/*
* Failures to install an event handler are not fatal, because
* the EC can be polled for events.
*/
}
/* EC is fully operational, allow queries */
acpi_ec_enable_event(ec);
return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(
ec->address_space_handler_holder,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err("failed to remove space handler\n");
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
/*
* Stops handling the EC transactions after removing the operation
* region handler. This is required because _REG(DISCONNECT)
* invoked during the removal can result in new EC transactions.
*
* Flushes the EC requests and thus disables the GPE before
* removing the GPE handler. This is required by the current ACPICA
 * GPE core: it automatically disables a GPE that fires while no
 * handler is installed for it, so drivers must disable their GPEs
 * prior to removing the GPE handlers.
*/
acpi_ec_stop(ec, false);
if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
if (ec->gpe >= 0 &&
ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
pr_err("failed to remove gpe handler\n");
if (ec->irq >= 0)
free_irq(ec->irq, ec);
clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
}
if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
acpi_ec_remove_query_handlers(ec, true, 0);
clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
}
static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg)
{
int ret;
ret = ec_install_handlers(ec, device, call_reg);
if (ret)
return ret;
/* First EC capable of handling transactions */
if (!first_ec)
first_ec = ec;
pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
ec->data_addr);
if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
if (ec->gpe >= 0)
pr_info("GPE=0x%x\n", ec->gpe);
else
pr_info("IRQ=%d\n", ec->irq);
}
return ret;
}
static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec;
int ret;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
if (boot_ec && (boot_ec->handle == device->handle ||
!strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
/* Fast path: this device corresponds to the boot EC. */
ec = boot_ec;
} else {
acpi_status status;
ec = acpi_ec_alloc();
if (!ec)
return -ENOMEM;
status = ec_parse_device(device->handle, 0, ec, NULL);
if (status != AE_CTRL_TERMINATE) {
ret = -EINVAL;
goto err;
}
if (boot_ec && ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr) {
/*
* Trust PNP0C09 namespace location rather than ECDT ID.
* But trust ECDT GPE rather than _GPE because of ASUS
* quirks. So do not change boot_ec->gpe to ec->gpe,
* except when the TRUST_DSDT_GPE quirk is set.
*/
boot_ec->handle = ec->handle;
if (EC_FLAGS_TRUST_DSDT_GPE)
boot_ec->gpe = ec->gpe;
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
}
}
ret = acpi_ec_setup(ec, device, true);
if (ret)
goto err;
if (ec == boot_ec)
acpi_handle_info(boot_ec->handle,
"Boot %s EC initialization complete\n",
boot_ec_is_ecdt ? "ECDT" : "DSDT");
acpi_handle_info(ec->handle,
"EC: Used to handle transactions and events\n");
device->driver_data = ec;
ret = !!request_region(ec->data_addr, 1, "EC data");
WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
/* Reprobe devices depending on the EC */
acpi_dev_clear_dependencies(device);
acpi_handle_debug(ec->handle, "enumerated.\n");
return 0;
err:
if (ec != boot_ec)
acpi_ec_free(ec);
return ret;
}
static void acpi_ec_remove(struct acpi_device *device)
{
struct acpi_ec *ec;
if (!device)
return;
ec = acpi_driver_data(device);
release_region(ec->data_addr, 1);
release_region(ec->command_addr, 1);
device->driver_data = NULL;
if (ec != boot_ec) {
ec_remove_handlers(ec);
acpi_ec_free(ec);
}
}
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
struct acpi_ec *ec = context;
if (resource->type != ACPI_RESOURCE_TYPE_IO)
return AE_OK;
/*
* The first address region returned is the data port, and
* the second address region returned is the status/command
* port.
*/
if (ec->data_addr == 0)
ec->data_addr = resource->data.io.minimum;
else if (ec->command_addr == 0)
ec->command_addr = resource->data.io.minimum;
else
return AE_CTRL_TERMINATE;
return AE_OK;
}
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{ACPI_ECDT_HID, 0},
{"", 0},
};
/*
* This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
*/
void __init acpi_ec_dsdt_probe(void)
{
struct acpi_ec *ec;
acpi_status status;
int ret;
/*
 * If the platform has an ECDT, there is no need to proceed: the
 * following probe is not a part of the ACPI device enumeration and
 * executing _STA is not safe, so this probe risks picking up an
 * invalid EC device.
*/
if (boot_ec)
return;
ec = acpi_ec_alloc();
if (!ec)
return;
/*
* At this point, the namespace is initialized, so start to find
* the namespace objects.
*/
status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
if (ACPI_FAILURE(status) || !ec->handle) {
acpi_ec_free(ec);
return;
}
/*
* When the DSDT EC is available, always re-configure boot EC to
* have _REG evaluated. _REG can only be evaluated after the
* namespace initialization.
 * At this point, the GPE is not fully initialized, so do not handle
 * the events yet.
*/
ret = acpi_ec_setup(ec, NULL, true);
if (ret) {
acpi_ec_free(ec);
return;
}
boot_ec = ec;
acpi_handle_info(ec->handle,
"Boot DSDT EC used to handle transactions\n");
}
/*
* acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
*
* First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
* found a matching object in the namespace.
*
* Next, in case the DSDT EC is not functioning, it is still necessary to
* provide a functional ECDT EC to handle events, so add an extra device object
* to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
*
* This is useful on platforms with valid ECDT and invalid DSDT EC settings,
* like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
*/
static void __init acpi_ec_ecdt_start(void)
{
struct acpi_table_ecdt *ecdt_ptr;
acpi_handle handle;
acpi_status status;
/* Bail out if a matching EC has been found in the namespace. */
if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT)
return;
/* Look up the object pointed to from the ECDT in the namespace. */
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_FAILURE(status))
return;
status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
if (ACPI_SUCCESS(status)) {
boot_ec->handle = handle;
/* Add a special ACPI device object to represent the boot EC. */
acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
/*
* On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. If too many events accumulate, these ECs stop reporting GPEs until
 * they are manually polled (e.g. Samsung Series 5/9 notebooks).
*
* https://bugzilla.kernel.org/show_bug.cgi?id=44161
*
* Ideally, the EC should also be instructed NOT to accumulate events during
* sleep (which Windows seems to do somehow), but the interface to control this
* behaviour is not known at this time.
*
* Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
* however it is very likely that other Samsung models are affected.
*
* On systems which don't accumulate _Q events during sleep, this extra check
* should be harmless.
*/
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
pr_debug("Detected system needing EC poll on resume.\n");
EC_FLAGS_CLEAR_ON_RESUME = 1;
ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
return 0;
}
/*
* Some ECDTs contain wrong register addresses.
* MSI MS-171F
* https://bugzilla.kernel.org/show_bug.cgi?id=12461
*/
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
pr_debug("Detected system needing ECDT address correction.\n");
EC_FLAGS_CORRECT_ECDT = 1;
return 0;
}
/*
* Some ECDTs contain wrong GPE setting, but they share the same port addresses
* with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case.
* https://bugzilla.kernel.org/show_bug.cgi?id=209989
*/
static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
{
pr_debug("Detected system needing DSDT GPE setting.\n");
EC_FLAGS_TRUST_DSDT_GPE = 1;
return 0;
}
static const struct dmi_system_id ec_dmi_table[] __initconst = {
{
/*
* MSI MS-171F
* https://bugzilla.kernel.org/show_bug.cgi?id=12461
*/
.callback = ec_correct_ecdt,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),
},
},
{
/*
* HP Pavilion Gaming Laptop 15-cx0xxx
* https://bugzilla.kernel.org/show_bug.cgi?id=209989
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
},
},
{
/*
* HP Pavilion Gaming Laptop 15-cx0041ur
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
},
},
{
/*
* Samsung hardware
* https://bugzilla.kernel.org/show_bug.cgi?id=44161
*/
.callback = ec_clear_on_resume,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
},
},
{}
};
void __init acpi_ec_ecdt_probe(void)
{
struct acpi_table_ecdt *ecdt_ptr;
struct acpi_ec *ec;
acpi_status status;
int ret;
/* Generate a boot ec context. */
dmi_check_system(ec_dmi_table);
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_FAILURE(status))
return;
if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
/*
* Asus X50GL:
* https://bugzilla.kernel.org/show_bug.cgi?id=11880
*/
goto out;
}
ec = acpi_ec_alloc();
if (!ec)
goto out;
if (EC_FLAGS_CORRECT_ECDT) {
ec->command_addr = ecdt_ptr->data.address;
ec->data_addr = ecdt_ptr->control.address;
} else {
ec->command_addr = ecdt_ptr->control.address;
ec->data_addr = ecdt_ptr->data.address;
}
/*
* Ignore the GPE value on Reduced Hardware platforms.
* Some products have this set to an erroneous value.
*/
if (!acpi_gbl_reduced_hardware)
ec->gpe = ecdt_ptr->gpe;
ec->handle = ACPI_ROOT_OBJECT;
/*
 * At this point, the namespace is not initialized, so do not look up
 * the namespace objects or handle the events yet.
*/
ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
goto out;
}
boot_ec = ec;
boot_ec_is_ecdt = true;
pr_info("Boot ECDT EC used to handle transactions\n");
out:
acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
struct acpi_ec *ec =
acpi_driver_data(to_acpi_device(dev));
if (!pm_suspend_no_platform() && ec_freeze_events)
acpi_ec_disable_event(ec);
return 0;
}
static int acpi_ec_suspend_noirq(struct device *dev)
{
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
/*
* The SCI handler doesn't run at this point, so the GPE can be
* masked at the low level without side effects.
*/
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
acpi_ec_enter_noirq(ec);
return 0;
}
static int acpi_ec_resume_noirq(struct device *dev)
{
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
acpi_ec_leave_noirq(ec);
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
}
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
acpi_driver_data(to_acpi_device(dev));
acpi_ec_enable_event(ec);
return 0;
}
void acpi_ec_mark_gpe_for_wake(void)
{
if (first_ec && !ec_no_wakeup)
acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);
void acpi_ec_set_gpe_wake_mask(u8 action)
{
if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
{
return ec->events_in_progress + ec->queries_in_progress > 0;
}
bool acpi_ec_dispatch_gpe(void)
{
bool work_in_progress = false;
if (!first_ec)
return acpi_any_gpe_status_set(U32_MAX);
/*
* Report wakeup if the status bit is set for any enabled GPE other
* than the EC one.
*/
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;
/*
* Cancel the SCI wakeup and process all pending events in case there
* are any wakeup ones in there.
*
* Note that if any non-EC GPEs are active at this point, the SCI will
* retrigger after the rearming in acpi_s2idle_wake(), so no events
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
clear_gpe_and_advance_transaction(first_ec, false);
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
pm_pr_dbg("ACPI EC GPE dispatched\n");
/* Drain EC work. */
do {
acpi_ec_flush_work();
pm_pr_dbg("ACPI EC work flushed\n");
spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops acpi_ec_pm = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
static int param_set_event_clearing(const char *val,
const struct kernel_param *kp)
{
int result = 0;
if (!strncmp(val, "status", sizeof("status") - 1)) {
ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
} else if (!strncmp(val, "query", sizeof("query") - 1)) {
ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
} else if (!strncmp(val, "event", sizeof("event") - 1)) {
ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
pr_info("Assuming SCI_EVT clearing on event reads\n");
} else
result = -EINVAL;
return result;
}
static int param_get_event_clearing(char *buffer,
const struct kernel_param *kp)
{
switch (ec_event_clearing) {
case ACPI_EC_EVT_TIMING_STATUS:
return sprintf(buffer, "status\n");
case ACPI_EC_EVT_TIMING_QUERY:
return sprintf(buffer, "query\n");
case ACPI_EC_EVT_TIMING_EVENT:
return sprintf(buffer, "event\n");
default:
return sprintf(buffer, "invalid\n");
}
}
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
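/*
 * Illustration only: as with the other parameters in the acpi namespace,
 * the assumed timing can be changed at runtime, e.g.:
 *   echo event > /sys/module/acpi/parameters/ec_event_clearing
 */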
static struct acpi_driver acpi_ec_driver = {
.name = "ec",
.class = ACPI_EC_CLASS,
.ids = ec_device_ids,
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
},
.drv.pm = &acpi_ec_pm,
};
static void acpi_ec_destroy_workqueues(void)
{
if (ec_wq) {
destroy_workqueue(ec_wq);
ec_wq = NULL;
}
if (ec_query_wq) {
destroy_workqueue(ec_query_wq);
ec_query_wq = NULL;
}
}
static int acpi_ec_init_workqueues(void)
{
if (!ec_wq)
ec_wq = alloc_ordered_workqueue("kec", 0);
if (!ec_query_wq)
ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
if (!ec_wq || !ec_query_wq) {
acpi_ec_destroy_workqueues();
return -ENODEV;
}
return 0;
}
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
{ },
};
void __init acpi_ec_init(void)
{
int result;
result = acpi_ec_init_workqueues();
if (result)
return;
/*
	 * Disable EC wakeup on the following systems to prevent periodic
	 * wakeups from the EC GPE.
*/
if (dmi_check_system(acpi_ec_no_wakeup)) {
ec_no_wakeup = true;
pr_debug("Disabling EC wakeup on suspend-to-idle\n");
}
/* Driver must be registered after acpi_ec_init_workqueues(). */
acpi_bus_register_driver(&acpi_ec_driver);
acpi_ec_ecdt_start();
}
/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
acpi_ec_destroy_workqueues();
}
#endif /* 0 */
| linux-master | drivers/acpi/ec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/acpi/resource.c - ACPI device resources interpretation.
*
* Copyright (C) 2012, Intel Corp.
* Author: Rafael J. Wysocki <[email protected]>
*
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
static inline bool acpi_iospace_resource_valid(struct resource *res)
{
/* On X86 IO space is limited to the [0 - 64K] IO port range */
return res->end < 0x10003;
}
#else
#define valid_IRQ(i) (true)
/*
* ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
* addresses mapping IO space in CPU physical address space, IO space
* resources can be placed anywhere in the 64-bit physical address space.
*/
static inline bool
acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
{
return ext_irq->resource_source.string_length == 0 &&
ext_irq->producer_consumer == ACPI_CONSUMER;
}
#else
static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
{
return true;
}
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
{
u64 reslen = end - start + 1;
/*
	 * CHECKME: len might need to be checked against a minimum
	 * length as well. 1 is fine for I/O, but for memory it does
	 * not make any sense at all.
	 * Note: some BIOSes report an incorrect length for ACPI address space
	 * descriptors, so the 'reslen == len' check was removed to avoid
	 * regressions.
*/
if (len && reslen && start <= end)
return true;
pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
io ? "io" : "mem", start, end, len);
return false;
}
static void acpi_dev_memresource_flags(struct resource *res, u64 len,
u8 write_protect)
{
res->flags = IORESOURCE_MEM;
if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (write_protect == ACPI_READ_WRITE_MEMORY)
res->flags |= IORESOURCE_MEM_WRITEABLE;
}
static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
u8 write_protect)
{
res->start = start;
res->end = start + len - 1;
acpi_dev_memresource_flags(res, len, write_protect);
}
/**
* acpi_dev_resource_memory - Extract ACPI memory resource information.
* @ares: Input ACPI resource object.
* @res: Output generic resource object.
*
* Check if the given ACPI resource object represents a memory resource and
* if that's the case, use the information in it to populate the generic
* resource object pointed to by @res.
*
* Return:
 * 1) false with res->flags set to zero: not the expected resource type
* 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
* 3) true: valid assigned resource
*/
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
{
struct acpi_resource_memory24 *memory24;
struct acpi_resource_memory32 *memory32;
struct acpi_resource_fixed_memory32 *fixed_memory32;
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
acpi_dev_get_memresource(res, memory24->minimum << 8,
memory24->address_length << 8,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
break;
default:
res->flags = 0;
return false;
}
return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
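/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * probing one ACPI resource object with the helper above; the same pattern
 * applies to acpi_dev_resource_io(). The acpi_resource pointer would
 * normally come from an acpi_walk_resources() callback.
 */
#if 0 /* example only */
static void example_try_memory(struct acpi_resource *ares)
{
	struct resource res;

	if (acpi_dev_resource_memory(ares, &res))
		pr_info("memory resource %pR\n", &res);
	else if (res.flags & IORESOURCE_DISABLED)
		pr_info("memory resource present but unassigned\n");
}
#endif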
static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
u8 io_decode, u8 translation_type)
{
res->flags = IORESOURCE_IO;
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
res->flags |= IORESOURCE_IO_16BIT_ADDR;
if (translation_type == ACPI_SPARSE_TRANSLATION)
res->flags |= IORESOURCE_IO_SPARSE;
}
static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
u8 io_decode)
{
res->start = start;
res->end = start + len - 1;
acpi_dev_ioresource_flags(res, len, io_decode, 0);
}
/**
* acpi_dev_resource_io - Extract ACPI I/O resource information.
* @ares: Input ACPI resource object.
* @res: Output generic resource object.
*
* Check if the given ACPI resource object represents an I/O resource and
* if that's the case, use the information in it to populate the generic
* resource object pointed to by @res.
*
* Return:
 * 1) false with res->flags set to zero: not the expected resource type
* 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
* 3) true: valid assigned resource
*/
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
{
struct acpi_resource_io *io;
struct acpi_resource_fixed_io *fixed_io;
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
break;
default:
res->flags = 0;
return false;
}
return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
static bool acpi_decode_space(struct resource_win *win,
struct acpi_resource_address *addr,
struct acpi_address64_attribute *attr)
{
u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
bool wp = addr->info.mem.write_protect;
u64 len = attr->address_length;
u64 start, end, offset = 0;
struct resource *res = &win->res;
/*
* Filter out invalid descriptor according to ACPI Spec 5.0, section
* 6.4.3.5 Address Space Resource Descriptors.
*/
if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
(addr->min_address_fixed && addr->max_address_fixed && !len))
pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
addr->min_address_fixed, addr->max_address_fixed, len);
/*
* For bridges that translate addresses across the bridge,
* translation_offset is the offset that must be added to the
* address on the secondary side to obtain the address on the
* primary side. Non-bridge devices must list 0 for all Address
* Translation offset bits.
*/
if (addr->producer_consumer == ACPI_PRODUCER)
offset = attr->translation_offset;
else if (attr->translation_offset)
pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
attr->translation_offset);
start = attr->minimum + offset;
end = attr->maximum + offset;
win->offset = offset;
res->start = start;
res->end = end;
if (sizeof(resource_size_t) < sizeof(u64) &&
(offset != win->offset || start != res->start || end != res->end)) {
pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
attr->minimum, attr->maximum);
return false;
}
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
addr->info.io.translation_type);
break;
case ACPI_BUS_NUMBER_RANGE:
res->flags = IORESOURCE_BUS;
break;
default:
return false;
}
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
return !(res->flags & IORESOURCE_DISABLED);
}
/**
* acpi_dev_resource_address_space - Extract ACPI address space information.
* @ares: Input ACPI resource object.
* @win: Output generic resource object.
*
* Check if the given ACPI resource object represents an address space resource
* and if that's the case, use the information in it to populate the generic
* resource object pointed to by @win.
*
* Return:
 * 1) false with win->res.flags set to zero: not the expected resource type
* 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
* resource
* 3) true: valid assigned resource
*/
bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win)
{
struct acpi_resource_address64 addr;
win->res.flags = 0;
if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
return false;
return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
&addr.address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
/**
* acpi_dev_resource_ext_address_space - Extract ACPI address space information.
* @ares: Input ACPI resource object.
* @win: Output generic resource object.
*
* Check if the given ACPI resource object represents an extended address space
* resource and if that's the case, use the information in it to populate the
* generic resource object pointed to by @win.
*
* Return:
 * 1) false with win->res.flags set to zero: not the expected resource type
* 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
* resource
* 3) true: valid assigned resource
*/
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win)
{
struct acpi_resource_extended_address64 *ext_addr;
win->res.flags = 0;
if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
return false;
ext_addr = &ares->data.ext_address64;
return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
&ext_addr->address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
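/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * probing one ACPI resource object for either flavor of address space
 * descriptor using the two helpers above.
 */
#if 0 /* example only */
static void example_try_window(struct acpi_resource *ares)
{
	struct resource_win win = {};

	if (acpi_dev_resource_address_space(ares, &win) ||
	    acpi_dev_resource_ext_address_space(ares, &win))
		pr_info("window %pR (offset %#llx)\n", &win.res,
			(unsigned long long)win.offset);
}
#endif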
/**
* acpi_dev_irq_flags - Determine IRQ resource flags.
* @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI.
* @shareable: Whether or not the interrupt is shareable.
* @wake_capable: Wake capability as provided by ACPI.
*/
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{
unsigned long flags;
if (triggering == ACPI_LEVEL_SENSITIVE)
flags = polarity == ACPI_ACTIVE_LOW ?
IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL;
else
flags = polarity == ACPI_ACTIVE_LOW ?
IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE;
if (shareable == ACPI_SHARED)
flags |= IORESOURCE_IRQ_SHAREABLE;
if (wake_capable == ACPI_WAKE_CAPABLE)
flags |= IORESOURCE_IRQ_WAKECAPABLE;
return flags | IORESOURCE_IRQ;
}
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
/**
* acpi_dev_get_irq_type - Determine irq type.
* @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI.
*/
unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
{
switch (polarity) {
case ACPI_ACTIVE_LOW:
return triggering == ACPI_EDGE_SENSITIVE ?
IRQ_TYPE_EDGE_FALLING :
IRQ_TYPE_LEVEL_LOW;
case ACPI_ACTIVE_HIGH:
return triggering == ACPI_EDGE_SENSITIVE ?
IRQ_TYPE_EDGE_RISING :
IRQ_TYPE_LEVEL_HIGH;
case ACPI_ACTIVE_BOTH:
if (triggering == ACPI_EDGE_SENSITIVE)
return IRQ_TYPE_EDGE_BOTH;
fallthrough;
default:
return IRQ_TYPE_NONE;
}
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);
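/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * feeding the same ACPI attributes through both helpers above. The
 * attribute values are arbitrary examples.
 */
#if 0 /* example only */
static void example_irq_attrs(void)
{
	/* A level-triggered, active-low, shared, wake-capable interrupt. */
	unsigned long res_flags = acpi_dev_irq_flags(ACPI_LEVEL_SENSITIVE,
						     ACPI_ACTIVE_LOW,
						     ACPI_SHARED,
						     ACPI_WAKE_CAPABLE);
	/* The same attributes map to IRQ_TYPE_LEVEL_LOW for the irqchip layer. */
	unsigned int irq_type = acpi_dev_get_irq_type(ACPI_LEVEL_SENSITIVE,
						      ACPI_ACTIVE_LOW);

	pr_info("resource flags %#lx, irq type %u\n", res_flags, irq_type);
}
#endif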
static const struct dmi_system_id medion_laptop[] = {
{
.ident = "MEDION P15651",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_BOARD_NAME, "M15T"),
},
},
{
.ident = "MEDION S17405",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_BOARD_NAME, "M17T"),
},
},
{
.ident = "MEDION S17413",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
},
},
{ }
};
static const struct dmi_system_id asus_laptop[] = {
{
.ident = "Asus Vivobook K3402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
},
},
{
.ident = "Asus Vivobook K3502ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
},
},
{
.ident = "Asus Vivobook S5402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
},
},
{
.ident = "Asus Vivobook S5602ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
.ident = "Asus ExpertBook B1502CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
},
},
{
.ident = "Asus ExpertBook B2402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
},
},
{
.ident = "Asus ExpertBook B2402FBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
},
},
{
.ident = "Asus ExpertBook B2502",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
},
},
{ }
};
static const struct dmi_system_id tongfang_gm_rg[] = {
{
.ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
{ }
};
static const struct dmi_system_id maingear_laptop[] = {
{
.ident = "MAINGEAR Vector Pro 2 15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
}
},
{
.ident = "MAINGEAR Vector Pro 2 17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
},
},
{ }
};
static const struct dmi_system_id pcspecialist_laptop[] = {
{
.ident = "PCSpecialist Elimina Pro 16 M",
/*
* Some models have product-name "Elimina Pro 16 M",
* others "GM6BGEQ". Match on board-name to match both.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"),
},
},
{ }
};
static const struct dmi_system_id lg_laptop[] = {
{
.ident = "LG Electronics 17U70P",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
},
},
{ }
};
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
unsigned char triggering;
unsigned char polarity;
unsigned char shareable;
bool override;
};
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ pcspecialist_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
};
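/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the shape of a new override_table entry. "example_laptop" is a
 * hypothetical dmi_system_id array, not a real machine.
 */
#if 0 /* example only */
	/* Trust the DSDT for IRQ 1, i.e. skip the MADT override: */
	{ example_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
#endif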
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
u8 shareable)
{
int i;
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
if (dmi_check_system(entry->system) &&
entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
entry->shareable == shareable)
return entry->override;
}
#ifdef CONFIG_X86
/*
* Always use the MADT override info, except for the i8042 PS/2 ctrl
	 * IRQs (1 and 12). For these, the DSDT IRQ settings should sometimes
	 * be used instead, otherwise PS/2 keyboards / mice will not work.
*/
if (gsi != 1 && gsi != 12)
return true;
/* If the override comes from an INT_SRC_OVR MADT entry, honor it. */
if (acpi_int_src_ovr[gsi])
return true;
/*
* IRQ override isn't needed on modern AMD Zen systems and
* this override breaks active low IRQs on AMD Ryzen 6000 and
* newer systems. Skip it.
*/
if (boot_cpu_has(X86_FEATURE_ZEN))
return false;
#endif
return true;
}
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
u8 wake_capable, bool check_override)
{
int irq, p, t;
if (!valid_IRQ(gsi)) {
irqresource_disabled(res, gsi);
return;
}
/*
* In IO-APIC mode, use overridden attribute. Two reasons:
* 1. BIOS bug in DSDT
* 2. BIOS uses IO-APIC mode Interrupt Source Override
*
* We do this only if we are dealing with IRQ() or IRQNoFlags()
* resource (the legacy ISA resources). With modern ACPI 5 devices
* using extended IRQ descriptors we take the IRQ configuration
* from _CRS directly.
*/
if (check_override &&
acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
!acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
if (triggering != trig || polarity != pol) {
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
p ? "low" : "high",
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
}
}
res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
if (irq >= 0) {
res->start = irq;
res->end = irq;
} else {
irqresource_disabled(res, gsi);
}
}
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
* @index: Index into the array of GSIs represented by the resource.
* @res: Output generic resource object.
*
 * Check if the given ACPI resource object represents an interrupt resource
 * and @index does not exceed the resource's interrupt count (true is returned
 * in that case regardless of the results of the other checks). If that's the
 * case, register the GSI corresponding to @index from the array of interrupts
 * represented by the resource and populate the generic resource object pointed
 * to by @res accordingly. If the registration of the GSI is not successful,
 * IORESOURCE_DISABLED will be set in that object's flags.
*
* Return:
 * 1) false with res->flags set to zero: not the expected resource type
* 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
* 3) true: valid assigned resource
*/
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res)
{
struct acpi_resource_irq *irq;
struct acpi_resource_extended_irq *ext_irq;
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IRQ:
/*
* Per spec, only one interrupt per descriptor is allowed in
* _CRS, but some firmware violates this, so parse them all.
*/
irq = &ares->data.irq;
if (index >= irq->interrupt_count) {
irqresource_disabled(res, 0);
return false;
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
irq->shareable, irq->wake_capable,
true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
if (index >= ext_irq->interrupt_count) {
irqresource_disabled(res, 0);
return false;
}
if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
ext_irq->shareable, ext_irq->wake_capable,
false);
else
irqresource_disabled(res, 0);
break;
default:
res->flags = 0;
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
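/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * iterating over every interrupt in one resource object, as
 * acpi_dev_process_resource() below does.
 */
#if 0 /* example only */
static void example_collect_irqs(struct acpi_resource *ares)
{
	struct resource res;
	int i;

	for (i = 0; acpi_dev_resource_interrupt(ares, i, &res); i++) {
		if (res.flags & IORESOURCE_DISABLED)
			continue;	/* GSI registration failed */
		pr_info("index %d -> Linux IRQ %llu\n", i,
			(unsigned long long)res.start);
	}
}
#endif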
/**
* acpi_dev_free_resource_list - Free resource from %acpi_dev_get_resources().
* @list: The head of the resource list to free.
*/
void acpi_dev_free_resource_list(struct list_head *list)
{
resource_list_free(list);
}
EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
struct res_proc_context {
struct list_head *list;
int (*preproc)(struct acpi_resource *, void *);
void *preproc_data;
int count;
int error;
};
static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
struct res_proc_context *c)
{
struct resource_entry *rentry;
rentry = resource_list_create_entry(NULL, 0);
if (!rentry) {
c->error = -ENOMEM;
return AE_NO_MEMORY;
}
*rentry->res = win->res;
rentry->offset = win->offset;
resource_list_add_tail(rentry, c->list);
c->count++;
return AE_OK;
}
static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
void *context)
{
struct res_proc_context *c = context;
struct resource_win win;
struct resource *res = &win.res;
int i;
if (c->preproc) {
int ret;
ret = c->preproc(ares, c->preproc_data);
if (ret < 0) {
c->error = ret;
return AE_ABORT_METHOD;
} else if (ret > 0) {
return AE_OK;
}
}
memset(&win, 0, sizeof(win));
if (acpi_dev_resource_memory(ares, res)
|| acpi_dev_resource_io(ares, res)
|| acpi_dev_resource_address_space(ares, &win)
|| acpi_dev_resource_ext_address_space(ares, &win))
return acpi_dev_new_resource_entry(&win, c);
for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
acpi_status status;
status = acpi_dev_new_resource_entry(&win, c);
if (ACPI_FAILURE(status))
return status;
}
return AE_OK;
}
static int __acpi_dev_get_resources(struct acpi_device *adev,
struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data, char *method)
{
struct res_proc_context c;
acpi_status status;
if (!adev || !adev->handle || !list_empty(list))
return -EINVAL;
if (!acpi_has_method(adev->handle, method))
return 0;
c.list = list;
c.preproc = preproc;
c.preproc_data = preproc_data;
c.count = 0;
c.error = 0;
status = acpi_walk_resources(adev->handle, method,
acpi_dev_process_resource, &c);
if (ACPI_FAILURE(status)) {
acpi_dev_free_resource_list(list);
return c.error ? c.error : -EIO;
}
return c.count;
}
/**
* acpi_dev_get_resources - Get current resources of a device.
* @adev: ACPI device node to get the resources for.
* @list: Head of the resultant list of resources (must be empty).
* @preproc: The caller's preprocessing routine.
* @preproc_data: Pointer passed to the caller's preprocessing routine.
*
* Evaluate the _CRS method for the given device node and process its output by
* (1) executing the @preproc() routine provided by the caller, passing the
* resource pointer and @preproc_data to it as arguments, for each ACPI resource
* returned and (2) converting all of the returned ACPI resources into struct
* resource objects if possible. If the return value of @preproc() in step (1)
* is different from 0, step (2) is not applied to the given ACPI resource and
* if that value is negative, the whole processing is aborted and that value is
* returned as the final error code.
*
* The resultant struct resource objects are put on the list pointed to by
* @list, that must be empty initially, as members of struct resource_entry
* objects. Callers of this routine should use %acpi_dev_free_resource_list() to
* free that list.
*
* The number of resources in the output list is returned on success, an error
* code reflecting the error condition is returned otherwise.
*/
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data)
{
return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
METHOD_NAME__CRS);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
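/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal _CRS dump using the function above, with no preprocessing.
 */
#if 0 /* example only */
static void example_dump_crs(struct acpi_device *adev)
{
	struct list_head resource_list;
	struct resource_entry *rentry;
	int count;

	INIT_LIST_HEAD(&resource_list);
	count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (count < 0)
		return;
	list_for_each_entry(rentry, &resource_list, node)
		pr_info("%pR\n", rentry->res);
	acpi_dev_free_resource_list(&resource_list);
}
#endif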
static int is_memory(struct acpi_resource *ares, void *not_used)
{
struct resource_win win;
struct resource *res = &win.res;
memset(&win, 0, sizeof(win));
if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM))
return 1;
return !(acpi_dev_resource_memory(ares, res)
|| acpi_dev_resource_address_space(ares, &win)
|| acpi_dev_resource_ext_address_space(ares, &win));
}
/**
* acpi_dev_get_dma_resources - Get current DMA resources of a device.
* @adev: ACPI device node to get the resources for.
* @list: Head of the resultant list of resources (must be empty).
*
* Evaluate the _DMA method for the given device node and process its
* output.
*
* The resultant struct resource objects are put on the list pointed to
* by @list, that must be empty initially, as members of struct
* resource_entry objects. Callers of this routine should use
* %acpi_dev_free_resource_list() to free that list.
*
* The number of resources in the output list is returned on success,
* an error code reflecting the error condition is returned otherwise.
*/
int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
{
return __acpi_dev_get_resources(adev, list, is_memory, NULL,
METHOD_NAME__DMA);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
* acpi_dev_get_memory_resources - Get current memory resources of a device.
* @adev: ACPI device node to get the resources for.
* @list: Head of the resultant list of resources (must be empty).
*
* This is a helper function that locates all memory type resources of @adev
* with acpi_dev_get_resources().
*
* The number of resources in the output list is returned on success, an error
* code reflecting the error condition is returned otherwise.
*/
int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list)
{
return acpi_dev_get_resources(adev, list, is_memory, NULL);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources);
/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
* types
* @ares: Input ACPI resource object.
* @types: Valid resource types of IORESOURCE_XXX
*
* This is a helper function to support acpi_dev_get_resources(), which filters
* ACPI resource objects according to resource types.
*/
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types)
{
unsigned long type = 0;
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
type = IORESOURCE_MEM;
break;
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
type = IORESOURCE_IO;
break;
case ACPI_RESOURCE_TYPE_IRQ:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
type = IORESOURCE_IRQ;
break;
case ACPI_RESOURCE_TYPE_DMA:
case ACPI_RESOURCE_TYPE_FIXED_DMA:
type = IORESOURCE_DMA;
break;
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
type = IORESOURCE_REG;
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
type = IORESOURCE_MEM;
else if (ares->data.address.resource_type == ACPI_IO_RANGE)
type = IORESOURCE_IO;
else if (ares->data.address.resource_type ==
ACPI_BUS_NUMBER_RANGE)
type = IORESOURCE_BUS;
break;
default:
break;
}
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
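/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * using the filter above as an acpi_dev_get_resources() preproc callback,
 * in the same way is_memory() above uses it. A non-zero return from the
 * preproc skips conversion of that resource.
 */
#if 0 /* example only */
static int example_irq_only(struct acpi_resource *ares, void *not_used)
{
	/* 0 for IRQ resources (convert them), 1 for everything else (skip). */
	return acpi_dev_filter_resource_type(ares, IORESOURCE_IRQ);
}
#endif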
static int acpi_dev_consumes_res(struct acpi_device *adev, struct resource *res)
{
struct list_head resource_list;
struct resource_entry *rentry;
int ret, found = 0;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
return 0;
list_for_each_entry(rentry, &resource_list, node) {
if (resource_contains(rentry->res, res)) {
found = 1;
break;
}
}
acpi_dev_free_resource_list(&resource_list);
return found;
}
static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
void *context, void **ret)
{
struct resource *res = context;
struct acpi_device **consumer = (struct acpi_device **) ret;
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
if (!adev)
return AE_OK;
if (acpi_dev_consumes_res(adev, res)) {
*consumer = adev;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
/**
* acpi_resource_consumer - Find the ACPI device that consumes @res.
* @res: Resource to search for.
*
* Search the current resource settings (_CRS) of every ACPI device node
* for @res. If we find an ACPI device whose _CRS includes @res, return
* it. Otherwise, return NULL.
*/
struct acpi_device *acpi_resource_consumer(struct resource *res)
{
struct acpi_device *consumer = NULL;
acpi_get_devices(NULL, acpi_res_consumer_cb, res, (void **) &consumer);
return consumer;
}
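/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reporting which ACPI device, if any, claims a given resource.
 */
#if 0 /* example only */
static void example_report_consumer(struct resource *res)
{
	struct acpi_device *adev = acpi_resource_consumer(res);

	if (adev)
		dev_info(&adev->dev, "consumes %pR\n", res);
}
#endif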
| linux-master | drivers/acpi/resource.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI GSI IRQ layer
*
* Copyright (C) 2015 ARM Ltd.
* Author: Lorenzo Pieralisi <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
enum acpi_irq_model_id acpi_irq_model;
static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
* acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
* @gsi: GSI IRQ number to map
* @irq: pointer where linux IRQ number is stored
*
 * On success, the location pointed to by @irq is updated with a valid
 * Linux IRQ number (> 0); on failure it holds 0.
*
* Returns: 0 on success
* -EINVAL on failure
*/
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
struct irq_domain *d;
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
DOMAIN_BUS_ANY);
*irq = irq_find_mapping(d, gsi);
/*
	 * *irq == 0 means no mapping, which should be reported as a
	 * failure, unless there is an arch-specific fallback handler.
*/
if (!*irq && acpi_gsi_to_irq_fallback)
*irq = acpi_gsi_to_irq_fallback(gsi);
return (*irq > 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
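/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * looking up the Linux IRQ behind a GSI. GSI 9 is merely an example value
 * (a common SCI assignment).
 */
#if 0 /* example only */
static void example_map_sci(void)
{
	unsigned int irq;

	if (!acpi_gsi_to_irq(9, &irq))
		pr_info("GSI 9 maps to Linux IRQ %u\n", irq);
}
#endif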
/**
* acpi_register_gsi() - Map a GSI to a linux IRQ number
* @dev: device for which IRQ has to be mapped
* @gsi: GSI IRQ number
* @trigger: trigger type of the GSI number to be mapped
* @polarity: polarity of the GSI to be mapped
*
* Returns: a valid linux IRQ number on success
* -EINVAL on failure
*/
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
if (WARN_ON(!fwspec.fwnode)) {
pr_warn("GSI: No registered irqchip, giving up\n");
return -EINVAL;
}
fwspec.param[0] = gsi;
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
/**
* acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
* @gsi: GSI IRQ number
*/
void acpi_unregister_gsi(u32 gsi)
{
struct irq_domain *d;
int irq;
if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16))
return;
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
DOMAIN_BUS_ANY);
irq = irq_find_mapping(d, gsi);
irq_dispose_mapping(irq);
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
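/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a register/unregister pair for a GSI. The trigger/polarity values are
 * arbitrary examples.
 */
#if 0 /* example only */
static int example_claim_gsi(struct device *dev, u32 gsi)
{
	int irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
				    ACPI_ACTIVE_LOW);

	if (irq < 0)
		return irq;
	/* ... request_irq(irq, ...), and on teardown: */
	acpi_unregister_gsi(gsi);
	return 0;
}
#endif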
/**
* acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source.
* @source: acpi_resource_source to use for the lookup.
* @gsi: GSI IRQ number
*
* Description:
* Retrieve the fwhandle of the device referenced by the given IRQ resource
* source.
*
* Return:
* The referenced device fwhandle or NULL on failure
*/
static struct fwnode_handle *
acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
u32 gsi)
{
struct fwnode_handle *result;
struct acpi_device *device;
acpi_handle handle;
acpi_status status;
if (!source->string_length)
return acpi_get_gsi_domain_id(gsi);
status = acpi_get_handle(NULL, source->string_ptr, &handle);
if (WARN_ON(ACPI_FAILURE(status)))
return NULL;
device = acpi_get_acpi_dev(handle);
if (WARN_ON(!device))
return NULL;
result = &device->fwnode;
acpi_put_acpi_dev(device);
return result;
}
/*
* Context for the resource walk used to lookup IRQ resources.
* Contains a return code, the lookup index, and references to the flags
* and fwspec where the result is returned.
*/
struct acpi_irq_parse_one_ctx {
int rc;
unsigned int index;
unsigned long *res_flags;
struct irq_fwspec *fwspec;
};
/**
* acpi_irq_parse_one_match - Handle a matching IRQ resource.
* @fwnode: matching fwnode
* @hwirq: hardware IRQ number
* @triggering: triggering attributes of hwirq
 * @polarity: polarity attributes of hwirq
* @shareable: shareable attributes of hwirq
* @wake_capable: wake capable attribute of hwirq
* @ctx: acpi_irq_parse_one_ctx updated by this function
*
* Description:
* Handle a matching IRQ resource by populating the given ctx with
* the information passed.
*/
static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
u32 hwirq, u8 triggering,
u8 polarity, u8 shareable,
u8 wake_capable,
struct acpi_irq_parse_one_ctx *ctx)
{
if (!fwnode)
return;
ctx->rc = 0;
*ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
ctx->fwspec->fwnode = fwnode;
ctx->fwspec->param[0] = hwirq;
ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
ctx->fwspec->param_count = 2;
}
/**
* acpi_irq_parse_one_cb - Handle the given resource.
* @ares: resource to handle
* @context: context for the walk
*
* Description:
* This is called by acpi_walk_resources passing each resource returned by
* the _CRS method. We only inspect IRQ resources. Since IRQ resources
* might contain multiple interrupts we check if the index is within this
* one's interrupt array, otherwise we subtract the current resource IRQ
* count from the lookup index to prepare for the next resource.
* Once a match is found we call acpi_irq_parse_one_match to populate
* the result and end the walk by returning AE_CTRL_TERMINATE.
*
* Return:
* AE_OK if the walk should continue, AE_CTRL_TERMINATE if a matching
* IRQ resource was found.
*/
static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
void *context)
{
struct acpi_irq_parse_one_ctx *ctx = context;
struct acpi_resource_irq *irq;
struct acpi_resource_extended_irq *eirq;
struct fwnode_handle *fwnode;
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IRQ:
irq = &ares->data.irq;
if (ctx->index >= irq->interrupt_count) {
ctx->index -= irq->interrupt_count;
return AE_OK;
}
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
irq->shareable, irq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
if (eirq->producer_consumer == ACPI_PRODUCER)
return AE_OK;
if (ctx->index >= eirq->interrupt_count) {
ctx->index -= eirq->interrupt_count;
return AE_OK;
}
fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source,
eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
eirq->shareable, eirq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
/**
* acpi_irq_parse_one - Resolve an interrupt for a device
* @handle: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
* @fwspec: structure irq_fwspec filled by this function
* @flags: resource flags filled by this function
*
* Description:
* Resolves an interrupt for a device by walking its CRS resources to find
* the appropriate ACPI IRQ resource and populating the given struct irq_fwspec
* and flags.
*
* Return:
* The result stored in ctx.rc by the callback, or the default -EINVAL value
* if an error occurs.
*/
static int acpi_irq_parse_one(acpi_handle handle, unsigned int index,
struct irq_fwspec *fwspec, unsigned long *flags)
{
struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec };
acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_irq_parse_one_cb, &ctx);
return ctx.rc;
}
/**
* acpi_irq_get - Lookup an ACPI IRQ resource and use it to initialize resource.
* @handle: ACPI device handle
* @index: ACPI IRQ resource index to lookup
* @res: Linux IRQ resource to initialize
*
* Description:
* Look for the ACPI IRQ resource with the given index and use it to initialize
* the given Linux IRQ resource.
*
* Return:
* 0 on success
* -EINVAL if an error occurs
* -EPROBE_DEFER if the IRQ lookup/conversion failed
*/
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
{
struct irq_fwspec fwspec;
struct irq_domain *domain;
unsigned long flags;
int rc;
rc = acpi_irq_parse_one(handle, index, &fwspec, &flags);
if (rc)
return rc;
domain = irq_find_matching_fwnode(fwspec.fwnode, DOMAIN_BUS_ANY);
if (!domain)
return -EPROBE_DEFER;
rc = irq_create_fwspec_mapping(&fwspec);
if (rc <= 0)
return -EINVAL;
res->start = rc;
res->end = rc;
res->flags = flags;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_irq_get);
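/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical platform driver resolving its first ACPI IRQ resource,
 * honoring the -EPROBE_DEFER contract documented above.
 */
#if 0 /* example only */
static int example_get_first_irq(struct platform_device *pdev)
{
	struct resource r;
	int ret;

	ret = acpi_irq_get(ACPI_HANDLE(&pdev->dev), 0, &r);
	if (ret == -EPROBE_DEFER)
		return ret;		/* irqchip not probed yet, retry later */
	if (ret)
		return ret;
	return r.start;			/* the Linux IRQ number */
}
#endif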
/**
* acpi_set_irq_model - Setup the GSI irqdomain information
* @model: the value assigned to acpi_irq_model
* @fn: a dispatcher function that will return the domain fwnode
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
struct fwnode_handle *(*fn)(u32))
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
/**
 * acpi_set_gsi_to_irq_fallback - Register an arch-specific fallback handler
 * for GSI-to-IRQ translation, used when no irqdomain mapping exists.
 * @fn: arch-specific fallback handler
*/
void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32))
{
acpi_gsi_to_irq_fallback = fn;
}
/**
* acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default
* GSI domain as its parent.
* @flags: Irq domain flags associated with the domain
* @size: Size of the domain.
* @fwnode: Optional fwnode of the interrupt controller
* @ops: Pointer to the interrupt domain callbacks
* @host_data: Controller private data pointer
*/
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size,
struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *d;
/* This only works for the GIC model... */
if (acpi_irq_model != ACPI_IRQ_MODEL_GIC)
return NULL;
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0),
DOMAIN_BUS_ANY);
if (!d)
return NULL;
return irq_domain_create_hierarchy(d, flags, size, fwnode, ops,
host_data);
}
EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy);
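/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * stacking a hypothetical controller's domain on top of the GSI domain.
 * The fwnode, ops and priv arguments are assumptions supplied by the caller.
 */
#if 0 /* example only */
static struct irq_domain *
example_create_domain(struct fwnode_handle *fwnode,
		      const struct irq_domain_ops *ops, void *priv)
{
	/* 32 is an arbitrary example domain size. */
	return acpi_irq_create_hierarchy(0, 32, fwnode, ops, priv);
}
#endif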
| linux-master | drivers/acpi/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for Intel Lynxpoint LPSS.
*
* Copyright (C) 2013, Intel Corporation
* Authors: Mika Westerberg <[email protected]>
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include "internal.h"
#ifdef CONFIG_X86_INTEL_LPSS
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#define LPSS_ADDR(desc) ((unsigned long)&desc)
#define LPSS_CLK_SIZE 0x04
#define LPSS_LTR_SIZE 0x18
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
#define LPSS_RESETS 0x04
#define LPSS_RESETS_RESET_FUNC BIT(0)
#define LPSS_RESETS_RESET_APB BIT(1)
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_LTR_SNOOP_REQ BIT(15)
#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US 0x800
#define LPSS_LTR_SNOOP_LAT_32US 0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT 5
#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)
#define LPSS_PRV_REG_COUNT 9
/* LPSS Flags */
#define LPSS_CLK BIT(0)
#define LPSS_CLK_GATE BIT(1)
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
/*
* For some devices the DSDT AML code for another device turns off the device
* before our suspend handler runs, causing us to read/save all 1-s (0xffffffff)
* as ctx register values.
* Luckily these devices always use the same ctx register values, so we can
* work around this by saving the ctx registers once on activation.
*/
#define LPSS_SAVE_CTX_ONCE BIT(5)
#define LPSS_NO_D3_DELAY BIT(6)
struct lpss_private_data;
struct lpss_device_desc {
unsigned int flags;
const char *clk_con_id;
unsigned int prv_offset;
size_t prv_size_override;
const struct property_entry *properties;
void (*setup)(struct lpss_private_data *pdata);
bool resume_from_noirq;
};
static const struct lpss_device_desc lpss_dma_desc = {
.flags = LPSS_CLK,
};
struct lpss_private_data {
struct acpi_device *adev;
void __iomem *mmio_base;
resource_size_t mmio_size;
unsigned int fixed_clk_rate;
struct clk *clk;
const struct lpss_device_desc *dev_desc;
u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;
/* LPSS run time quirks */
static unsigned int lpss_quirks;
/*
* LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
*
* The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
* it can be powered off automatically whenever the last LPSS device goes down.
 * With no power, any access to the DMA controller will hang the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the ASUS T100TA transformer.
*
* This quirk overrides power state of entire LPSS island to keep DMA powered
* on whenever we have at least one other device in use.
*/
#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)
/* UART Component Parameter Register */
#define LPSS_UART_CPR 0xF4
#define LPSS_UART_CPR_AFCE BIT(4)
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
unsigned int offset;
u32 val;
offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
val = readl(pdata->mmio_base + offset);
writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
val = readl(pdata->mmio_base + LPSS_UART_CPR);
if (!(val & LPSS_UART_CPR_AFCE)) {
offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
val = readl(pdata->mmio_base + offset);
val |= LPSS_GENERAL_UART_RTS_OVRD;
writel(val, pdata->mmio_base + offset);
}
}
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
unsigned int offset;
u32 val;
offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
val = readl(pdata->mmio_base + offset);
val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
writel(val, pdata->mmio_base + offset);
}
/*
* BYT PWM used for backlight control by the i915 driver on systems without
* the Crystal Cove PMIC.
*/
static struct pwm_lookup byt_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
"pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
u64 uid;
/* Only call pwm_add_table for the first PWM controller */
if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
#define LPSS_I2C_ENABLE 0x6c
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
acpi_handle handle = pdata->adev->handle;
unsigned long long shared_host = 0;
acpi_status status;
u64 uid;
	/* Expected to always be successful, but better safe than sorry */
if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
if (ACPI_SUCCESS(status) && shared_host)
pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
}
lpss_deassert_reset(pdata);
if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
pdata->fixed_clk_rate = 133000000;
writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}
/*
* BSW PWM1 is used for backlight control by the i915 driver
* BSW PWM2 is used for backlight control for fixed (etched into the glass)
* touch controls on some models. These touch-controls have specialized
* drivers which know they need the "pwm_soc_lpss_2" con-id.
*/
static struct pwm_lookup bsw_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
"pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL,
"pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
u64 uid;
/* Only call pwm_add_table for the first PWM controller */
if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
static const struct property_entry lpt_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP),
{ }
};
static const struct lpss_device_desc lpt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
| LPSS_SAVE_CTX,
.prv_offset = 0x800,
.properties = lpt_spi_properties,
};
static const struct lpss_device_desc lpt_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
.prv_offset = 0x800,
};
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
{ },
};
static const struct lpss_device_desc lpt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
| LPSS_SAVE_CTX,
.clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
.properties = uart_properties,
};
static const struct lpss_device_desc lpt_sdio_dev_desc = {
.flags = LPSS_LTR,
.prv_offset = 0x1000,
.prv_size_override = 0x1018,
};
static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_pwm_setup,
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = bsw_pwm_setup,
.resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_pwm2_dev_desc = {
.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.resume_from_noirq = true,
};
static const struct lpss_device_desc byt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
.properties = uart_properties,
};
static const struct lpss_device_desc bsw_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
| LPSS_NO_D3_DELAY,
.clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
.properties = uart_properties,
};
static const struct property_entry byt_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP),
{ }
};
static const struct lpss_device_desc byt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
.properties = byt_spi_properties,
};
static const struct lpss_device_desc byt_sdio_dev_desc = {
.flags = LPSS_CLK,
};
static const struct lpss_device_desc byt_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
.resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
.resume_from_noirq = true,
};
static const struct property_entry bsw_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
{ }
};
static const struct lpss_device_desc bsw_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
| LPSS_NO_D3_DELAY,
.prv_offset = 0x400,
.setup = lpss_deassert_reset,
.properties = bsw_spi_properties,
};
static const struct x86_cpu_id lpss_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
#else
#define LPSS_ADDR(desc) (0UL)
#endif /* CONFIG_X86_INTEL_LPSS */
static const struct acpi_device_id acpi_lpss_device_ids[] = {
/* Generic LPSS devices */
{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
/* Lynxpoint LPSS devices */
{ "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
{ "INT33C7", },
/* BayTrail LPSS devices */
{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
{ "INT33B2", },
{ "INT33FC", },
/* Braswell LPSS devices */
{ "80862286", LPSS_ADDR(lpss_dma_desc) },
{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
{ "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) },
{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
/* Broadwell LPSS devices */
{ "INT3430", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT3431", LPSS_ADDR(lpt_spi_dev_desc) },
{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
{ "INT3437", },
/* Wildcat Point LPSS devices */
{ "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
{ }
};
#ifdef CONFIG_X86_INTEL_LPSS
/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;
static inline void lpt_register_clock_device(void)
{
lpss_clk_dev = platform_device_register_simple("clk-lpss-atom",
PLATFORM_DEVID_NONE,
NULL, 0);
}
static int register_device_clock(struct acpi_device *adev,
struct lpss_private_data *pdata)
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
const char *devname = dev_name(&adev->dev);
struct clk *clk;
struct lpss_clk_data *clk_data;
const char *parent, *clk_name;
void __iomem *prv_base;
if (!lpss_clk_dev)
lpt_register_clock_device();
if (IS_ERR(lpss_clk_dev))
return PTR_ERR(lpss_clk_dev);
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
clk = clk_data->clk;
if (!pdata->mmio_base
|| pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
return -ENODATA;
parent = clk_data->name;
prv_base = pdata->mmio_base + dev_desc->prv_offset;
if (pdata->fixed_clk_rate) {
clk = clk_register_fixed_rate(NULL, devname, parent, 0,
pdata->fixed_clk_rate);
goto out;
}
if (dev_desc->flags & LPSS_CLK_GATE) {
clk = clk_register_gate(NULL, devname, parent, 0,
prv_base, 0, 0, NULL);
parent = devname;
}
if (dev_desc->flags & LPSS_CLK_DIVIDER) {
/* Prevent division by zero */
if (!readl(prv_base))
writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
if (!clk_name)
return -ENOMEM;
clk = clk_register_fractional_divider(NULL, clk_name, parent,
CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
prv_base, 1, 15, 16, 15, 0, NULL);
parent = clk_name;
clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
if (!clk_name) {
kfree(parent);
return -ENOMEM;
}
clk = clk_register_gate(NULL, clk_name, parent,
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
prv_base, 31, 0, NULL);
kfree(parent);
kfree(clk_name);
}
out:
if (IS_ERR(clk))
return PTR_ERR(clk);
pdata->clk = clk;
clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
return 0;
}
struct lpss_device_links {
const char *supplier_hid;
const char *supplier_uid;
const char *consumer_hid;
const char *consumer_uid;
u32 flags;
const struct dmi_system_id *dep_missing_ids;
};
/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
},
},
{}
};
/*
* The _DEP method is used to identify dependencies but instead of creating
* device links for every handle in _DEP, only links in the following list are
* created. That is necessary because, in the general case, _DEP can refer to
* devices that might not have drivers, or that are on different buses, or where
* the supplier is not enumerated until after the consumer is probed.
*/
static const struct lpss_device_links lpss_device_links[] = {
/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
/* CHT iGPU depends on PMIC I2C controller */
{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
i2c1_dep_missing_dmi_ids},
/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
const struct lpss_device_links *link)
{
return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}
static bool acpi_lpss_is_consumer(struct acpi_device *adev,
const struct lpss_device_links *link)
{
return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
const char *hid;
const char *uid;
};
static int match_hid_uid(struct device *dev, const void *data)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
const struct hid_uid *id = data;
if (!adev)
return 0;
return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
struct device *dev;
struct hid_uid data = {
.hid = hid,
.uid = uid,
};
dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
if (dev)
return dev;
return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
struct acpi_handle_list dep_devices;
acpi_status status;
int i;
if (!acpi_has_method(adev->handle, "_DEP"))
return false;
status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
&dep_devices);
if (ACPI_FAILURE(status)) {
dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
return false;
}
for (i = 0; i < dep_devices.count; i++) {
if (dep_devices.handles[i] == handle)
return true;
}
return false;
}
static void acpi_lpss_link_consumer(struct device *dev1,
const struct lpss_device_links *link)
{
struct device *dev2;
dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
if (!dev2)
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
|| acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
}
static void acpi_lpss_link_supplier(struct device *dev1,
const struct lpss_device_links *link)
{
struct device *dev2;
dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
if (!dev2)
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
|| acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
}
static void acpi_lpss_create_device_links(struct acpi_device *adev,
struct platform_device *pdev)
{
int i;
for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
const struct lpss_device_links *link = &lpss_device_links[i];
if (acpi_lpss_is_supplier(adev, link))
acpi_lpss_link_consumer(&pdev->dev, link);
if (acpi_lpss_is_consumer(adev, link))
acpi_lpss_link_supplier(&pdev->dev, link);
}
}
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
const struct lpss_device_desc *dev_desc;
struct lpss_private_data *pdata;
struct resource_entry *rentry;
struct list_head resource_list;
struct platform_device *pdev;
int ret;
dev_desc = (const struct lpss_device_desc *)id->driver_data;
if (!dev_desc) {
pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
goto err_out;
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
if (rentry) {
if (dev_desc->prv_size_override)
pdata->mmio_size = dev_desc->prv_size_override;
else
pdata->mmio_size = resource_size(rentry->res);
pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
}
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
adev->pnp.type.platform_id = 0;
goto out_free;
}
pdata->adev = adev;
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
dev_desc->setup(pdata);
if (dev_desc->flags & LPSS_CLK) {
ret = register_device_clock(adev, pdata);
if (ret)
goto out_free;
}
/*
* This works around a known issue in ACPI tables where LPSS devices
* have _PS0 and _PS3 without _PSC (and no power resources), so
* acpi_bus_init_power() will assume that the BIOS has put them into D0.
*/
acpi_device_fix_up_power(adev);
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (IS_ERR_OR_NULL(pdev)) {
adev->driver_data = NULL;
ret = PTR_ERR(pdev);
goto err_out;
}
acpi_lpss_create_device_links(adev, pdev);
return 1;
out_free:
/* Skip the device, but continue the namespace scan */
ret = 0;
err_out:
kfree(pdata);
return ret;
}
static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}
static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
unsigned int reg)
{
writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct lpss_private_data *pdata;
unsigned long flags;
int ret;
if (WARN_ON(!adev))
return -ENODEV;
spin_lock_irqsave(&dev->power.lock, flags);
if (pm_runtime_suspended(dev)) {
ret = -EAGAIN;
goto out;
}
pdata = acpi_driver_data(adev);
if (WARN_ON(!pdata || !pdata->mmio_base)) {
ret = -ENODEV;
goto out;
}
*val = __lpss_reg_read(pdata, reg);
ret = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 ltr_value = 0;
unsigned int reg;
int ret;
reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
if (ret)
return ret;
return sysfs_emit(buf, "%08x\n", ltr_value);
}
static ssize_t lpss_ltr_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ltr_mode = 0;
char *outstr;
int ret;
	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
if (ret)
return ret;
outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
return sprintf(buf, "%s\n", outstr);
}
static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
static struct attribute *lpss_attrs[] = {
&dev_attr_auto_ltr.attr,
&dev_attr_sw_ltr.attr,
&dev_attr_ltr_mode.attr,
NULL,
};
static const struct attribute_group lpss_attr_group = {
.attrs = lpss_attrs,
.name = "lpss_ltr",
};
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
u32 ltr_mode, ltr_val;
ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
if (val < 0) {
if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
}
return;
}
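	/*
	 * Encoding sketch for the requested latency (as implemented below):
	 * values at or above the cutoff are clamped to the maximum in the
	 * 32us scale, mid-range values are shifted down into 32us units,
	 * and small values use the 1us scale directly.
	 */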
ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
val = LPSS_LTR_MAX_VAL;
} else if (val > LPSS_LTR_MAX_VAL) {
ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
} else {
ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
}
ltr_val |= val;
__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
}
}
#ifdef CONFIG_PM
/**
* acpi_lpss_save_ctx() - Save the private registers of LPSS device
* @dev: LPSS device
* @pdata: pointer to the private data of the LPSS device
*
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
*/
static void acpi_lpss_save_ctx(struct device *dev,
struct lpss_private_data *pdata)
{
unsigned int i;
for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
unsigned long offset = i * sizeof(u32);
pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
pdata->prv_reg_ctx[i], offset);
}
}
/**
* acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
* @dev: LPSS device
* @pdata: pointer to the private data of the LPSS device
*
* Restores the registers that were previously stored with acpi_lpss_save_ctx().
*/
static void acpi_lpss_restore_ctx(struct device *dev,
struct lpss_private_data *pdata)
{
unsigned int i;
for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
unsigned long offset = i * sizeof(u32);
__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
pdata->prv_reg_ctx[i], offset);
}
}
static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
/*
* The following delay is needed or the subsequent write operations may
* fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10ms delay before the device can be accessed after a D3 to
	 * D0 transition. However, some platforms like BSW do not need this
	 * delay.
*/
unsigned int delay = 10; /* default 10ms delay */
if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
delay = 0;
msleep(delay);
}
static int acpi_lpss_activate(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = acpi_dev_resume(dev);
if (ret)
return ret;
acpi_lpss_d3_to_d0_delay(pdata);
/*
	 * This is called only at the ->probe() stage, where a device is either
	 * in a known state defined by the BIOS or most likely powered off.
	 * Due to this we have to deassert the reset line to be sure that
	 * ->probe() will recognize the device.
*/
if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
lpss_deassert_reset(pdata);
#ifdef CONFIG_PM
if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
acpi_lpss_save_ctx(dev, pdata);
#endif
return 0;
}
static void acpi_lpss_dismiss(struct device *dev)
{
acpi_dev_suspend(dev, false);
}
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB
#define LPSS_IOSF_UNIT_LPIO2 0xAC
#define LPSS_IOSF_PMCSR 0x84
#define LPSS_PMCSR_D0 0
#define LPSS_PMCSR_D3hot 3
#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)
#define LPSS_IOSF_GPIODEF0 0x154
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;
static void lpss_iosf_enter_d3_state(void)
{
u32 value1 = 0;
u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D3hot;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
*/
u32 func_dis, d3_sts_0, pmc_status;
int ret;
ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
if (ret)
return;
mutex_lock(&lpss_iosf_mutex);
ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
if (ret)
goto exit;
/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
*/
pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
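	/* A set bit = a device that is neither function-disabled nor in D3hot. */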
if (pmc_status)
goto exit;
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
lpss_iosf_d3_entered = true;
exit:
mutex_unlock(&lpss_iosf_mutex);
}
static void lpss_iosf_exit_d3_state(void)
{
u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
LPSS_GPIODEF0_DMA_LLP;
u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D0;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
mutex_lock(&lpss_iosf_mutex);
if (!lpss_iosf_d3_entered)
goto exit;
lpss_iosf_d3_entered = false;
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
exit:
mutex_unlock(&lpss_iosf_mutex);
}
static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
ret = acpi_dev_suspend(dev, wakeup);
/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered off.
	 * See lpss_iosf_enter_d3_state() for further information.
*/
if (acpi_target_system_state() == ACPI_STATE_S0 &&
lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();
return ret;
}
static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
/*
* This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend().
*/
if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_resume(dev);
if (ret)
return ret;
acpi_lpss_d3_to_d0_delay(pdata);
if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
acpi_lpss_restore_ctx(dev, pdata);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_suspend_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (pdata->dev_desc->resume_from_noirq)
return 0;
return acpi_lpss_do_suspend_late(dev);
}
static int acpi_lpss_suspend_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
if (pdata->dev_desc->resume_from_noirq) {
/*
* The driver's ->suspend_late callback will be invoked by
* acpi_lpss_do_suspend_late(), with the assumption that the
* driver really wanted to run that code in ->suspend_noirq, but
* it could not run after acpi_dev_suspend() and the driver
* expected the latter to be called in the "late" phase.
*/
ret = acpi_lpss_do_suspend_late(dev);
if (ret)
return ret;
}
return acpi_subsys_suspend_noirq(dev);
}
static int acpi_lpss_do_resume_early(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
static int acpi_lpss_resume_early(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (pdata->dev_desc->resume_from_noirq)
return 0;
if (dev_pm_skip_resume(dev))
return 0;
return acpi_lpss_do_resume_early(dev);
}
static int acpi_lpss_resume_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
/* Follow acpi_subsys_resume_noirq(). */
if (dev_pm_skip_resume(dev))
return 0;
ret = pm_generic_resume_noirq(dev);
if (ret)
return ret;
if (!pdata->dev_desc->resume_from_noirq)
return 0;
/*
* The driver's ->resume_early callback will be invoked by
* acpi_lpss_do_resume_early(), with the assumption that the driver
* really wanted to run that code in ->resume_noirq, but it could not
* run before acpi_dev_resume() and the driver expected the latter to be
* called in the "early" phase.
*/
return acpi_lpss_do_resume_early(dev);
}
static int acpi_lpss_do_restore_early(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_restore_early(dev);
}
static int acpi_lpss_restore_early(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (pdata->dev_desc->resume_from_noirq)
return 0;
return acpi_lpss_do_restore_early(dev);
}
static int acpi_lpss_restore_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = pm_generic_restore_noirq(dev);
if (ret)
return ret;
if (!pdata->dev_desc->resume_from_noirq)
return 0;
/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
return acpi_lpss_do_restore_early(dev);
}
static int acpi_lpss_do_poweroff_late(struct device *dev)
{
int ret = pm_generic_poweroff_late(dev);
return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_poweroff_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq)
return 0;
return acpi_lpss_do_poweroff_late(dev);
}
static int acpi_lpss_poweroff_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq) {
/* This is analogous to the acpi_lpss_suspend_noirq() case. */
int ret = acpi_lpss_do_poweroff_late(dev);
if (ret)
return ret;
}
return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
return ret ? ret : acpi_lpss_suspend(dev, true);
}
static int acpi_lpss_runtime_resume(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
.activate = acpi_lpss_activate,
.dismiss = acpi_lpss_dismiss,
#endif
.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
.suspend_noirq = acpi_lpss_suspend_noirq,
.resume_noirq = acpi_lpss_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
.poweroff = acpi_subsys_poweroff,
.poweroff_late = acpi_lpss_poweroff_late,
.poweroff_noirq = acpi_lpss_poweroff_noirq,
.restore_noirq = acpi_lpss_restore_noirq,
.restore_early = acpi_lpss_restore_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
.runtime_resume = acpi_lpss_runtime_resume,
#endif
},
};
static int acpi_lpss_platform_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct platform_device *pdev = to_platform_device(data);
struct lpss_private_data *pdata;
struct acpi_device *adev;
const struct acpi_device_id *id;
id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
if (!id || !id->driver_data)
return 0;
adev = ACPI_COMPANION(&pdev->dev);
if (!adev)
return 0;
pdata = acpi_driver_data(adev);
if (!pdata)
return 0;
if (pdata->mmio_base &&
pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
return 0;
}
switch (action) {
case BUS_NOTIFY_BIND_DRIVER:
dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
break;
case BUS_NOTIFY_DRIVER_NOT_BOUND:
case BUS_NOTIFY_UNBOUND_DRIVER:
dev_pm_domain_set(&pdev->dev, NULL);
break;
case BUS_NOTIFY_ADD_DEVICE:
dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
if (pdata->dev_desc->flags & LPSS_LTR)
return sysfs_create_group(&pdev->dev.kobj,
&lpss_attr_group);
break;
case BUS_NOTIFY_DEL_DEVICE:
if (pdata->dev_desc->flags & LPSS_LTR)
sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
dev_pm_domain_set(&pdev->dev, NULL);
break;
default:
break;
}
return 0;
}
static struct notifier_block acpi_lpss_nb = {
.notifier_call = acpi_lpss_platform_notify,
};
static void acpi_lpss_bind(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
return;
if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
else
dev_err(dev, "MMIO size insufficient to access LTR\n");
}
static void acpi_lpss_unbind(struct device *dev)
{
dev->power.set_latency_tolerance = NULL;
}
static struct acpi_scan_handler lpss_handler = {
.ids = acpi_lpss_device_ids,
.attach = acpi_lpss_create_device,
.bind = acpi_lpss_bind,
.unbind = acpi_lpss_unbind,
};
void __init acpi_lpss_init(void)
{
const struct x86_cpu_id *id;
int ret;
ret = lpss_atom_clk_init();
if (ret)
return;
id = x86_match_cpu(lpss_cpu_ids);
if (id)
lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
acpi_scan_add_handler(&lpss_handler);
}
#else
static struct acpi_scan_handler lpss_handler = {
.ids = acpi_lpss_device_ids,
};
void __init acpi_lpss_init(void)
{
acpi_scan_add_handler(&lpss_handler);
}
#endif /* CONFIG_X86_INTEL_LPSS */
| linux-master | drivers/acpi/acpi_lpss.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* fan_attr.c - Create extra attributes for ACPI Fan driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include "fan.h"
MODULE_LICENSE("GPL");
static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
count = scnprintf(buf, PAGE_SIZE, "not-defined:");
else
count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
count += sysfs_emit_at(buf, count, "not-defined:");
else
count += sysfs_emit_at(buf, count, "%lld:", fps->trip_point);
if (fps->speed == 0xFFFFFFFF)
count += sysfs_emit_at(buf, count, "not-defined:");
else
count += sysfs_emit_at(buf, count, "%lld:", fps->speed);
if (fps->noise_level == 0xFFFFFFFF)
count += sysfs_emit_at(buf, count, "not-defined:");
else
count += sysfs_emit_at(buf, count, "%lld:", fps->noise_level * 100);
if (fps->power == 0xFFFFFFFF)
count += sysfs_emit_at(buf, count, "not-defined\n");
else
count += sysfs_emit_at(buf, count, "%lld\n", fps->power);
return count;
}
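/*
 * Reading one of the "state<N>" attributes created below yields a single
 * colon-separated line of _FPS values in the order
 * "control:trip_point:speed:noise_level*100:power", e.g. (hypothetical
 * numbers) "50:1:3000:300:5000", with "not-defined" substituted for any
 * field the firmware reports as 0xFFFFFFFF.
 */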
static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan_fst fst;
int status;
status = acpi_fan_get_fst(acpi_dev, &fst);
if (status)
return status;
return sprintf(buf, "%lld\n", fst.speed);
}
static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan *fan = acpi_driver_data(acpi_dev);
return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
}
int acpi_fan_create_attributes(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
int i, status;
sysfs_attr_init(&fan->fine_grain_control.attr);
fan->fine_grain_control.show = show_fine_grain_control;
fan->fine_grain_control.store = NULL;
fan->fine_grain_control.attr.name = "fine_grain_control";
fan->fine_grain_control.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
if (status)
return status;
/* _FST is present if we are here */
sysfs_attr_init(&fan->fst_speed.attr);
fan->fst_speed.show = show_fan_speed;
fan->fst_speed.store = NULL;
fan->fst_speed.attr.name = "fan_speed_rpm";
fan->fst_speed.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
if (status)
goto rem_fine_grain_attr;
for (i = 0; i < fan->fps_count; ++i) {
struct acpi_fan_fps *fps = &fan->fps[i];
snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
sysfs_attr_init(&fps->dev_attr.attr);
fps->dev_attr.show = show_state;
fps->dev_attr.store = NULL;
fps->dev_attr.attr.name = fps->name;
fps->dev_attr.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
if (status) {
int j;
for (j = 0; j < i; ++j)
sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
goto rem_fst_attr;
}
}
return 0;
rem_fst_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
rem_fine_grain_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
return status;
}
void acpi_fan_delete_attributes(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
int i;
for (i = 0; i < fan->fps_count; ++i)
sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
}
| linux-master | drivers/acpi/fan_attr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* processor_driver.c - ACPI Processor Driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* - Added processor hotplug support
* Copyright (C) 2013, Intel Corporation
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include "internal.h"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_start(struct device *dev);
static int acpi_processor_stop(struct device *dev);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
{ACPI_PROCESSOR_DEVICE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
static struct device_driver acpi_processor_driver = {
.name = "processor",
.bus = &cpu_subsys,
.acpi_match_table = processor_device_ids,
.probe = acpi_processor_start,
.remove = acpi_processor_stop,
};
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_processor *pr;
int saved;
if (device->handle != handle)
return;
pr = acpi_driver_data(device);
if (!pr)
return;
switch (event) {
case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
saved = pr->performance_platform_limit;
acpi_processor_ppc_has_changed(pr, 1);
if (saved == pr->performance_platform_limit)
break;
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
pr->performance_platform_limit);
break;
case ACPI_PROCESSOR_NOTIFY_POWER:
acpi_processor_power_state_has_changed(pr);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_PROCESSOR_NOTIFY_THROTTLING:
acpi_processor_tstate_has_changed(pr);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
default:
acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
break;
}
}
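/*
 * The netlink events generated above can be observed from userspace, e.g.
 * via the "acpi_event" generic netlink family (see drivers/acpi/event.c);
 * acpid is one known consumer.
 */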
static int __acpi_processor_start(struct acpi_device *device);
static int acpi_soft_cpu_online(unsigned int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
if (!pr)
return 0;
device = acpi_fetch_acpi_dev(pr->handle);
if (!device)
return 0;
/*
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
*/
if (pr->flags.need_hotplug_init) {
int ret;
pr_info("Will online and init hotplugged CPU: %d\n",
pr->id);
pr->flags.need_hotplug_init = 0;
ret = __acpi_processor_start(device);
WARN(ret, "Failed to start CPU: %d\n", pr->id);
} else {
/* Normal CPU soft online event. */
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, false);
acpi_processor_tstate_has_changed(pr);
}
return 0;
}
static int acpi_soft_cpu_dead(unsigned int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
if (!pr || !acpi_fetch_acpi_dev(pr->handle))
return 0;
acpi_processor_reevaluate_tstate(pr, true);
return 0;
}
#ifdef CONFIG_ACPI_CPU_FREQ_PSS
static void acpi_pss_perf_init(struct acpi_processor *pr)
{
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_get_throttling_info(pr);
if (pr->flags.throttling)
pr->flags.limit = 1;
}
#else
static inline void acpi_pss_perf_init(struct acpi_processor *pr) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
static int __acpi_processor_start(struct acpi_device *device)
{
struct acpi_processor *pr = acpi_driver_data(device);
acpi_status status;
int result = 0;
if (!pr)
return -ENODEV;
if (pr->flags.need_hotplug_init)
return 0;
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
acpi_pss_perf_init(pr);
result = acpi_processor_thermal_init(pr, device);
if (result)
goto err_power_exit;
status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_processor_notify, device);
if (ACPI_SUCCESS(status))
return 0;
result = -ENODEV;
acpi_processor_thermal_exit(pr, device);
err_power_exit:
acpi_processor_power_exit(pr);
return result;
}
static int acpi_processor_start(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
int ret;
if (!device)
return -ENODEV;
/* Protect against concurrent CPU hotplug operations */
cpu_hotplug_disable();
ret = __acpi_processor_start(device);
cpu_hotplug_enable();
return ret;
}
static int acpi_processor_stop(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
struct acpi_processor *pr;
if (!device)
return 0;
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_processor_notify);
pr = acpi_driver_data(device);
if (!pr)
return 0;
acpi_processor_power_exit(pr);
acpi_cppc_processor_exit(pr);
acpi_processor_thermal_exit(pr, device);
return 0;
}
bool acpi_processor_cpufreq_init;
static int acpi_processor_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
if (event == CPUFREQ_CREATE_POLICY) {
acpi_thermal_cpufreq_init(policy);
acpi_processor_ppc_init(policy);
} else if (event == CPUFREQ_REMOVE_POLICY) {
acpi_processor_ppc_exit(policy);
acpi_thermal_cpufreq_exit(policy);
}
return 0;
}
static struct notifier_block acpi_processor_notifier_block = {
.notifier_call = acpi_processor_notifier,
};
/*
* We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
*/
static enum cpuhp_state hp_online;
static int __init acpi_processor_driver_init(void)
{
int result = 0;
if (acpi_disabled)
return 0;
if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
CPUFREQ_POLICY_NOTIFIER)) {
acpi_processor_cpufreq_init = true;
acpi_processor_ignore_ppc_init();
}
result = driver_register(&acpi_processor_driver);
if (result < 0)
return result;
result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
if (result < 0)
goto err;
hp_online = result;
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
acpi_processor_throttling_init();
return 0;
err:
driver_unregister(&acpi_processor_driver);
return result;
}
static void __exit acpi_processor_driver_exit(void)
{
if (acpi_disabled)
return;
if (acpi_processor_cpufreq_init) {
cpufreq_unregister_notifier(&acpi_processor_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
acpi_processor_cpufreq_init = false;
}
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
driver_unregister(&acpi_processor_driver);
}
module_init(acpi_processor_driver_init);
module_exit(acpi_processor_driver_exit);
MODULE_ALIAS("processor");
| linux-master | drivers/acpi/processor_driver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Broadcom
* Author: Jayachandran C <[email protected]>
* Copyright (C) 2016 Semihalf
* Author: Tomasz Nowicki <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
/* Structure to hold entries from the MCFG table */
struct mcfg_entry {
struct list_head list;
phys_addr_t addr;
u16 segment;
u8 bus_start;
u8 bus_end;
};
#ifdef CONFIG_PCI_QUIRKS
struct mcfg_fixup {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
u16 segment;
struct resource bus_range;
const struct pci_ecam_ops *ops;
struct resource cfgres;
};
#define MCFG_BUS_RANGE(start, end) DEFINE_RES_NAMED((start), \
((end) - (start) + 1), \
NULL, IORESOURCE_BUS)
#define MCFG_BUS_ANY MCFG_BUS_RANGE(0x0, 0xff)
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
#ifdef CONFIG_ARM64
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops),
AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops),
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
QCOM_ECAM32(0),
QCOM_ECAM32(1),
QCOM_ECAM32(2),
QCOM_ECAM32(3),
QCOM_ECAM32(4),
QCOM_ECAM32(5),
QCOM_ECAM32(6),
QCOM_ECAM32(7),
#define HISI_QUAD_DOM(table_id, seg, ops) \
{ "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 4, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 8, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 12, &hisi_pcie_ops),
#define THUNDER_PEM_RES(addr, node) \
DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
#define THUNDER_PEM_QUIRK(rev, node) \
{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 5 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x884057000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 6 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88808f000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 7 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89001f000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 8 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
#define THUNDER_ECAM_QUIRK(rev, seg) \
{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \
&pci_thunder_ecam_ops }
/* SoC pass2.x */
THUNDER_PEM_QUIRK(1, 0),
THUNDER_PEM_QUIRK(1, 1),
THUNDER_ECAM_QUIRK(1, 10),
/* SoC pass1.x */
THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */
THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */
THUNDER_ECAM_QUIRK(2, 0),
THUNDER_ECAM_QUIRK(2, 1),
THUNDER_ECAM_QUIRK(2, 2),
THUNDER_ECAM_QUIRK(2, 3),
THUNDER_ECAM_QUIRK(2, 10),
THUNDER_ECAM_QUIRK(2, 11),
THUNDER_ECAM_QUIRK(2, 12),
THUNDER_ECAM_QUIRK(2, 13),
{ "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops},
{ "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops},
{ "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops},
{ "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops},
{ "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops},
{ "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops},
#define XGENE_V1_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v1_pcie_ecam_ops }
#define XGENE_V2_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v2_pcie_ecam_ops }
/* X-Gene SoC with v1 PCIe controller */
XGENE_V1_ECAM_MCFG(1, 0),
XGENE_V1_ECAM_MCFG(1, 1),
XGENE_V1_ECAM_MCFG(1, 2),
XGENE_V1_ECAM_MCFG(1, 3),
XGENE_V1_ECAM_MCFG(1, 4),
XGENE_V1_ECAM_MCFG(2, 0),
XGENE_V1_ECAM_MCFG(2, 1),
XGENE_V1_ECAM_MCFG(2, 2),
XGENE_V1_ECAM_MCFG(2, 3),
XGENE_V1_ECAM_MCFG(2, 4),
/* X-Gene SoC with v2.1 PCIe controller */
XGENE_V2_ECAM_MCFG(3, 0),
XGENE_V2_ECAM_MCFG(3, 1),
/* X-Gene SoC with v2.2 PCIe controller */
XGENE_V2_ECAM_MCFG(4, 0),
XGENE_V2_ECAM_MCFG(4, 1),
XGENE_V2_ECAM_MCFG(4, 2),
#define ALTRA_ECAM_QUIRK(rev, seg) \
{ "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops }
ALTRA_ECAM_QUIRK(1, 0),
ALTRA_ECAM_QUIRK(1, 1),
ALTRA_ECAM_QUIRK(1, 2),
ALTRA_ECAM_QUIRK(1, 3),
ALTRA_ECAM_QUIRK(1, 4),
ALTRA_ECAM_QUIRK(1, 5),
ALTRA_ECAM_QUIRK(1, 6),
ALTRA_ECAM_QUIRK(1, 7),
ALTRA_ECAM_QUIRK(1, 8),
ALTRA_ECAM_QUIRK(1, 9),
ALTRA_ECAM_QUIRK(1, 10),
ALTRA_ECAM_QUIRK(1, 11),
ALTRA_ECAM_QUIRK(1, 12),
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
#endif /* ARM64 */
#ifdef CONFIG_LOONGARCH
#define LOONGSON_ECAM_MCFG(table_id, seg) \
{ "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops }
LOONGSON_ECAM_MCFG("\0", 0),
LOONGSON_ECAM_MCFG("LOONGSON", 0),
LOONGSON_ECAM_MCFG("\0", 1),
LOONGSON_ECAM_MCFG("LOONGSON", 1),
#endif /* LOONGARCH */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
static char mcfg_oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
static u32 mcfg_oem_revision;
static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
struct resource *bus_range)
{
if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(f->oem_table_id, mcfg_oem_table_id,
ACPI_OEM_TABLE_ID_SIZE) &&
f->oem_revision == mcfg_oem_revision &&
f->segment == segment &&
resource_contains(&f->bus_range, bus_range))
return 1;
return 0;
}
#endif
static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
struct resource *cfgres,
const struct pci_ecam_ops **ecam_ops)
{
#ifdef CONFIG_PCI_QUIRKS
u16 segment = root->segment;
struct resource *bus_range = &root->secondary;
struct mcfg_fixup *f;
int i;
for (i = 0, f = mcfg_quirks; i < ARRAY_SIZE(mcfg_quirks); i++, f++) {
if (pci_mcfg_quirk_matches(f, segment, bus_range)) {
if (f->cfgres.start)
*cfgres = f->cfgres;
if (f->ops)
*ecam_ops = f->ops;
dev_info(&root->device->dev, "MCFG quirk: ECAM at %pR for %pR with %ps\n",
cfgres, bus_range, *ecam_ops);
return;
}
}
#endif
}
/* List to save MCFG entries */
static LIST_HEAD(pci_mcfg_list);
int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
const struct pci_ecam_ops **ecam_ops)
{
const struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
struct mcfg_entry *e;
struct resource res;
/* Use address from _CBA if present, otherwise lookup MCFG */
if (root->mcfg_addr)
goto skip_lookup;
/*
	 * We expect the range in bus_res to be within the coverage of the
	 * MCFG bus range.
*/
list_for_each_entry(e, &pci_mcfg_list, list) {
if (e->segment == seg && e->bus_start <= bus_res->start &&
e->bus_end >= bus_res->end) {
root->mcfg_addr = e->addr;
}
}
skip_lookup:
memset(&res, 0, sizeof(res));
if (root->mcfg_addr) {
res.start = root->mcfg_addr + (bus_res->start << 20);
res.end = res.start + (resource_size(bus_res) << 20) - 1;
res.flags = IORESOURCE_MEM;
}
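	/*
	 * ECAM layout (per the PCIe spec): config space for bus B, device D,
	 * function F, register R lives at offset (B << 20 | D << 15 |
	 * F << 12 | R), i.e. 1 MiB per bus -- hence the << 20 above. A
	 * segment covering buses 0x00-0xff therefore spans 256 MiB starting
	 * at mcfg_addr.
	 */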
/*
	 * Allow quirks to override the default ECAM ops and CFG resource
	 * range. This may even fabricate a CFG resource range in case
	 * MCFG does not have it. An invalid CFG start address means an
	 * MCFG firmware bug, or that we need another quirk in the array.
*/
pci_mcfg_apply_quirks(root, &res, &ops);
if (!res.start)
return -ENXIO;
*cfgres = res;
*ecam_ops = ops;
return 0;
}
static __init int pci_mcfg_parse(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
struct acpi_mcfg_allocation *mptr;
struct mcfg_entry *e, *arr;
int i, n;
if (header->length < sizeof(struct acpi_table_mcfg))
return -EINVAL;
n = (header->length - sizeof(struct acpi_table_mcfg)) /
sizeof(struct acpi_mcfg_allocation);
mcfg = (struct acpi_table_mcfg *)header;
mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
if (!arr)
return -ENOMEM;
for (i = 0, e = arr; i < n; i++, mptr++, e++) {
e->segment = mptr->pci_segment;
e->addr = mptr->address;
e->bus_start = mptr->start_bus_number;
e->bus_end = mptr->end_bus_number;
list_add(&e->list, &pci_mcfg_list);
}
#ifdef CONFIG_PCI_QUIRKS
/* Save MCFG IDs and revision for quirks matching */
memcpy(mcfg_oem_id, header->oem_id, ACPI_OEM_ID_SIZE);
memcpy(mcfg_oem_table_id, header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
mcfg_oem_revision = header->oem_revision;
#endif
pr_info("MCFG table detected, %d entries\n", n);
return 0;
}
/* Interface called by ACPI - parse and save MCFG table */
void __init pci_mmcfg_late_init(void)
{
int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse);
if (err)
pr_debug("Failed to parse MCFG (%d)\n", err);
}
| linux-master | drivers/acpi/pci_mcfg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* acpi_lpat.c - LPAT table processing functions
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
*/
#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/acpi_lpat.h>
/**
* acpi_lpat_raw_to_temp(): Return temperature from raw value through
* LPAT conversion table
*
* @lpat_table: the temperature_raw mapping table structure
* @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
* A positive converted temperature value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
int raw)
{
int i, delta_temp, delta_raw, temp;
struct acpi_lpat *lpat = lpat_table->lpat;
for (i = 0; i < lpat_table->lpat_count - 1; i++) {
if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
(raw <= lpat[i].raw && raw >= lpat[i+1].raw))
break;
}
if (i == lpat_table->lpat_count - 1)
return -ENOENT;
delta_temp = lpat[i+1].temp - lpat[i].temp;
delta_raw = lpat[i+1].raw - lpat[i].raw;
temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
return temp;
}
EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
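/*
 * Worked example (hypothetical LPAT entries): with adjacent pairs
 * (temp, raw) = (2731, 8000) and (2831, 8100), a raw reading of 8050
 * yields temp = 2731 + (8050 - 8000) * (2831 - 2731) / (8100 - 8000)
 *             = 2731 + 50 = 2781.
 */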
/**
* acpi_lpat_temp_to_raw(): Return raw value from temperature through
* LPAT conversion table
*
* @lpat_table: the temperature_raw mapping table
* @temp: the temperature, used as a key to get the raw value from the
* above mapping table
*
* The raw value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
int temp)
{
int i, delta_temp, delta_raw, raw;
struct acpi_lpat *lpat = lpat_table->lpat;
for (i = 0; i < lpat_table->lpat_count - 1; i++) {
if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
break;
}
if (i == lpat_table->lpat_count - 1)
return -ENOENT;
delta_temp = lpat[i+1].temp - lpat[i].temp;
delta_raw = lpat[i+1].raw - lpat[i].raw;
raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
return raw;
}
EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw);
/**
* acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present.
*
* @handle: Handle to acpi device
*
 * Parse the LPAT table into a struct of type acpi_lpat_conversion_table. On
 * success it returns a pointer to a newly allocated table. This table must
 * be freed by the caller when finished processing, using a call to
 * acpi_lpat_free_conversion_table().
*/
struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
handle)
{
struct acpi_lpat_conversion_table *lpat_table = NULL;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj_p, *obj_e;
int *lpat, i;
acpi_status status;
status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
if (ACPI_FAILURE(status))
return NULL;
obj_p = (union acpi_object *)buffer.pointer;
if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
(obj_p->package.count % 2) || (obj_p->package.count < 4))
goto out;
lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL);
if (!lpat)
goto out;
for (i = 0; i < obj_p->package.count; i++) {
obj_e = &obj_p->package.elements[i];
if (obj_e->type != ACPI_TYPE_INTEGER) {
kfree(lpat);
goto out;
}
lpat[i] = (s64)obj_e->integer.value;
}
lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL);
if (!lpat_table) {
kfree(lpat);
goto out;
}
lpat_table->lpat = (struct acpi_lpat *)lpat;
lpat_table->lpat_count = obj_p->package.count / 2;
out:
kfree(buffer.pointer);
return lpat_table;
}
EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table);
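/*
 * Illustrative (hypothetical) LPAT object parsed above: a package of
 * alternating (temperature, raw) integer pairs, e.g.
 *
 *	Name (LPAT, Package () {
 *	    2731, 8000,
 *	    2831, 8100
 *	})
 *
 * which acpi_lpat_get_conversion_table() turns into two struct acpi_lpat
 * entries.
 */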
/**
* acpi_lpat_free_conversion_table(): Free LPAT table.
*
* @lpat_table: the temperature_raw mapping table structure
*
* Frees the LPAT table previously allocated by a call to
* acpi_lpat_get_conversion_table.
*/
void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
*lpat_table)
{
if (lpat_table) {
kfree(lpat_table->lpat);
kfree(lpat_table);
}
}
EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
| linux-master | drivers/acpi/acpi_lpat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* button.c - ACPI Button Driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: button: " fmt
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <acpi/button.h>
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button"
#define ACPI_BUTTON_TYPE_POWER 0x01
#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep"
#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button"
#define ACPI_BUTTON_TYPE_SLEEP 0x03
#define ACPI_BUTTON_SUBCLASS_LID "lid"
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
enum {
ACPI_BUTTON_LID_INIT_IGNORE,
ACPI_BUTTON_LID_INIT_OPEN,
ACPI_BUTTON_LID_INIT_METHOD,
ACPI_BUTTON_LID_INIT_DISABLED,
};
static const char * const lid_init_state_str[] = {
[ACPI_BUTTON_LID_INIT_IGNORE] = "ignore",
[ACPI_BUTTON_LID_INIT_OPEN] = "open",
[ACPI_BUTTON_LID_INIT_METHOD] = "method",
[ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
};
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Button Driver");
MODULE_LICENSE("GPL");
static const struct acpi_device_id button_device_ids[] = {
{ACPI_BUTTON_HID_LID, 0},
{ACPI_BUTTON_HID_SLEEP, 0},
{ACPI_BUTTON_HID_SLEEPF, 0},
{ACPI_BUTTON_HID_POWER, 0},
{ACPI_BUTTON_HID_POWERF, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id dmi_lid_quirks[] = {
{
/* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
},
{
/* Nextbook Ares 8A tablet, _LID device always reports lid closed */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
DMI_MATCH(DMI_BIOS_VERSION, "M882"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
},
{
/*
* Lenovo Yoga 9 14ITL5, initial notification of the LID device
* never happens.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82BG"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{
/*
* Medion Akoya E2215T, notification of the LID device only
* happens on close, not on open and _LID always returns closed.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{
/*
* Medion Akoya E2228T, notification of the LID device only
* happens on close, not on open and _LID always returns closed.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{
/*
* Razer Blade Stealth 13 late 2019, notification of the LID device
* only happens on close, not on open and _LID always returns closed.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{}
};
static int acpi_button_add(struct acpi_device *device);
static void acpi_button_remove(struct acpi_device *device);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_suspend(struct device *dev);
static int acpi_button_resume(struct device *dev);
#else
#define acpi_button_suspend NULL
#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, acpi_button_suspend, acpi_button_resume);
static struct acpi_driver acpi_button_driver = {
.name = "button",
.class = ACPI_BUTTON_CLASS,
.ids = button_device_ids,
.ops = {
.add = acpi_button_add,
.remove = acpi_button_remove,
},
.drv.pm = &acpi_button_pm,
};
struct acpi_button {
unsigned int type;
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
int last_state;
ktime_t last_time;
bool suspended;
bool lid_state_initialized;
};
static struct acpi_device *lid_device;
static long lid_init_state = -1;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
MODULE_PARM_DESC(lid_report_interval, "Interval (ms) between lid key events");
/* FS Interface (/proc) */
static struct proc_dir_entry *acpi_button_dir;
static struct proc_dir_entry *acpi_lid_dir;
static int acpi_lid_evaluate_state(struct acpi_device *device)
{
unsigned long long lid_state;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "_LID", NULL, &lid_state);
if (ACPI_FAILURE(status))
return -ENODEV;
return lid_state ? 1 : 0;
}
static int acpi_lid_notify_state(struct acpi_device *device, int state)
{
struct acpi_button *button = acpi_driver_data(device);
ktime_t next_report;
bool do_update;
/*
	 * In lid_init_state=ignore mode, if the user opens/closes the lid
	 * frequently with "open" events missing, and "last_time" is also
	 * updated frequently, "close" can never be delivered to userspace.
* So "last_time" is only updated after a timeout or an actual
* switch.
*/
if (lid_init_state != ACPI_BUTTON_LID_INIT_IGNORE ||
button->last_state != !!state)
do_update = true;
else
do_update = false;
next_report = ktime_add(button->last_time,
ms_to_ktime(lid_report_interval));
if (button->last_state == !!state &&
ktime_after(ktime_get(), next_report)) {
		/* Complain about the buggy firmware */
pr_warn_once("The lid device is not compliant to SW_LID.\n");
/*
* Send the unreliable complement switch event:
*
* On most platforms, the lid device is reliable. However
* there are exceptions:
* 1. Platforms returning initial lid state as "close" by
* default after booting/resuming:
* https://bugzilla.kernel.org/show_bug.cgi?id=89211
* https://bugzilla.kernel.org/show_bug.cgi?id=106151
* 2. Platforms never reporting "open" events:
* https://bugzilla.kernel.org/show_bug.cgi?id=106941
* On these buggy platforms, the usage model of the ACPI
* lid device actually is:
* 1. The initial returning value of _LID may not be
* reliable.
* 2. The open event may not be reliable.
* 3. The close event is reliable.
*
		 * But SW_LID is typed as an input switch event, and the
		 * input layer discards redundant events. Hence if the
		 * state is not switched, userspace cannot see this
		 * platform-triggered reliable event. By inserting a
		 * complement switch event, it is then guaranteed that the
		 * platform-triggered reliable one can always be seen by
		 * userspace.
*/
if (lid_init_state == ACPI_BUTTON_LID_INIT_IGNORE) {
do_update = true;
/*
* Do generate complement switch event for "close"
* as "close" is reliable and wrong "open" won't
* trigger unexpected behaviors.
* Do not generate complement switch event for
* "open" as "open" is not reliable and wrong
* "close" will trigger unexpected behaviors.
*/
if (!state) {
input_report_switch(button->input,
SW_LID, state);
input_sync(button->input);
}
}
}
	/* Send the platform-triggered reliable event */
if (do_update) {
acpi_handle_debug(device->handle, "ACPI LID %s\n",
state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
button->last_time = ktime_get();
}
return 0;
}
static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
void *offset)
{
struct acpi_device *device = seq->private;
int state;
state = acpi_lid_evaluate_state(device);
seq_printf(seq, "state: %s\n",
state < 0 ? "unsupported" : (state ? "open" : "closed"));
return 0;
}
static int acpi_button_add_fs(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
struct proc_dir_entry *entry = NULL;
int ret = 0;
/* procfs I/F for ACPI lid device only */
if (button->type != ACPI_BUTTON_TYPE_LID)
return 0;
if (acpi_button_dir || acpi_lid_dir) {
pr_info("More than one Lid device found!\n");
return -EEXIST;
}
/* create /proc/acpi/button */
acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir);
if (!acpi_button_dir)
return -ENODEV;
/* create /proc/acpi/button/lid */
acpi_lid_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
if (!acpi_lid_dir) {
ret = -ENODEV;
goto remove_button_dir;
}
/* create /proc/acpi/button/lid/LID/ */
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_lid_dir);
if (!acpi_device_dir(device)) {
ret = -ENODEV;
goto remove_lid_dir;
}
/* create /proc/acpi/button/lid/LID/state */
entry = proc_create_single_data(ACPI_BUTTON_FILE_STATE, S_IRUGO,
acpi_device_dir(device), acpi_button_state_seq_show,
device);
if (!entry) {
ret = -ENODEV;
goto remove_dev_dir;
}
done:
return ret;
remove_dev_dir:
remove_proc_entry(acpi_device_bid(device),
acpi_lid_dir);
acpi_device_dir(device) = NULL;
remove_lid_dir:
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
acpi_lid_dir = NULL;
remove_button_dir:
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
acpi_button_dir = NULL;
goto done;
}
static int acpi_button_remove_fs(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
if (button->type != ACPI_BUTTON_TYPE_LID)
return 0;
remove_proc_entry(ACPI_BUTTON_FILE_STATE,
acpi_device_dir(device));
remove_proc_entry(acpi_device_bid(device),
acpi_lid_dir);
acpi_device_dir(device) = NULL;
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
acpi_lid_dir = NULL;
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
acpi_button_dir = NULL;
return 0;
}
/* Driver Interface */
int acpi_lid_open(void)
{
if (!lid_device)
return -ENODEV;
return acpi_lid_evaluate_state(lid_device);
}
EXPORT_SYMBOL(acpi_lid_open);
static int acpi_lid_update_state(struct acpi_device *device,
bool signal_wakeup)
{
int state;
state = acpi_lid_evaluate_state(device);
if (state < 0)
return state;
if (state && signal_wakeup)
acpi_pm_wakeup_event(&device->dev);
return acpi_lid_notify_state(device, state);
}
static void acpi_lid_initialize_state(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
(void)acpi_lid_notify_state(device, 1);
break;
case ACPI_BUTTON_LID_INIT_METHOD:
(void)acpi_lid_update_state(device, false);
break;
case ACPI_BUTTON_LID_INIT_IGNORE:
default:
break;
}
button->lid_state_initialized = true;
}
static void acpi_lid_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_button *button;
if (event != ACPI_BUTTON_NOTIFY_STATUS) {
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
}
button = acpi_driver_data(device);
if (!button->lid_state_initialized)
return;
acpi_lid_update_state(device, true);
}
static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_button *button;
struct input_dev *input;
int keycode;
if (event != ACPI_BUTTON_NOTIFY_STATUS) {
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
}
acpi_pm_wakeup_event(&device->dev);
button = acpi_driver_data(device);
if (button->suspended)
return;
input = button->input;
keycode = test_bit(KEY_SLEEP, input->keybit) ? KEY_SLEEP : KEY_POWER;
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev),
event, ++button->pushed);
}
static void acpi_button_notify_run(void *data)
{
acpi_button_notify(NULL, ACPI_BUTTON_NOTIFY_STATUS, data);
}
static u32 acpi_button_event(void *data)
{
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_button_notify_run, data);
return ACPI_INTERRUPT_HANDLED;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_button_suspend(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
button->suspended = true;
return 0;
}
static int acpi_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID) {
button->last_state = !!acpi_lid_evaluate_state(device);
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
}
return 0;
}
#endif
static int acpi_lid_input_open(struct input_dev *input)
{
struct acpi_device *device = input_get_drvdata(input);
struct acpi_button *button = acpi_driver_data(device);
button->last_state = !!acpi_lid_evaluate_state(device);
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
return 0;
}
static int acpi_button_add(struct acpi_device *device)
{
acpi_notify_handler handler;
struct acpi_button *button;
struct input_dev *input;
const char *hid = acpi_device_hid(device);
acpi_status status;
char *name, *class;
int error = 0;
if (!strcmp(hid, ACPI_BUTTON_HID_LID) &&
lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED)
return -ENODEV;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
device->driver_data = button;
button->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_button;
}
name = acpi_device_name(device);
class = acpi_device_class(device);
if (!strcmp(hid, ACPI_BUTTON_HID_POWER) ||
!strcmp(hid, ACPI_BUTTON_HID_POWERF)) {
button->type = ACPI_BUTTON_TYPE_POWER;
handler = acpi_button_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
} else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) ||
!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) {
button->type = ACPI_BUTTON_TYPE_SLEEP;
handler = acpi_button_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
} else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) {
button->type = ACPI_BUTTON_TYPE_LID;
handler = acpi_lid_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
input->open = acpi_lid_input_open;
} else {
pr_info("Unsupported hid [%s]\n", hid);
error = -ENODEV;
}
if (!error)
error = acpi_button_add_fs(device);
if (error) {
input_free_device(input);
goto err_free_button;
}
snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid);
input->name = name;
input->phys = button->phys;
input->id.bustype = BUS_HOST;
input->id.product = button->type;
input->dev.parent = &device->dev;
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
input_set_capability(input, EV_KEY, KEY_POWER);
break;
case ACPI_BUTTON_TYPE_SLEEP:
input_set_capability(input, EV_KEY, KEY_SLEEP);
break;
case ACPI_BUTTON_TYPE_LID:
input_set_capability(input, EV_SW, SW_LID);
break;
}
input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
goto err_remove_fs;
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_button_event,
device);
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_button_event,
device);
break;
default:
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY, handler,
device);
break;
}
if (ACPI_FAILURE(status)) {
error = -ENODEV;
goto err_input_unregister;
}
if (button->type == ACPI_BUTTON_TYPE_LID) {
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
*/
lid_device = device;
}
device_init_wakeup(&device->dev, true);
pr_info("%s [%s]\n", name, acpi_device_bid(device));
return 0;
err_input_unregister:
input_unregister_device(input);
err_remove_fs:
acpi_button_remove_fs(device);
err_free_button:
kfree(button);
return error;
}
static void acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_button_event);
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_button_event);
break;
default:
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
button->type == ACPI_BUTTON_TYPE_LID ?
acpi_lid_notify :
acpi_button_notify);
break;
}
acpi_os_wait_events_complete();
acpi_button_remove_fs(device);
input_unregister_device(button->input);
kfree(button);
}
static int param_set_lid_init_state(const char *val,
const struct kernel_param *kp)
{
int i;
i = sysfs_match_string(lid_init_state_str, val);
if (i < 0)
return i;
lid_init_state = i;
pr_info("Initial lid state set to '%s'\n", lid_init_state_str[i]);
return 0;
}
static int param_get_lid_init_state(char *buf, const struct kernel_param *kp)
{
int i, c = 0;
for (i = 0; i < ARRAY_SIZE(lid_init_state_str); i++)
if (i == lid_init_state)
c += sprintf(buf + c, "[%s] ", lid_init_state_str[i]);
else
c += sprintf(buf + c, "%s ", lid_init_state_str[i]);
buf[c - 1] = '\n'; /* Replace the final space with a newline */
return c;
}
module_param_call(lid_init_state,
param_set_lid_init_state, param_get_lid_init_state,
NULL, 0644);
MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
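/*
 * Illustrative usage (added for exposition, not in the original file):
 * the parameter can be set on the kernel command line, e.g.
 * "button.lid_init_state=method", or changed at runtime through
 * /sys/module/button/parameters/lid_init_state. Reading it back prints
 * every option with the active one bracketed, e.g. "[method] open ignore",
 * where the exact strings and their order come from lid_init_state_str[]
 * defined earlier in this file.
 */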
static int acpi_button_register_driver(struct acpi_driver *driver)
{
const struct dmi_system_id *dmi_id;
if (lid_init_state == -1) {
dmi_id = dmi_first_match(dmi_lid_quirks);
if (dmi_id)
lid_init_state = (long)dmi_id->driver_data;
else
lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
}
/*
* Modules such as nouveau.ko and i915.ko have a link time dependency
* on acpi_lid_open(), and would therefore not be loadable on ACPI
* capable kernels booted in non-ACPI mode if the return value of
* acpi_bus_register_driver() is returned from here with ACPI disabled
* when this driver is built as a module.
*/
if (acpi_disabled)
return 0;
return acpi_bus_register_driver(driver);
}
static void acpi_button_unregister_driver(struct acpi_driver *driver)
{
if (!acpi_disabled)
acpi_bus_unregister_driver(driver);
}
module_driver(acpi_button_driver, acpi_button_register_driver,
acpi_button_unregister_driver);
| linux-master | drivers/acpi/button.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI Hardware Error Device (PNP0C33) Driver
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <[email protected]>
*
* ACPI Hardware Error Device is used to report some hardware errors
* notified via SCI, mainly the corrected errors.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/hed.h>
static const struct acpi_device_id acpi_hed_ids[] = {
{"PNP0C33", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
static acpi_handle hed_handle;
static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
int register_acpi_hed_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
void unregister_acpi_hed_notifier(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
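/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): a consumer such as an error-handling module subscribes to HED
 * notifications with a standard notifier block. All names below are
 * hypothetical.
 */
#if 0
static int example_hed_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* Poll the SCI-notified generic hardware error sources here. */
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_event,
};

static int __init example_init(void)
{
	return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
	unregister_acpi_hed_notifier(&example_hed_nb);
}
#endif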
/*
* SCI to report hardware error is forwarded to the listeners of HED,
* it is used by HEST Generic Hardware Error Source with notify type
* SCI.
*/
static void acpi_hed_notify(acpi_handle handle, u32 event, void *data)
{
blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
}
static int acpi_hed_add(struct acpi_device *device)
{
int err;
/* Only one hardware error device */
if (hed_handle)
return -EINVAL;
hed_handle = device->handle;
err = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_hed_notify);
if (err)
hed_handle = NULL;
return err;
}
static void acpi_hed_remove(struct acpi_device *device)
{
acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_hed_notify);
hed_handle = NULL;
}
static struct acpi_driver acpi_hed_driver = {
.name = "hardware_error_device",
.class = "hardware_error",
.ids = acpi_hed_ids,
.ops = {
.add = acpi_hed_add,
.remove = acpi_hed_remove,
},
};
module_acpi_driver(acpi_hed_driver);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/hed.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for CMOS RTC Address Space access
*
* Copyright (C) 2013, Intel Corporation
* Authors: Lan Tianyu <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h>
#include "internal.h"
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
{ "PNP0B00" },
{ "PNP0B01" },
{ "PNP0B02" },
{}
};
static acpi_status
acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
int i;
u8 *value = (u8 *)value64;
if (address > 0xff || !value64)
return AE_BAD_PARAMETER;
if (function != ACPI_WRITE && function != ACPI_READ)
return AE_BAD_PARAMETER;
spin_lock_irq(&rtc_lock);
for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value)
if (function == ACPI_READ)
*value = CMOS_READ(address);
else
CMOS_WRITE(*value, address);
spin_unlock_irq(&rtc_lock);
return AE_OK;
}
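/*
 * Illustrative ASL (added for exposition, not from this file): the handler
 * above services AML accesses to a CMOS operation region that firmware
 * typically declares along these lines:
 *
 *	OperationRegion (CMS0, CMOS, 0x00, 0x40)
 *	Field (CMS0, ByteAcc, NoLock, Preserve)
 *	{
 *		SECS, 8,	// e.g. the RTC seconds register at offset 0
 *	}
 *
 * A Store to SECS from AML then reaches acpi_cmos_rtc_space_handler()
 * as an ACPI_WRITE with address 0 and bits == 8.
 */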
int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
{
acpi_status status;
status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS,
&acpi_cmos_rtc_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
pr_err("Error installing CMOS-RTC region handler\n");
return -ENODEV;
}
return 1;
}
EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler);
void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
pr_err("Error removing CMOS-RTC region handler\n");
}
EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler);
static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id)
{
return acpi_install_cmos_rtc_space_handler(adev->handle);
}
static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev)
{
acpi_remove_cmos_rtc_space_handler(adev->handle);
}
static struct acpi_scan_handler cmos_rtc_handler = {
.ids = acpi_cmos_rtc_ids,
.attach = acpi_cmos_rtc_attach_handler,
.detach = acpi_cmos_rtc_detach_handler,
};
void __init acpi_cmos_rtc_init(void)
{
acpi_scan_add_handler(&cmos_rtc_handler);
}
| linux-master | drivers/acpi/acpi_cmos_rtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_ac.c - ACPI AC Adapter Driver (Revision: 27)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: AC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include <acpi/battery.h>
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
#define ACPI_AC_NOTIFY_STATUS 0x80
#define ACPI_AC_STATUS_OFFLINE 0x00
#define ACPI_AC_STATUS_ONLINE 0x01
#define ACPI_AC_STATUS_UNKNOWN 0xFF
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
static int acpi_ac_add(struct acpi_device *device);
static void acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(acpi_handle handle, u32 event, void *data);
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static int ac_sleep_before_get_state_ms;
static int ac_only;
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
},
.drv.pm = &acpi_ac_pm,
};
struct acpi_ac {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct acpi_device *device;
unsigned long long state;
struct notifier_block battery_nb;
};
#define to_acpi_ac(x) power_supply_get_drvdata(x)
/* AC Adapter Management */
static int acpi_ac_get_state(struct acpi_ac *ac)
{
acpi_status status = AE_OK;
if (!ac)
return -EINVAL;
if (ac_only) {
ac->state = 1;
return 0;
}
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
acpi_handle_info(ac->device->handle,
"Error reading AC Adapter state: %s\n",
acpi_format_exception(status));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
return 0;
}
/* sysfs I/F */
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct acpi_ac *ac = to_acpi_ac(psy);
if (!ac)
return -ENODEV;
if (acpi_ac_get_state(ac))
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = ac->state;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
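/*
 * Illustrative note (added for exposition): the power supply core exposes
 * the single ONLINE property to user space, typically as
 * /sys/class/power_supply/<name>/online, which reads "1" while the adapter
 * is plugged in. The <name> comes from acpi_device_bid(), e.g. "ACAD" or
 * "ADP1" depending on the firmware.
 */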
/* Driver Model */
static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
switch (event) {
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
fallthrough;
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
/*
* A buggy BIOS may notify AC first and then sleep for
* a specific time before doing actual operations in the
* EC event handler (_Qxx). This will cause the AC state
* reported by the ACPI event to be incorrect, so wait for a
* specific time for the EC event handler to make progress.
*/
if (ac_sleep_before_get_state_ms > 0)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(device, event, (u32) ac->state);
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
}
}
static int acpi_ac_battery_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
struct acpi_bus_event *event = (struct acpi_bus_event *)data;
/*
* On HP Pavilion dv6-6179er AC status notifications aren't triggered
* when adapter is plugged/unplugged. However, battery status
* notifications are triggered when battery starts charging or
* discharging. Re-reading AC status triggers lost AC notifications,
* if AC status has changed.
*/
if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
event->type == ACPI_BATTERY_NOTIFY_STATUS)
acpi_ac_get_state(ac);
return NOTIFY_OK;
}
static int __init thinkpad_e530_quirk(const struct dmi_system_id *d)
{
ac_sleep_before_get_state_ms = 1000;
return 0;
}
static int __init ac_only_quirk(const struct dmi_system_id *d)
{
ac_only = 1;
return 0;
}
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id ac_dmi_table[] __initconst = {
{
/* Kodlix GK45 returning incorrect state */
.callback = ac_only_quirk,
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "GK45"),
},
},
{
/* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */
.callback = thinkpad_e530_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
},
},
{},
};
static int acpi_ac_add(struct acpi_device *device)
{
struct power_supply_config psy_cfg = {};
int result = 0;
struct acpi_ac *ac = NULL;
if (!device)
return -EINVAL;
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
ac->device = device;
strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_AC_CLASS);
device->driver_data = ac;
result = acpi_ac_get_state(ac);
if (result)
goto err_release_ac;
psy_cfg.drv_data = ac;
ac->charger_desc.name = acpi_device_bid(device);
ac->charger_desc.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger_desc.properties = ac_props;
ac->charger_desc.num_properties = ARRAY_SIZE(ac_props);
ac->charger_desc.get_property = get_ac_property;
ac->charger = power_supply_register(&ac->device->dev,
&ac->charger_desc, &psy_cfg);
if (IS_ERR(ac->charger)) {
result = PTR_ERR(ac->charger);
goto err_release_ac;
}
pr_info("%s [%s] (%s)\n", acpi_device_name(device),
acpi_device_bid(device), ac->state ? "on-line" : "off-line");
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
result = acpi_dev_install_notify_handler(device, ACPI_ALL_NOTIFY,
acpi_ac_notify);
if (result)
goto err_unregister;
return 0;
err_unregister:
power_supply_unregister(ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
err_release_ac:
kfree(ac);
return result;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
unsigned int old_state;
if (!dev)
return -EINVAL;
ac = acpi_driver_data(to_acpi_device(dev));
if (!ac)
return -EINVAL;
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
return 0;
}
#else
#define acpi_ac_resume NULL
#endif
static void acpi_ac_remove(struct acpi_device *device)
{
struct acpi_ac *ac = NULL;
if (!device || !acpi_driver_data(device))
return;
ac = acpi_driver_data(device);
acpi_dev_remove_notify_handler(device, ACPI_ALL_NOTIFY,
acpi_ac_notify);
power_supply_unregister(ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
kfree(ac);
}
static int __init acpi_ac_init(void)
{
int result;
if (acpi_disabled)
return -ENODEV;
if (acpi_quirk_skip_acpi_ac_and_battery())
return -ENODEV;
dmi_check_system(ac_dmi_table);
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0)
return -ENODEV;
return 0;
}
static void __exit acpi_ac_exit(void)
{
acpi_bus_unregister_driver(&acpi_ac_driver);
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
| linux-master | drivers/acpi/ac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/acpi/device_pm.c - ACPI device power management routines.
*
* Copyright (C) 2012, Intel Corp.
* Author: Rafael J. Wysocki <[email protected]>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include "fan.h"
#include "internal.h"
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
*/
const char *acpi_power_state_string(int state)
{
switch (state) {
case ACPI_STATE_D0:
return "D0";
case ACPI_STATE_D1:
return "D1";
case ACPI_STATE_D2:
return "D2";
case ACPI_STATE_D3_HOT:
return "D3hot";
case ACPI_STATE_D3_COLD:
return "D3cold";
default:
return "(unknown)";
}
}
static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
{
unsigned long long psc;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "_PSC", NULL, &psc);
if (ACPI_FAILURE(status))
return -ENODEV;
*state = psc;
return 0;
}
/**
* acpi_device_get_power - Get power state of an ACPI device.
* @device: Device to get the power state of.
* @state: Place to store the power state of the device.
*
* This function does not update the device's power.state field, but it may
* update its parent's power.state field (when the parent's power state is
* unknown and the device's power state turns out to be D0).
*
* Also, it does not update power resource reference counters to ensure that
* the power state returned by it will be persistent and it may return a power
* state shallower than previously set by acpi_device_set_power() for @device
* (if that power state depends on any power resources).
*/
int acpi_device_get_power(struct acpi_device *device, int *state)
{
int result = ACPI_STATE_UNKNOWN;
struct acpi_device *parent;
int error;
if (!device || !state)
return -EINVAL;
parent = acpi_dev_parent(device);
if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = parent ? parent->power.state : ACPI_STATE_D0;
goto out;
}
/*
* Get the device's power state from power resources settings and _PSC,
* if available.
*/
if (device->power.flags.power_resources) {
error = acpi_power_get_inferred_state(device, &result);
if (error)
return error;
}
if (device->power.flags.explicit_get) {
int psc;
error = acpi_dev_pm_explicit_get(device, &psc);
if (error)
return error;
/*
* The power resources settings may indicate a power state
* shallower than the actual power state of the device, because
* the same power resources may be referenced by other devices.
*
* For systems predating ACPI 4.0 we assume that D3hot is the
* deepest state that can be supported.
*/
if (psc > result && psc < ACPI_STATE_D3_COLD)
result = psc;
else if (result == ACPI_STATE_UNKNOWN)
result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_HOT : psc;
}
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
* to be in D0 too, except if ignore_parent is set.
*/
if (!device->power.flags.ignore_parent && parent &&
parent->power.state == ACPI_STATE_UNKNOWN &&
result == ACPI_STATE_D0)
parent->power.state = ACPI_STATE_D0;
*state = result;
out:
acpi_handle_debug(device->handle, "Power state: %s\n",
acpi_power_state_string(*state));
return 0;
}
static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
{
if (adev->power.states[state].flags.explicit_set) {
char method[5] = { '_', 'P', 'S', '0' + state, '\0' };
acpi_status status;
status = acpi_evaluate_object(adev->handle, method, NULL, NULL);
if (ACPI_FAILURE(status))
return -ENODEV;
}
return 0;
}
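/*
 * Illustrative note (added for exposition): the method name above is built
 * positionally from the state number, so state == ACPI_STATE_D2 evaluates
 * "_PS2" and state == ACPI_STATE_D3_HOT (numeric value 3) evaluates "_PS3".
 */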
/**
* acpi_device_set_power - Set power state of an ACPI device.
* @device: Device to set the power state of.
* @state: New power state to set.
*
* Callers must ensure that the device is power manageable before using this
* function.
*/
int acpi_device_set_power(struct acpi_device *device, int state)
{
int target_state = state;
int result = 0;
if (!device || !device->flags.power_manageable
|| (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
acpi_handle_debug(device->handle, "Power state change: %s -> %s\n",
acpi_power_state_string(device->power.state),
acpi_power_state_string(state));
/* Make sure this is a valid target state */
/* There is a special case for D0 addressed below. */
if (state > ACPI_STATE_D0 && state == device->power.state)
goto no_change;
if (state == ACPI_STATE_D3_COLD) {
/*
* For transitions to D3cold we need to execute _PS3 and then
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
/* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
acpi_handle_debug(device->handle, "Power state %s not supported\n",
acpi_power_state_string(state));
return -ENODEV;
}
if (!device->power.flags.ignore_parent) {
struct acpi_device *parent;
parent = acpi_dev_parent(device);
if (parent && state < parent->power.state) {
acpi_handle_debug(device->handle,
"Cannot transition to %s for parent in %s\n",
acpi_power_state_string(state),
acpi_power_state_string(parent->power.state));
return -ENODEV;
}
}
/*
* Transition Power
* ----------------
* In accordance with ACPI 6, _PSx is executed before manipulating power
* resources, unless the target state is D0, in which case _PS0 is
* supposed to be executed after turning the power resources on.
*/
if (state > ACPI_STATE_D0) {
/*
* According to ACPI 6, devices cannot go from lower-power
* (deeper) states to higher-power (shallower) states.
*/
if (state < device->power.state) {
acpi_handle_debug(device->handle,
"Cannot transition from %s to %s\n",
acpi_power_state_string(device->power.state),
acpi_power_state_string(state));
return -ENODEV;
}
/*
* If the device goes from D3hot to D3cold, _PS3 has been
* evaluated for it already, so skip it in that case.
*/
if (device->power.state < ACPI_STATE_D3_HOT) {
result = acpi_dev_pm_explicit_set(device, state);
if (result)
goto end;
}
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
int cur_state = device->power.state;
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
if (cur_state == ACPI_STATE_D0) {
int psc;
/* Nothing to do here if _PSC is not present. */
if (!device->power.flags.explicit_get)
goto no_change;
/*
* The power state of the device was set to D0 last
* time, but that might have happened before a
* system-wide transition involving the platform
* firmware, so it may be necessary to evaluate _PS0
* for the device here. However, use extra care here
* and evaluate _PSC to check the device's current power
* state, and only invoke _PS0 if the evaluation of _PSC
* is successful and it returns a power state different
* from D0.
*/
result = acpi_dev_pm_explicit_get(device, &psc);
if (result || psc == ACPI_STATE_D0)
goto no_change;
}
result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
}
end:
if (result) {
acpi_handle_debug(device->handle,
"Failed to change power state to %s\n",
acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
acpi_handle_debug(device->handle, "Power state changed to %s\n",
acpi_power_state_string(target_state));
}
return result;
no_change:
acpi_handle_debug(device->handle, "Already in %s\n",
acpi_power_state_string(state));
return 0;
}
EXPORT_SYMBOL(acpi_device_set_power);
int acpi_bus_set_power(acpi_handle handle, int state)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (device)
return acpi_device_set_power(device, state);
return -ENODEV;
}
EXPORT_SYMBOL(acpi_bus_set_power);
int acpi_bus_init_power(struct acpi_device *device)
{
int state;
int result;
if (!device)
return -EINVAL;
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device)) {
device->flags.initialized = false;
return -ENXIO;
}
result = acpi_device_get_power(device, &state);
if (result)
return result;
if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
/* Reference count the power resources. */
result = acpi_power_on_resources(device, state);
if (result)
return result;
if (state == ACPI_STATE_D0) {
/*
* If _PSC is not present and the state inferred from
* power resources appears to be D0, it still may be
* necessary to execute _PS0 at this point, because
* another device using the same power resources may
* have been put into D0 previously and that's why we
* see D0 here.
*/
result = acpi_dev_pm_explicit_set(device, state);
if (result)
return result;
}
} else if (state == ACPI_STATE_UNKNOWN) {
/*
* No power resources and missing _PSC? Cross fingers and make
* it D0 in hope that this is what the BIOS put the device into.
* [We tried to force D0 here by executing _PS0, but that broke
* Toshiba P870-303 in a nasty way.]
*/
state = ACPI_STATE_D0;
}
device->power.state = state;
return 0;
}
/**
* acpi_device_fix_up_power - Force device with missing _PSC into D0.
* @device: Device object whose power state is to be fixed up.
*
* Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
* are assumed to be put into D0 by the BIOS. However, in some cases that may
* not be the case and this function should be used then.
*/
int acpi_device_fix_up_power(struct acpi_device *device)
{
int ret = 0;
if (!device->power.flags.power_resources
&& !device->power.flags.explicit_get
&& device->power.state == ACPI_STATE_D0)
ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
return ret;
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
static int fix_up_power_if_applicable(struct acpi_device *adev, void *not_used)
{
if (adev->status.present && adev->status.enabled)
acpi_device_fix_up_power(adev);
return 0;
}
/**
* acpi_device_fix_up_power_extended - Force device and its children into D0.
* @adev: Parent device object whose power state is to be fixed up.
*
* Call acpi_device_fix_up_power() for @adev and its children so long as they
* are reported as present and enabled.
*/
void acpi_device_fix_up_power_extended(struct acpi_device *adev)
{
acpi_device_fix_up_power(adev);
acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;
int result;
if (device->power.state == ACPI_STATE_UNKNOWN) {
result = acpi_bus_init_power(device);
if (!result && state_p)
*state_p = device->power.state;
return result;
}
result = acpi_device_get_power(device, &state);
if (result)
return result;
if (state == ACPI_STATE_UNKNOWN) {
state = ACPI_STATE_D0;
result = acpi_device_set_power(device, state);
if (result)
return result;
} else {
if (device->power.flags.power_resources) {
/*
* We don't need to really switch the state, but we need
* to update the power resources' reference counters.
*/
result = acpi_power_transition(device, state);
if (result)
return result;
}
device->power.state = state;
}
if (state_p)
*state_p = state;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_device_update_power);
int acpi_bus_update_power(acpi_handle handle, int *state_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (device)
return acpi_device_update_power(device, state_p);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
bool acpi_bus_power_manageable(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
return device && device->flags.power_manageable;
}
EXPORT_SYMBOL(acpi_bus_power_manageable);
static int acpi_power_up_if_adr_present(struct acpi_device *adev, void *not_used)
{
if (!(adev->flags.power_manageable && adev->pnp.type.bus_address))
return 0;
acpi_handle_debug(adev->handle, "Power state: %s\n",
acpi_power_state_string(adev->power.state));
if (adev->power.state == ACPI_STATE_D3_COLD)
return acpi_device_set_power(adev, ACPI_STATE_D0);
return 0;
}
/**
* acpi_dev_power_up_children_with_adr - Power up children with valid _ADR
* @adev: Parent ACPI device object.
*
* Change the power states of the direct children of @adev that are in D3cold
* and hold valid _ADR objects to D0 in order to allow bus (e.g. PCI)
* enumeration code to access them.
*/
void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
{
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
/**
* acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
* @adev: ACPI companion of the target device.
*
* Evaluate _S0W for @adev and return the value produced by it or return
* ACPI_STATE_UNKNOWN on errors (including _S0W not present).
*/
u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
{
unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
if (ACPI_FAILURE(status))
return ACPI_STATE_UNKNOWN;
return state;
}
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev)
{
pm_wakeup_dev_event(dev, 0, acpi_s2idle_wakeup());
}
EXPORT_SYMBOL_GPL(acpi_pm_wakeup_event);
static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
{
struct acpi_device *adev;
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
acpi_handle_debug(handle, "Wake notify\n");
adev = acpi_get_acpi_dev(handle);
if (!adev)
return;
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
if (adev->wakeup.context.func) {
acpi_handle_debug(handle, "Running %pS for %s\n",
adev->wakeup.context.func,
dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
}
}
mutex_unlock(&acpi_pm_notifier_lock);
acpi_put_acpi_dev(adev);
}
/**
* acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
* @adev: ACPI device to add the notify handler for.
* @dev: Device to generate a wakeup event for while handling the notification.
* @func: Work function to execute when handling the notification.
*
* NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
* PM wakeup events. For example, wakeup events may be generated for bridges
* if one of the devices below the bridge is signaling wakeup, even if the
* bridge itself doesn't have a wakeup GPE associated with it.
*/
acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
void (*func)(struct acpi_device_wakeup_context *context))
{
acpi_status status = AE_ALREADY_EXISTS;
if (!dev && !func)
return AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.ws = wakeup_source_register(&adev->dev,
dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
/**
* acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @adev: ACPI device to remove the notifier from.
*/
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present)
goto out;
status = acpi_remove_notify_handler(adev->handle,
ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler);
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws);
adev->wakeup.flags.notifier_present = false;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
bool acpi_bus_can_wakeup(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
return device && device->wakeup.flags.valid;
}
EXPORT_SYMBOL(acpi_bus_can_wakeup);
bool acpi_pm_device_can_wakeup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
return adev ? acpi_device_can_wakeup(adev) : false;
}
/**
* acpi_dev_pm_get_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @adev: ACPI device node corresponding to @dev.
* @target_state: System state to match the resultant device state.
* @d_min_p: Location to store the highest power state available to the device.
* @d_max_p: Location to store the lowest power state available to the device.
*
* Find the lowest power (highest number) and highest power (lowest number) ACPI
* device power states that the device can be in while the system is in the
* state represented by @target_state. Store the integer numbers representing
* those states in the memory locations pointed to by @d_max_p and @d_min_p,
* respectively.
*
* Callers must ensure that @dev and @adev are valid pointers and that @adev
* actually corresponds to @dev before using this function.
*
* Returns 0 on success or -ENODATA when one of the ACPI methods fails or
* returns a value that doesn't make sense. The memory locations pointed to by
* @d_max_p and @d_min_p are only modified on success.
*/
static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
u32 target_state, int *d_min_p, int *d_max_p)
{
char method[] = { '_', 'S', '0' + target_state, 'D', '\0' };
acpi_handle handle = adev->handle;
unsigned long long ret;
int d_min, d_max;
bool wakeup = false;
bool has_sxd = false;
acpi_status status;
/*
* If the system state is S0, the lowest power state the device can be
* in is D3cold, unless the device has _S0W and is supposed to signal
* wakeup, in which case the return value of _S0W has to be used as the
* lowest power state available to the device.
*/
d_min = ACPI_STATE_D0;
d_max = ACPI_STATE_D3_COLD;
/*
* If present, _SxD methods return the minimum D-state (highest power
* state) we can use for the corresponding S-states. Otherwise, the
* minimum D-state is D0 (ACPI 3.x).
*/
if (target_state > ACPI_STATE_S0) {
/*
* We rely on acpi_evaluate_integer() not clobbering the integer
* provided if AE_NOT_FOUND is returned.
*/
ret = d_min;
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if ((ACPI_FAILURE(status) && status != AE_NOT_FOUND)
|| ret > ACPI_STATE_D3_COLD)
return -ENODATA;
/*
* We need to handle legacy systems where D3hot and D3cold are
* the same and 3 is returned in both cases, so fall back to
* D3cold if D3hot is not a valid state.
*/
if (!adev->power.states[ret].flags.valid) {
if (ret == ACPI_STATE_D3_HOT)
ret = ACPI_STATE_D3_COLD;
else
return -ENODATA;
}
if (status == AE_OK)
has_sxd = true;
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
} else if (device_may_wakeup(dev) && dev->power.wakeirq) {
/*
* The ACPI subsystem doesn't manage the wake bit for IRQs
* defined with ExclusiveAndWake and SharedAndWake. Instead we
* expect them to be managed via the PM subsystem. Drivers
* should call dev_pm_set_wake_irq to register an IRQ as a wake
* source.
*
* If a device has a wake IRQ attached we need to check the
* _S0W method to get the correct wake D-state. Otherwise we
* end up putting the device into D3Cold which will more than
* likely disable wake functionality.
*/
wakeup = true;
} else {
/* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid;
}
/*
* If _PRW says we can wake up the system from the target sleep state,
* the D-state returned by _SxD is sufficient for that (we assume a
* wakeup-aware driver if wake is set). Still, if _SxW exists
* (ACPI 3.x), it should return the maximum (lowest power) D-state that
* can wake the system. _S0W may be valid, too.
*/
if (wakeup) {
method[3] = 'W';
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if (status == AE_NOT_FOUND) {
/* No _SxW. In this case, the ACPI spec says that we
* must not go into any power state deeper than the
* value returned from _SxD.
*/
if (has_sxd && target_state > ACPI_STATE_S0)
d_max = d_min;
} else if (ACPI_SUCCESS(status) && ret <= ACPI_STATE_D3_COLD) {
/* Fall back to D3cold if ret is not a valid state. */
if (!adev->power.states[ret].flags.valid)
ret = ACPI_STATE_D3_COLD;
d_max = ret > d_min ? ret : d_min;
} else {
return -ENODATA;
}
}
if (d_min_p)
*d_min_p = d_min;
if (d_max_p)
*d_max_p = d_max;
return 0;
}
/**
* acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @d_min_p: Location to store the upper limit of the allowed states range.
* @d_max_in: Deepest low-power state to take into consideration.
* Return value: Preferred power state of the device on success, -ENODEV
* if there's no 'struct acpi_device' for @dev, -EINVAL if @d_max_in is
* incorrect, or -ENODATA on ACPI method failure.
*
* The caller must ensure that @dev is valid before using this function.
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
struct acpi_device *adev;
int ret, d_min, d_max;
if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD)
return -EINVAL;
if (d_max_in > ACPI_STATE_D2) {
enum pm_qos_flags_status stat;
stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat == PM_QOS_FLAGS_ALL)
d_max_in = ACPI_STATE_D2;
}
adev = ACPI_COMPANION(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
ret = acpi_dev_pm_get_state(dev, adev, acpi_target_system_state(),
&d_min, &d_max);
if (ret)
return ret;
if (d_max_in < d_min)
return -EINVAL;
if (d_max > d_max_in) {
for (d_max = d_max_in; d_max > d_min; d_max--) {
if (adev->power.states[d_max].flags.valid)
break;
}
}
if (d_min_p)
*d_min_p = d_min;
return d_max;
}
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
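/*
 * Illustrative sketch (added for exposition, not part of this file): a bus
 * or driver can ask for a suitable target D-state no deeper than D3hot as
 * follows. The function name is hypothetical.
 */
#if 0
static int example_choose_target_state(struct device *dev)
{
	int d_min;
	int d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3_HOT);

	if (d_max < 0)
		return d_max;	/* No companion, bad input, or method failure. */

	/* Any state in [d_min, d_max] is acceptable; pick the deepest one. */
	return d_max;
}
#endif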
/**
* acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
* @context: Device wakeup context.
*/
static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
{
struct device *dev = context->dev;
if (dev) {
pm_wakeup_event(dev, 0);
pm_request_resume(dev);
}
}
static DEFINE_MUTEX(acpi_wakeup_lock);
static int __acpi_device_wakeup_enable(struct acpi_device *adev,
u32 target_state)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
acpi_status status;
int error = 0;
mutex_lock(&acpi_wakeup_lock);
/*
* If the device wakeup power is already enabled, disable it and enable
* it again in case it depends on the configuration of subordinate
* devices and the conditions have changed since it was enabled last
* time.
*/
if (wakeup->enable_count > 0)
acpi_disable_wakeup_device_power(adev);
error = acpi_enable_wakeup_device_power(adev, target_state);
if (error) {
if (wakeup->enable_count > 0) {
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
wakeup->enable_count = 0;
}
goto out;
}
if (wakeup->enable_count > 0)
goto inc;
status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
if (ACPI_FAILURE(status)) {
acpi_disable_wakeup_device_power(adev);
error = -EIO;
goto out;
}
acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
(unsigned int)wakeup->gpe_number);
inc:
if (wakeup->enable_count < INT_MAX)
wakeup->enable_count++;
else
acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n");
out:
mutex_unlock(&acpi_wakeup_lock);
return error;
}
/**
* acpi_device_wakeup_enable - Enable wakeup functionality for device.
* @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
*
* Enable the GPE associated with @adev so that it can generate wakeup signals
* for the device in response to external (remote) events and enable wakeup
* power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
{
return __acpi_device_wakeup_enable(adev, target_state);
}
/**
* acpi_device_wakeup_disable - Disable wakeup functionality for device.
* @adev: ACPI device to disable wakeup functionality for.
*
* Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
mutex_lock(&acpi_wakeup_lock);
if (!wakeup->enable_count)
goto out;
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
acpi_disable_wakeup_device_power(adev);
wakeup->enable_count--;
out:
mutex_unlock(&acpi_wakeup_lock);
}
/**
* acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
* @dev: Device to enable/disable to generate wakeup events.
* @enable: Whether to enable or disable the wakeup functionality.
*/
int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
struct acpi_device *adev;
int error;
adev = ACPI_COMPANION(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
if (!enable) {
acpi_device_wakeup_disable(adev);
dev_dbg(dev, "Wakeup disabled by ACPI\n");
return 0;
}
error = __acpi_device_wakeup_enable(adev, acpi_target_system_state());
if (!error)
dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
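/*
 * Illustrative sketch (added for exposition, not part of this file): a
 * driver's system suspend callback can arm ACPI remote wakeup like this.
 * The function name is hypothetical.
 */
#if 0
static int example_suspend(struct device *dev)
{
	if (device_may_wakeup(dev)) {
		int error = acpi_pm_set_device_wakeup(dev, true);

		if (error)
			return error;
	}

	/* Proceed with the driver-specific suspend sequence. */
	return 0;
}
#endif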
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
* @dev: Device to put into a low-power state.
* @adev: ACPI device node corresponding to @dev.
* @system_state: System state to choose the device state for.
*/
static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev,
u32 system_state)
{
int ret, state;
if (!acpi_device_power_manageable(adev))
return 0;
ret = acpi_dev_pm_get_state(dev, adev, system_state, NULL, &state);
return ret ? ret : acpi_device_set_power(adev, state);
}
/**
* acpi_dev_pm_full_power - Put ACPI device into the full-power state.
* @adev: ACPI device node to put into the full-power state.
*/
static int acpi_dev_pm_full_power(struct acpi_device *adev)
{
return acpi_device_power_manageable(adev) ?
acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
}
/**
* acpi_dev_suspend - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
* @wakeup: Whether or not to enable wakeup for the device.
*
* Put the given device into a low-power state using the standard ACPI
* mechanism. Set up remote wakeup if desired, choose the state to put the
* device into (this checks if remote wakeup is expected to work too), and set
* the power state of the device.
*/
int acpi_dev_suspend(struct device *dev, bool wakeup)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u32 target_state = acpi_target_system_state();
int error;
if (!adev)
return 0;
if (wakeup && acpi_device_can_wakeup(adev)) {
error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return -EAGAIN;
} else {
wakeup = false;
}
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error && wakeup)
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_suspend);
/**
* acpi_dev_resume - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
* mechanism. Set the power state of the device to ACPI D0 and disable wakeup.
*/
int acpi_dev_resume(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
return 0;
error = acpi_dev_pm_full_power(adev);
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume);
/**
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
* @dev: Device to suspend.
*
* Carry out the generic runtime suspend procedure for @dev and use ACPI to put
* it into a runtime low-power state.
*/
int acpi_subsys_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
/**
* acpi_subsys_runtime_resume - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic runtime resume procedure for it.
*/
int acpi_subsys_runtime_resume(struct device *dev)
{
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
#ifdef CONFIG_PM_SLEEP
static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
u32 sys_target = acpi_target_system_state();
int ret, state;
if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
return true;
if (sys_target == ACPI_STATE_S0)
return false;
if (adev->power.flags.dsw_present)
return true;
ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
if (ret)
return true;
return state != adev->power.state;
}
/**
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
* @dev: Device to prepare.
*/
int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
if (ret < 0)
return ret;
if (!ret && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
return 0;
}
return !acpi_dev_needs_resume(dev, adev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
* acpi_subsys_complete - Finalize device's resume during system resume.
* @dev: Device to handle.
*/
void acpi_subsys_complete(struct device *dev)
{
pm_generic_complete(dev);
/*
* If the device had been runtime-suspended before the system went into
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
/**
* acpi_subsys_suspend - Run the device driver's suspend callback.
* @dev: Device to handle.
*
* Follow PCI and resume devices from runtime suspend before running their
* system suspend callbacks, unless the driver can cope with runtime-suspended
* devices during system suspend and there are no ACPI-specific reasons for
* resuming them.
*/
int acpi_subsys_suspend(struct device *dev)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
return pm_generic_suspend(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
/**
* acpi_subsys_suspend_late - Suspend device using ACPI.
* @dev: Device to suspend.
*
* Carry out the generic late suspend procedure for @dev and use ACPI to put
* it into a low-power state during system transition into a sleep state.
*/
int acpi_subsys_suspend_late(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
/**
* acpi_subsys_suspend_noirq - Run the device driver's "noirq" suspend callback.
* @dev: Device to suspend.
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_noirq(dev);
if (ret)
return ret;
/*
* If the target system sleep state is suspend-to-idle, it is sufficient
* to check whether or not the device's wakeup settings are good for
* runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
* acpi_subsys_complete() to take care of fixing up the device's state
* anyway, if need be.
*/
if (device_can_wakeup(dev) && !device_may_wakeup(dev))
dev->power.may_skip_resume = false;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
/**
* acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
* @dev: Device to handle.
*/
static int acpi_subsys_resume_noirq(struct device *dev)
{
if (dev_pm_skip_resume(dev))
return 0;
return pm_generic_resume_noirq(dev);
}
/**
* acpi_subsys_resume_early - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic early resume procedure for it during system transition into the
* working state, but only do that if the device either defines an early
* resume handler or does not define power operations at all. Otherwise
* powering up of the device is postponed to the normal resume phase.
*/
static int acpi_subsys_resume_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
if (dev_pm_skip_resume(dev))
return 0;
if (pm && !pm->resume_early) {
dev_dbg(dev, "postponing D0 transition to normal resume stage\n");
return 0;
}
ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
/**
* acpi_subsys_resume - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state if it has not been
* powered up during early resume phase, and carry out the generic resume
* procedure for it during system transition into the working state.
*/
static int acpi_subsys_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret = 0;
if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) {
dev_dbg(dev, "executing postponed D0 transition\n");
ret = acpi_dev_resume(dev);
}
return ret ? ret : pm_generic_resume(dev);
}
/**
* acpi_subsys_freeze - Run the device driver's freeze callback.
* @dev: Device to handle.
*/
int acpi_subsys_freeze(struct device *dev)
{
/*
* Resume all runtime-suspended devices before creating a snapshot
* image of system memory, because the restore kernel generally cannot
* be expected to always handle them consistently and they need to be
* put into the runtime-active metastate during system resume anyway,
* so it is better to ensure that the state saved in the image will be
* always consistent with that.
*/
pm_runtime_resume(dev);
return pm_generic_freeze(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
/**
* acpi_subsys_restore_early - Restore device using ACPI.
* @dev: Device to restore.
*/
int acpi_subsys_restore_early(struct device *dev)
{
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_restore_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
/**
* acpi_subsys_poweroff - Run the device driver's poweroff callback.
* @dev: Device to handle.
*
* Follow PCI and resume devices from runtime suspend before running their
* system poweroff callbacks, unless the driver can cope with runtime-suspended
* devices during system suspend and there are no ACPI-specific reasons for
* resuming them.
*/
int acpi_subsys_poweroff(struct device *dev)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
return pm_generic_poweroff(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_poweroff);
/**
* acpi_subsys_poweroff_late - Run the device driver's poweroff callback.
* @dev: Device to handle.
*
* Carry out the generic late poweroff procedure for @dev and use ACPI to put
* it into a low-power state during system transition into a sleep state.
*/
static int acpi_subsys_poweroff_late(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_poweroff_late(dev);
if (ret)
return ret;
return acpi_dev_suspend(dev, device_may_wakeup(dev));
}
/**
* acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback.
* @dev: Device to suspend.
*/
static int acpi_subsys_poweroff_noirq(struct device *dev)
{
if (dev_pm_skip_suspend(dev))
return 0;
return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.resume = acpi_subsys_resume,
.suspend_late = acpi_subsys_suspend_late,
.suspend_noirq = acpi_subsys_suspend_noirq,
.resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_subsys_resume_early,
.freeze = acpi_subsys_freeze,
.poweroff = acpi_subsys_poweroff,
.poweroff_late = acpi_subsys_poweroff_late,
.poweroff_noirq = acpi_subsys_poweroff_noirq,
.restore_early = acpi_subsys_restore_early,
#endif
},
};
/**
* acpi_dev_pm_detach - Remove ACPI power management from the device.
* @dev: Device to take care of.
* @power_off: Whether or not to try to remove power from the device.
*
* Remove the device from the general ACPI PM domain and remove its wakeup
* notifier. If @power_off is set, additionally remove power from the device if
* possible.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*/
static void acpi_dev_pm_detach(struct device *dev, bool power_off)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev_pm_domain_set(dev, NULL);
acpi_remove_pm_notifier(adev);
if (power_off) {
/*
* If the device's PM QoS resume latency limit or flags
* have been exposed to user space, they have to be
* hidden at this point, so that they don't affect the
* choice of the low-power state to put the device into.
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
}
/**
* acpi_dev_pm_attach - Prepare device for ACPI power management.
* @dev: Device to prepare.
* @power_on: Whether or not to power on the device.
*
* If @dev has a valid ACPI handle that has a valid struct acpi_device object
* attached to it, install a wakeup notification handler for the device and
* add it to the general ACPI PM domain. If @power_on is set, the device will
* be put into the ACPI D0 state before the function returns.
*
* This assumes that the @dev's bus type uses generic power management callbacks
* (or doesn't use any power management callbacks at all).
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
/*
* Skip devices whose ACPI companions match the device IDs below,
* because they require special power management handling incompatible
* with the generic ACPI PM domain.
*/
static const struct acpi_device_id special_pm_ids[] = {
ACPI_FAN_DEVICE_IDS,
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
return 0;
/*
* Only attach the power domain to the first physical device if the ACPI
* companion is shared by multiple devices, to prevent doing power
* management twice.
*/
if (!acpi_device_is_first_physical_node(adev, dev))
return 0;
acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
/**
* acpi_storage_d3 - Check if D3 should be used in the suspend path
* @dev: Device to check
*
* Return %true if the platform firmware wants @dev to be programmed
* into D3hot or D3cold (if supported) in the suspend path, or %false
* when there is no specific preference. On some platforms, if this
* hint is ignored, @dev may remain unresponsive after suspending the
* platform as a whole.
*
* Although the property has "storage" in its name, it is actually applied
* to the PCIe slot, so when a non-storage device is plugged into that slot
* the same platform restrictions will likely apply.
*/
bool acpi_storage_d3(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u8 val;
if (force_storage_d3())
return true;
if (!adev)
return false;
if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
&val))
return false;
return val == 1;
}
EXPORT_SYMBOL_GPL(acpi_storage_d3);
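/*
 * Illustrative sketch (added for exposition, not part of this file): a
 * storage driver can branch on the StorageD3Enable hint in its suspend
 * path. The function name is hypothetical.
 */
#if 0
static int example_storage_suspend(struct device *dev)
{
	if (acpi_storage_d3(dev)) {
		/* Fully shut down so the PM core can put the device in D3. */
		return 0;
	}

	/* Otherwise use the driver's default low-power strategy. */
	return 0;
}
#endif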
/**
* acpi_dev_state_d0 - Tell if the device is in D0 power state
* @dev: Physical device the ACPI power state of which to check
*
* On a system without ACPI, return true. On a system with ACPI, return true if
* the current ACPI power state of the device is D0, or false otherwise.
*
* Note that the power state of a device is not well-defined after it has been
* passed to acpi_device_set_power() and before that function returns, so it is
* not valid to ask for the ACPI power state of the device in that time frame.
*
* This function is intended to be used in a driver's probe or remove
* function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for
* more information.
*/
bool acpi_dev_state_d0(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return true;
return adev->power.state == ACPI_STATE_D0;
}
EXPORT_SYMBOL_GPL(acpi_dev_state_d0);
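/*
 * Illustrative sketch (added for exposition, not part of this file): a
 * probe routine that supports being run with the device in a non-D0 state
 * can branch on acpi_dev_state_d0() before touching the hardware. The
 * function name is hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	if (!acpi_dev_state_d0(dev)) {
		/* Defer register access until the device is powered up. */
		return 0;
	}

	/* Full initialization with the device known to be in D0. */
	return 0;
}
#endif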
#endif /* CONFIG_PM */
| linux-master | drivers/acpi/device_pm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for PNP bus type
*
* Copyright (C) 2014, Intel Corporation
* Authors: Zhang Rui <[email protected]>
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include "internal.h"
static const struct acpi_device_id acpi_pnp_device_ids[] = {
/* pata_isapnp */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* floppy */
{"PNP0700"},
/* tpm_inf_pnp */
{"IFX0101"}, /* Infineon TPMs */
{"IFX0102"}, /* Infineon TPMs */
/* tpm_tis */
{"PNP0C31"}, /* TPM */
{"ATM1200"}, /* Atmel */
{"IFX0102"}, /* Infineon */
{"BCM0101"}, /* Broadcom */
{"BCM0102"}, /* Broadcom */
{"NSC1200"}, /* National */
{"ICO0102"}, /* Intel */
/* ide */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* ns558 */
{"ASB16fd"}, /* AdLib NSC16 */
{"AZT3001"}, /* AZT1008 */
{"CDC0001"}, /* Opl3-SAx */
{"CSC0001"}, /* CS4232 */
{"CSC000f"}, /* CS4236 */
{"CSC0101"}, /* CS4327 */
{"CTL7001"}, /* SB16 */
{"CTL7002"}, /* AWE64 */
{"CTL7005"}, /* Vibra16 */
{"ENS2020"}, /* SoundscapeVIVO */
{"ESS0001"}, /* ES1869 */
{"ESS0005"}, /* ES1878 */
{"ESS6880"}, /* ES688 */
{"IBM0012"}, /* CS4232 */
{"OPT0001"}, /* OPTi Audio16 */
{"YMH0006"}, /* Opl3-SA */
{"YMH0022"}, /* Opl3-SAx */
{"PNPb02f"}, /* Generic */
/* i8042 kbd */
{"PNP0300"},
{"PNP0301"},
{"PNP0302"},
{"PNP0303"},
{"PNP0304"},
{"PNP0305"},
{"PNP0306"},
{"PNP0309"},
{"PNP030a"},
{"PNP030b"},
{"PNP0320"},
{"PNP0343"},
{"PNP0344"},
{"PNP0345"},
{"CPQA0D7"},
/* i8042 aux */
{"AUI0200"},
{"FJC6000"},
{"FJC6001"},
{"PNP0f03"},
{"PNP0f0b"},
{"PNP0f0e"},
{"PNP0f12"},
{"PNP0f13"},
{"PNP0f19"},
{"PNP0f1c"},
{"SYN0801"},
/* fcpnp */
{"AVM0900"},
/* radio-cadet */
{"MSM0c24"}, /* ADS Cadet AM/FM Radio Card */
/* radio-gemtek */
{"ADS7183"}, /* AOpen FX-3D/Pro Radio */
/* radio-sf16fmr2 */
{"MFRad13"}, /* tuner subdevice of SF16-FMD2 */
/* ene_ir */
{"ENE0100"},
{"ENE0200"},
{"ENE0201"},
{"ENE0202"},
/* fintek-cir */
{"FIT0002"}, /* CIR */
/* ite-cir */
{"ITE8704"}, /* Default model */
{"ITE8713"}, /* CIR found in EEEBox 1501U */
{"ITE8708"}, /* Bridged IT8512 */
{"ITE8709"}, /* SRAM-Bridged IT8512 */
/* nuvoton-cir */
{"WEC0530"}, /* CIR */
{"NTN0530"}, /* CIR for new chip's pnp id */
/* Winbond CIR */
{"WEC1022"},
/* wbsd */
{"WEC0517"},
{"WEC0518"},
/* 3c509 */
{"TCM5090"}, /* 3Com Etherlink III (TP) */
{"TCM5091"}, /* 3Com Etherlink III */
{"TCM5094"}, /* 3Com Etherlink III (combo) */
{"TCM5095"}, /* 3Com Etherlink III (TPO) */
{"TCM5098"}, /* 3Com Etherlink III (TPC) */
{"PNP80f7"}, /* 3Com Etherlink III compatible */
{"PNP80f8"}, /* 3Com Etherlink III compatible */
/* nsc-ircc */
{"NSC6001"},
{"HWPC224"},
{"IBM0071"},
/* smsc-ircc2 */
{"SMCf010"},
/* sb1000 */
{"GIC1000"},
/* parport_pc */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
/* apple-gmux */
{"APP000B"},
/* system */
{"PNP0c02"}, /* General ID for reserving resources */
{"PNP0c01"}, /* memory controller */
/* rtc_cmos */
{"PNP0b00"},
{"PNP0b01"},
{"PNP0b02"},
/* c6xdigio */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
/* ni_atmio.c */
{"NIC1900"},
{"NIC2400"},
{"NIC2500"},
{"NIC2600"},
{"NIC2700"},
/* serial */
{"AAC000F"}, /* Archtek America Corp. Archtek SmartLink Modem 3334BT Plug & Play */
{"ADC0001"}, /* Anchor Datacomm BV. SXPro 144 External Data Fax Modem Plug & Play */
{"ADC0002"}, /* SXPro 288 External Data Fax Modem Plug & Play */
{"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
{"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */
{"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */
{"ALI5123"}, /* ALi Fast Infrared Controller */
{"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */
{"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
{"BRI1400"}, /* Boca Research 33,600 ACF Modem */
{"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */
{"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */
{"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
{"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
{"DAV0336"}, /* Davicom ISA 33.6K Modem */
{"DMB1032"}, /* Creative Modem Blaster Flash56 DI5601-1 */
{"DMB2001"}, /* Creative Modem Blaster V.90 DI5660 */
{"ETT0002"}, /* E-Tech CyberBULLET PC56RVP */
{"FUJ0202"}, /* Fujitsu 33600 PnP-I2 R Plug & Play */
{"FUJ0205"}, /* Fujitsu FMV-FX431 Plug & Play */
{"FUJ0206"}, /* Fujitsu 33600 PnP-I4 R Plug & Play */
{"FUJ0209"}, /* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
{"GVC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */
{"GVC0303"}, /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */
{"HAY0001"}, /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
{"HAY000C"}, /* Hayes Optima 336 V.34 + FAX + Voice PnP */
{"HAY000D"}, /* Hayes Optima 336B V.34 + FAX + Voice PnP */
{"HAY5670"}, /* Hayes Accura 56K Ext Fax Modem PnP */
{"HAY5674"}, /* Hayes Accura 56K Ext Fax Modem PnP */
{"HAY5675"}, /* Hayes Accura 56K Fax Modem PnP */
{"HAYF000"}, /* Hayes 288, V.34 + FAX */
{"HAYF001"}, /* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
{"IBM0033"}, /* IBM Thinkpad 701 Internal Modem Voice */
{"PNP4972"}, /* Intermec CV60 touchscreen port */
{"IXDC801"}, /* Intertex 28k8 33k6 Voice EXT PnP */
{"IXDC901"}, /* Intertex 33k6 56k Voice EXT PnP */
{"IXDD801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */
{"IXDD901"}, /* Intertex 33k6 56k Voice SP EXT PnP */
{"IXDF401"}, /* Intertex 28k8 33k6 Voice SP INT PnP */
{"IXDF801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */
{"IXDF901"}, /* Intertex 33k6 56k Voice SP EXT PnP */
{"KOR4522"}, /* KORTEX 28800 Externe PnP */
{"KORF661"}, /* KXPro 33.6 Vocal ASVD PnP */
{"LAS4040"}, /* LASAT Internet 33600 PnP */
{"LAS4540"}, /* Lasat Safire 560 PnP */
{"LAS5440"}, /* Lasat Safire 336 PnP */
{"MNP0281"}, /* Microcom TravelPorte FAST V.34 Plug & Play */
{"MNP0336"}, /* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
{"MNP0339"}, /* Microcom DeskPorte FAST EP 28.8 Plug & Play */
{"MNP0342"}, /* Microcom DeskPorte 28.8P Plug & Play */
{"MNP0500"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */
{"MNP0501"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */
{"MNP0502"}, /* Microcom DeskPorte 28.8S Internal Plug & Play */
{"MOT1105"}, /* Motorola BitSURFR Plug & Play */
{"MOT1111"}, /* Motorola TA210 Plug & Play */
{"MOT1114"}, /* Motorola HMTA 200 (ISDN) Plug & Play */
{"MOT1115"}, /* Motorola BitSURFR Plug & Play */
{"MOT1190"}, /* Motorola Lifestyle 28.8 Internal */
{"MOT1501"}, /* Motorola V.3400 Plug & Play */
{"MOT1502"}, /* Motorola Lifestyle 28.8 V.34 Plug & Play */
{"MOT1505"}, /* Motorola Power 28.8 V.34 Plug & Play */
{"MOT1509"}, /* Motorola ModemSURFR External 28.8 Plug & Play */
{"MOT150A"}, /* Motorola Premier 33.6 Desktop Plug & Play */
{"MOT150F"}, /* Motorola VoiceSURFR 56K External PnP */
{"MOT1510"}, /* Motorola ModemSURFR 56K External PnP */
{"MOT1550"}, /* Motorola ModemSURFR 56K Internal PnP */
{"MOT1560"}, /* Motorola ModemSURFR Internal 28.8 Plug & Play */
{"MOT1580"}, /* Motorola Premier 33.6 Internal Plug & Play */
{"MOT15B0"}, /* Motorola OnlineSURFR 28.8 Internal Plug & Play */
{"MOT15F0"}, /* Motorola VoiceSURFR 56K Internal PnP */
{"MVX00A1"}, /* Deskline K56 Phone System PnP */
{"MVX00F2"}, /* PC Rider K56 Phone System PnP */
{"nEC8241"}, /* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
{"PMC2430"}, /* Pace 56 Voice Internal Plug & Play Modem */
{"PNP0500"}, /* Generic standard PC COM port */
{"PNP0501"}, /* Generic 16550A-compatible COM port */
{"PNPC000"}, /* Compaq 14400 Modem */
{"PNPC001"}, /* Compaq 2400/9600 Modem */
{"PNPC031"}, /* Dial-Up Networking Serial Cable between 2 PCs */
{"PNPC032"}, /* Dial-Up Networking Parallel Cable between 2 PCs */
{"PNPC100"}, /* Standard 9600 bps Modem */
{"PNPC101"}, /* Standard 14400 bps Modem */
{"PNPC102"}, /* Standard 28800 bps Modem */
{"PNPC103"}, /* Standard Modem */
{"PNPC104"}, /* Standard 9600 bps Modem */
{"PNPC105"}, /* Standard 14400 bps Modem */
{"PNPC106"}, /* Standard 28800 bps Modem */
{"PNPC107"}, /* Standard Modem */
{"PNPC108"}, /* Standard 9600 bps Modem */
{"PNPC109"}, /* Standard 14400 bps Modem */
{"PNPC10A"}, /* Standard 28800 bps Modem */
{"PNPC10B"}, /* Standard Modem */
{"PNPC10C"}, /* Standard 9600 bps Modem */
{"PNPC10D"}, /* Standard 14400 bps Modem */
{"PNPC10E"}, /* Standard 28800 bps Modem */
{"PNPC10F"}, /* Standard Modem */
{"PNP2000"}, /* Standard PCMCIA Card Modem */
{"ROK0030"}, /* Rockwell 33.6 DPF Internal PnP, Modular Technology 33.6 Internal PnP */
{"ROK0100"}, /* KORTEX 14400 Externe PnP */
{"ROK4120"}, /* Rockwell 28.8 */
{"ROK4920"}, /* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
{"RSS00A0"}, /* Rockwell 33.6 DPF External PnP, BT Prologue 33.6 External PnP, Modular Technology 33.6 External PnP */
{"RSS0262"}, /* Viking 56K FAX INT */
{"RSS0250"}, /* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
{"SUP1310"}, /* SupraExpress 28.8 Data/Fax PnP modem */
{"SUP1381"}, /* SupraExpress 336i PnP Voice Modem */
{"SUP1421"}, /* SupraExpress 33.6 Data/Fax PnP modem */
{"SUP1590"}, /* SupraExpress 33.6 Data/Fax PnP modem */
{"SUP1620"}, /* SupraExpress 336i Sp ASVD */
{"SUP1760"}, /* SupraExpress 33.6 Data/Fax PnP modem */
{"SUP2171"}, /* SupraExpress 56i Sp Intl */
{"TEX0011"}, /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
{"UAC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */
{"USR0000"}, /* 3Com Corp. Gateway Telepath IIvi 33.6 */
{"USR0002"}, /* U.S. Robotics Sporster 33.6K Fax INT PnP */
{"USR0004"}, /* Sportster Vi 14.4 PnP FAX Voicemail */
{"USR0006"}, /* U.S. Robotics 33.6K Voice INT PnP */
{"USR0007"}, /* U.S. Robotics 33.6K Voice EXT PnP */
{"USR0009"}, /* U.S. Robotics Courier V.Everything INT PnP */
{"USR2002"}, /* U.S. Robotics 33.6K Voice INT PnP */
{"USR2070"}, /* U.S. Robotics 56K Voice INT PnP */
{"USR2080"}, /* U.S. Robotics 56K Voice EXT PnP */
{"USR3031"}, /* U.S. Robotics 56K FAX INT */
{"USR3050"}, /* U.S. Robotics 56K FAX INT */
{"USR3070"}, /* U.S. Robotics 56K Voice INT PnP */
{"USR3080"}, /* U.S. Robotics 56K Voice EXT PnP */
{"USR3090"}, /* U.S. Robotics 56K Voice INT PnP */
{"USR9100"}, /* U.S. Robotics 56K Message */
{"USR9160"}, /* U.S. Robotics 56K FAX EXT PnP */
{"USR9170"}, /* U.S. Robotics 56K FAX INT PnP */
{"USR9180"}, /* U.S. Robotics 56K Voice EXT PnP */
{"USR9190"}, /* U.S. Robotics 56K Voice INT PnP */
{"WACFXXX"}, /* Wacom tablets */
{"FPI2002"}, /* Compaq touchscreen */
{"FUJ02B2"}, /* Fujitsu Stylistic touchscreens */
{"FUJ02B3"},
{"FUJ02B4"}, /* Fujitsu Stylistic LT touchscreens */
{"FUJ02B6"}, /* Passive Fujitsu Stylistic touchscreens */
{"FUJ02B7"},
{"FUJ02B8"},
{"FUJ02B9"},
{"FUJ02BC"},
{"FUJ02E5"}, /* Fujitsu Wacom Tablet PC device */
{"FUJ02E6"}, /* Fujitsu P-series tablet PC device */
{"FUJ02E7"}, /* Fujitsu Wacom 2FGT Tablet PC device */
{"FUJ02E9"}, /* Fujitsu Wacom 1FGT Tablet PC device */
{"LTS0001"}, /* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in disguise) */
{"WCI0003"}, /* Rockwell's (PORALiNK) 33600 INT PNP */
{"WEC1022"}, /* Winbond CIR port, should not be probed. We should keep track of it to prevent the legacy serial driver from probing it */
/* scl200wdt */
{"NSC0800"}, /* National Semiconductor PC87307/PC97307 watchdog component */
/* mpu401 */
{"PNPb006"},
/* cs423x-pnpbios */
{"CSC0100"},
{"CSC0103"},
{"CSC0110"},
{"CSC0000"},
{"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
/* es18xx-pnpbios */
{"ESS1869"},
{"ESS1879"},
/* snd-opl3sa2-pnpbios */
{"YMH0021"},
{"NMX2210"}, /* Gateway Solo 2500 */
{""},
};
static bool matching_id(const char *idstr, const char *list_id)
{
int i;
if (strlen(idstr) != strlen(list_id))
return false;
if (memcmp(idstr, list_id, 3))
return false;
for (i = 3; i < 7; i++) {
char c = toupper(idstr[i]);
if (!isxdigit(c)
|| (list_id[i] != 'X' && c != toupper(list_id[i])))
return false;
}
return true;
}
static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matchid)
{
const struct acpi_device_id *devid;
for (devid = acpi_pnp_device_ids; devid->id[0]; devid++)
if (matching_id(idstr, (char *)devid->id)) {
if (matchid)
*matchid = devid;
return true;
}
return false;
}
/*
* If one of the device IDs below is present in the list of device IDs of a
* given ACPI device object, the PNP scan handler will not attach to that
* object, because there is a proper non-PNP driver in the kernel for the
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
{"INTC1080"},
{"INTC1081"},
{""},
};
static int acpi_pnp_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
return !!acpi_match_device_ids(adev, acpi_nonpnp_device_ids);
}
static struct acpi_scan_handler acpi_pnp_handler = {
.ids = acpi_pnp_device_ids,
.match = acpi_pnp_match,
.attach = acpi_pnp_attach,
};
/*
* For CMOS RTC devices, the PNP ACPI scan handler does not work, because
* there is a CMOS RTC ACPI scan handler installed already, so we need to
* check those devices and enumerate them to the PNP bus directly.
*/
static int is_cmos_rtc_device(struct acpi_device *adev)
{
static const struct acpi_device_id ids[] = {
{ "PNP0B00" },
{ "PNP0B01" },
{ "PNP0B02" },
{""},
};
return !acpi_match_device_ids(adev, ids);
}
bool acpi_is_pnp_device(struct acpi_device *adev)
{
return adev->handler == &acpi_pnp_handler || is_cmos_rtc_device(adev);
}
EXPORT_SYMBOL_GPL(acpi_is_pnp_device);
void __init acpi_pnp_init(void)
{
acpi_scan_add_handler(&acpi_pnp_handler);
}
| linux-master | drivers/acpi/acpi_pnp.c |
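/*
 * Editorial sketch (standalone userspace, not part of the file above):
 * matching_id() treats the first three characters of a PNP ID as a literal
 * vendor code and the last four as hex digits, with 'X' in the table acting
 * as a wildcard (e.g. "WACFXXX" covers all Wacom tablet IDs). The program
 * below re-implements that rule outside the kernel purely for illustration.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool pnp_id_matches(const char *idstr, const char *list_id)
{
	if (strlen(idstr) != strlen(list_id))
		return false;
	if (memcmp(idstr, list_id, 3))		/* vendor part is literal */
		return false;
	for (int i = 3; i < 7; i++) {
		char c = toupper((unsigned char)idstr[i]);

		if (!isxdigit((unsigned char)c))
			return false;
		if (list_id[i] != 'X' && c != toupper((unsigned char)list_id[i]))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", pnp_id_matches("WACF004", "WACFXXX"));	/* 1: wildcard hit */
	printf("%d\n", pnp_id_matches("WACG004", "WACFXXX"));	/* 0: 'G' is not hex */
	return 0;
}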
// SPDX-License-Identifier: GPL-2.0-only
/*
* debugfs.c - ACPI debugfs interface to userspace.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include "internal.h"
struct dentry *acpi_debugfs_dir;
EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
void __init acpi_debugfs_init(void)
{
acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
}
| linux-master | drivers/acpi/debugfs.c |
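/*
 * Editorial sketch (not part of the file above): any ACPI component can hang
 * its own entries off the exported acpi_debugfs_dir created by this file.
 * The file name and backing variable below are hypothetical; only
 * debugfs_create_u32() and acpi_debugfs_dir are real API.
 */
#include <linux/acpi.h>
#include <linux/debugfs.h>

static u32 example_stat;	/* hypothetical counter to expose */

static void example_debugfs_add(void)
{
	/* Appears as /sys/kernel/debug/acpi/example_stat once debugfs is mounted. */
	debugfs_create_u32("example_stat", 0444, acpi_debugfs_dir, &example_stat);
}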
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* processor_idle - idle state submodule to the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004, 2005 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* - Added processor hotplug support
* Copyright (C) 2005 Venkatesh Pallipadi <[email protected]>
* - Added support for C3 on SMP
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
* asm/acpi.h is not an option, as it would require more include magic. Also
* creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
*/
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif
#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
static int disabled_by_idle_boot_param(void)
{
return boot_option_idle_override == IDLE_POLL ||
boot_option_idle_override == IDLE_HALT;
}
/*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else.
*
* To skip this limit, boot/load with a large max_cstate limit.
*/
static int set_max_cstate(const struct dmi_system_id *id)
{
if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
return 0;
pr_notice("%s detected - limiting to C%ld max_cstate."
" Override with \"processor.max_cstate=%d\"\n", id->ident,
(long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
max_cstate = (long)id->driver_data;
return 0;
}
static const struct dmi_system_id processor_power_dmi_table[] = {
{ set_max_cstate, "Clevo 5600D", {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
{ set_max_cstate, "Pavilion zv5000", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
(void *)1},
{ set_max_cstate, "Asus L8400B", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
{},
};
/*
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
raw_safe_halt();
raw_local_irq_disable();
}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3
/*
* Some BIOS implementations switch to C3 in the published C2 state.
* This seems to be a common problem on AMD boxen, but other vendors
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
type = ACPI_STATE_C1;
/*
* Check, if one of the previous states already marked the lapic
* unstable
*/
if (pwr->timer_broadcast_on_state < state)
return;
if (cx->type >= type)
pr->power.timer_broadcast_on_state = state;
}
static void __lapic_timer_propagate_broadcast(void *arg)
{
struct acpi_processor *pr = arg;
if (pr->power.timer_broadcast_on_state < INT_MAX)
tick_broadcast_enable();
else
tick_broadcast_disable();
}
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
(void *)pr, 1);
}
/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}
#else
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
return false;
}
#endif
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
fallthrough;
default:
/* TSC could halt in idle, so notify users */
if (state > ACPI_STATE_C1)
mark_tsc_unstable("TSC halts in idle");
}
}
#else
static void tsc_check_state(int state) { return; }
#endif
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
if (!pr->pblk)
return -ENODEV;
/* if info is obtained from pblk/fadt, type equals state */
pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
#ifndef CONFIG_HOTPLUG_CPU
/*
* Check for P_LVL2_UP flag before entering C2 and above on
* an SMP system.
*/
if ((num_online_cpus() > 1) &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
/* determine C2 and C3 address from pblk */
pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
/*
* FADT specified C2 latency must be less than or equal to
* 100 microseconds.
*/
if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
acpi_gbl_FADT.c2_latency);
/* invalidate C2 */
pr->power.states[ACPI_STATE_C2].address = 0;
}
/*
* FADT supplied C3 latency must be less than or equal to
* 1000 microseconds.
*/
if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
acpi_gbl_FADT.c3_latency);
/* invalidate C3 */
pr->power.states[ACPI_STATE_C3].address = 0;
}
acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
pr->power.states[ACPI_STATE_C3].address);
snprintf(pr->power.states[ACPI_STATE_C2].desc,
ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C2].address);
snprintf(pr->power.states[ACPI_STATE_C3].desc,
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
if (!pr->power.states[ACPI_STATE_C1].valid) {
/* set the first C-State to C1 */
/* all processors need to support C1 */
pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
pr->power.states[ACPI_STATE_C1].valid = 1;
pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
snprintf(pr->power.states[ACPI_STATE_C1].desc,
ACPI_CX_DESC_LEN, "ACPI HLT");
}
/* the C0 state only exists as a filler in our array */
pr->power.states[ACPI_STATE_C0].valid = 1;
return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
int ret;
if (nocst)
return -ENODEV;
ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
if (ret)
return ret;
if (!pr->power.count)
return -EFAULT;
pr->flags.has_cst = 1;
return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
static int bm_check_flag = -1;
static int bm_control_flag = -1;
if (!cx->address)
return;
/*
* PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
* DMA transfers are used by any ISA device to avoid livelock.
* Note that we could disable Type-F DMA (as recommended by
* the erratum), but this is known to disrupt certain ISA
* devices thus we take the conservative approach.
*/
if (errata.piix4.fdma) {
acpi_handle_debug(pr->handle,
"C3 not supported on PIIX4 with Type-F DMA\n");
return;
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
if (!pr->flags.bm_control) {
if (pr->flags.has_cst != 1) {
/* bus mastering control is necessary */
acpi_handle_debug(pr->handle,
"C3 support requires BM control\n");
return;
} else {
/* Here we enter C3 without bus mastering */
acpi_handle_debug(pr->handle,
"C3 support without BM control\n");
}
}
} else {
/*
* WBINVD should be set in the FADT for the C3 state to
* be supported when bm_check is not required.
*/
if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
acpi_handle_debug(pr->handle,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n");
return;
}
}
/*
* Otherwise we've met all of our C3 requirements.
* Normalize the C3 latency to expedite policy. Enable
* checking of bus mastering status (bm_check) so we can
* use this in our C3 policy.
*/
cx->valid = 1;
/*
* On older chipsets, BM_RLD needs to be set
* in order for Bus Master activity to wake the
* system from C3. Newer chipsets handle DMA
* during C3 automatically and BM_RLD is a NOP.
* In either case, the proper way to
* handle BM_RLD is to set it and leave it set.
*/
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
const struct acpi_processor_cx *x = a, *y = b;
if (!(x->valid && y->valid))
return 0;
if (x->latency > y->latency)
return 1;
if (x->latency < y->latency)
return -1;
return 0;
}
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
struct acpi_processor_cx *x = a, *y = b;
if (!(x->valid && y->valid))
return;
swap(x->latency, y->latency);
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
unsigned int i;
unsigned int working = 0;
unsigned int last_latency = 0;
unsigned int last_type = 0;
bool buggy_latency = false;
pr->power.timer_broadcast_on_state = INT_MAX;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
break;
case ACPI_STATE_C2:
if (!cx->address)
break;
cx->valid = 1;
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
break;
}
if (!cx->valid)
continue;
if (cx->type >= last_type && cx->latency < last_latency)
buggy_latency = true;
last_latency = cx->latency;
last_type = cx->type;
lapic_timer_check_state(i, pr, cx);
tsc_check_state(cx->type);
working++;
}
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
sort(&pr->power.states[1], max_cstate,
sizeof(struct acpi_processor_cx),
acpi_cst_latency_cmp,
acpi_cst_latency_swap);
}
lapic_timer_propagate_broadcast(pr);
return working;
}
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
unsigned int i;
int result;
/* NOTE: the idle thread may not be running while calling
* this function */
/* Zero initialize all the C-states info. */
memset(pr->power.states, 0, sizeof(pr->power.states));
result = acpi_processor_get_power_info_cst(pr);
if (result == -ENODEV)
result = acpi_processor_get_power_info_fadt(pr);
if (result)
return result;
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
/*
* if one state of type C2 or C3 is available, mark this
* CPU as being "idle manageable"
*/
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
if (pr->power.states[i].valid) {
pr->power.count = i;
pr->flags.power = 1;
}
}
return 0;
}
/**
* acpi_idle_bm_check - checks if bus master activity was detected
*/
static int acpi_idle_bm_check(void)
{
u32 bm_status = 0;
if (bm_check_disable)
return 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
if (bm_status)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
/*
* PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
* the true state of bus mastering activity; forcing us to
* manually check the BMIDEA bit of each IDE channel.
*/
else if (errata.piix4.bmisx) {
if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
|| (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
bm_status = 1;
}
return bm_status;
}
static __cpuidle void io_idle(unsigned long addr)
{
/* IO port based C-state */
inb(addr);
#ifdef CONFIG_X86
/* No delay is needed if we are running in a guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
/*
* Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
* not this code. Assume that any Intel systems using this
* are ancient and may need the dummy wait. This also assumes
* that the motivating chipset issue was Intel-only.
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
#endif
/*
* Dummy wait op - must do something useless after P_LVL2 read
* because chipsets cannot guarantee that STPCLK# signal gets
* asserted in time to freeze execution properly
*
* This workaround has been in place since the original ACPI
* implementation was merged, circa 2002.
*
* If a profile is pointing to this instruction, please first
* consider moving your system to a more modern idle
* mechanism.
*/
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/**
* acpi_idle_do_entry - enter idle state using the appropriate method
* @cx: cstate data
*
* Caller disables interrupt before call and enables interrupt after return.
*/
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
perf_lopwr_cb(true);
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
io_idle(cx->address);
}
perf_lopwr_cb(false);
}
/**
* acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
ACPI_FLUSH_CPU_CACHE();
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
} else
return -ENODEV;
}
/* Never reached */
return 0;
}
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @drv: cpuidle driver
* @pr: Target processor
* @cx: Target state context
* @index: index of target state
*/
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int index)
{
static struct acpi_processor_cx safe_cx = {
.entry_method = ACPI_CSTATE_HALT,
};
/*
* disable bus master
* bm_check implies we need ARB_DIS
* bm_control indicates whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is not set.
* In that case we cannot do much, we enter C3 without doing anything.
*/
bool dis_bm = pr->flags.bm_control;
instrumentation_begin();
/* If we can skip BM, demote to a safe state. */
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
dis_bm = false;
index = drv->safe_state_index;
if (index >= 0) {
cx = this_cpu_read(acpi_cstate[index]);
} else {
cx = &safe_cx;
index = -EBUSY;
}
}
if (dis_bm) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
}
ct_cpuidle_enter();
acpi_idle_do_entry(cx);
ct_cpuidle_exit();
/* Re-enable bus master arbitration */
if (dis_bm) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
instrumentation_end();
return index;
}
static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
struct acpi_processor *pr;
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
if (cx->type != ACPI_STATE_C1) {
if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
return acpi_idle_enter_bm(drv, pr, cx, index);
/* C2 to C1 demotion. */
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
}
}
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
acpi_idle_do_entry(cx);
return index;
}
static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
if (cx->type == ACPI_STATE_C3) {
struct acpi_processor *pr = __this_cpu_read(processors);
if (unlikely(!pr))
return 0;
if (pr->flags.bm_check) {
u8 bm_sts_skip = cx->bm_sts_skip;
/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
cx->bm_sts_skip = 1;
acpi_idle_enter_bm(drv, pr, cx, index);
cx->bm_sts_skip = bm_sts_skip;
return 0;
} else {
ACPI_FLUSH_CPU_CACHE();
}
}
acpi_idle_do_entry(cx);
return 0;
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];
if (!cx->valid)
continue;
per_cpu(acpi_cstate[count], dev->cpu) = cx;
if (lapic_timer_needs_broadcast(pr, cx))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
if (cx->type == ACPI_STATE_C3) {
state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
if (pr->flags.bm_check)
state->flags |= CPUIDLE_FLAG_RCU_IDLE;
}
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
if (!count)
return -EINVAL;
return 0;
}
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
if (max_cstate == 0)
max_cstate = 1;
if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
cpuidle_poll_state_init(drv);
count = 1;
} else {
count = 0;
}
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
if (!cx->valid)
continue;
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter;
state->flags = 0;
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
if (cx->type != ACPI_STATE_C3)
drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
drv->state_count = count;
if (!count)
return -EINVAL;
return 0;
}
static inline void acpi_processor_cstate_first_run_checks(void)
{
static int first_run;
if (first_run)
return;
dmi_check_system(processor_power_dmi_table);
max_cstate = acpi_processor_cstate_check(max_cstate);
if (max_cstate < ACPI_C_STATES_MAX)
pr_notice("processor limited to max C-state %d\n", max_cstate);
first_run++;
if (nocst)
return;
acpi_processor_claim_cst_control();
}
#else
static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
return -ENODEV;
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
return -EINVAL;
}
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
return -EINVAL;
}
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
struct acpi_lpi_states_array {
unsigned int size;
unsigned int composite_states_size;
struct acpi_lpi_state *entries;
struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};
static int obj_get_integer(union acpi_object *obj, u32 *value)
{
if (obj->type != ACPI_TYPE_INTEGER)
return -EINVAL;
*value = obj->integer.value;
return 0;
}
static int acpi_processor_evaluate_lpi(acpi_handle handle,
struct acpi_lpi_states_array *info)
{
acpi_status status;
int ret = 0;
int pkg_count, state_idx = 1, loop;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *lpi_data;
struct acpi_lpi_state *lpi_state;
status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "No _LPI, giving up\n");
return -ENODEV;
}
lpi_data = buffer.pointer;
/* There must be at least 4 elements = 3 elements + 1 package */
if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
lpi_data->package.count < 4) {
pr_debug("not enough elements in _LPI\n");
ret = -ENODATA;
goto end;
}
pkg_count = lpi_data->package.elements[2].integer.value;
/* Validate number of power states. */
if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
pr_debug("count given by _LPI is not valid\n");
ret = -ENODATA;
goto end;
}
lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
if (!lpi_state) {
ret = -ENOMEM;
goto end;
}
info->size = pkg_count;
info->entries = lpi_state;
/* LPI States start at index 3 */
for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
union acpi_object *element, *pkg_elem, *obj;
element = &lpi_data->package.elements[loop];
if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
continue;
pkg_elem = element->package.elements;
obj = pkg_elem + 6;
if (obj->type == ACPI_TYPE_BUFFER) {
struct acpi_power_register *reg;
reg = (struct acpi_power_register *)obj->buffer.pointer;
if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
continue;
lpi_state->address = reg->address;
lpi_state->entry_method =
reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
} else if (obj->type == ACPI_TYPE_INTEGER) {
lpi_state->entry_method = ACPI_CSTATE_INTEGER;
lpi_state->address = obj->integer.value;
} else {
continue;
}
/* elements[7,8] skipped for now, i.e. Residency/Usage counter */
obj = pkg_elem + 9;
if (obj->type == ACPI_TYPE_STRING)
strscpy(lpi_state->desc, obj->string.pointer,
ACPI_CX_DESC_LEN);
lpi_state->index = state_idx;
if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
pr_debug("No min. residency found, assuming 10 us\n");
lpi_state->min_residency = 10;
}
if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
pr_debug("No wakeup residency found, assuming 10 us\n");
lpi_state->wake_latency = 10;
}
if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
lpi_state->flags = 0;
if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
lpi_state->arch_flags = 0;
if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
lpi_state->res_cnt_freq = 1;
if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
lpi_state->enable_parent_state = 0;
}
acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
kfree(buffer.pointer);
return ret;
}
/*
* flat_state_cnt - the number of composite LPI states after the process of flattening
*/
static int flat_state_cnt;
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
* @local: local LPI state
* @parent: parent LPI state
* @result: composite LPI state
*/
static bool combine_lpi_states(struct acpi_lpi_state *local,
struct acpi_lpi_state *parent,
struct acpi_lpi_state *result)
{
if (parent->entry_method == ACPI_CSTATE_INTEGER) {
if (!parent->address) /* 0 means autopromotable */
return false;
result->address = local->address + parent->address;
} else {
result->address = parent->address;
}
result->min_residency = max(local->min_residency, parent->min_residency);
result->wake_latency = local->wake_latency + parent->wake_latency;
result->enable_parent_state = parent->enable_parent_state;
result->entry_method = local->entry_method;
result->flags = parent->flags;
result->arch_flags = parent->arch_flags;
result->index = parent->index;
strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
return true;
}
#define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0)
static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
struct acpi_lpi_state *t)
{
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
static int flatten_lpi_states(struct acpi_processor *pr,
struct acpi_lpi_states_array *curr_level,
struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
curr_level->composite_states_size = 0;
for (j = 0; j < state_count; j++, t++) {
struct acpi_lpi_state *flpi;
if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
continue;
if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
pr_warn("Limiting number of LPI states to max (%d)\n",
ACPI_PROCESSOR_MAX_POWER);
pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
break;
}
flpi = &pr->power.lpi_states[flat_state_cnt];
if (!prev_level) { /* leaf/processor node */
memcpy(flpi, t, sizeof(*t));
stash_composite_state(curr_level, flpi);
flat_state_cnt++;
continue;
}
for (i = 0; i < prev_level->composite_states_size; i++) {
p = prev_level->composite_states[i];
if (t->index <= p->enable_parent_state &&
combine_lpi_states(p, t, flpi)) {
stash_composite_state(curr_level, flpi);
flat_state_cnt++;
flpi++;
}
}
}
kfree(curr_level->entries);
return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return -EOPNOTSUPP;
}
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
acpi_status status;
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
if (ret == -EOPNOTSUPP)
return ret;
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
flatten_lpi_states(pr, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
d = acpi_fetch_acpi_dev(pr_ahandle);
if (!d)
break;
handle = pr_ahandle;
if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
break;
/* _LPI may be absent on parent nodes; stop walking up if so */
if (!acpi_has_method(handle, "_LPI"))
break;
ret = acpi_processor_evaluate_lpi(handle, curr);
if (ret)
break;
/* flatten all the LPI states in this level of hierarchy */
flatten_lpi_states(pr, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
pr->power.count = flat_state_cnt;
/* reset the index after flattening */
for (i = 0; i < pr->power.count; i++)
pr->power.lpi_states[i].index = i;
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
return 0;
}
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;
}
/**
* acpi_idle_lpi_enter - enters an ACPI LPI state
* @dev: the target CPU
* @drv: cpuidle driver containing cpuidle state info
* @index: index of target state
*
* Return: 0 for success or negative value for error
*/
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct acpi_lpi_state *lpi;
pr = __this_cpu_read(processors);
if (unlikely(!pr))
return -EINVAL;
lpi = &pr->power.lpi_states[index];
if (lpi->entry_method == ACPI_CSTATE_FFH)
return acpi_processor_ffh_lpi_enter(lpi);
return -EINVAL;
}
static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
int i;
struct acpi_lpi_state *lpi;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
if (!pr->flags.has_lpi)
return -EOPNOTSUPP;
for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
lpi = &pr->power.lpi_states[i];
state = &drv->states[i];
snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;
drv->safe_state_index = i;
}
drv->state_count = i;
return 0;
}
/**
* acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
* global state data, i.e. the idle routines
*
* @pr: the ACPI processor
*/
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
int i;
struct cpuidle_driver *drv = &acpi_idle_driver;
if (!pr->flags.power_setup_done || !pr->flags.power)
return -EINVAL;
drv->safe_state_index = -1;
for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
if (pr->flags.has_lpi)
return acpi_processor_setup_lpi_states(pr);
return acpi_processor_setup_cstates(pr);
}
/**
* acpi_processor_setup_cpuidle_dev - prepares and configures the cpuidle
* device, i.e. the per-CPU data
*
* @pr: the ACPI processor
* @dev: the cpuidle device
*/
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
return -EINVAL;
dev->cpu = pr->id;
if (pr->flags.has_lpi)
return acpi_processor_ffh_lpi_probe(pr->id);
return acpi_processor_setup_cpuidle_cx(pr, dev);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
int ret;
ret = acpi_processor_get_lpi_info(pr);
if (ret)
ret = acpi_processor_get_cstate_info(pr);
return ret;
}
int acpi_processor_hotplug(struct acpi_processor *pr)
{
int ret = 0;
struct cpuidle_device *dev;
if (disabled_by_idle_boot_param())
return 0;
if (!pr->flags.power_setup_done)
return -ENODEV;
dev = per_cpu(acpi_cpuidle_device, pr->id);
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
ret = acpi_processor_get_power_info(pr);
if (!ret && pr->flags.power) {
acpi_processor_setup_cpuidle_dev(pr, dev);
ret = cpuidle_enable_device(dev);
}
cpuidle_resume_and_unlock();
return ret;
}
int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
int cpu;
struct acpi_processor *_pr;
struct cpuidle_device *dev;
if (disabled_by_idle_boot_param())
return 0;
if (!pr->flags.power_setup_done)
return -ENODEV;
/*
* FIXME: Design the ACPI notification to make it once per
* system instead of once per-cpu. This condition is a hack
* to make the code that updates C-States be called once.
*/
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
/* Protect against cpu-hotplug */
cpus_read_lock();
cpuidle_pause_and_lock();
/* Disable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
continue;
dev = per_cpu(acpi_cpuidle_device, cpu);
cpuidle_disable_device(dev);
}
/* Populate Updated C-state information */
acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
continue;
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
dev = per_cpu(acpi_cpuidle_device, cpu);
acpi_processor_setup_cpuidle_dev(_pr, dev);
cpuidle_enable_device(dev);
}
}
cpuidle_resume_and_unlock();
cpus_read_unlock();
}
return 0;
}
static int acpi_processor_registered;
int acpi_processor_power_init(struct acpi_processor *pr)
{
int retval;
struct cpuidle_device *dev;
if (disabled_by_idle_boot_param())
return 0;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
/*
* Install the idle handler if processor power management is supported.
* Note that the previously set idle handler will be used on
* platforms that only support C1.
*/
if (pr->flags.power) {
/* Register acpi_idle_driver if not already registered */
if (!acpi_processor_registered) {
acpi_processor_setup_cpuidle_states(pr);
retval = cpuidle_register_driver(&acpi_idle_driver);
if (retval)
return retval;
pr_debug("%s registered with cpuidle\n",
acpi_idle_driver.name);
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
per_cpu(acpi_cpuidle_device, pr->id) = dev;
acpi_processor_setup_cpuidle_dev(pr, dev);
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
*/
retval = cpuidle_register_device(dev);
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
return retval;
}
acpi_processor_registered++;
}
return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (disabled_by_idle_boot_param())
return 0;
if (pr->flags.power) {
cpuidle_unregister_device(dev);
acpi_processor_registered--;
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
}
pr->flags.power_setup_done = 0;
return 0;
}
| linux-master | drivers/acpi/processor_idle.c |
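/*
 * Editorial sketch (standalone userspace, not part of the file above): the
 * C-states registered by acpi_processor_setup_cstates() surface through the
 * generic cpuidle sysfs interface. This illustrative reader prints each
 * state's name and exit latency for CPU 0; the sysfs paths are the standard
 * cpuidle ones, and the program itself is only an example consumer.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	for (int i = 0; i < 10; i++) {
		char path[128], name[32], latency[32];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", i);
		f = fopen(path, "r");
		if (!f)
			break;		/* no more states */
		if (!fgets(name, sizeof(name), f))
			name[0] = '\0';
		fclose(f);

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/latency", i);
		f = fopen(path, "r");
		if (!f)
			break;
		if (!fgets(latency, sizeof(latency), f))
			latency[0] = '\0';
		fclose(f);

		name[strcspn(name, "\n")] = '\0';
		latency[strcspn(latency, "\n")] = '\0';
		printf("state%d: %s exit_latency=%s us\n", i, name, latency);
	}
	return 0;
}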
// SPDX-License-Identifier: GPL-2.0-only
/*
* ec_sys.c
*
* Copyright (C) 2010 SUSE Products GmbH/Novell
* Author:
* Thomas Renninger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "internal.h"
MODULE_AUTHOR("Thomas Renninger <[email protected]>");
MODULE_DESCRIPTION("ACPI EC sysfs access driver");
MODULE_LICENSE("GPL");
static bool write_support;
module_param_hw(write_support, bool, other, 0644);
MODULE_PARM_DESC(write_support, "Dangerous, reboot and removal of battery may "
"be needed.");
#define EC_SPACE_SIZE 256
static struct dentry *acpi_ec_debugfs_dir;
static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
size_t count, loff_t *off)
{
/* Use this once support for reading/writing multiple ECs exists in ec.c:
* struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
*/
unsigned int size = EC_SPACE_SIZE;
loff_t init_off = *off;
int err = 0;
if (*off >= size)
return 0;
if (*off + count >= size) {
size -= *off;
count = size;
} else
size = count;
while (size) {
u8 byte_read;
err = ec_read(*off, &byte_read);
if (err)
return err;
if (put_user(byte_read, buf + *off - init_off)) {
if (*off - init_off)
return *off - init_off; /* partial read */
return -EFAULT;
}
*off += 1;
size--;
}
return count;
}
static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
/* Use this once support for reading/writing multiple ECs exists in ec.c:
* struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
*/
unsigned int size = count;
loff_t init_off = *off;
int err = 0;
if (!write_support)
return -EINVAL;
if (*off >= EC_SPACE_SIZE)
return 0;
if (*off + count >= EC_SPACE_SIZE) {
size = EC_SPACE_SIZE - *off;
count = size;
}
while (size) {
u8 byte_write;
if (get_user(byte_write, buf + *off - init_off)) {
if (*off - init_off)
return *off - init_off; /* partial write */
return -EFAULT;
}
err = ec_write(*off, byte_write);
if (err)
return err;
*off += 1;
size--;
}
return count;
}
static const struct file_operations acpi_ec_io_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = acpi_ec_read_io,
.write = acpi_ec_write_io,
.llseek = default_llseek,
};
static void acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
umode_t mode = 0400;
if (ec_device_count == 0)
acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
sprintf(name, "ec%u", ec_device_count);
dev_dir = debugfs_create_dir(name, acpi_ec_debugfs_dir);
debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe);
debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock);
if (write_support)
mode = 0600;
debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops);
}
static int __init acpi_ec_sys_init(void)
{
if (first_ec)
acpi_ec_add_debugfs(first_ec, 0);
return 0;
}
static void __exit acpi_ec_sys_exit(void)
{
debugfs_remove_recursive(acpi_ec_debugfs_dir);
}
module_init(acpi_ec_sys_init);
module_exit(acpi_ec_sys_exit);
| linux-master | drivers/acpi/ec_sys.c |
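/*
 * Editorial sketch (standalone userspace, not part of the file above): a
 * minimal reader for the "io" file created by acpi_ec_add_debugfs(), dumping
 * the 256-byte EC address space as hex. Requires root and a mounted debugfs;
 * error handling is intentionally minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char ec_space[256];
	int fd = open("/sys/kernel/debug/ec/ec0/io", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, ec_space, sizeof(ec_space));
	for (ssize_t i = 0; i < n; i++)
		printf("%02x%c", ec_space[i], (i % 16 == 15) ? '\n' : ' ');
	close(fd);
	return 0;
}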
// SPDX-License-Identifier: GPL-2.0-only
/*
* FPDT support for exporting boot and suspend/resume performance data
*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
*/
#define pr_fmt(fmt) "ACPI FPDT: " fmt
#include <linux/acpi.h>
/*
* The FPDT contains an ACPI table header and a number of fpdt_subtable_entries.
* Each fpdt_subtable_entry points to a subtable: FBPT or S3PT.
* Each FPDT subtable (FBPT/S3PT) is composed of an fpdt_subtable_header
* and a number of FPDT performance records.
* Each FPDT performance record is composed of an fpdt_record_header and
* performance data fields for the boot, suspend, or resume phase.
*/
enum fpdt_subtable_type {
SUBTABLE_FBPT,
SUBTABLE_S3PT,
};
struct fpdt_subtable_entry {
u16 type; /* refer to enum fpdt_subtable_type */
u8 length;
u8 revision;
u32 reserved;
u64 address; /* physical address of the S3PT/FBPT table */
};
struct fpdt_subtable_header {
u32 signature;
u32 length;
};
enum fpdt_record_type {
RECORD_S3_RESUME,
RECORD_S3_SUSPEND,
RECORD_BOOT,
};
struct fpdt_record_header {
u16 type; /* refer to enum fpdt_record_type */
u8 length;
u8 revision;
};
struct resume_performance_record {
struct fpdt_record_header header;
u32 resume_count;
u64 resume_prev;
u64 resume_avg;
} __attribute__((packed));
struct boot_performance_record {
struct fpdt_record_header header;
u32 reserved;
u64 firmware_start;
u64 bootloader_load;
u64 bootloader_launch;
u64 exitbootservice_start;
u64 exitbootservice_end;
} __attribute__((packed));
struct suspend_performance_record {
struct fpdt_record_header header;
u64 suspend_start;
u64 suspend_end;
} __attribute__((packed));
static struct resume_performance_record *record_resume;
static struct suspend_performance_record *record_suspend;
static struct boot_performance_record *record_boot;
#define FPDT_ATTR(phase, name) \
static ssize_t name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%llu\n", record_##phase->name); \
} \
static struct kobj_attribute name##_attr = \
__ATTR(name##_ns, 0444, name##_show, NULL)
FPDT_ATTR(resume, resume_prev);
FPDT_ATTR(resume, resume_avg);
FPDT_ATTR(suspend, suspend_start);
FPDT_ATTR(suspend, suspend_end);
FPDT_ATTR(boot, firmware_start);
FPDT_ATTR(boot, bootloader_load);
FPDT_ATTR(boot, bootloader_launch);
FPDT_ATTR(boot, exitbootservice_start);
FPDT_ATTR(boot, exitbootservice_end);
static ssize_t resume_count_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", record_resume->resume_count);
}
static struct kobj_attribute resume_count_attr =
__ATTR_RO(resume_count);
static struct attribute *resume_attrs[] = {
&resume_count_attr.attr,
&resume_prev_attr.attr,
&resume_avg_attr.attr,
NULL
};
static const struct attribute_group resume_attr_group = {
.attrs = resume_attrs,
.name = "resume",
};
static struct attribute *suspend_attrs[] = {
&suspend_start_attr.attr,
&suspend_end_attr.attr,
NULL
};
static const struct attribute_group suspend_attr_group = {
.attrs = suspend_attrs,
.name = "suspend",
};
static struct attribute *boot_attrs[] = {
&firmware_start_attr.attr,
&bootloader_load_attr.attr,
&bootloader_launch_attr.attr,
&exitbootservice_start_attr.attr,
&exitbootservice_end_attr.attr,
NULL
};
static const struct attribute_group boot_attr_group = {
.attrs = boot_attrs,
.name = "boot",
};
static struct kobject *fpdt_kobj;
#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
#include <linux/processor.h>
static bool fpdt_address_valid(u64 address)
{
/*
* On some systems the table contains invalid addresses
* with unsupported high address bits set, so check for this.
*/
return !(address >> boot_cpu_data.x86_phys_bits);
}
#else
static bool fpdt_address_valid(u64 address)
{
return true;
}
#endif
static int fpdt_process_subtable(u64 address, u32 subtable_type)
{
struct fpdt_subtable_header *subtable_header;
struct fpdt_record_header *record_header;
char *signature = (subtable_type == SUBTABLE_FBPT ? "FBPT" : "S3PT");
u32 length, offset;
int result;
if (!fpdt_address_valid(address)) {
pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
return -EINVAL;
}
subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header)
return -ENOMEM;
if (strncmp((char *)&subtable_header->signature, signature, 4)) {
pr_info(FW_BUG "subtable signature and type mismatch!\n");
return -EINVAL;
}
length = subtable_header->length;
acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header));
subtable_header = acpi_os_map_memory(address, length);
if (!subtable_header)
return -ENOMEM;
offset = sizeof(*subtable_header);
while (offset < length) {
record_header = (void *)subtable_header + offset;
offset += record_header->length;
switch (record_header->type) {
case RECORD_S3_RESUME:
if (subtable_type != SUBTABLE_S3PT) {
pr_err(FW_BUG "Invalid record %d for subtable %s\n",
record_header->type, signature);
return -EINVAL;
}
if (record_resume) {
pr_err("Duplicate resume performance record found.\n");
continue;
}
record_resume = (struct resume_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
if (result)
return result;
break;
case RECORD_S3_SUSPEND:
if (subtable_type != SUBTABLE_S3PT) {
pr_err(FW_BUG "Invalid %d for subtable %s\n",
record_header->type, signature);
continue;
}
if (record_suspend) {
pr_err("Duplicate suspend performance record found.\n");
continue;
}
record_suspend = (struct suspend_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
if (result)
return result;
break;
case RECORD_BOOT:
if (subtable_type != SUBTABLE_FBPT) {
pr_err(FW_BUG "Invalid %d for subtable %s\n",
record_header->type, signature);
return -EINVAL;
}
if (record_boot) {
pr_err("Duplicate boot performance record found.\n");
continue;
}
record_boot = (struct boot_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
if (result)
return result;
break;
default:
/* Other types are reserved in ACPI 6.4 spec. */
break;
}
}
return 0;
}
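/*
 * Look up the ACPI FPDT table, create the "fpdt" kobject under
 * /sys/firmware/acpi and process every FBPT/S3PT subtable entry it contains.
 */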
static int __init acpi_init_fpdt(void)
{
acpi_status status;
struct acpi_table_header *header;
struct fpdt_subtable_entry *subtable;
u32 offset = sizeof(*header);
status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
if (ACPI_FAILURE(status))
return 0;
fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
if (!fpdt_kobj) {
acpi_put_table(header);
return -ENOMEM;
}
while (offset < header->length) {
subtable = (void *)header + offset;
switch (subtable->type) {
case SUBTABLE_FBPT:
case SUBTABLE_S3PT:
fpdt_process_subtable(subtable->address,
subtable->type);
break;
default:
/* Other types are reserved in ACPI 6.4 spec. */
break;
}
offset += sizeof(*subtable);
}
return 0;
}
fs_initcall(acpi_init_fpdt);
| linux-master | drivers/acpi/acpi_fpdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI device specific properties support.
*
* Copyright (C) 2014, Intel Corporation
* All rights reserved.
*
* Authors: Mika Westerberg <[email protected]>
* Darren Hart <[email protected]>
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include "internal.h"
static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj);
/*
* The GUIDs here are made equivalent to each other in order to avoid extra
* complexity in the properties handling code, with the caveat that the
 * kernel will accept, without warning, certain combinations of GUID and
 * properties that are not formally defined. For instance, if properties
 * belonging to one GUID appear in the property list of another, the kernel
 * will accept them. Firmware validation tools should catch these.
*/
static const guid_t prp_guids[] = {
/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
/* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
/* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
/* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
GUID_INIT(0x6c501103, 0xc189, 0x4296,
0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
/* Storage device needs D3 GUID: 5025030f-842f-4ab4-a561-99a5189762d0 */
GUID_INIT(0x5025030f, 0x842f, 0x4ab4,
0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0),
};
/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
static const guid_t buffer_prop_guid =
GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
static bool acpi_extract_properties(acpi_handle handle,
union acpi_object *desc,
struct acpi_device_data *data);
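/*
 * Build an ACPI data node from a _DSD data-subnode package: extract the
 * subnode's properties, recursively enumerate nested subnodes and, if
 * anything valid was found, link the new node into @list.
 */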
static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
bool result;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return false;
dn->name = link->package.elements[0].string.pointer;
fwnode_init(&dn->fwnode, &acpi_data_fwnode_ops);
dn->parent = parent;
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
result = acpi_extract_properties(handle, desc, &dn->data);
if (handle) {
acpi_handle scope;
acpi_status status;
/*
* The scope for the subnode object lookup is the one of the
* namespace node (device) containing the object that has
* returned the package. That is, it's the scope of that
* object's parent.
*/
status = acpi_get_parent(handle, &scope);
if (ACPI_SUCCESS(status)
&& acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
&dn->fwnode))
result = true;
} else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
&dn->fwnode)) {
result = true;
}
if (result) {
dn->handle = handle;
dn->data.pointer = desc;
list_add_tail(&dn->sibling, list);
return true;
}
kfree(dn);
acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
}
static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
struct fwnode_handle *parent)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_status status;
status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
return false;
if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
parent))
return true;
ACPI_FREE(buf.pointer);
return false;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
const union acpi_object *link,
struct list_head *list,
struct fwnode_handle *parent)
{
acpi_handle handle;
acpi_status status;
if (!scope)
return false;
status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
&handle);
if (ACPI_FAILURE(status))
return false;
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
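/*
 * Walk the package of data-subnode links. Each link is a two-element
 * package holding a name string plus either a namespace path string, an
 * object reference, or an inline package describing the subnode directly.
 */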
static bool acpi_add_nondev_subnodes(acpi_handle scope,
union acpi_object *links,
struct list_head *list,
struct fwnode_handle *parent)
{
bool ret = false;
int i;
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
acpi_handle handle;
bool result;
link = &links->package.elements[i];
/* Only two elements allowed. */
if (link->package.count != 2)
continue;
/* The first one must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
/* The second one may be a string, a reference or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
case ACPI_TYPE_LOCAL_REFERENCE:
handle = link->package.elements[1].reference.handle;
result = acpi_nondev_subnode_data_ok(handle, link, list,
parent);
break;
case ACPI_TYPE_PACKAGE:
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
default:
result = false;
break;
}
ret = ret || result;
}
return ret;
}
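/*
 * Scan a _DSD-style package for the data-subnodes GUID and, when it is
 * found, enumerate the subnodes its companion package links to.
 */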
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
int i;
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
const union acpi_object *guid;
union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
/*
* The first element must be a GUID and the second one must be
* a package.
*/
if (guid->type != ACPI_TYPE_BUFFER ||
guid->buffer.length != 16 ||
links->type != ACPI_TYPE_PACKAGE)
break;
if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid))
continue;
return acpi_add_nondev_subnodes(scope, links, &data->subnodes,
parent);
}
return false;
}
static bool acpi_property_value_ok(const union acpi_object *value)
{
int j;
/*
* The value must be an integer, a string, a reference, or a package
* whose every element must be an integer, a string, or a reference.
*/
switch (value->type) {
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_LOCAL_REFERENCE:
return true;
case ACPI_TYPE_PACKAGE:
for (j = 0; j < value->package.count; j++)
switch (value->package.elements[j].type) {
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_LOCAL_REFERENCE:
continue;
default:
return false;
}
return true;
}
return false;
}
static bool acpi_properties_format_valid(const union acpi_object *properties)
{
int i;
for (i = 0; i < properties->package.count; i++) {
const union acpi_object *property;
property = &properties->package.elements[i];
/*
* Only two elements allowed, the first one must be a string and
* the second one has to satisfy certain conditions.
*/
if (property->package.count != 2
|| property->package.elements[0].type != ACPI_TYPE_STRING
|| !acpi_property_value_ok(&property->package.elements[1]))
return false;
}
return true;
}
static void acpi_init_of_compatible(struct acpi_device *adev)
{
const union acpi_object *of_compatible;
int ret;
ret = acpi_data_get_property_array(&adev->data, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
struct acpi_device *parent;
parent = acpi_dev_parent(adev);
if (parent && parent->flags.of_compatible_ok)
goto out;
return;
}
}
adev->data.of_compatible = of_compatible;
out:
adev->flags.of_compatible_ok = 1;
}
static bool acpi_is_property_guid(const guid_t *guid)
{
int i;
for (i = 0; i < ARRAY_SIZE(prp_guids); i++) {
if (guid_equal(guid, &prp_guids[i]))
return true;
}
return false;
}
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
union acpi_object *properties)
{
struct acpi_device_properties *props;
props = kzalloc(sizeof(*props), GFP_KERNEL);
if (props) {
INIT_LIST_HEAD(&props->list);
props->guid = guid;
props->properties = properties;
list_add_tail(&props->list, &data->properties);
}
return props;
}
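/*
 * Intentionally empty: this function only serves as a unique tag with which
 * data nodes are attached to (and detached from) their ACPI handles.
 */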
static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
{
}
static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
{
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
}
}
static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
{
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
acpi_status status;
bool ret;
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
return false;
}
ret = acpi_tie_nondev_subnodes(&dn->data);
if (!ret)
return ret;
}
return true;
}
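/*
 * Handle the buffer-property GUID: each entry names another object in the
 * namespace that evaluates to a buffer. Evaluate those objects once here and
 * repackage the results so they can be read back like ordinary properties.
 */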
static void acpi_data_add_buffer_props(acpi_handle handle,
struct acpi_device_data *data,
union acpi_object *properties)
{
struct acpi_device_properties *props;
union acpi_object *package;
size_t alloc_size;
unsigned int i;
u32 *count;
if (check_mul_overflow((size_t)properties->package.count,
sizeof(*package) + sizeof(void *),
&alloc_size) ||
check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
&alloc_size)) {
acpi_handle_warn(handle,
"can't allocate memory for %u buffer props",
properties->package.count);
return;
}
props = kvzalloc(alloc_size, GFP_KERNEL);
if (!props)
return;
props->guid = &buffer_prop_guid;
props->bufs = (void *)(props + 1);
props->properties = (void *)(props->bufs + properties->package.count);
/* Outer package */
package = props->properties;
package->type = ACPI_TYPE_PACKAGE;
package->package.elements = package + 1;
count = &package->package.count;
*count = 0;
/* Inner packages */
package++;
for (i = 0; i < properties->package.count; i++) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
union acpi_object *property = &properties->package.elements[i];
union acpi_object *prop, *obj, *buf_obj;
acpi_status status;
if (property->type != ACPI_TYPE_PACKAGE ||
property->package.count != 2) {
acpi_handle_warn(handle,
"buffer property %u has %u entries\n",
i, property->package.count);
continue;
}
prop = &property->package.elements[0];
obj = &property->package.elements[1];
if (prop->type != ACPI_TYPE_STRING ||
obj->type != ACPI_TYPE_STRING) {
acpi_handle_warn(handle,
"wrong object types %u and %u\n",
prop->type, obj->type);
continue;
}
status = acpi_evaluate_object_typed(handle, obj->string.pointer,
NULL, &buf,
ACPI_TYPE_BUFFER);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle,
"can't evaluate \"%*pE\" as buffer\n",
obj->string.length,
obj->string.pointer);
continue;
}
package->type = ACPI_TYPE_PACKAGE;
package->package.elements = prop;
package->package.count = 2;
buf_obj = buf.pointer;
/* Replace the string object with a buffer object */
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = buf_obj->buffer.length;
obj->buffer.pointer = buf_obj->buffer.pointer;
props->bufs[i] = buf.pointer;
package++;
(*count)++;
}
if (*count)
list_add(&props->list, &data->properties);
else
kvfree(props);
}
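/*
 * Scan a _DSD-style package for recognized property GUIDs and record every
 * matching, well-formed property set in @data.
 */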
static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
if (desc->package.count % 2)
return false;
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
const union acpi_object *guid;
union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
/*
* The first element must be a GUID and the second one must be
* a package.
*/
if (guid->type != ACPI_TYPE_BUFFER ||
guid->buffer.length != 16 ||
properties->type != ACPI_TYPE_PACKAGE)
break;
if (guid_equal((guid_t *)guid->buffer.pointer,
&buffer_prop_guid)) {
acpi_data_add_buffer_props(scope, data, properties);
continue;
}
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
/*
* We found the matching GUID. Now validate the format of the
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
continue;
acpi_data_add_props(data, (const guid_t *)guid->buffer.pointer,
properties);
}
return !list_empty(&data->properties);
}
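/*
 * Evaluate the device's _DSD, extract its properties and data subnodes, and
 * fall back to Apple device properties when no valid _DSD data is available.
 */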
void acpi_init_properties(struct acpi_device *adev)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
struct acpi_hardware_id *hwid;
acpi_status status;
bool acpi_of = false;
INIT_LIST_HEAD(&adev->data.properties);
INIT_LIST_HEAD(&adev->data.subnodes);
if (!adev->handle)
return;
/*
	 * Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
* Device Tree compatible properties for this device.
*/
list_for_each_entry(hwid, &adev->pnp.ids, list) {
if (!strcmp(hwid->id, ACPI_DT_NAMESPACE_HID)) {
acpi_of = true;
break;
}
}
status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
goto out;
if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
}
if (acpi_enumerate_nondev_subnodes(adev->handle, buf.pointer,
&adev->data, acpi_fwnode_handle(adev)))
adev->data.pointer = buf.pointer;
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
} else {
if (!acpi_tie_nondev_subnodes(&adev->data))
acpi_untie_nondev_subnodes(&adev->data);
}
out:
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
if (!adev->data.pointer)
acpi_extract_apple_properties(adev);
}
static void acpi_free_device_properties(struct list_head *list)
{
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
u32 i;
list_del(&props->list);
/* Buffer data properties were separately allocated */
if (props->bufs)
for (i = 0; i < props->properties->package.count; i++)
ACPI_FREE(props->bufs[i]);
kvfree(props);
}
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
{
struct acpi_data_node *dn, *next;
if (list_empty(list))
return;
list_for_each_entry_safe_reverse(dn, next, list, sibling) {
acpi_destroy_nondev_subnodes(&dn->data.subnodes);
wait_for_completion(&dn->kobj_done);
list_del(&dn->sibling);
ACPI_FREE((void *)dn->data.pointer);
acpi_free_device_properties(&dn->data.properties);
kfree(dn);
}
}
void acpi_free_properties(struct acpi_device *adev)
{
acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
acpi_free_device_properties(&adev->data.properties);
}
/**
* acpi_data_get_property - return an ACPI property with given name
 * @data: ACPI device data object to get the property from
* @name: Name of the property
* @type: Expected property type
* @obj: Location to store the property value (if not %NULL)
*
* Look up a property with @name and store a pointer to the resulting ACPI
* object at the location pointed to by @obj if found.
*
* Callers must not attempt to free the returned objects. These objects will be
* freed by the ACPI core automatically during the removal of @data.
*
* Return: %0 if property with @name has been found (success),
* %-EINVAL if the arguments are invalid,
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property value type doesn't match @type.
*/
static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
const struct acpi_device_properties *props;
if (!data || !name)
return -EINVAL;
if (!data->pointer || list_empty(&data->properties))
return -EINVAL;
list_for_each_entry(props, &data->properties, list) {
const union acpi_object *properties;
unsigned int i;
properties = props->properties;
for (i = 0; i < properties->package.count; i++) {
const union acpi_object *propname, *propvalue;
const union acpi_object *property;
property = &properties->package.elements[i];
propname = &property->package.elements[0];
propvalue = &property->package.elements[1];
if (!strcmp(name, propname->string.pointer)) {
if (type != ACPI_TYPE_ANY &&
propvalue->type != type)
return -EPROTO;
if (obj)
*obj = propvalue;
return 0;
}
}
}
return -EINVAL;
}
/**
* acpi_dev_get_property - return an ACPI property with given name.
* @adev: ACPI device to get the property from.
* @name: Name of the property.
* @type: Expected property type.
* @obj: Location to store the property value (if not %NULL).
*/
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj)
{
return adev ? acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property);
static const struct acpi_device_data *
acpi_device_data_of_node(const struct fwnode_handle *fwnode)
{
if (is_acpi_device_node(fwnode)) {
const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
}
if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
return NULL;
}
/**
* acpi_node_prop_get - return an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
* @propname: Name of the property.
* @valptr: Location to store a pointer to the property value (if not %NULL).
*/
int acpi_node_prop_get(const struct fwnode_handle *fwnode,
const char *propname, void **valptr)
{
return acpi_data_get_property(acpi_device_data_of_node(fwnode),
propname, ACPI_TYPE_ANY,
(const union acpi_object **)valptr);
}
/**
* acpi_data_get_property_array - return an ACPI array property with given name
* @data: ACPI data object to get the property from
* @name: Name of the property
* @type: Expected type of array elements
* @obj: Location to store a pointer to the property value (if not NULL)
*
* Look up an array property with @name and store a pointer to the resulting
* ACPI object at the location pointed to by @obj if found.
*
* Callers must not attempt to free the returned objects. Those objects will be
* freed by the ACPI core automatically during the removal of @data.
*
* Return: %0 if array property (package) with @name has been found (success),
* %-EINVAL if the arguments are invalid,
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property is not a package or the type of its elements
* doesn't match @type.
*/
static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj)
{
const union acpi_object *prop;
int ret, i;
ret = acpi_data_get_property(data, name, ACPI_TYPE_PACKAGE, &prop);
if (ret)
return ret;
if (type != ACPI_TYPE_ANY) {
/* Check that all elements are of correct type. */
for (i = 0; i < prop->package.count; i++)
if (prop->package.elements[i].type != type)
return -EPROTO;
}
if (obj)
*obj = prop;
return 0;
}
static struct fwnode_handle *
acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
if (is_acpi_data_node(child)) {
if (acpi_data_node_match(child, childname))
return child;
continue;
}
if (!strncmp(acpi_device_bid(to_acpi_device_node(child)),
childname, ACPI_NAMESEG_SIZE))
return child;
}
return NULL;
}
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
/*
* Find the referred data extension node under the
* referred device node.
*/
for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
(*element)++) {
const char *child_name = (*element)->string.pointer;
ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
if (!ref_fwnode)
return -EINVAL;
}
/*
	 * Assume the following integer elements are all args. Stop counting on
	 * the first reference or at the end of the package arguments. If an
	 * element is neither a reference nor an integer, return an error; we
	 * can't parse it.
*/
for (i = 0; (*element) + i < end && i < num_args; i++) {
acpi_object_type type = (*element)[i].type;
if (type == ACPI_TYPE_LOCAL_REFERENCE)
break;
if (type == ACPI_TYPE_INTEGER)
nargs++;
else
return -EINVAL;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
if (args) {
args->fwnode = ref_fwnode;
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = (*element)[i].integer.value;
}
(*element) += nargs;
return 0;
}
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
* @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
*
 * Find property with @propname, verify that it is a package containing at least
* one object reference and if so, store the ACPI device object pointer to the
* target object in @args->adev. If the reference includes arguments, store
* them in the @args->args[] array.
*
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
* It is possible to leave holes in the property value set like in the
* example below:
*
* Package () {
* "cs-gpios",
* Package () {
* ^GPIO, 19, 0, 0,
* ^GPIO, 20, 0, 0,
* 0,
* ^GPIO, 21, 0, 0,
* }
* }
*
 * Calling this function with index %2 or index %3 returns %-ENOENT. If the
 * property does not contain any more values, %-ENOENT is returned. The NULL
 * entry must be a single integer and preferably contain the value %0.
*
* Return: %0 on success, negative error code on failure.
*/
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
const struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
data = acpi_device_data_of_node(fwnode);
if (!data)
return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
switch (obj->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
/* Plain single reference without arguments. */
if (index)
return -ENOENT;
device = acpi_fetch_acpi_dev(obj->reference.handle);
if (!device)
return -EINVAL;
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
case ACPI_TYPE_PACKAGE:
/*
* If it is not a single reference, then it is a package of
* references followed by number of ints as follows:
*
* Package () { REF, INT, REF, INT, INT }
*
* The index argument is then used to determine which reference
* the caller wants (along with the arguments).
*/
break;
default:
return -EINVAL;
}
if (index >= obj->package.count)
return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
while (element < end) {
switch (element->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
element++;
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
&element, end, num_args);
if (ret < 0)
return ret;
if (idx == index)
return 0;
break;
case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
break;
default:
return -EINVAL;
}
idx++;
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype, void *val)
{
const union acpi_object *obj;
int ret = 0;
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
else if (proptype == DEV_PROP_STRING)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
switch (proptype) {
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
if (val)
*(u8 *)val = obj->integer.value;
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
if (val)
*(u16 *)val = obj->integer.value;
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
if (val)
*(u32 *)val = obj->integer.value;
break;
case DEV_PROP_U64:
if (val)
*(u64 *)val = obj->integer.value;
break;
case DEV_PROP_STRING:
if (val)
*(char **)val = obj->string.pointer;
return 1;
default:
return -EINVAL;
}
	/* When no storage is provided, return the number of available values */
return val ? 0 : 1;
}
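/*
 * Copy an array of integers out of either an ACPI package or a raw buffer
 * object, range-checking package elements against the width of the
 * destination type (selected at compile time via _Generic).
 */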
#define acpi_copy_property_array_uint(items, val, nval) \
({ \
typeof(items) __items = items; \
typeof(val) __val = val; \
typeof(nval) __nval = nval; \
size_t i; \
int ret = 0; \
\
for (i = 0; i < __nval; i++) { \
if (__items->type == ACPI_TYPE_BUFFER) { \
__val[i] = __items->buffer.pointer[i]; \
continue; \
} \
if (__items[i].type != ACPI_TYPE_INTEGER) { \
ret = -EPROTO; \
break; \
} \
if (__items[i].integer.value > _Generic(__val, \
u8 *: U8_MAX, \
u16 *: U16_MAX, \
u32 *: U32_MAX, \
u64 *: U64_MAX)) { \
ret = -EOVERFLOW; \
break; \
} \
\
__val[i] = __items[i].integer.value; \
} \
ret; \
})
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
{
int i;
for (i = 0; i < nval; i++) {
if (items[i].type != ACPI_TYPE_STRING)
return -EPROTO;
val[i] = items[i].string.pointer;
}
return nval;
}
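/*
 * Read a property value of the requested type: try a single-value read
 * first, then fall back to treating the property as an array (a package, or
 * a plain buffer for the integer types).
 */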
static int acpi_data_prop_read(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
{
const union acpi_object *obj;
const union acpi_object *items;
int ret;
if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
/*
		 * The overflow error means that the property is there and it
		 * is a single value, but it does not fit the requested integer
		 * width, so return.
*/
if (ret >= 0 || ret == -EOVERFLOW)
return ret;
/*
* Reading this property as a single-value one failed, but its
* value may still be represented as one-element array, so
* continue.
*/
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
&obj);
if (ret)
return ret;
if (!val) {
if (obj->type == ACPI_TYPE_BUFFER)
return obj->buffer.length;
return obj->package.count;
}
switch (proptype) {
case DEV_PROP_STRING:
break;
case DEV_PROP_U8 ... DEV_PROP_U64:
if (obj->type == ACPI_TYPE_BUFFER) {
if (nval > obj->buffer.length)
return -EOVERFLOW;
break;
}
fallthrough;
default:
if (nval > obj->package.count)
return -EOVERFLOW;
break;
}
if (nval == 0)
return -EINVAL;
if (obj->type != ACPI_TYPE_BUFFER)
items = obj->package.elements;
else
items = obj;
switch (proptype) {
case DEV_PROP_U8:
ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
ret = acpi_copy_property_array_string(
items, (char **)val,
min_t(u32, nval, obj->package.count));
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
* acpi_node_prop_read - retrieve the value of an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
* @propname: Name of the property.
* @proptype: Expected property type.
* @val: Location to store the property value (if not %NULL).
* @nval: Size of the array pointed to by @val.
*
* If @val is %NULL, return the number of array elements comprising the value
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
const char *propname, enum dev_prop_type proptype,
void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
}
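/*
 * Helper for acpi_get_next_subnode(): with *data initially NULL the walk
 * stops at (and returns) the first child; otherwise children are skipped
 * until the "previous" one is seen, after which the next child is returned.
 */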
static int stop_on_next(struct acpi_device *adev, void *data)
{
struct acpi_device **ret_p = data;
if (!*ret_p) {
*ret_p = adev;
return 1;
}
/* Skip until the "previous" object is found. */
if (*ret_p == adev)
*ret_p = NULL;
return 0;
}
/**
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
if ((!child || is_acpi_device_node(child)) && adev) {
struct acpi_device *child_adev = to_acpi_device_node(child);
acpi_dev_for_each_child(adev, stop_on_next, &child_adev);
if (child_adev)
return acpi_fwnode_handle(child_adev);
child = NULL;
}
if (!child || is_acpi_data_node(child)) {
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
const struct list_head *head;
struct list_head *next;
struct acpi_data_node *dn;
/*
* We can have a combination of device and data nodes, e.g. with
* hierarchical _DSD properties. Make sure the adev pointer is
* restored before going through data nodes, otherwise we will
* be looking for data_nodes below the last device found instead
* of the common fwnode shared by device_nodes and data_nodes.
*/
adev = to_acpi_device_node(fwnode);
if (adev)
head = &adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
return NULL;
if (list_empty(head))
return NULL;
if (child) {
dn = to_acpi_data_node(child);
next = dn->sibling.next;
if (next == head)
return NULL;
dn = list_entry(next, struct acpi_data_node, sibling);
} else {
dn = list_first_entry(head, struct acpi_data_node, sibling);
}
return &dn->fwnode;
}
return NULL;
}
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
*
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
static struct fwnode_handle *
acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
		/* All data nodes have a parent pointer, so just return that */
return to_acpi_data_node(fwnode)->parent;
}
if (is_acpi_device_node(fwnode)) {
struct acpi_device *parent;
parent = acpi_dev_parent(to_acpi_device_node(fwnode));
if (parent)
return acpi_fwnode_handle(parent);
}
return NULL;
}
/*
* Return true if the node is an ACPI graph node. Called on either ports
* or endpoints.
*/
static bool is_acpi_graph_node(struct fwnode_handle *fwnode,
const char *str)
{
unsigned int len = strlen(str);
const char *name;
if (!len || !is_acpi_data_node(fwnode))
return false;
name = to_acpi_data_node(fwnode)->name;
return (fwnode_property_present(fwnode, "reg") &&
!strncmp(name, str, len) && name[len] == '@') ||
fwnode_property_present(fwnode, str);
}
/**
* acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
 * Looks up the next endpoint ACPI firmware node below a given @fwnode.
 * Returns the next endpoint on success, or %NULL if there is no next
 * endpoint or in case of error.
*/
static struct fwnode_handle *acpi_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
struct fwnode_handle *endpoint;
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
* have a "reg" property that also has the number of the
* port node. For compatibility reasons a node is also
* recognised as a port node from the "port" property.
*/
if (is_acpi_graph_node(port, "port"))
break;
} while (port);
} else {
port = fwnode_get_parent(prev);
}
if (!port)
return NULL;
endpoint = fwnode_get_next_child_node(port, prev);
while (!endpoint) {
port = fwnode_get_next_child_node(fwnode, port);
if (!port)
break;
if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
}
/*
* The names of the endpoint nodes begin with "endpoint@" followed by
* the number of the endpoint node and they also have a "reg" property
* that also has the number of the endpoint node. For compatibility
* reasons a node is also recognised as an endpoint node from the
* "endpoint" property.
*/
if (!is_acpi_graph_node(endpoint, "endpoint"))
return NULL;
return endpoint;
}
/**
* acpi_graph_get_child_prop_value - Return a child with a given property value
* @fwnode: device fwnode
* @prop_name: The name of the property to look for
* @val: the desired property value
*
 * Return the first child node whose @prop_name property has the value @val,
 * e.g. the port node corresponding to a given port number. Returns the
 * child node on success, NULL otherwise.
*/
static struct fwnode_handle *acpi_graph_get_child_prop_value(
const struct fwnode_handle *fwnode, const char *prop_name,
unsigned int val)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
return child;
}
return NULL;
}
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @__fwnode: Endpoint firmware node pointing to a remote device
*
* Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
static struct fwnode_handle *
acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
{
struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
struct fwnode_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
return NULL;
/* Direct endpoint reference? */
if (!is_acpi_device_node(args.fwnode))
return args.nargs ? NULL : args.fwnode;
/*
* Always require two arguments with the reference: port and
* endpoint indices.
*/
if (args.nargs != 2)
return NULL;
fwnode = args.fwnode;
port_nr = args.args[0];
endpoint_nr = args.args[1];
fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr);
return acpi_graph_get_child_prop_value(fwnode, "endpoint", endpoint_nr);
}
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
return false;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
static const void *
acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
return acpi_device_get_match_data(dev);
}
static bool acpi_fwnode_device_dma_supported(const struct fwnode_handle *fwnode)
{
return acpi_dma_supported(to_acpi_device_node(fwnode));
}
static enum dev_dma_attr
acpi_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
{
return acpi_get_dma_attr(to_acpi_device_node(fwnode));
}
static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !acpi_node_prop_get(fwnode, propname, NULL);
}
static int
acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
enum dev_prop_type type;
switch (elem_size) {
case sizeof(u8):
type = DEV_PROP_U8;
break;
case sizeof(u16):
type = DEV_PROP_U16;
break;
case sizeof(u32):
type = DEV_PROP_U32;
break;
case sizeof(u64):
type = DEV_PROP_U64;
break;
default:
return -ENXIO;
}
return acpi_node_prop_read(fwnode, propname, type, val, nval);
}
static int
acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
val, nval);
}
static int
acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int args_count, unsigned int index,
struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, prop, index,
args_count, args);
}
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
struct fwnode_handle *parent;
/* Is this the root node? */
parent = fwnode_get_parent(fwnode);
if (!parent)
return "\\";
fwnode_handle_put(parent);
if (is_acpi_data_node(fwnode)) {
const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return dn->name;
}
adev = to_acpi_device_node(fwnode);
if (WARN_ON(!adev))
return NULL;
return acpi_device_bid(adev);
}
static const char *
acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
/* Is this the root node? */
parent = fwnode_get_parent(fwnode);
if (!parent)
return "";
/* Is this 2nd node from the root? */
parent = fwnode_get_next_parent(parent);
if (!parent)
return "";
fwnode_handle_put(parent);
/* ACPI device or data node. */
return ".";
}
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
return acpi_node_get_parent(fwnode);
}
static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode);
endpoint->local_fwnode = fwnode;
if (fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port))
fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
return 0;
}
static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
unsigned int index)
{
struct resource res;
int ret;
ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
if (ret)
return ret;
return res.start;
}
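/*
 * ACPI device nodes and ACPI data nodes share the same set of fwnode
 * operations; this macro stamps out one exported ops table for each.
 */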
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
.device_get_match_data = acpi_fwnode_device_get_match_data, \
.device_dma_supported = \
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
.graph_get_remote_endpoint = \
acpi_graph_get_remote_endpoint, \
.graph_get_port_parent = acpi_fwnode_get_parent, \
.graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
.irq_get = acpi_fwnode_irq_get, \
}; \
EXPORT_SYMBOL_GPL(ops)
DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
const struct fwnode_operations acpi_static_fwnode_ops;
bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) &&
fwnode->ops == &acpi_device_fwnode_ops;
}
EXPORT_SYMBOL(is_acpi_device_node);
bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
}
EXPORT_SYMBOL(is_acpi_data_node);
| linux-master | drivers/acpi/property.c |
/*
* Copyright (C) 2015 Red Hat Inc.
* Hans de Goede <[email protected]>
* Copyright (C) 2008 SuSE Linux Products GmbH
* Thomas Renninger <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public License
*
* video_detect.c:
 * After PCI devices are glued to their ACPI companion devices,
* acpi_get_pci_dev() can be called to identify ACPI graphics
* devices for which a real graphics card is plugged in
*
* Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
* are available, video.ko should be used to handle the device.
*
 * Otherwise, vendor-specific drivers like thinkpad_acpi, asus-laptop,
 * sony_acpi, ... can take care of backlight brightness.
*
* Backlight drivers can use acpi_video_get_backlight_type() to determine which
* driver should handle the backlight. RAW/GPU-driver backlight drivers must
* use the acpi_video_backlight_use_native() helper for this.
*
* If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
* this file will not be compiled and acpi_video_get_backlight_type() will
* always return acpi_backlight_vendor.
*/
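/*
 * Illustrative sketch (not part of this file; the registration helper named
 * here is hypothetical): a native GPU backlight driver is expected to check
 * the helper before registering its backlight device:
 *
 *	if (acpi_video_backlight_use_native())
 *		my_driver_register_backlight();
 */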
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/apple-gmux.h>
#include <linux/backlight.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
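/* Map the acpi_backlight= kernel command line option onto the enum. */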
static void acpi_video_parse_cmdline(void)
{
if (!strcmp("vendor", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_vendor;
if (!strcmp("video", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_video;
if (!strcmp("native", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_native;
if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
if (!strcmp("apple_gmux", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_apple_gmux;
if (!strcmp("none", acpi_video_backlight_string))
acpi_backlight_cmdline = acpi_backlight_none;
}
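/*
 * acpi_walk_namespace() callback: accumulate the ACPI video capabilities of
 * every video-class ACPI device that is backed by a real PCI device.
 */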
static acpi_status
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
{
struct acpi_device *acpi_dev = acpi_fetch_acpi_dev(handle);
long *cap = context;
struct pci_dev *dev;
static const struct acpi_device_id video_ids[] = {
{ACPI_VIDEO_HID, 0},
{"", 0},
};
if (acpi_dev && !acpi_match_device_ids(acpi_dev, video_ids)) {
dev = acpi_get_pci_dev(handle);
if (!dev)
return AE_OK;
pci_dev_put(dev);
*cap |= acpi_is_video_device(handle);
}
return AE_OK;
}
/* This depends on ACPI_WMI which is X86 only */
#ifdef CONFIG_X86
static bool nvidia_wmi_ec_supported(void)
{
struct wmi_brightness_args args = {
.mode = WMI_BRIGHTNESS_MODE_GET,
.val = 0,
.ret = 0,
};
struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
acpi_status status;
status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0,
WMI_BRIGHTNESS_METHOD_SOURCE, &buf, &buf);
if (ACPI_FAILURE(status))
return false;
/*
* If brightness is handled by the EC then nvidia-wmi-ec-backlight
* should be used, else the GPU driver(s) should be used.
*/
return args.ret == WMI_BRIGHTNESS_SOURCE_EC;
}
#else
static bool nvidia_wmi_ec_supported(void)
{
return false;
}
#endif
/*
 * Force use of the vendor driver when the ACPI device is known to be
 * buggy.
 */
static int video_detect_force_vendor(const struct dmi_system_id *d)
{
acpi_backlight_dmi = acpi_backlight_vendor;
return 0;
}
static int video_detect_force_video(const struct dmi_system_id *d)
{
acpi_backlight_dmi = acpi_backlight_video;
return 0;
}
static int video_detect_force_native(const struct dmi_system_id *d)
{
acpi_backlight_dmi = acpi_backlight_native;
return 0;
}
static const struct dmi_system_id video_detect_dmi_table[] = {
/*
* Models which should use the vendor backlight interface,
* because of broken ACPI video backlight control.
*/
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */
.callback = video_detect_force_vendor,
/* Acer KAV80 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
},
},
{
.callback = video_detect_force_vendor,
/* Asus UL30VT */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
},
},
{
.callback = video_detect_force_vendor,
/* Asus UL30A */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
{
.callback = video_detect_force_vendor,
/* Asus X55U */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
},
},
{
/* https://bugs.launchpad.net/bugs/1000146 */
.callback = video_detect_force_vendor,
/* Asus X101CH */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
},
},
{
.callback = video_detect_force_vendor,
/* Asus X401U */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
},
},
{
.callback = video_detect_force_vendor,
/* Asus X501U */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
},
},
{
/* https://bugs.launchpad.net/bugs/1000146 */
.callback = video_detect_force_vendor,
/* Asus 1015CX */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
},
},
{
.callback = video_detect_force_vendor,
/* Samsung N150/N210/N220 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
},
},
{
.callback = video_detect_force_vendor,
/* Samsung NF110/NF210/NF310 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
},
},
{
.callback = video_detect_force_vendor,
/* Samsung NC210 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
},
},
{
.callback = video_detect_force_vendor,
/* Xiaomi Mi Pad 2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
/*
* Models which should use the vendor backlight interface,
* because of broken native backlight control.
*/
{
.callback = video_detect_force_vendor,
/* Sony Vaio PCG-FRV35 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
},
},
/*
	 * Toshiba models with a transflective display; these need to use
	 * the toshiba_acpi vendor driver for proper transflective handling.
*/
{
.callback = video_detect_force_vendor,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R500"),
},
},
{
.callback = video_detect_force_vendor,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R600"),
},
},
/*
* Models which need acpi_video backlight control where the GPU drivers
* do not call acpi_video_register_backlight() because no internal panel
* is detected. Typically these are all-in-ones (monitors with builtin
* PC) where the panel connection shows up as regular DP instead of eDP.
*/
{
.callback = video_detect_force_video,
/* Apple iMac14,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
},
},
{
.callback = video_detect_force_video,
/* Apple iMac14,2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
},
},
/*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
* native_backlight on these to fix this:
* https://bugzilla.kernel.org/show_bug.cgi?id=81691
*/
{
.callback = video_detect_force_video,
/* ThinkPad T420 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"),
},
},
{
.callback = video_detect_force_video,
/* ThinkPad T520 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
},
},
{
.callback = video_detect_force_video,
/* ThinkPad X201s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
},
},
{
.callback = video_detect_force_video,
/* ThinkPad X201T */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
},
},
/* The native backlight controls do not work on some older machines */
{
/* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */
.callback = video_detect_force_video,
/* HP ENVY 15 Notebook */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
{
.callback = video_detect_force_video,
/* SAMSUNG 870Z5E/880Z5E/680Z5E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
},
},
{
.callback = video_detect_force_video,
/* SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME,
"370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
.callback = video_detect_force_video,
/* SAMSUNG 3570R/370R/470R/450R/510R/4450RV */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME,
"3570R/370R/470R/450R/510R/4450RV"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
.callback = video_detect_force_video,
/* SAMSUNG 670Z5E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
.callback = video_detect_force_video,
/* SAMSUNG 730U3E/740U3E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
},
},
{
/* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
.callback = video_detect_force_video,
/* SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME,
"900X3C/900X3D/900X3E/900X4C/900X4D"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1272633 */
.callback = video_detect_force_video,
/* Dell XPS14 L421X */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
.callback = video_detect_force_video,
/* Dell XPS15 L521X */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=108971 */
.callback = video_detect_force_video,
/* SAMSUNG 530U4E/540U4E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
},
},
{
/* https://bugs.launchpad.net/bugs/1894667 */
.callback = video_detect_force_video,
/* HP 635 Notebook */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
},
},
/* Non win8 machines which need native backlight nevertheless */
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1201530 */
.callback = video_detect_force_native,
/* Lenovo Ideapad S405 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"),
},
},
{
/* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
.callback = video_detect_force_native,
/* Lenovo Ideapad Z470 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
.callback = video_detect_force_native,
/* Lenovo Ideapad Z570 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
},
{
.callback = video_detect_force_native,
/* Lenovo E41-25 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
},
},
{
.callback = video_detect_force_native,
/* Lenovo E41-45 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
},
},
{
.callback = video_detect_force_native,
/* Lenovo ThinkPad X131e (3371 AMD version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "3371"),
},
},
{
.callback = video_detect_force_native,
/* Apple iMac11,3 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"),
},
},
{
/* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */
.callback = video_detect_force_native,
/* Apple iMac12,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"),
},
},
{
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */
.callback = video_detect_force_native,
/* Apple iMac12,2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"),
},
},
{
.callback = video_detect_force_native,
/* Dell Inspiron N4010 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N4010"),
},
},
{
.callback = video_detect_force_native,
/* Dell Vostro V131 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
.callback = video_detect_force_native,
/* Dell XPS 17 L702X */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
{
.callback = video_detect_force_native,
/* Dell Precision 7510 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
{
.callback = video_detect_force_native,
/* Dell Studio 1569 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 3830TG */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 4810T */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 5738z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
DMI_MATCH(DMI_BOARD_NAME, "JV50"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1012674 */
.callback = video_detect_force_native,
/* Acer Aspire 5741 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=42993 */
.callback = video_detect_force_native,
/* Acer Aspire 5750 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=42833 */
.callback = video_detect_force_native,
/* Acer Extensa 5235 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
},
},
{
.callback = video_detect_force_native,
/* Acer TravelMate 4750 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
.callback = video_detect_force_native,
/* Acer TravelMate 5735Z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"),
DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=36322 */
.callback = video_detect_force_native,
/* Acer TravelMate 5760 */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
},
},
{
.callback = video_detect_force_native,
/* ASUSTeK COMPUTER INC. GA401 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
},
},
{
.callback = video_detect_force_native,
/* ASUSTeK COMPUTER INC. GA502 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
},
},
{
.callback = video_detect_force_native,
/* ASUSTeK COMPUTER INC. GA503 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
{
.callback = video_detect_force_native,
/* Asus U46E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
},
},
{
.callback = video_detect_force_native,
/* Asus UX303UB */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
},
},
{
.callback = video_detect_force_native,
/* HP EliteBook 8460p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
},
},
{
.callback = video_detect_force_native,
/* HP Pavilion g6-1d80nr / B4U19UA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
},
},
{
.callback = video_detect_force_native,
/* Samsung N150P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
DMI_MATCH(DMI_BOARD_NAME, "N150P"),
},
},
{
.callback = video_detect_force_native,
/* Samsung N145P/N250P/N260P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
},
},
{
.callback = video_detect_force_native,
/* Samsung N250P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
DMI_MATCH(DMI_BOARD_NAME, "N250P"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=202401 */
.callback = video_detect_force_native,
/* Sony Vaio VPCEH3U1E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
},
},
{
.callback = video_detect_force_native,
/* Sony Vaio VPCY11S1E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCY11S1E"),
},
},
/*
* These Toshibas have a broken acpi-video interface for brightness
* control. They also have an issue where the panel is off after
* suspend until a special firmware call is made to turn it back
* on. This is handled by the toshiba_acpi kernel module, so that
* module must be enabled for these models to work correctly.
*/
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_detect_force_native,
/* Toshiba Portégé R700 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
},
{
/* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
/* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_detect_force_native,
/* Toshiba Satellite/Portégé R830 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
},
},
{
.callback = video_detect_force_native,
/* Toshiba Satellite/Portégé Z830 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
},
},
/*
* Models which have nvidia-ec-wmi support, but should not use it.
* Note this indicates a likely firmware bug on these models and should
* be revisited if/when Linux gets support for dynamic mux mode.
*/
{
.callback = video_detect_force_native,
/* Dell G15 5515 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
{
.callback = video_detect_force_native,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
},
},
{ },
};
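/*
 * GOOG0004 and GOOG000C are ACPI IDs known to be used by the ChromeOS
 * embedded controller, so finding either of them indicates a
 * Chromebook-class machine.
 */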
static bool google_cros_ec_present(void)
{
return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C");
}
/*
 * Windows 8 and newer no longer use the ACPI video interface, so it often
 * does not work. On win8+ systems, prefer native brightness control instead.
* Chromebooks should always prefer native backlight control.
*/
static bool prefer_native_over_acpi_video(void)
{
return acpi_osi_is_win8() || google_cros_ec_present();
}
/*
 * Determine which type of backlight interface to use on this system:
 * first check the cmdline, then DMI quirks, then do autodetect.
*/
enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
static bool apple_gmux_present;
static bool native_available;
static bool init_done;
static long video_caps;
/* Parse cmdline, dmi and acpi only once */
mutex_lock(&init_mutex);
if (!init_done) {
acpi_video_parse_cmdline();
dmi_check_system(video_detect_dmi_table);
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
apple_gmux_present = apple_gmux_detect(NULL, NULL);
init_done = true;
}
if (native)
native_available = true;
mutex_unlock(&init_mutex);
if (auto_detect)
*auto_detect = false;
/*
* The below heuristics / detection steps are in order of descending
 * precedence. The commandline takes precedence over anything else.
*/
if (acpi_backlight_cmdline != acpi_backlight_undef)
return acpi_backlight_cmdline;
/* DMI quirks override any autodetection. */
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
if (auto_detect)
*auto_detect = true;
/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
if (apple_gmux_present)
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */
if ((video_caps & ACPI_VIDEO_BACKLIGHT) &&
!(native_available && prefer_native_over_acpi_video()))
return acpi_backlight_video;
/* Use native if available */
if (native_available)
return acpi_backlight_native;
/*
* The vendor specific BIOS interfaces are only necessary for
* laptops from before ~2008.
*
* For laptops from ~2008 till ~2023 this point is never reached
* because on those (video_caps & ACPI_VIDEO_BACKLIGHT) above is true.
*
* Laptops from after ~2023 no longer support ACPI_VIDEO_BACKLIGHT,
* if this point is reached on those, this likely means that
* the GPU kms driver which sets native_available has not loaded yet.
*
* Returning acpi_backlight_vendor in this case is known to sometimes
* cause a non working vendor specific /sys/class/backlight device to
* get registered.
*
* Return acpi_backlight_none on laptops with ACPI tables written
* for Windows 8 (laptops from after ~2012) to avoid this problem.
*/
if (acpi_osi_is_win8())
return acpi_backlight_none;
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
EXPORT_SYMBOL(__acpi_video_get_backlight_type);
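/*
 * Usage sketch: a native GPU backlight driver typically gates its backlight
 * registration on the detected type, e.g.:
 *
 *	if (acpi_video_get_backlight_type() != acpi_backlight_native)
 *		return 0;
 *
 * where acpi_video_get_backlight_type() is assumed to be the inline wrapper
 * around __acpi_video_get_backlight_type(false, NULL) from
 * include/acpi/video.h.
 */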
| linux-master | drivers/acpi/video_detect.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dock.c - ACPI dock station driver
*
* Copyright (C) 2006, 2014, Intel Corp.
* Author: Kristen Carlson Accardi <[email protected]>
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
#include "internal.h"
static bool immediate_undock = true;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
		 "undock immediately when the undock button is pressed, 0 will "
		 "cause the driver to wait for userspace to write the undock "
		 "sysfs file before undocking");
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
u32 flags;
struct list_head dependent_devices;
struct list_head sibling;
struct platform_device *dock_device;
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
struct dock_dependent_device {
struct list_head list;
struct acpi_device *adev;
};
#define DOCK_DOCKING 0x00000001
#define DOCK_UNDOCKING 0x00000002
#define DOCK_IS_DOCK 0x00000010
#define DOCK_IS_ATA 0x00000020
#define DOCK_IS_BAT 0x00000040
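/*
 * Example: a dock station in the middle of docking has
 * flags == (DOCK_IS_DOCK | DOCK_DOCKING) == 0x00000011.
 */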
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
enum dock_callback_type {
DOCK_CALL_HANDLER,
DOCK_CALL_FIXUP,
DOCK_CALL_UEVENT,
};
/*****************************************************************************
* Dock Dependent device functions *
*****************************************************************************/
/**
* add_dock_dependent_device - associate a device with the dock station
* @ds: Dock station.
* @adev: Dependent ACPI device object.
*
* Add the dependent device to the dock's dependent device list.
*/
static int add_dock_dependent_device(struct dock_station *ds,
struct acpi_device *adev)
{
struct dock_dependent_device *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->adev = adev;
INIT_LIST_HEAD(&dd->list);
list_add_tail(&dd->list, &ds->dependent_devices);
return 0;
}
static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
enum dock_callback_type cb_type)
{
struct acpi_device *adev = dd->adev;
acpi_lock_hp_context();
if (!adev->hp)
goto out;
if (cb_type == DOCK_CALL_FIXUP) {
void (*fixup)(struct acpi_device *);
fixup = adev->hp->fixup;
if (fixup) {
acpi_unlock_hp_context();
fixup(adev);
return;
}
} else if (cb_type == DOCK_CALL_UEVENT) {
void (*uevent)(struct acpi_device *, u32);
uevent = adev->hp->uevent;
if (uevent) {
acpi_unlock_hp_context();
uevent(adev, event);
return;
}
} else {
int (*notify)(struct acpi_device *, u32);
notify = adev->hp->notify;
if (notify) {
acpi_unlock_hp_context();
notify(adev, event);
return;
}
}
out:
acpi_unlock_hp_context();
}
static struct dock_station *find_dock_station(acpi_handle handle)
{
struct dock_station *ds;
list_for_each_entry(ds, &dock_stations, sibling)
if (ds->handle == handle)
return ds;
return NULL;
}
/**
* find_dock_dependent_device - get a device dependent on this dock
* @ds: the dock station
* @adev: ACPI device object to find.
*
 * Iterate over the dependent device list for this dock. If the
 * dependent device matches the given ACPI device object, return it.
*/
static struct dock_dependent_device *
find_dock_dependent_device(struct dock_station *ds, struct acpi_device *adev)
{
struct dock_dependent_device *dd;
list_for_each_entry(dd, &ds->dependent_devices, list)
if (adev == dd->adev)
return dd;
return NULL;
}
void register_dock_dependent_device(struct acpi_device *adev,
acpi_handle dshandle)
{
struct dock_station *ds = find_dock_station(dshandle);
if (ds && !find_dock_dependent_device(ds, adev))
add_dock_dependent_device(ds, adev);
}
/*****************************************************************************
* Dock functions *
*****************************************************************************/
/**
* is_dock_device - see if a device is on a dock station
* @adev: ACPI device object to check.
*
* If this device is either the dock station itself,
 * or a device dependent on the dock station, then it
 * is a dock device.
*/
int is_dock_device(struct acpi_device *adev)
{
struct dock_station *dock_station;
if (!dock_station_count)
return 0;
if (acpi_dock_match(adev->handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
if (find_dock_dependent_device(dock_station, adev))
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(is_dock_device);
/**
* dock_present - see if the dock station is present.
* @ds: the dock station
*
 * Execute the _STA method. Note that being present does not
 * imply that we are docked.
*/
static int dock_present(struct dock_station *ds)
{
unsigned long long sta;
acpi_status status;
if (ds) {
status = acpi_evaluate_integer(ds->handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
return 1;
}
return 0;
}
/**
* hot_remove_dock_devices - Remove dock station devices.
* @ds: Dock station.
*/
static void hot_remove_dock_devices(struct dock_station *ds)
{
struct dock_dependent_device *dd;
/*
* Walk the list in reverse order so that devices that have been added
* last are removed first (in case there are some indirect dependencies
* between them).
*/
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST,
DOCK_CALL_HANDLER);
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
acpi_bus_trim(dd->adev);
}
/**
* hotplug_dock_devices - Insert devices on a dock station.
* @ds: the dock station
* @event: either bus check or device check request
*
* Some devices on the dock station need to have drivers called
* to perform hotplug operations after a dock event has occurred.
* Traverse the list of dock devices that have registered a
* hotplug handler, and call the handler.
*/
static void hotplug_dock_devices(struct dock_station *ds, u32 event)
{
struct dock_dependent_device *dd;
/* Call driver specific post-dock fixups. */
list_for_each_entry(dd, &ds->dependent_devices, list)
dock_hotplug_event(dd, event, DOCK_CALL_FIXUP);
/* Call driver specific hotplug functions. */
list_for_each_entry(dd, &ds->dependent_devices, list)
dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
/*
* Check if all devices have been enumerated already. If not, run
* acpi_bus_scan() for them and that will cause scan handlers to be
* attached to device objects or acpi_drivers to be stopped/started if
* they are present.
*/
list_for_each_entry(dd, &ds->dependent_devices, list) {
struct acpi_device *adev = dd->adev;
if (!acpi_device_enumerated(adev)) {
int ret = acpi_bus_scan(adev->handle);
if (ret)
dev_dbg(&adev->dev, "scan error %d\n", -ret);
}
}
}
static void dock_event(struct dock_station *ds, u32 event, int num)
{
struct device *dev = &ds->dock_device->dev;
char event_string[13];
char *envp[] = { event_string, NULL };
struct dock_dependent_device *dd;
if (num == UNDOCK_EVENT)
sprintf(event_string, "EVENT=undock");
else
sprintf(event_string, "EVENT=dock");
/*
* Indicate that the status of the dock station has
* changed.
*/
if (num == DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
list_for_each_entry(dd, &ds->dependent_devices, list)
dock_hotplug_event(dd, event, DOCK_CALL_UEVENT);
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
* handle_dock - handle a dock event
* @ds: the dock station
* @dock: to dock, or undock - that is the question
*
* Execute the _DCK method in response to an acpi event
*/
static void handle_dock(struct dock_station *ds, int dock)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
unsigned long long value;
acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
status);
}
static inline void dock(struct dock_station *ds)
{
handle_dock(ds, 1);
}
static inline void undock(struct dock_station *ds)
{
handle_dock(ds, 0);
}
static inline void begin_dock(struct dock_station *ds)
{
ds->flags |= DOCK_DOCKING;
}
static inline void complete_dock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_DOCKING);
ds->last_dock_time = jiffies;
}
static inline void begin_undock(struct dock_station *ds)
{
ds->flags |= DOCK_UNDOCKING;
}
static inline void complete_undock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_UNDOCKING);
}
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
*
 * Sometimes while docking, false dock events can be sent to the driver
 * because good connections aren't made, or for some other reason. Ignore
 * these if we are in the middle of doing something.
*/
static int dock_in_progress(struct dock_station *ds)
{
if ((ds->flags & DOCK_DOCKING) ||
time_before(jiffies, (ds->last_dock_time + HZ)))
return 1;
return 0;
}
/**
 * handle_eject_request - handle an undock request checking for error conditions
 * @ds: the dock station
 * @event: the event code that triggered the undock request
 *
 * Check to make sure the dock device is still present, then undock and
 * hot-remove all the devices that may need removing.
*/
static int handle_eject_request(struct dock_station *ds, u32 event)
{
if (dock_in_progress(ds))
return -EBUSY;
/*
	 * Here we need to generate the undock event prior to actually
	 * doing the undock, so that the device struct still exists.
	 * Also, send the event even if the device is not present
	 * anymore.
*/
dock_event(ds, event, UNDOCK_EVENT);
hot_remove_dock_devices(ds);
undock(ds);
acpi_evaluate_lck(ds->handle, 0);
acpi_evaluate_ej0(ds->handle);
if (dock_present(ds)) {
acpi_handle_err(ds->handle, "Unable to undock!\n");
return -EBUSY;
}
complete_undock(ds);
return 0;
}
/**
* dock_notify - Handle ACPI dock notification.
* @adev: Dock station's ACPI device object.
* @event: Event code.
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
 * and then hotplug any devices that may need hotplugging.
*/
int dock_notify(struct acpi_device *adev, u32 event)
{
acpi_handle handle = adev->handle;
struct dock_station *ds = find_dock_station(handle);
int surprise_removal = 0;
if (!ds)
return -ENODEV;
/*
	 * According to the ACPI spec 3.0a, if a DEVICE_CHECK notification
* is sent and _DCK is present, it is assumed to mean an undock
* request.
*/
if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
event = ACPI_NOTIFY_EJECT_REQUEST;
/*
* dock station: BUS_CHECK - docked or surprise removal
* DEVICE_CHECK - undocked
* other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
*
	 * To simplify event handling, dock dependent device handlers always
	 * get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for addition and
	 * ACPI_NOTIFY_EJECT_REQUEST for removal.
*/
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
acpi_handle_err(handle, "Unable to dock!\n");
complete_dock(ds);
break;
}
hotplug_dock_devices(ds, event);
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
acpi_evaluate_lck(ds->handle, 1);
acpi_update_all_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
break;
/* This is a surprise removal */
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
		/* Fall through */
fallthrough;
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
|| surprise_removal)
handle_eject_request(ds, event);
else
dock_event(ds, event, UNDOCK_EVENT);
break;
}
return 0;
}
/*
* show_docked - read method for "docked" file in sysfs
*/
static ssize_t docked_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
struct acpi_device *adev = acpi_fetch_acpi_dev(dock_station->handle);
return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR_RO(docked);
/*
* show_flags - read method for flags file in sysfs
*/
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return sysfs_emit(buf, "%d\n", dock_station->flags);
}
static DEVICE_ATTR_RO(flags);
/*
* write_undock - write method for "undock" file in sysfs
*/
static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
if (!count)
return -EINVAL;
acpi_scan_lock_acquire();
begin_undock(dock_station);
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
acpi_scan_lock_release();
return ret ? ret : count;
}
static DEVICE_ATTR_WO(undock);
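/*
 * Example (assuming the first registered dock station):
 *
 *	echo 1 > /sys/devices/platform/dock.0/undock
 *
 * lets userspace request the undock directly.
 */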
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
static ssize_t uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
return 0;
return sysfs_emit(buf, "%llx\n", lbuf);
}
static DEVICE_ATTR_RO(uid);
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
if (dock_station->flags & DOCK_IS_DOCK)
type = "dock_station";
else if (dock_station->flags & DOCK_IS_ATA)
type = "ata_bay";
else if (dock_station->flags & DOCK_IS_BAT)
type = "battery_bay";
else
type = "unknown";
return sysfs_emit(buf, "%s\n", type);
}
static DEVICE_ATTR_RO(type);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,
&dev_attr_flags.attr,
&dev_attr_undock.attr,
&dev_attr_uid.attr,
&dev_attr_type.attr,
NULL
};
static const struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
/**
* acpi_dock_add - Add a new dock station
* @adev: Dock station ACPI device object.
*
 * Allocate and initialize a new dock station device.
*/
void acpi_dock_add(struct acpi_device *adev)
{
struct dock_station *dock_station, ds = { NULL, };
struct platform_device_info pdevinfo;
acpi_handle handle = adev->handle;
struct platform_device *dd;
int ret;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.name = "dock";
pdevinfo.id = dock_station_count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
pdevinfo.data = &ds;
pdevinfo.size_data = sizeof(ds);
dd = platform_device_register_full(&pdevinfo);
if (IS_ERR(dd))
return;
dock_station = dd->dev.platform_data;
dock_station->handle = handle;
dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
INIT_LIST_HEAD(&dock_station->sibling);
INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
dev_set_uevent_suppress(&dd->dev, 0);
if (acpi_dock_match(handle))
dock_station->flags |= DOCK_IS_DOCK;
if (acpi_ata_match(handle))
dock_station->flags |= DOCK_IS_ATA;
if (acpi_device_is_battery(adev))
dock_station->flags |= DOCK_IS_BAT;
ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
goto err_unregister;
/* add the dock station as a device dependent on itself */
ret = add_dock_dependent_device(dock_station, adev);
if (ret)
goto err_rmgroup;
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
adev->flags.is_dock_station = true;
dev_info(&adev->dev, "ACPI dock station (docks/bays count: %d)\n",
dock_station_count);
return;
err_rmgroup:
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
}
| linux-master | drivers/acpi/dock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI AML interfacing support
*
* Copyright (C) 2015, Intel Corporation
* Authors: Lv Zheng <[email protected]>
*/
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: AML: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi.h>
#include "internal.h"
#define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE PAGE_SIZE
#define circ_count(circ) \
(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
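/*
 * Worked example with PAGE_SIZE == 4096: head == 10, tail == 4090 gives
 * circ_count() == (10 - 4090) & 4095 == 16 bytes readable in total, of
 * which circ_count_to_end() == 6 are contiguous up to the end of the
 * buffer (the remaining 10 wrap around to the start).
 */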
#define ACPI_AML_OPENED 0x0001
#define ACPI_AML_CLOSED 0x0002
#define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
#define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
struct acpi_aml_io {
wait_queue_head_t wait;
unsigned long flags;
unsigned long users;
struct mutex lock;
struct task_struct *thread;
char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
struct circ_buf out_crc;
char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
struct circ_buf in_crc;
acpi_osd_exec_callback function;
void *context;
unsigned long usages;
};
static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;
static inline bool __acpi_aml_running(void)
{
return acpi_aml_io.thread ? true : false;
}
static inline bool __acpi_aml_access_ok(unsigned long flag)
{
/*
	 * If the debugger interface is in the opened state (OPENED &&
	 * !CLOSED), it is allowed to access the debugger buffers from
	 * either user space or kernel space.
	 * In addition, for kernel space, only the debugger thread
	 * (matching thread ID) is allowed access.
*/
if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
(acpi_aml_io.flags & ACPI_AML_CLOSED) ||
!__acpi_aml_running())
return false;
if ((flag & ACPI_AML_KERN) &&
current != acpi_aml_io.thread)
return false;
return true;
}
static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
/*
* Another read is not in progress and there is data in buffer
* available for read.
*/
if (!(acpi_aml_io.flags & flag) && circ_count(circ))
return true;
return false;
}
static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
/*
* Another write is not in progress and there is buffer space
* available for write.
*/
if (!(acpi_aml_io.flags & flag) && circ_space(circ))
return true;
return false;
}
static inline bool __acpi_aml_busy(void)
{
if (acpi_aml_io.flags & ACPI_AML_BUSY)
return true;
return false;
}
static inline bool __acpi_aml_used(void)
{
return acpi_aml_io.usages ? true : false;
}
static inline bool acpi_aml_running(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = __acpi_aml_running();
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_busy(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = __acpi_aml_busy();
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_used(void)
{
bool ret;
/*
	 * The usage count is used to avoid race conditions between the
* starts and the stops of the debugger thread.
*/
mutex_lock(&acpi_aml_io.lock);
ret = __acpi_aml_used();
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_kern_readable(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
__acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_kern_writable(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
__acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_user_readable(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
__acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static bool acpi_aml_user_writable(void)
{
bool ret;
mutex_lock(&acpi_aml_io.lock);
ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
__acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
int ret = 0;
mutex_lock(&acpi_aml_io.lock);
if (!__acpi_aml_access_ok(flag)) {
ret = -EFAULT;
goto out;
}
if (!__acpi_aml_writable(circ, flag)) {
ret = -EAGAIN;
goto out;
}
acpi_aml_io.flags |= flag;
out:
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
int ret = 0;
mutex_lock(&acpi_aml_io.lock);
if (!__acpi_aml_access_ok(flag)) {
ret = -EFAULT;
goto out;
}
if (!__acpi_aml_readable(circ, flag)) {
ret = -EAGAIN;
goto out;
}
acpi_aml_io.flags |= flag;
out:
mutex_unlock(&acpi_aml_io.lock);
return ret;
}
static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.flags &= ~flag;
if (wakeup)
wake_up_interruptible(&acpi_aml_io.wait);
mutex_unlock(&acpi_aml_io.lock);
}
static int acpi_aml_write_kern(const char *buf, int len)
{
int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
int n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
if (ret < 0)
return ret;
/* sync tail before inserting logs */
smp_mb();
p = &crc->buf[crc->head];
n = min(len, circ_space_to_end(crc));
memcpy(p, buf, n);
/* sync head after inserting logs */
smp_wmb();
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
return n;
}
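/*
 * Note on the barrier pairing (the standard single-producer/single-consumer
 * circ_buf protocol, see Documentation/core-api/circular-buffers.rst): the
 * producer reads the consumer's index before copying data in and publishes
 * the new head with smp_wmb(), while the consumer uses smp_rmb() before
 * consuming the data and a full barrier before publishing the new tail.
 */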
static int acpi_aml_readb_kern(void)
{
int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
if (ret < 0)
return ret;
/* sync head before removing cmds */
smp_rmb();
p = &crc->buf[crc->tail];
ret = (int)*p;
/* sync tail before inserting cmds */
smp_mb();
crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
return ret;
}
/*
* acpi_aml_write_log() - Capture debugger output
* @msg: the debugger output
*
* This function should be used to implement acpi_os_printf() to filter out
* the debugger output and store the output into the debugger interface
* buffer. Return the size of stored logs or errno.
*/
static ssize_t acpi_aml_write_log(const char *msg)
{
int ret = 0;
int count = 0, size = 0;
if (!acpi_aml_initialized)
return -ENODEV;
if (msg)
count = strlen(msg);
while (count > 0) {
again:
ret = acpi_aml_write_kern(msg + size, count);
if (ret == -EAGAIN) {
ret = wait_event_interruptible(acpi_aml_io.wait,
acpi_aml_kern_writable());
/*
* We need to retry when the condition
* becomes true.
*/
if (ret == 0)
goto again;
break;
}
if (ret < 0)
break;
size += ret;
count -= ret;
}
return size > 0 ? size : ret;
}
/*
* acpi_aml_read_cmd() - Capture debugger input
* @msg: the debugger input
* @size: the size of the debugger input
*
* This function should be used to implement acpi_os_get_line() to capture
* the debugger input commands and store the input commands into the
* debugger interface buffer. Return the size of stored commands or errno.
*/
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
int ret = 0;
int size = 0;
/*
	 * This is guaranteed by the fact that the debugger thread is
	 * running, unless a bug is introduced.
*/
BUG_ON(!acpi_aml_initialized);
while (count > 0) {
again:
/*
* Check each input byte to find the end of the command.
*/
ret = acpi_aml_readb_kern();
if (ret == -EAGAIN) {
ret = wait_event_interruptible(acpi_aml_io.wait,
acpi_aml_kern_readable());
/*
* We need to retry when the condition becomes
* true.
*/
if (ret == 0)
goto again;
}
if (ret < 0)
break;
*(msg + size) = (char)ret;
size++;
count--;
if (ret == '\n') {
/*
* acpi_os_get_line() requires a zero terminated command
* string.
*/
*(msg + size - 1) = '\0';
break;
}
}
return size > 0 ? size : ret;
}
static int acpi_aml_thread(void *unused)
{
acpi_osd_exec_callback function = NULL;
void *context;
mutex_lock(&acpi_aml_io.lock);
if (acpi_aml_io.function) {
acpi_aml_io.usages++;
function = acpi_aml_io.function;
context = acpi_aml_io.context;
}
mutex_unlock(&acpi_aml_io.lock);
if (function)
function(context);
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.usages--;
if (!__acpi_aml_used()) {
acpi_aml_io.thread = NULL;
wake_up(&acpi_aml_io.wait);
}
mutex_unlock(&acpi_aml_io.lock);
return 0;
}
/*
* acpi_aml_create_thread() - Create AML debugger thread
* @function: the debugger thread callback
* @context: the context to be passed to the debugger thread
*
* This function should be used to implement acpi_os_execute() which is
* used by the ACPICA debugger to create the debugger thread.
*/
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
struct task_struct *t;
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.function = function;
acpi_aml_io.context = context;
mutex_unlock(&acpi_aml_io.lock);
t = kthread_create(acpi_aml_thread, NULL, "aml");
if (IS_ERR(t)) {
pr_err("Failed to create AML debugger thread.\n");
return PTR_ERR(t);
}
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.thread = t;
acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
wake_up_process(t);
mutex_unlock(&acpi_aml_io.lock);
return 0;
}
static int acpi_aml_wait_command_ready(bool single_step,
char *buffer, size_t length)
{
acpi_status status;
if (single_step)
acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
else
acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
status = acpi_os_get_line(buffer, length, NULL);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static int acpi_aml_notify_command_complete(void)
{
return 0;
}
static int acpi_aml_open(struct inode *inode, struct file *file)
{
int ret = 0;
acpi_status status;
mutex_lock(&acpi_aml_io.lock);
/*
	 * The debugger interface is being closed; no new users are allowed
* during this period.
*/
if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
ret = -EBUSY;
goto err_lock;
}
if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
/*
* Only one reader is allowed to initiate the debugger
* thread.
*/
if (acpi_aml_active_reader) {
ret = -EBUSY;
goto err_lock;
} else {
pr_debug("Opening debugger reader.\n");
acpi_aml_active_reader = file;
}
} else {
/*
* No writer is allowed unless the debugger thread is
* ready.
*/
if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
ret = -ENODEV;
goto err_lock;
}
}
if (acpi_aml_active_reader == file) {
pr_debug("Opening debugger interface.\n");
mutex_unlock(&acpi_aml_io.lock);
pr_debug("Initializing debugger thread.\n");
status = acpi_initialize_debugger();
if (ACPI_FAILURE(status)) {
pr_err("Failed to initialize debugger.\n");
ret = -EINVAL;
goto err_exit;
}
pr_debug("Debugger thread initialized.\n");
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.flags |= ACPI_AML_OPENED;
acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
pr_debug("Debugger interface opened.\n");
}
acpi_aml_io.users++;
err_lock:
if (ret < 0) {
if (acpi_aml_active_reader == file)
acpi_aml_active_reader = NULL;
}
mutex_unlock(&acpi_aml_io.lock);
err_exit:
return ret;
}
static int acpi_aml_release(struct inode *inode, struct file *file)
{
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.users--;
if (file == acpi_aml_active_reader) {
pr_debug("Closing debugger reader.\n");
acpi_aml_active_reader = NULL;
pr_debug("Closing debugger interface.\n");
acpi_aml_io.flags |= ACPI_AML_CLOSED;
/*
* Wake up all user space/kernel space blocked
* readers/writers.
*/
wake_up_interruptible(&acpi_aml_io.wait);
mutex_unlock(&acpi_aml_io.lock);
/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop so that the ACPICA command loop of the debugger thread
		 * fails all of its command line reads after this point.
*/
wait_event(acpi_aml_io.wait, !acpi_aml_busy());
/*
		 * Then we try to terminate the debugger thread if it has
		 * not terminated yet.
*/
pr_debug("Terminating debugger thread.\n");
acpi_terminate_debugger();
wait_event(acpi_aml_io.wait, !acpi_aml_used());
pr_debug("Debugger thread terminated.\n");
mutex_lock(&acpi_aml_io.lock);
acpi_aml_io.flags &= ~ACPI_AML_OPENED;
}
if (acpi_aml_io.users == 0) {
pr_debug("Debugger interface closed.\n");
acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
}
mutex_unlock(&acpi_aml_io.lock);
return 0;
}
static int acpi_aml_read_user(char __user *buf, int len)
{
int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
int n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
if (ret < 0)
return ret;
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
n = min(len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
}
/* sync tail after removing logs */
smp_mb();
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int ret = 0;
int size = 0;
if (!count)
return 0;
if (!access_ok(buf, count))
return -EFAULT;
while (count > 0) {
again:
ret = acpi_aml_read_user(buf + size, count);
if (ret == -EAGAIN) {
if (file->f_flags & O_NONBLOCK)
break;
else {
ret = wait_event_interruptible(acpi_aml_io.wait,
acpi_aml_user_readable());
/*
* We need to retry when the condition
* becomes true.
*/
if (ret == 0)
goto again;
}
}
if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
}
if (ret) {
size += ret;
count -= ret;
*ppos += ret;
break;
}
}
return size > 0 ? size : ret;
}
static int acpi_aml_write_user(const char __user *buf, int len)
{
int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
int n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
if (ret < 0)
return ret;
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
n = min(len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
}
/* sync head after inserting cmds */
smp_wmb();
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
	return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret = 0;
int size = 0;
if (!count)
return 0;
if (!access_ok(buf, count))
return -EFAULT;
while (count > 0) {
again:
ret = acpi_aml_write_user(buf + size, count);
if (ret == -EAGAIN) {
if (file->f_flags & O_NONBLOCK)
break;
else {
ret = wait_event_interruptible(acpi_aml_io.wait,
acpi_aml_user_writable());
/*
* We need to retry when the condition
* becomes true.
*/
if (ret == 0)
goto again;
}
}
if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
}
if (ret) {
size += ret;
count -= ret;
*ppos += ret;
}
}
return size > 0 ? size : ret;
}
static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
__poll_t masks = 0;
poll_wait(file, &acpi_aml_io.wait, wait);
if (acpi_aml_user_readable())
masks |= EPOLLIN | EPOLLRDNORM;
if (acpi_aml_user_writable())
masks |= EPOLLOUT | EPOLLWRNORM;
return masks;
}
static const struct file_operations acpi_aml_operations = {
.read = acpi_aml_read,
.write = acpi_aml_write,
.poll = acpi_aml_poll,
.open = acpi_aml_open,
.release = acpi_aml_release,
.llseek = generic_file_llseek,
};
static const struct acpi_debugger_ops acpi_aml_debugger = {
.create_thread = acpi_aml_create_thread,
.read_cmd = acpi_aml_read_cmd,
.write_log = acpi_aml_write_log,
.wait_command_ready = acpi_aml_wait_command_ready,
.notify_command_complete = acpi_aml_notify_command_complete,
};
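/*
 * Userspace talks to the debugger through the "acpidbg" debugfs file created
 * below (typically /sys/kernel/debug/acpi/acpidbg); the acpidbg tool under
 * tools/power/acpi/tools/acpidbg is one known client of this interface.
 */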
static int __init acpi_aml_init(void)
{
int ret;
if (acpi_disabled)
return -ENODEV;
/* Initialize AML IO interface */
mutex_init(&acpi_aml_io.lock);
init_waitqueue_head(&acpi_aml_io.wait);
acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
acpi_aml_dentry = debugfs_create_file("acpidbg",
S_IFREG | S_IRUGO | S_IWUSR,
acpi_debugfs_dir, NULL,
&acpi_aml_operations);
ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
if (ret) {
debugfs_remove(acpi_aml_dentry);
acpi_aml_dentry = NULL;
return ret;
}
acpi_aml_initialized = true;
return 0;
}
static void __exit acpi_aml_exit(void)
{
if (acpi_aml_initialized) {
acpi_unregister_debugger(&acpi_aml_debugger);
debugfs_remove(acpi_aml_dentry);
acpi_aml_dentry = NULL;
acpi_aml_initialized = false;
}
}
module_init(acpi_aml_init);
module_exit(acpi_aml_exit);
MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/acpi_dbg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* scan.c - support for transforming the ACPI namespace into individual objects
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
#include <linux/crc32.h>
#include <linux/dma-direct.h>
#include "internal.h"
#include "sleep.h"
#define ACPI_BUS_CLASS "system_bus"
#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
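/*
 * A pointer value that can never refer to a valid ACPI namespace node, used
 * to mark device objects whose namespace nodes have been deleted (see
 * acpi_scan_drop_device() below).
 */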
#define INVALID_ACPI_HANDLE ((acpi_handle)ZERO_PAGE(0))
static const char *dummy_hid = "device";
static LIST_HEAD(acpi_dep_list);
static DEFINE_MUTEX(acpi_dep_list_lock);
LIST_HEAD(acpi_bus_id_list);
static DEFINE_MUTEX(acpi_scan_lock);
static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
static DEFINE_MUTEX(acpi_hp_context_lock);
/*
* The UART device described by the SPCR table is the only object which needs
 * special-casing. Everything else is covered by ACPI namespace paths in the
 * STAO table.
*/
static u64 spcr_uart_addr;
void acpi_scan_lock_acquire(void)
{
mutex_lock(&acpi_scan_lock);
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
void acpi_scan_lock_release(void)
{
mutex_unlock(&acpi_scan_lock);
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
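/*
 * Callers outside this file are expected to hold this lock around namespace
 * scans and hot-removal; e.g. undock_store() in dock.c brackets
 * handle_eject_request() with acpi_scan_lock_acquire()/release().
 */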
void acpi_lock_hp_context(void)
{
mutex_lock(&acpi_hp_context_lock);
}
void acpi_unlock_hp_context(void)
{
mutex_unlock(&acpi_hp_context_lock);
}
void acpi_initialize_hp_context(struct acpi_device *adev,
struct acpi_hotplug_context *hp,
int (*notify)(struct acpi_device *, u32),
void (*uevent)(struct acpi_device *, u32))
{
acpi_lock_hp_context();
hp->notify = notify;
hp->uevent = uevent;
acpi_set_hp_context(adev, hp);
acpi_unlock_hp_context();
}
EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
int acpi_scan_add_handler(struct acpi_scan_handler *handler)
{
if (!handler)
return -EINVAL;
list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
return 0;
}
int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
const char *hotplug_profile_name)
{
int error;
error = acpi_scan_add_handler(handler);
if (error)
return error;
acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
return 0;
}
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
{
struct acpi_device_physical_node *pn;
bool offline = true;
char *envp[] = { "EVENT=offline", NULL };
/*
* acpi_container_offline() calls this for all of the container's
	 * children under the container's physical_node_lock.
*/
mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
list_for_each_entry(pn, &adev->physical_node_list, node)
if (device_supports_offline(pn->dev) && !pn->dev->offline) {
if (uevent)
kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp);
offline = false;
break;
}
mutex_unlock(&adev->physical_node_lock);
return offline;
}
static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_device_physical_node *pn;
bool second_pass = (bool)data;
acpi_status status = AE_OK;
if (!device)
return AE_OK;
if (device->handler && !device->handler->hotplug.enabled) {
*ret_p = &device->dev;
return AE_SUPPORT;
}
mutex_lock(&device->physical_node_lock);
list_for_each_entry(pn, &device->physical_node_list, node) {
int ret;
if (second_pass) {
/* Skip devices offlined by the first pass. */
if (pn->put_online)
continue;
} else {
pn->put_online = false;
}
ret = device_offline(pn->dev);
if (ret >= 0) {
pn->put_online = !ret;
} else {
*ret_p = pn->dev;
if (second_pass) {
status = AE_ERROR;
break;
}
}
}
mutex_unlock(&device->physical_node_lock);
return status;
}
static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_device_physical_node *pn;
if (!device)
return AE_OK;
mutex_lock(&device->physical_node_lock);
list_for_each_entry(pn, &device->physical_node_list, node)
if (pn->put_online) {
device_online(pn->dev);
pn->put_online = false;
}
mutex_unlock(&device->physical_node_lock);
return AE_OK;
}
static int acpi_scan_try_to_offline(struct acpi_device *device)
{
acpi_handle handle = device->handle;
struct device *errdev = NULL;
acpi_status status;
/*
* Carry out two passes here and ignore errors in the first pass,
* because if the devices in question are memory blocks and
* CONFIG_MEMCG is set, one of the blocks may hold data structures
* that the other blocks depend on, but it is not known in advance which
* block holds them.
*
* If the first pass is successful, the second one isn't needed, though.
*/
status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
NULL, acpi_bus_offline, (void *)false,
(void **)&errdev);
if (status == AE_SUPPORT) {
dev_warn(errdev, "Offline disabled.\n");
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_online, NULL, NULL, NULL);
return -EPERM;
}
acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
if (errdev) {
errdev = NULL;
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
NULL, acpi_bus_offline, (void *)true,
(void **)&errdev);
if (!errdev)
acpi_bus_offline(handle, 0, (void *)true,
(void **)&errdev);
if (errdev) {
dev_warn(errdev, "Offline failed.\n");
acpi_bus_online(handle, 0, NULL, NULL);
acpi_walk_namespace(ACPI_TYPE_ANY, handle,
ACPI_UINT32_MAX, acpi_bus_online,
NULL, NULL, NULL);
return -EBUSY;
}
}
return 0;
}
static int acpi_scan_hot_remove(struct acpi_device *device)
{
acpi_handle handle = device->handle;
unsigned long long sta;
acpi_status status;
if (device->handler && device->handler->hotplug.demand_offline) {
if (!acpi_scan_is_offline(device, true))
return -EBUSY;
} else {
int error = acpi_scan_try_to_offline(device);
if (error)
return error;
}
acpi_handle_debug(handle, "Ejecting\n");
acpi_bus_trim(device);
acpi_evaluate_lck(handle, 0);
/*
* TBD: _EJD support.
*/
status = acpi_evaluate_ej0(handle);
if (status == AE_NOT_FOUND)
return -ENODEV;
else if (ACPI_FAILURE(status))
return -EIO;
/*
* Verify if eject was indeed successful. If not, log an error
* message. No need to call _OST since _EJ0 call was made OK.
*/
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle,
"Status check after eject failed (0x%x)\n", status);
} else if (sta & ACPI_STA_DEVICE_ENABLED) {
acpi_handle_warn(handle,
"Eject incomplete - status 0x%llx\n", sta);
}
return 0;
}
static int acpi_scan_device_not_present(struct acpi_device *adev)
{
if (!acpi_device_enumerated(adev)) {
dev_warn(&adev->dev, "Still not present\n");
return -EALREADY;
}
acpi_bus_trim(adev);
return 0;
}
static int acpi_scan_device_check(struct acpi_device *adev)
{
int error;
acpi_bus_get_status(adev);
if (adev->status.present || adev->status.functional) {
/*
* This function is only called for device objects for which
* matching scan handlers exist. The only situation in which
* the scan handler is not attached to this device object yet
* is when the device has just appeared (either it wasn't
* present at all before or it was removed and then added
* again).
*/
if (adev->handler) {
dev_warn(&adev->dev, "Already enumerated\n");
return -EALREADY;
}
error = acpi_bus_scan(adev->handle);
if (error) {
dev_warn(&adev->dev, "Namespace scan failure\n");
return error;
}
if (!adev->handler) {
dev_warn(&adev->dev, "Enumeration failure\n");
error = -ENODEV;
}
} else {
error = acpi_scan_device_not_present(adev);
}
return error;
}
static int acpi_scan_bus_check(struct acpi_device *adev, void *not_used)
{
struct acpi_scan_handler *handler = adev->handler;
int error;
acpi_bus_get_status(adev);
if (!(adev->status.present || adev->status.functional)) {
acpi_scan_device_not_present(adev);
return 0;
}
if (handler && handler->hotplug.scan_dependent)
return handler->hotplug.scan_dependent(adev);
error = acpi_bus_scan(adev->handle);
if (error) {
dev_warn(&adev->dev, "Namespace scan failure\n");
return error;
}
return acpi_dev_for_each_child(adev, acpi_scan_bus_check, NULL);
}
static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
return acpi_scan_bus_check(adev, NULL);
case ACPI_NOTIFY_DEVICE_CHECK:
return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
case ACPI_OST_EC_OSPM_EJECT:
if (adev->handler && !adev->handler->hotplug.enabled) {
dev_info(&adev->dev, "Eject disabled\n");
return -EPERM;
}
acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
return acpi_scan_hot_remove(adev);
}
return -EINVAL;
}
void acpi_device_hotplug(struct acpi_device *adev, u32 src)
{
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error = -ENODEV;
lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
/*
* The device object's ACPI handle cannot become invalid as long as we
* are holding acpi_scan_lock, but it might have become invalid before
* that lock was acquired.
*/
if (adev->handle == INVALID_ACPI_HANDLE)
goto err_out;
if (adev->flags.is_dock_station) {
error = dock_notify(adev, src);
} else if (adev->flags.hotplug_notify) {
error = acpi_generic_hotplug_event(adev, src);
} else {
int (*notify)(struct acpi_device *, u32);
acpi_lock_hp_context();
notify = adev->hp ? adev->hp->notify : NULL;
acpi_unlock_hp_context();
/*
* There may be additional notify handlers for device objects
* without the .event() callback, so ignore them here.
*/
if (notify)
error = notify(adev, src);
else
goto out;
}
switch (error) {
case 0:
ost_code = ACPI_OST_SC_SUCCESS;
break;
case -EPERM:
ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
break;
case -EBUSY:
ost_code = ACPI_OST_SC_DEVICE_BUSY;
break;
default:
ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
break;
}
err_out:
acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
out:
acpi_put_acpi_dev(adev);
mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug();
}
static void acpi_free_power_resources_lists(struct acpi_device *device)
{
int i;
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
struct acpi_device_power_state *ps = &device->power.states[i];
acpi_power_resources_list_free(&ps->resources);
}
}
static void acpi_device_release(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_free_properties(acpi_dev);
acpi_free_pnp_ids(&acpi_dev->pnp);
acpi_free_power_resources_lists(acpi_dev);
kfree(acpi_dev);
}
static void acpi_device_del(struct acpi_device *device)
{
struct acpi_device_bus_id *acpi_device_bus_id;
mutex_lock(&acpi_device_lock);
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
acpi_device_hid(device))) {
ida_free(&acpi_device_bus_id->instance_ida,
device->pnp.instance_no);
if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
}
list_del(&device->wakeup_list);
mutex_unlock(&acpi_device_lock);
acpi_power_add_remove_device(device, false);
acpi_device_remove_files(device);
if (device->remove)
device->remove(device);
device_del(&device->dev);
}
static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
static LIST_HEAD(acpi_device_del_list);
static DEFINE_MUTEX(acpi_device_del_lock);
static void acpi_device_del_work_fn(struct work_struct *work_not_used)
{
for (;;) {
struct acpi_device *adev;
mutex_lock(&acpi_device_del_lock);
if (list_empty(&acpi_device_del_list)) {
mutex_unlock(&acpi_device_del_lock);
break;
}
adev = list_first_entry(&acpi_device_del_list,
struct acpi_device, del_list);
list_del(&adev->del_list);
mutex_unlock(&acpi_device_del_lock);
blocking_notifier_call_chain(&acpi_reconfig_chain,
ACPI_RECONFIG_DEVICE_REMOVE, adev);
acpi_device_del(adev);
/*
* Drop references to all power resources that might have been
* used by the device.
*/
acpi_power_transition(adev, ACPI_STATE_D3_COLD);
acpi_dev_put(adev);
}
}
/**
* acpi_scan_drop_device - Drop an ACPI device object.
* @handle: Handle of an ACPI namespace node, not used.
* @context: Address of the ACPI device object to drop.
*
* This is invoked by acpi_ns_delete_node() during the removal of the ACPI
* namespace node the device object pointed to by @context is attached to.
*
* The unregistration is carried out asynchronously to avoid running
 * acpi_device_del() under ACPICA's namespace mutex, and the list is used to
* ensure the correct ordering (the device objects must be unregistered in the
* same order in which the corresponding namespace nodes are deleted).
*/
static void acpi_scan_drop_device(acpi_handle handle, void *context)
{
static DECLARE_WORK(work, acpi_device_del_work_fn);
struct acpi_device *adev = context;
mutex_lock(&acpi_device_del_lock);
/*
* Use the ACPI hotplug workqueue which is ordered, so this work item
* won't run after any hotplug work items submitted subsequently. That
* prevents attempts to register device objects identical to those being
* deleted from happening concurrently (such attempts result from
* hotplug events handled via the ACPI hotplug workqueue). It also will
* run after all of the work items submitted previously, which helps
* those work items to ensure that they are not accessing stale device
* objects.
*/
if (list_empty(&acpi_device_del_list))
acpi_queue_hotplug_work(&work);
list_add_tail(&adev->del_list, &acpi_device_del_list);
/* Make acpi_ns_validate_handle() return NULL for this handle. */
adev->handle = INVALID_ACPI_HANDLE;
mutex_unlock(&acpi_device_del_lock);
}
static struct acpi_device *handle_to_device(acpi_handle handle,
void (*callback)(void *))
{
struct acpi_device *adev = NULL;
acpi_status status;
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)&adev, callback);
if (ACPI_FAILURE(status) || !adev) {
acpi_handle_debug(handle, "No context!\n");
return NULL;
}
return adev;
}
/**
* acpi_fetch_acpi_dev - Retrieve ACPI device object.
* @handle: ACPI handle associated with the requested ACPI device object.
*
* Return a pointer to the ACPI device object associated with @handle, if
* present, or NULL otherwise.
*/
struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle)
{
return handle_to_device(handle, NULL);
}
EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev);
static void get_acpi_device(void *dev)
{
acpi_dev_get(dev);
}
/**
* acpi_get_acpi_dev - Retrieve ACPI device object and reference count it.
* @handle: ACPI handle associated with the requested ACPI device object.
*
* Return a pointer to the ACPI device object associated with @handle and bump
* up that object's reference counter (under the ACPI Namespace lock), if
* present, or return NULL otherwise.
*
* The ACPI device object reference acquired by this function needs to be
* dropped via acpi_dev_put().
*/
struct acpi_device *acpi_get_acpi_dev(acpi_handle handle)
{
return handle_to_device(handle, get_acpi_device);
}
EXPORT_SYMBOL_GPL(acpi_get_acpi_dev);
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
struct acpi_device_bus_id *acpi_device_bus_id;
/* Find suitable bus_id and instance number in acpi_bus_id_list. */
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
return acpi_device_bus_id;
}
return NULL;
}
static int acpi_device_set_name(struct acpi_device *device,
struct acpi_device_bus_id *acpi_device_bus_id)
{
struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
int result;
result = ida_alloc(instance_ida, GFP_KERNEL);
if (result < 0)
return result;
device->pnp.instance_no = result;
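	/* Yields names such as "PNP0C0A:00" for the first device with a given HID. */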
dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
return 0;
}
int acpi_tie_acpi_dev(struct acpi_device *adev)
{
acpi_handle handle = adev->handle;
acpi_status status;
if (!handle)
return 0;
status = acpi_attach_data(handle, acpi_scan_drop_device, adev);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "Unable to attach device data\n");
return -ENODEV;
}
return 0;
}
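/*
 * Cache a CRC of the device's _PLD (physical location of device) output, so
 * that drivers can later compare pld_crc values to identify devices that
 * share a physical location (e.g. a camera sensor behind a specific port).
 */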
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
acpi_status status;
status = acpi_get_physical_device_location(adev->handle, &pld);
if (ACPI_FAILURE(status))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
ACPI_FREE(pld);
}
int acpi_device_add(struct acpi_device *device)
{
struct acpi_device_bus_id *acpi_device_bus_id;
int result;
/*
* Linkage
* -------
* Link this device to its parent and siblings.
*/
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
mutex_lock(&acpi_device_lock);
acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
if (acpi_device_bus_id) {
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result)
goto err_unlock;
} else {
acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
GFP_KERNEL);
if (!acpi_device_bus_id) {
result = -ENOMEM;
goto err_unlock;
}
acpi_device_bus_id->bus_id =
kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
if (!acpi_device_bus_id->bus_id) {
kfree(acpi_device_bus_id);
result = -ENOMEM;
goto err_unlock;
}
ida_init(&acpi_device_bus_id->instance_ida);
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
acpi_store_pld_crc(device);
mutex_unlock(&acpi_device_lock);
result = device_add(&device->dev);
if (result) {
dev_err(&device->dev, "Error registering device\n");
goto err;
}
result = acpi_device_setup_files(device);
if (result)
pr_err("Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
return 0;
err:
mutex_lock(&acpi_device_lock);
list_del(&device->wakeup_list);
err_unlock:
mutex_unlock(&acpi_device_lock);
acpi_detach_data(device->handle, acpi_scan_drop_device);
return result;
}
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
static bool acpi_info_matches_ids(struct acpi_device_info *info,
const char * const ids[])
{
struct acpi_pnp_device_id_list *cid_list = NULL;
int i, index;
if (!(info->valid & ACPI_VALID_HID))
return false;
index = match_string(ids, -1, info->hardware_id.string);
if (index >= 0)
return true;
if (info->valid & ACPI_VALID_CID)
cid_list = &info->compatible_id_list;
if (!cid_list)
return false;
for (i = 0; i < cid_list->count; i++) {
index = match_string(ids, -1, cid_list->ids[i].string);
if (index >= 0)
return true;
}
return false;
}
/* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
/* List of HIDs for which we honor deps of matching ACPI devs, when checking _DEP lists. */
static const char * const acpi_honor_dep_ids[] = {
"INT3472", /* Camera sensor PMIC / clk and regulator info */
"INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */
"INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
"INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
NULL
};
static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle)
{
struct acpi_device *adev;
/*
* Fixed hardware devices do not appear in the namespace and do not
* have handles, but we fabricate acpi_devices for them, so we have
* to deal with them specially.
*/
if (!handle)
return acpi_root;
do {
acpi_status status;
status = acpi_get_parent(handle, &handle);
if (ACPI_FAILURE(status)) {
if (status != AE_NULL_ENTRY)
return acpi_root;
return NULL;
}
adev = acpi_fetch_acpi_dev(handle);
} while (!adev);
return adev;
}
acpi_status
acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
{
acpi_status status;
acpi_handle tmp;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
status = acpi_get_handle(handle, "_EJD", &tmp);
if (ACPI_FAILURE(status))
return status;
status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
if (ACPI_SUCCESS(status)) {
obj = buffer.pointer;
status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
ejd);
kfree(buffer.pointer);
}
return status;
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
{
acpi_handle handle = dev->handle;
struct acpi_device_wakeup *wakeup = &dev->wakeup;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
int err = -ENODATA;
INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_info(handle, "_PRW evaluation failed: %s\n",
acpi_format_exception(status));
return err;
}
package = (union acpi_object *)buffer.pointer;
if (!package || package->package.count < 2)
goto out;
element = &(package->package.elements[0]);
if (!element)
goto out;
if (element->type == ACPI_TYPE_PACKAGE) {
if ((element->package.count < 2) ||
(element->package.elements[0].type !=
ACPI_TYPE_LOCAL_REFERENCE)
|| (element->package.elements[1].type != ACPI_TYPE_INTEGER))
goto out;
wakeup->gpe_device =
element->package.elements[0].reference.handle;
wakeup->gpe_number =
(u32) element->package.elements[1].integer.value;
} else if (element->type == ACPI_TYPE_INTEGER) {
wakeup->gpe_device = NULL;
wakeup->gpe_number = element->integer.value;
} else {
goto out;
}
element = &(package->package.elements[1]);
if (element->type != ACPI_TYPE_INTEGER)
goto out;
wakeup->sleep_state = element->integer.value;
err = acpi_extract_power_resources(package, 2, &wakeup->resources);
if (err)
goto out;
if (!list_empty(&wakeup->resources)) {
int sleep_state;
err = acpi_power_wakeup_list_init(&wakeup->resources,
&sleep_state);
if (err) {
acpi_handle_warn(handle, "Retrieving current states "
"of wakeup power resources failed\n");
acpi_power_resources_list_free(&wakeup->resources);
goto out;
}
if (sleep_state < wakeup->sleep_state) {
acpi_handle_warn(handle, "Overriding _PRW sleep state "
"(S%d) by S%d from power resources\n",
(int)wakeup->sleep_state, sleep_state);
wakeup->sleep_state = sleep_state;
}
}
out:
kfree(buffer.pointer);
return err;
}
/* Do not use a button for S5 wakeup */
#define ACPI_AVOID_WAKE_FROM_S5 BIT(0)
static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
static const struct acpi_device_id button_device_ids[] = {
{"PNP0C0C", 0}, /* Power button */
{"PNP0C0D", ACPI_AVOID_WAKE_FROM_S5}, /* Lid */
{"PNP0C0E", ACPI_AVOID_WAKE_FROM_S5}, /* Sleep button */
{"", 0},
};
struct acpi_device_wakeup *wakeup = &device->wakeup;
const struct acpi_device_id *match;
acpi_status status;
wakeup->flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
match = acpi_match_acpi_device(button_device_ids, device);
if (match) {
if ((match->driver_data & ACPI_AVOID_WAKE_FROM_S5) &&
wakeup->sleep_state == ACPI_STATE_S5)
wakeup->sleep_state = ACPI_STATE_S4;
acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
device_set_wakeup_capable(&device->dev, true);
return true;
}
status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
wakeup->gpe_number);
return ACPI_SUCCESS(status);
}
static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
int err;
/* Presence of _PRW indicates wake capable */
if (!acpi_has_method(device->handle, "_PRW"))
return;
err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
dev_err(&device->dev, "Unable to extract wakeup power resources");
return;
}
device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
device->wakeup.prepare_count = 0;
/*
* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
* The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
* So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object be used.
*/
err = acpi_device_sleep_wake(device, 0, 0, 0);
if (err)
pr_debug("error in _DSW or _PSW evaluation\n");
}
static void acpi_bus_init_power_state(struct acpi_device *device, int state)
{
struct acpi_device_power_state *ps = &device->power.states[state];
char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
INIT_LIST_HEAD(&ps->resources);
/* Evaluate "_PRx" to get referenced power resources */
status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
if (ACPI_SUCCESS(status)) {
union acpi_object *package = buffer.pointer;
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
&& package->package.count)
acpi_extract_power_resources(package, 0, &ps->resources);
ACPI_FREE(buffer.pointer);
}
/* Evaluate "_PSx" to see if we can do explicit sets */
pathname[2] = 'S';
if (acpi_has_method(device->handle, pathname))
ps->flags.explicit_set = 1;
/* State is valid if there are means to put the device into it. */
if (!list_empty(&ps->resources) || ps->flags.explicit_set)
ps->flags.valid = 1;
ps->power = -1; /* Unknown - driver assigned */
ps->latency = -1; /* Unknown - driver assigned */
}
static void acpi_bus_get_power_flags(struct acpi_device *device)
{
unsigned long long dsc = ACPI_STATE_D0;
u32 i;
/* Presence of _PS0|_PR0 indicates 'power manageable' */
if (!acpi_has_method(device->handle, "_PS0") &&
!acpi_has_method(device->handle, "_PR0"))
return;
device->flags.power_manageable = 1;
/*
* Power Management Flags
*/
if (acpi_has_method(device->handle, "_PSC"))
device->power.flags.explicit_get = 1;
if (acpi_has_method(device->handle, "_IRC"))
device->power.flags.inrush_current = 1;
if (acpi_has_method(device->handle, "_DSW"))
device->power.flags.dsw_present = 1;
acpi_evaluate_integer(device->handle, "_DSC", NULL, &dsc);
device->power.state_for_enumeration = dsc;
/*
* Enumerate supported power management states
*/
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
/* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
/*
* Use power resources only if the D0 list of them is populated, because
* some platforms may provide _PR3 only to indicate D3cold support and
* in those cases the power resources list returned by it may be bogus.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
device->power.flags.power_resources = 1;
/*
* D3cold is supported if the D3hot list of power resources is
* not empty.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}
static void acpi_bus_get_flags(struct acpi_device *device)
{
/* Presence of _STA indicates 'dynamic_status' */
if (acpi_has_method(device->handle, "_STA"))
device->flags.dynamic_status = 1;
/* Presence of _RMV indicates 'removable' */
if (acpi_has_method(device->handle, "_RMV"))
device->flags.removable = 1;
/* Presence of _EJD|_EJ0 indicates 'ejectable' */
if (acpi_has_method(device->handle, "_EJD") ||
acpi_has_method(device->handle, "_EJ0"))
device->flags.ejectable = 1;
}
static void acpi_device_get_busid(struct acpi_device *device)
{
char bus_id[5] = { '?', 0 };
struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
int i = 0;
/*
* Bus ID
* ------
* The device's Bus ID is simply the object name.
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
if (!acpi_dev_parent(device)) {
strcpy(device->pnp.bus_id, "ACPI");
return;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
strcpy(device->pnp.bus_id, "PWRF");
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
strcpy(device->pnp.bus_id, "SLPF");
break;
case ACPI_BUS_TYPE_ECDT_EC:
strcpy(device->pnp.bus_id, "ECDT");
break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
/* Clean up trailing underscores (if any) */
for (i = 3; i > 1; i--) {
if (bus_id[i] == '_')
bus_id[i] = '\0';
else
break;
}
strcpy(device->pnp.bus_id, bus_id);
break;
}
}
/*
* acpi_ata_match - see if an acpi object is an ATA device
*
* If an acpi object has one of the ACPI ATA methods defined,
* then we can safely call it an ATA device.
*/
bool acpi_ata_match(acpi_handle handle)
{
return acpi_has_method(handle, "_GTF") ||
acpi_has_method(handle, "_GTM") ||
acpi_has_method(handle, "_STM") ||
acpi_has_method(handle, "_SDD");
}
/*
* acpi_bay_match - see if an acpi object is an ejectable drive bay
*
* If an acpi object is ejectable and has one of the ACPI ATA methods defined,
* then we can safely call it an ejectable drive bay
*/
bool acpi_bay_match(acpi_handle handle)
{
acpi_handle phandle;
if (!acpi_has_method(handle, "_EJ0"))
return false;
if (acpi_ata_match(handle))
return true;
if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
return false;
return acpi_ata_match(phandle);
}
bool acpi_device_is_battery(struct acpi_device *adev)
{
struct acpi_hardware_id *hwid;
list_for_each_entry(hwid, &adev->pnp.ids, list)
if (!strcmp("PNP0C0A", hwid->id))
return true;
return false;
}
static bool is_ejectable_bay(struct acpi_device *adev)
{
acpi_handle handle = adev->handle;
if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
return true;
return acpi_bay_match(handle);
}
/*
* acpi_dock_match - see if an acpi object has a _DCK method
*/
bool acpi_dock_match(acpi_handle handle)
{
return acpi_has_method(handle, "_DCK");
}
static acpi_status
acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
void **return_value)
{
long *cap = context;
if (acpi_has_method(handle, "_BCM") &&
acpi_has_method(handle, "_BCL")) {
acpi_handle_debug(handle, "Found generic backlight support\n");
*cap |= ACPI_VIDEO_BACKLIGHT;
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
return 0;
}
/*
* Returns true if the ACPI object is a video device which can be
* handled by video.ko.
* The device will get a Linux-specific CID added in scan.c to
* identify the device as an ACPI graphics device.
* Be aware that the graphics device may not be physically present.
* Use acpi_video_get_capabilities() to detect general ACPI video
* capabilities of present cards.
*/
long acpi_is_video_device(acpi_handle handle)
{
long video_caps = 0;
/* Is this device able to support video switching ? */
if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
/* Is this device able to retrieve a video ROM ? */
if (acpi_has_method(handle, "_ROM"))
video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
/* Is this device able to configure which video head to be POSTed ? */
if (acpi_has_method(handle, "_VPO") &&
acpi_has_method(handle, "_GPD") &&
acpi_has_method(handle, "_SPD"))
video_caps |= ACPI_VIDEO_DEVICE_POSTING;
/* Only check for backlight functionality if one of the above hit. */
if (video_caps)
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
&video_caps, NULL);
return video_caps;
}
EXPORT_SYMBOL(acpi_is_video_device);
const char *acpi_device_hid(struct acpi_device *device)
{
struct acpi_hardware_id *hid;
if (list_empty(&device->pnp.ids))
return dummy_hid;
hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
return hid->id;
}
EXPORT_SYMBOL(acpi_device_hid);
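/*
 * Illustrative sketch (not part of the driver): acpi_device_hid() returns
 * the first ID on the pnp.ids list and never returns NULL, so callers can
 * compare it directly. The "XYZ0001" ID below is a made-up example.
 */
static bool example_is_xyz_device(struct acpi_device *adev)
{
	return !strcmp(acpi_device_hid(adev), "XYZ0001");
}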
static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
{
struct acpi_hardware_id *id;
id = kmalloc(sizeof(*id), GFP_KERNEL);
if (!id)
return;
id->id = kstrdup_const(dev_id, GFP_KERNEL);
if (!id->id) {
kfree(id);
return;
}
list_add_tail(&id->list, &pnp->ids);
pnp->type.hardware_id = 1;
}
/*
* Old IBM workstations have a DSDT bug wherein the SMBus object
* lacks the SMBUS01 HID and the methods do not have the necessary "_"
* prefix. Work around this.
*/
static bool acpi_ibm_smbus_match(acpi_handle handle)
{
char node_name[ACPI_PATH_SEGMENT_LENGTH];
struct acpi_buffer path = { sizeof(node_name), node_name };
if (!dmi_name_in_vendors("IBM"))
return false;
/* Look for SMBS object */
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
strcmp("SMBS", path.pointer))
return false;
/* Does it have the necessary (but misnamed) methods? */
if (acpi_has_method(handle, "SBI") &&
acpi_has_method(handle, "SBR") &&
acpi_has_method(handle, "SBW"))
return true;
return false;
}
static bool acpi_object_is_system_bus(acpi_handle handle)
{
acpi_handle tmp;
if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
tmp == handle)
return true;
if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
tmp == handle)
return true;
return false;
}
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
int device_type)
{
struct acpi_device_info *info = NULL;
struct acpi_pnp_device_id_list *cid_list;
int i;
switch (device_type) {
case ACPI_BUS_TYPE_DEVICE:
if (handle == ACPI_ROOT_OBJECT) {
acpi_add_id(pnp, ACPI_SYSTEM_HID);
break;
}
acpi_get_object_info(handle, &info);
if (!info) {
pr_err("%s: Error reading device info\n", __func__);
return;
}
if (info->valid & ACPI_VALID_HID) {
acpi_add_id(pnp, info->hardware_id.string);
pnp->type.platform_id = 1;
}
if (info->valid & ACPI_VALID_CID) {
cid_list = &info->compatible_id_list;
for (i = 0; i < cid_list->count; i++)
acpi_add_id(pnp, cid_list->ids[i].string);
}
if (info->valid & ACPI_VALID_ADR) {
pnp->bus_address = info->address;
pnp->type.bus_address = 1;
}
if (info->valid & ACPI_VALID_UID)
pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
if (info->valid & ACPI_VALID_CLS)
acpi_add_id(pnp, info->class_code.string);
kfree(info);
/*
* Some devices don't reliably have _HIDs & _CIDs, so add
* synthetic HIDs to make sure drivers can find them.
*/
if (acpi_is_video_device(handle)) {
acpi_add_id(pnp, ACPI_VIDEO_HID);
pnp->type.backlight = 1;
break;
}
if (acpi_bay_match(handle))
acpi_add_id(pnp, ACPI_BAY_HID);
else if (acpi_dock_match(handle))
acpi_add_id(pnp, ACPI_DOCK_HID);
else if (acpi_ibm_smbus_match(handle))
acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
else if (list_empty(&pnp->ids) &&
acpi_object_is_system_bus(handle)) {
/* \_SB, \_TZ, LNXSYBUS */
acpi_add_id(pnp, ACPI_BUS_HID);
strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
strcpy(pnp->device_class, ACPI_BUS_CLASS);
}
break;
case ACPI_BUS_TYPE_POWER:
acpi_add_id(pnp, ACPI_POWER_HID);
break;
case ACPI_BUS_TYPE_PROCESSOR:
acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
break;
case ACPI_BUS_TYPE_THERMAL:
acpi_add_id(pnp, ACPI_THERMAL_HID);
break;
case ACPI_BUS_TYPE_POWER_BUTTON:
acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
break;
case ACPI_BUS_TYPE_ECDT_EC:
acpi_add_id(pnp, ACPI_ECDT_HID);
break;
}
}
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
{
struct acpi_hardware_id *id, *tmp;
list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
kfree_const(id->id);
kfree(id);
}
kfree(pnp->unique_id);
}
/**
* acpi_dma_supported - Check DMA support for the specified device.
* @adev: The pointer to acpi device
*
* Return false if DMA is not supported. Otherwise, return true.
*/
bool acpi_dma_supported(const struct acpi_device *adev)
{
if (!adev)
return false;
if (adev->flags.cca_seen)
return true;
/*
* Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
* DMA on "Intel platforms". Presumably that includes all x86 and
* ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
*/
if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
return true;
return false;
}
/**
* acpi_get_dma_attr - Check the supported DMA attr for the specified device.
* @adev: The pointer to acpi device
*
* Return enum dev_dma_attr.
*/
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
{
if (!acpi_dma_supported(adev))
return DEV_DMA_NOT_SUPPORTED;
if (adev->flags.coherent_dma)
return DEV_DMA_COHERENT;
else
return DEV_DMA_NON_COHERENT;
}
/**
* acpi_dma_get_range() - Get device DMA parameters.
*
* @dev: device to configure
* @map: pointer to DMA ranges result
*
* Evaluate DMA regions and return a pointer to them on parsing success;
* the passed-in values are not updated on failure.
*
* Return 0 on success, < 0 on failure.
*/
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
struct acpi_device *adev;
LIST_HEAD(list);
struct resource_entry *rentry;
int ret;
struct device *dma_dev = dev;
struct bus_dma_region *r;
/*
* Walk the device tree chasing an ACPI companion with a _DMA
* object while we go. Stop if we find a device with an ACPI
* companion containing a _DMA method.
*/
do {
adev = ACPI_COMPANION(dma_dev);
if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
break;
dma_dev = dma_dev->parent;
} while (dma_dev);
if (!dma_dev)
return -ENODEV;
if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
return -EINVAL;
}
ret = acpi_dev_get_dma_resources(adev, &list);
if (ret > 0) {
r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL);
if (!r) {
ret = -ENOMEM;
goto out;
}
*map = r;
list_for_each_entry(rentry, &list, node) {
if (rentry->res->start >= rentry->res->end) {
kfree(*map);
*map = NULL;
ret = -EINVAL;
dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
goto out;
}
r->cpu_start = rentry->res->start;
r->dma_start = rentry->res->start - rentry->offset;
r->size = resource_size(rentry->res);
r->offset = rentry->offset;
r++;
}
}
out:
acpi_dev_free_resource_list(&list);
return ret >= 0 ? 0 : ret;
}
#ifdef CONFIG_IOMMU_API
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
int ret = iommu_fwspec_init(dev, fwnode, ops);
if (!ret)
ret = iommu_fwspec_add_ids(dev, &id, 1);
return ret;
}
static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
return fwspec ? fwspec->ops : NULL;
}
static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
const u32 *id_in)
{
int err;
const struct iommu_ops *ops;
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
if (ops)
return ops;
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
/*
* If we have reason to believe the IOMMU driver missed the initial
* iommu_probe_device() call for dev, replay it to get things in order.
*/
if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) {
return ERR_PTR(err);
} else if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
return NULL;
}
return acpi_iommu_fwspec_ops(dev);
}
#else /* !CONFIG_IOMMU_API */
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
return -ENODEV;
}
static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
const u32 *id_in)
{
return NULL;
}
#endif /* !CONFIG_IOMMU_API */
/**
* acpi_dma_configure_id - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
* @input_id: input device id const value pointer
*/
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id)
{
const struct iommu_ops *iommu;
if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops);
return 0;
}
acpi_arch_dma_setup(dev);
iommu = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
arch_setup_dma_ops(dev, 0, U64_MAX,
iommu, attr == DEV_DMA_COHERENT);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_configure_id);
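/*
 * Illustrative sketch (not part of the driver): bus code typically derives
 * the DMA attributes from the ACPI companion and then applies them; a NULL
 * input_id means there is no bus-specific ID. The helper name below is a
 * made-up example.
 */
static int example_setup_dma(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return -ENODEV;

	return acpi_dma_configure_id(dev, acpi_get_dma_attr(adev), NULL);
}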
static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
acpi_status status;
struct acpi_device *parent = acpi_dev_parent(adev);
if (parent && parent->flags.cca_seen) {
/*
* From ACPI spec, OSPM will ignore _CCA if an ancestor
* already saw one.
*/
adev->flags.cca_seen = 1;
cca = parent->flags.coherent_dma;
} else {
status = acpi_evaluate_integer(adev->handle, "_CCA",
NULL, &cca);
if (ACPI_SUCCESS(status))
adev->flags.cca_seen = 1;
else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
/*
* If architecture does not specify that _CCA is
* required for DMA-able devices (e.g. x86),
* we default to _CCA=1.
*/
cca = 1;
else
acpi_handle_debug(adev->handle,
"ACPI device is missing _CCA.\n");
}
adev->flags.coherent_dma = cca;
}
static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
{
bool *is_serial_bus_slave_p = data;
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
return 1;
*is_serial_bus_slave_p = true;
/* no need to do more checking */
return -1;
}
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
struct acpi_device *parent = acpi_dev_parent(device);
static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
};
return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
}
static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
static const struct acpi_device_id ignore_serial_bus_ids[] = {
/*
* These devices have multiple SerialBus resources and a client
* device must be instantiated for each of them, each with
* its own device id.
* Normally we only instantiate one client device for the first
* resource, using the ACPI HID as id. These special cases are handled
* by the drivers/platform/x86/serial-multi-instantiate.c driver, which
* knows which client device id to use for each resource.
*/
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
{"CSC3556", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
/*
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
*/
{"MSHW0028", },
/*
* HIDs of devices with a UartSerialBusV2 resource for which userspace
* expects a regular tty cdev to be created (instead of the in-kernel
* serdev) and which have a kernel driver that expects a platform_dev,
* such as the rfkill-gpio driver.
*/
{"BCM4752", },
{"LNV4752", },
{}
};
if (acpi_is_indirect_io_slave(device))
return true;
/* Macs use device properties in lieu of _CRS resources */
if (x86_apple_machine &&
(fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
fwnode_property_present(&device->fwnode, "i2cAddress") ||
fwnode_property_present(&device->fwnode, "baud")))
return true;
if (!acpi_match_device_ids(device, ignore_serial_bus_ids))
return false;
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list,
acpi_check_serial_bus_slave,
&is_serial_bus_slave);
acpi_dev_free_resource_list(&resource_list);
return is_serial_bus_slave;
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, void (*release)(struct device *))
{
struct acpi_device *parent = acpi_find_parent_acpi_dev(handle);
INIT_LIST_HEAD(&device->pnp.ids);
device->device_type = type;
device->handle = handle;
device->dev.parent = parent ? &parent->dev : NULL;
device->dev.release = release;
device->dev.bus = &acpi_bus_type;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
acpi_init_properties(device);
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
device->flags.enumeration_by_parent =
acpi_device_enumeration_by_parent(device);
acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
}
static void acpi_scan_dep_init(struct acpi_device *adev)
{
struct acpi_dep_data *dep;
list_for_each_entry(dep, &acpi_dep_list, node) {
if (dep->consumer == adev->handle) {
if (dep->honor_dep)
adev->flags.honor_deps = 1;
adev->dep_unmet++;
}
}
}
void acpi_device_add_finalize(struct acpi_device *device)
{
dev_set_uevent_suppress(&device->dev, false);
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}
static void acpi_scan_init_status(struct acpi_device *adev)
{
if (acpi_bus_get_status(adev))
acpi_set_device_status(adev, 0);
}
static int acpi_add_single_object(struct acpi_device **child,
acpi_handle handle, int type, bool dep_init)
{
struct acpi_device *device;
bool release_dep_lock = false;
int result;
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
if (!device)
return -ENOMEM;
acpi_init_device_object(device, handle, type, acpi_device_release);
/*
* Getting the status is delayed till here so that we can call
* acpi_bus_get_status() and use its quirk handling. Note that
* this must be done before the get power-/wakeup_dev-flags calls.
*/
if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
if (dep_init) {
mutex_lock(&acpi_dep_list_lock);
/*
* Hold the lock until the acpi_tie_acpi_dev() call
* below to prevent concurrent acpi_scan_clear_dep()
* from deleting a dependency list entry without
* updating dep_unmet for the device.
*/
release_dep_lock = true;
acpi_scan_dep_init(device);
}
acpi_scan_init_status(device);
}
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
result = acpi_tie_acpi_dev(device);
if (release_dep_lock)
mutex_unlock(&acpi_dep_list_lock);
if (!result)
result = acpi_device_add(device);
if (result) {
acpi_device_release(&device->dev);
return result;
}
acpi_power_add_remove_device(device, true);
acpi_device_add_finalize(device);
acpi_handle_debug(handle, "Added as %s, parent %s\n",
dev_name(&device->dev), device->dev.parent ?
dev_name(device->dev.parent) : "(null)");
*child = device;
return 0;
}
static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
void *context)
{
struct resource *res = context;
if (acpi_dev_resource_memory(ares, res))
return AE_CTRL_TERMINATE;
return AE_OK;
}
static bool acpi_device_should_be_hidden(acpi_handle handle)
{
acpi_status status;
struct resource res;
/* Check if it should ignore the UART device */
if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
return false;
/*
* The UART device described in the SPCR table is assumed to have only one
* memory resource present. So we only look for the first one here.
*/
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
acpi_get_resource_memory, &res);
if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
return false;
acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
&res.start);
return true;
}
bool acpi_device_is_present(const struct acpi_device *adev)
{
return adev->status.present || adev->status.functional;
}
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
const char *idstr,
const struct acpi_device_id **matchid)
{
const struct acpi_device_id *devid;
if (handler->match)
return handler->match(idstr, matchid);
for (devid = handler->ids; devid->id[0]; devid++)
if (!strcmp((char *)devid->id, idstr)) {
if (matchid)
*matchid = devid;
return true;
}
return false;
}
static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
const struct acpi_device_id **matchid)
{
struct acpi_scan_handler *handler;
list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
if (acpi_scan_handler_matching(handler, idstr, matchid))
return handler;
return NULL;
}
void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
{
if (!!hotplug->enabled == !!val)
return;
mutex_lock(&acpi_scan_lock);
hotplug->enabled = val;
mutex_unlock(&acpi_scan_lock);
}
static void acpi_scan_init_hotplug(struct acpi_device *adev)
{
struct acpi_hardware_id *hwid;
if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
acpi_dock_add(adev);
return;
}
list_for_each_entry(hwid, &adev->pnp.ids, list) {
struct acpi_scan_handler *handler;
handler = acpi_scan_match_handler(hwid->id, NULL);
if (handler) {
adev->flags.hotplug_notify = true;
break;
}
}
}
static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
{
struct acpi_handle_list dep_devices;
acpi_status status;
u32 count;
int i;
/*
* Check for _HID here to avoid deferring the enumeration of:
* 1. PCI devices.
* 2. ACPI nodes describing USB ports.
* Still, checking for _HID catches more than just these cases ...
*/
if (!check_dep || !acpi_has_method(handle, "_DEP") ||
!acpi_has_method(handle, "_HID"))
return 0;
status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
return 0;
}
for (count = 0, i = 0; i < dep_devices.count; i++) {
struct acpi_device_info *info;
struct acpi_dep_data *dep;
bool skip, honor_dep;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "Error reading _DEP device info\n");
continue;
}
skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
kfree(info);
if (skip)
continue;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
continue;
count++;
dep->supplier = dep_devices.handles[i];
dep->consumer = handle;
dep->honor_dep = honor_dep;
mutex_lock(&acpi_dep_list_lock);
list_add_tail(&dep->node, &acpi_dep_list);
mutex_unlock(&acpi_dep_list_lock);
}
return count;
}
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
struct acpi_device **adev_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
acpi_object_type acpi_type;
int type;
if (device)
goto out;
if (ACPI_FAILURE(acpi_get_type(handle, &acpi_type)))
return AE_OK;
switch (acpi_type) {
case ACPI_TYPE_DEVICE:
if (acpi_device_should_be_hidden(handle))
return AE_OK;
/* Bail out if there are dependencies. */
if (acpi_scan_check_dep(handle, check_dep) > 0)
return AE_CTRL_DEPTH;
fallthrough;
case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
type = ACPI_BUS_TYPE_DEVICE;
break;
case ACPI_TYPE_PROCESSOR:
type = ACPI_BUS_TYPE_PROCESSOR;
break;
case ACPI_TYPE_THERMAL:
type = ACPI_BUS_TYPE_THERMAL;
break;
case ACPI_TYPE_POWER:
acpi_add_power_resource(handle);
fallthrough;
default:
return AE_OK;
}
/*
* If check_dep is true at this point, the device has no dependencies,
* or the creation of the device object would have been postponed above.
*/
acpi_add_single_object(&device, handle, type, !check_dep);
if (!device)
return AE_CTRL_DEPTH;
acpi_scan_init_hotplug(device);
out:
if (!*adev_p)
*adev_p = device;
return AE_OK;
}
static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p);
}
static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p);
}
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
* Do not enumerate devices with enumeration_by_parent flag set as
* they will be enumerated by their respective parents.
*/
if (!device->flags.enumeration_by_parent) {
acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
blocking_notifier_call_chain(&acpi_reconfig_chain,
ACPI_RECONFIG_DEVICE_ADD, device);
}
}
static const struct acpi_device_id generic_device_ids[] = {
{ACPI_DT_NAMESPACE_HID, },
{"", },
};
static int acpi_generic_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
/*
* Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test
* below can be unconditional.
*/
if (adev->data.of_compatible)
acpi_default_enumeration(adev);
return 1;
}
static struct acpi_scan_handler generic_device_handler = {
.ids = generic_device_ids,
.attach = acpi_generic_device_attach,
};
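/*
 * Illustrative sketch (not part of the driver): a scan handler declares the
 * _HIDs it binds to and is registered with acpi_scan_add_handler(), as done
 * for generic_device_handler in acpi_scan_init() below. The "XYZ0002" ID
 * and the attach body are made-up examples.
 */
static const struct acpi_device_id example_ids[] = {
	{"XYZ0002", },
	{"", },
};

static int example_attach(struct acpi_device *adev,
			  const struct acpi_device_id *id)
{
	/* Return > 0 to claim the device, 0 to pass, < 0 on error. */
	return 1;
}

static struct acpi_scan_handler example_handler = {
	.ids = example_ids,
	.attach = example_attach,
};
/* Registered during boot with: acpi_scan_add_handler(&example_handler); */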
static int acpi_scan_attach_handler(struct acpi_device *device)
{
struct acpi_hardware_id *hwid;
int ret = 0;
list_for_each_entry(hwid, &device->pnp.ids, list) {
const struct acpi_device_id *devid;
struct acpi_scan_handler *handler;
handler = acpi_scan_match_handler(hwid->id, &devid);
if (handler) {
if (!handler->attach) {
device->pnp.type.platform_id = 0;
continue;
}
device->handler = handler;
ret = handler->attach(device, devid);
if (ret > 0)
break;
device->handler = NULL;
if (ret < 0)
break;
}
}
return ret;
}
static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
{
bool skip = !first_pass && device->flags.visited;
acpi_handle ejd;
int ret;
if (skip)
goto ok;
if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
register_dock_dependent_device(device, ejd);
acpi_bus_get_status(device);
/* Skip devices that are not ready for enumeration (e.g. not present) */
if (!acpi_dev_ready_for_enumeration(device)) {
device->flags.initialized = false;
acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
return 0;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
device->flags.power_manageable =
device->power.states[ACPI_STATE_D0].flags.valid;
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
device->flags.initialized = true;
} else if (device->flags.visited) {
goto ok;
}
ret = acpi_scan_attach_handler(device);
if (ret < 0)
return 0;
device->flags.match_driver = true;
if (ret > 0 && !device->flags.enumeration_by_parent) {
acpi_device_set_enumerated(device);
goto ok;
}
ret = device_attach(&device->dev);
if (ret < 0)
return 0;
if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
acpi_default_enumeration(device);
else
acpi_device_set_enumerated(device);
ok:
acpi_dev_for_each_child(device, acpi_bus_attach, first_pass);
if (!skip && device->handler && device->handler->hotplug.notify_online)
device->handler->hotplug.notify_online(device);
return 0;
}
static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{
struct acpi_device **adev_p = data;
struct acpi_device *adev = *adev_p;
/*
* If we're passed a 'previous' consumer device then we need to skip
* any consumers until we meet the previous one, and then NULL @data
* so the next one can be returned.
*/
if (adev) {
if (dep->consumer == adev->handle)
*adev_p = NULL;
return 0;
}
adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
*(struct acpi_device **)data = adev;
return 1;
}
/* Continue parsing if the device object is not present. */
return 0;
}
struct acpi_scan_clear_dep_work {
struct work_struct work;
struct acpi_device *adev;
};
static void acpi_scan_clear_dep_fn(struct work_struct *work)
{
struct acpi_scan_clear_dep_work *cdw;
cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
acpi_scan_lock_acquire();
acpi_bus_attach(cdw->adev, (void *)true);
acpi_scan_lock_release();
acpi_dev_put(cdw->adev);
kfree(cdw);
}
static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
{
struct acpi_scan_clear_dep_work *cdw;
if (adev->dep_unmet)
return false;
cdw = kmalloc(sizeof(*cdw), GFP_KERNEL);
if (!cdw)
return false;
cdw->adev = adev;
INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn);
/*
* Since the work function may block on the lock until the entire
* initial enumeration of devices is complete, put it into the unbound
* workqueue.
*/
queue_work(system_unbound_wq, &cdw->work);
return true;
}
static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep)
{
list_del(&dep->node);
kfree(dep);
}
static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{
struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
adev->dep_unmet--;
if (!acpi_scan_clear_dep_queue(adev))
acpi_dev_put(adev);
}
if (dep->free_when_met)
acpi_scan_delete_dep_data(dep);
else
dep->met = true;
return 0;
}
/**
* acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list
* @handle: The ACPI handle of the supplier device
* @callback: Pointer to the callback function to apply
* @data: Pointer to some data to pass to the callback
*
* The return value of the callback determines this function's behaviour. If 0
* is returned we continue to iterate over acpi_dep_list. If a positive value
* is returned then the loop is broken but this function returns 0. If a
* negative value is returned by the callback then the loop is broken and that
* value is returned as the final error.
*/
static int acpi_walk_dep_device_list(acpi_handle handle,
int (*callback)(struct acpi_dep_data *, void *),
void *data)
{
struct acpi_dep_data *dep, *tmp;
int ret = 0;
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
ret = callback(dep, data);
if (ret)
break;
}
}
mutex_unlock(&acpi_dep_list_lock);
return ret > 0 ? 0 : ret;
}
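/*
 * Illustrative sketch (not part of the driver): a callback that makes
 * acpi_walk_dep_device_list() stop at the first entry whose consumer
 * matches the handle passed via @data (a positive return breaks the loop
 * and the walker then returns 0).
 */
static int example_find_consumer(struct acpi_dep_data *dep, void *data)
{
	return dep->consumer == data;
}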
/**
* acpi_dev_clear_dependencies - Inform consumers that the device is now active
* @supplier: Pointer to the supplier &struct acpi_device
*
* Clear dependencies on the given device.
*/
void acpi_dev_clear_dependencies(struct acpi_device *supplier)
{
acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL);
}
EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
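/*
 * Illustrative sketch (not part of the driver): a supplier driver (for
 * example a PMIC listed in other devices' _DEP) typically calls
 * acpi_dev_clear_dependencies() at the end of a successful probe, so that
 * consumers deferred by acpi_scan_check_dep() get enumerated.
 */
static int example_supplier_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	/* ... bring the supplier hardware up first ... */

	if (adev)
		acpi_dev_clear_dependencies(adev);
	return 0;
}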
/**
* acpi_dev_ready_for_enumeration - Check if the ACPI device is ready for enumeration
* @device: Pointer to the &struct acpi_device to check
*
* Check if the device is present and has no unmet dependencies.
*
* Return true if the device is ready for enumeration. Otherwise, return false.
*/
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
{
if (device->flags.honor_deps && device->dep_unmet)
return false;
return acpi_device_is_present(device);
}
EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
/**
* acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier
* @supplier: Pointer to the dependee device
* @start: Pointer to the current dependent device
*
* Returns the next &struct acpi_device which declares itself dependent on
* @supplier via the _DEP buffer, parsed from the acpi_dep_list.
*
* If the returned adev is not passed as @start to this function, the caller is
* responsible for putting the reference to adev when it is no longer needed.
*/
struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
struct acpi_device *start)
{
struct acpi_device *adev = start;
acpi_walk_dep_device_list(supplier->handle,
acpi_dev_get_next_consumer_dev_cb, &adev);
acpi_dev_put(start);
if (adev == start)
return NULL;
return adev;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
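/*
 * Illustrative sketch (not part of the driver): iterating over every
 * consumer of a supplier. Passing the previous device back in makes the
 * helper drop its reference, so the loop does not leak references.
 */
static void example_walk_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer = NULL;

	while ((consumer = acpi_dev_get_next_consumer_dev(supplier, consumer)))
		dev_info(&consumer->dev, "depends on %s\n",
			 dev_name(&supplier->dev));
}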
static void acpi_scan_postponed_branch(acpi_handle handle)
{
struct acpi_device *adev = NULL;
if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev)))
return;
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
acpi_bus_attach(adev, NULL);
}
static void acpi_scan_postponed(void)
{
struct acpi_dep_data *dep, *tmp;
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
acpi_handle handle = dep->consumer;
/*
* In case there are multiple acpi_dep_list entries with the
* same consumer, skip the current entry if the consumer device
* object corresponding to it is present already.
*/
if (!acpi_fetch_acpi_dev(handle)) {
/*
* Even though the lock is released here, tmp is
* guaranteed to be valid, because none of the list
* entries following dep is marked as "free when met"
* and so they cannot be deleted.
*/
mutex_unlock(&acpi_dep_list_lock);
acpi_scan_postponed_branch(handle);
mutex_lock(&acpi_dep_list_lock);
}
if (dep->met)
acpi_scan_delete_dep_data(dep);
else
dep->free_when_met = true;
}
mutex_unlock(&acpi_dep_list_lock);
}
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
* @handle: Root of the namespace scope to scan.
*
* Scan a given ACPI tree (probably recently hot-plugged) and create and add
* found devices.
*
* If no devices were found, -ENODEV is returned, but that does not mean a
* real error occurred. There just happen to be no suitable ACPI objects in
* the table trunk from which the kernel could create a device and add an
* appropriate driver.
*
* Must be called under acpi_scan_lock.
*/
int acpi_bus_scan(acpi_handle handle)
{
struct acpi_device *device = NULL;
/* Pass 1: Avoid enumerating devices with missing dependencies. */
if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add_1, NULL, NULL,
(void **)&device);
if (!device)
return -ENODEV;
acpi_bus_attach(device, (void *)true);
/* Pass 2: Enumerate all of the remaining devices. */
acpi_scan_postponed();
return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
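/*
 * Usage note: callers outside the initial boot path take acpi_scan_lock
 * around acpi_bus_scan(), as acpi_table_events_fn() below does:
 *
 *	acpi_scan_lock_acquire();
 *	acpi_bus_scan(ACPI_ROOT_OBJECT);
 *	acpi_scan_lock_release();
 */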
static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
{
struct acpi_scan_handler *handler = adev->handler;
acpi_dev_for_each_child_reverse(adev, acpi_bus_trim_one, NULL);
adev->flags.match_driver = false;
if (handler) {
if (handler->detach)
handler->detach(adev);
adev->handler = NULL;
} else {
device_release_driver(&adev->dev);
}
/*
* Most likely, the device is going away, so put it into D3cold before
* that.
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
acpi_device_clear_enumerated(adev);
return 0;
}
/**
* acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
* @adev: Root of the ACPI namespace scope to walk.
*
* Must be called under acpi_scan_lock.
*/
void acpi_bus_trim(struct acpi_device *adev)
{
acpi_bus_trim_one(adev, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
int acpi_bus_register_early_device(int type)
{
struct acpi_device *device = NULL;
int result;
result = acpi_add_single_object(&device, NULL, type, false);
if (result)
return result;
device->flags.match_driver = true;
return device_attach(&device->dev);
}
EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
static void acpi_bus_scan_fixed(void)
{
if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
struct acpi_device *adev = NULL;
acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON,
false);
if (adev) {
adev->flags.match_driver = true;
if (device_attach(&adev->dev) >= 0)
device_init_wakeup(&adev->dev, true);
else
dev_dbg(&adev->dev, "No driver\n");
}
}
if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
struct acpi_device *adev = NULL;
acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON,
false);
if (adev) {
adev->flags.match_driver = true;
if (device_attach(&adev->dev) < 0)
dev_dbg(&adev->dev, "No driver\n");
}
}
}
static void __init acpi_get_spcr_uart_addr(void)
{
acpi_status status;
struct acpi_table_spcr *spcr_ptr;
status = acpi_get_table(ACPI_SIG_SPCR, 0,
(struct acpi_table_header **)&spcr_ptr);
if (ACPI_FAILURE(status)) {
pr_warn("STAO table present, but SPCR is missing\n");
return;
}
spcr_uart_addr = spcr_ptr->serial_port.address;
acpi_put_table((struct acpi_table_header *)spcr_ptr);
}
static bool acpi_scan_initialized;
void __init acpi_scan_init(void)
{
acpi_status status;
struct acpi_table_stao *stao_ptr;
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
acpi_platform_init();
acpi_lpss_init();
acpi_apd_init();
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
/*
* If there is a STAO table, check whether it requires the UART device in
* the SPCR table to be ignored.
*/
status = acpi_get_table(ACPI_SIG_STAO, 0,
(struct acpi_table_header **)&stao_ptr);
if (ACPI_SUCCESS(status)) {
if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
pr_info("STAO Name List not yet supported.\n");
if (stao_ptr->ignore_uart)
acpi_get_spcr_uart_addr();
acpi_put_table((struct acpi_table_header *)stao_ptr);
}
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
/*
* Although we call __add_memory() that is documented to require the
* device_hotplug_lock, it is not necessary here because this is an
* early code when userspace or any other code path cannot trigger
* hotplug/hotunplug operations.
*/
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
*/
if (acpi_bus_scan(ACPI_ROOT_OBJECT))
goto unlock;
acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
if (!acpi_root)
goto unlock;
/* Fixed feature devices do not exist on HW-reduced platform */
if (!acpi_gbl_reduced_hardware)
acpi_bus_scan_fixed();
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
unlock:
mutex_unlock(&acpi_scan_lock);
}
static struct acpi_probe_entry *ape;
static int acpi_probe_count;
static DEFINE_MUTEX(acpi_probe_mutex);
static int __init acpi_match_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
if (!ape->probe_subtbl(header, end))
acpi_probe_count++;
return 0;
}
int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
{
int count = 0;
if (acpi_disabled)
return 0;
mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
count += acpi_probe_count;
} else {
int res;
res = acpi_table_parse(ape->id, ape->probe_table);
if (!res)
count++;
}
}
mutex_unlock(&acpi_probe_mutex);
return count;
}
static void acpi_table_events_fn(struct work_struct *work)
{
acpi_scan_lock_acquire();
acpi_bus_scan(ACPI_ROOT_OBJECT);
acpi_scan_lock_release();
kfree(work);
}
void acpi_scan_table_notify(void)
{
struct work_struct *work;
if (!acpi_scan_initialized)
return;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return;
INIT_WORK(work, acpi_table_events_fn);
schedule_work(work);
}
int acpi_reconfig_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
}
EXPORT_SYMBOL(acpi_reconfig_notifier_register);
int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
}
EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
| linux-master | drivers/acpi/scan.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI Platform Firmware Runtime Update Device driver
*
* Copyright (C) 2021 Intel Corporation
* Author: Chen Yu <[email protected]>
*
* The pfr_update driver is used for Platform Firmware Runtime
* Update, which includes code injection and driver update.
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <uapi/linux/pfrut.h>
#define PFRU_FUNC_STANDARD_QUERY 0
#define PFRU_FUNC_QUERY_UPDATE_CAP 1
#define PFRU_FUNC_QUERY_BUF 2
#define PFRU_FUNC_START 3
#define PFRU_CODE_INJECT_TYPE 1
#define PFRU_DRIVER_UPDATE_TYPE 2
#define PFRU_REVID_1 1
#define PFRU_REVID_2 2
#define PFRU_DEFAULT_REV_ID PFRU_REVID_1
enum cap_index {
CAP_STATUS_IDX = 0,
CAP_UPDATE_IDX = 1,
CAP_CODE_TYPE_IDX = 2,
CAP_FW_VER_IDX = 3,
CAP_CODE_RT_VER_IDX = 4,
CAP_DRV_TYPE_IDX = 5,
CAP_DRV_RT_VER_IDX = 6,
CAP_DRV_SVN_IDX = 7,
CAP_PLAT_ID_IDX = 8,
CAP_OEM_ID_IDX = 9,
CAP_OEM_INFO_IDX = 10,
CAP_NR_IDX
};
enum buf_index {
BUF_STATUS_IDX = 0,
BUF_EXT_STATUS_IDX = 1,
BUF_ADDR_LOW_IDX = 2,
BUF_ADDR_HI_IDX = 3,
BUF_SIZE_IDX = 4,
BUF_NR_IDX
};
enum update_index {
UPDATE_STATUS_IDX = 0,
UPDATE_EXT_STATUS_IDX = 1,
UPDATE_AUTH_TIME_LOW_IDX = 2,
UPDATE_AUTH_TIME_HI_IDX = 3,
UPDATE_EXEC_TIME_LOW_IDX = 4,
UPDATE_EXEC_TIME_HI_IDX = 5,
UPDATE_NR_IDX
};
enum pfru_start_action {
START_STAGE = 0,
START_ACTIVATE = 1,
START_STAGE_ACTIVATE = 2,
};
struct pfru_device {
u32 rev_id, index;
struct device *parent_dev;
struct miscdevice miscdev;
};
static DEFINE_IDA(pfru_ida);
/*
* Manual reference:
* https://uefi.org/sites/default/files/resources/Intel_MM_OS_Interface_Spec_Rev100.pdf
*
* pfru_guid is the parameter for _DSM method
*/
static const guid_t pfru_guid =
GUID_INIT(0xECF9533B, 0x4A3C, 0x4E89, 0x93, 0x9E, 0xC7, 0x71,
0x12, 0x60, 0x1C, 0x6D);
/* pfru_code_inj_guid is the UUID to identify code injection EFI capsule file */
static const guid_t pfru_code_inj_guid =
GUID_INIT(0xB2F84B79, 0x7B6E, 0x4E45, 0x88, 0x5F, 0x3F, 0xB9,
0xBB, 0x18, 0x54, 0x02);
/* pfru_drv_update_guid is the UUID to identify driver update EFI capsule file */
static const guid_t pfru_drv_update_guid =
GUID_INIT(0x4569DD8C, 0x75F1, 0x429A, 0xA3, 0xD6, 0x24, 0xDE,
0x80, 0x97, 0xA0, 0xDF);
static inline int pfru_valid_revid(u32 id)
{
return id == PFRU_REVID_1 || id == PFRU_REVID_2;
}
static inline struct pfru_device *to_pfru_dev(struct file *file)
{
return container_of(file->private_data, struct pfru_device, miscdev);
}
static int query_capability(struct pfru_update_cap_info *cap_hdr,
struct pfru_device *pfru_dev)
{
acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
union acpi_object *out_obj;
int ret = -EINVAL;
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id,
PFRU_FUNC_QUERY_UPDATE_CAP,
NULL, ACPI_TYPE_PACKAGE);
if (!out_obj)
return ret;
if (out_obj->package.count < CAP_NR_IDX ||
out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_UPDATE_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_CODE_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_FW_VER_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_CODE_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_DRV_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_DRV_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
goto free_acpi_buffer;
cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
if (cap_hdr->status != DSM_SUCCEED) {
ret = -EBUSY;
dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
goto free_acpi_buffer;
}
cap_hdr->update_cap = out_obj->package.elements[CAP_UPDATE_IDX].integer.value;
memcpy(&cap_hdr->code_type,
out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.pointer,
out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.length);
cap_hdr->fw_version =
out_obj->package.elements[CAP_FW_VER_IDX].integer.value;
cap_hdr->code_rt_version =
out_obj->package.elements[CAP_CODE_RT_VER_IDX].integer.value;
memcpy(&cap_hdr->drv_type,
out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.pointer,
out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.length);
cap_hdr->drv_rt_version =
out_obj->package.elements[CAP_DRV_RT_VER_IDX].integer.value;
cap_hdr->drv_svn =
out_obj->package.elements[CAP_DRV_SVN_IDX].integer.value;
memcpy(&cap_hdr->platform_id,
out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.pointer,
out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.length);
memcpy(&cap_hdr->oem_id,
out_obj->package.elements[CAP_OEM_ID_IDX].buffer.pointer,
out_obj->package.elements[CAP_OEM_ID_IDX].buffer.length);
cap_hdr->oem_info_len =
out_obj->package.elements[CAP_OEM_INFO_IDX].buffer.length;
ret = 0;
free_acpi_buffer:
ACPI_FREE(out_obj);
return ret;
}
static int query_buffer(struct pfru_com_buf_info *info,
struct pfru_device *pfru_dev)
{
acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
union acpi_object *out_obj;
int ret = -EINVAL;
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
NULL, ACPI_TYPE_PACKAGE);
if (!out_obj)
return ret;
if (out_obj->package.count < BUF_NR_IDX ||
out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
goto free_acpi_buffer;
info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
info->ext_status =
out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
if (info->status != DSM_SUCCEED) {
ret = -EBUSY;
dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
goto free_acpi_buffer;
}
info->addr_lo =
out_obj->package.elements[BUF_ADDR_LOW_IDX].integer.value;
info->addr_hi =
out_obj->package.elements[BUF_ADDR_HI_IDX].integer.value;
info->buf_size = out_obj->package.elements[BUF_SIZE_IDX].integer.value;
ret = 0;
free_acpi_buffer:
ACPI_FREE(out_obj);
return ret;
}
static int get_image_type(const struct efi_manage_capsule_image_header *img_hdr,
struct pfru_device *pfru_dev)
{
const efi_guid_t *image_type_id = &img_hdr->image_type_id;
/* check whether this is a code injection or driver update */
if (guid_equal(image_type_id, &pfru_code_inj_guid))
return PFRU_CODE_INJECT_TYPE;
if (guid_equal(image_type_id, &pfru_drv_update_guid))
return PFRU_DRIVER_UPDATE_TYPE;
return -EINVAL;
}
static int adjust_efi_size(const struct efi_manage_capsule_image_header *img_hdr,
int size)
{
/*
* The (u64 hw_ins) field was introduced in UEFI spec version 2,
* and (u64 capsule_support) was introduced in version 3.
* The size needs to be adjusted accordingly. That is to
* say, version 1 should subtract the size of hw_ins+capsule_support,
* and version 2 should subtract the size of capsule_support.
*/
size += sizeof(struct efi_manage_capsule_image_header);
switch (img_hdr->ver) {
case 1:
return size - 2 * sizeof(u64);
case 2:
return size - sizeof(u64);
default:
/* only support version 1 and 2 */
return -EINVAL;
}
}
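/*
* Illustrative arithmetic (the concrete struct size is an assumption and
* depends on the UEFI headers in use): if the image header occupies 64
* bytes in its full version 3 layout, a version 1 header on the wire is
* 64 - 2 * 8 = 48 bytes and a version 2 header is 64 - 8 = 56 bytes, so
* adjust_efi_size() advances the running offset by 48 or 56 accordingly.
*/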
static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
struct pfru_device *pfru_dev)
{
struct pfru_payload_hdr *payload_hdr;
const efi_capsule_header_t *cap_hdr = data;
const struct efi_manage_capsule_header *m_hdr;
const struct efi_manage_capsule_image_header *m_img_hdr;
const struct efi_image_auth *auth;
int type, size;
/*
* If the code in the capsule is older than the current
* firmware code, the update will be rejected by the firmware,
* so check its version upfront without engaging the
* Management Mode update mechanism, which may be costly.
*/
size = cap_hdr->headersize;
m_hdr = data + size;
/*
* Size of the current data structure plus the variable-length
* offset array, whose element count is (emb_drv_cnt + payload_cnt).
*/
size += offsetof(struct efi_manage_capsule_header, offset_list) +
(m_hdr->emb_drv_cnt + m_hdr->payload_cnt) * sizeof(u64);
m_img_hdr = data + size;
type = get_image_type(m_img_hdr, pfru_dev);
if (type < 0)
return false;
size = adjust_efi_size(m_img_hdr, size);
if (size < 0)
return false;
auth = data + size;
size += sizeof(u64) + auth->auth_info.hdr.len;
payload_hdr = (struct pfru_payload_hdr *)(data + size);
/* finally compare the version */
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
return payload_hdr->rt_ver >= cap->drv_rt_version;
}
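/*
* Capsule layout walked by applicable_image() above, with the running
* offset measured from the start of the capsule:
*
* efi_capsule_header_t
* efi_manage_capsule_header (incl. offset_list[emb_drv_cnt + payload_cnt])
* efi_manage_capsule_image_header (size adjusted per its version)
* u64 + efi_image_auth (variable-length authentication blob)
* pfru_payload_hdr (carries the rt_ver used for the version check)
*/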
static void print_update_debug_info(struct pfru_updated_result *result,
struct pfru_device *pfru_dev)
{
dev_dbg(pfru_dev->parent_dev, "Update result:\n");
dev_dbg(pfru_dev->parent_dev, "Authentication Time Low:%lld\n",
result->low_auth_time);
dev_dbg(pfru_dev->parent_dev, "Authentication Time High:%lld\n",
result->high_auth_time);
dev_dbg(pfru_dev->parent_dev, "Execution Time Low:%lld\n",
result->low_exec_time);
dev_dbg(pfru_dev->parent_dev, "Execution Time High:%lld\n",
result->high_exec_time);
}
static int start_update(int action, struct pfru_device *pfru_dev)
{
union acpi_object *out_obj, in_obj, in_buf;
struct pfru_updated_result update_result;
acpi_handle handle;
int ret = -EINVAL;
memset(&in_obj, 0, sizeof(in_obj));
memset(&in_buf, 0, sizeof(in_buf));
in_obj.type = ACPI_TYPE_PACKAGE;
in_obj.package.count = 1;
in_obj.package.elements = &in_buf;
in_buf.type = ACPI_TYPE_INTEGER;
in_buf.integer.value = action;
handle = ACPI_HANDLE(pfru_dev->parent_dev);
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_START,
&in_obj, ACPI_TYPE_PACKAGE);
if (!out_obj)
return ret;
if (out_obj->package.count < UPDATE_NR_IDX ||
out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
goto free_acpi_buffer;
update_result.status =
out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
update_result.ext_status =
out_obj->package.elements[UPDATE_EXT_STATUS_IDX].integer.value;
if (update_result.status != DSM_SUCCEED) {
ret = -EBUSY;
dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
update_result.ext_status);
goto free_acpi_buffer;
}
update_result.low_auth_time =
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].integer.value;
update_result.high_auth_time =
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].integer.value;
update_result.low_exec_time =
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].integer.value;
update_result.high_exec_time =
out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].integer.value;
print_update_debug_info(&update_result, pfru_dev);
ret = 0;
free_acpi_buffer:
ACPI_FREE(out_obj);
return ret;
}
static long pfru_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pfru_update_cap_info cap_hdr;
struct pfru_device *pfru_dev = to_pfru_dev(file);
void __user *p = (void __user *)arg;
u32 rev;
int ret;
switch (cmd) {
case PFRU_IOC_QUERY_CAP:
ret = query_capability(&cap_hdr, pfru_dev);
if (ret)
return ret;
if (copy_to_user(p, &cap_hdr, sizeof(cap_hdr)))
return -EFAULT;
return 0;
case PFRU_IOC_SET_REV:
if (copy_from_user(&rev, p, sizeof(rev)))
return -EFAULT;
if (!pfru_valid_revid(rev))
return -EINVAL;
pfru_dev->rev_id = rev;
return 0;
case PFRU_IOC_STAGE:
return start_update(START_STAGE, pfru_dev);
case PFRU_IOC_ACTIVATE:
return start_update(START_ACTIVATE, pfru_dev);
case PFRU_IOC_STAGE_ACTIVATE:
return start_update(START_STAGE_ACTIVATE, pfru_dev);
default:
return -ENOTTY;
}
}
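/*
* Typical user-space flow, as a sketch (the device node name is an
* assumption; the driver registers nodes named "acpi_pfr_update%d", so
* the index may differ):
*
* int fd = open("/dev/acpi_pfr_update0", O_RDWR);
* struct pfru_update_cap_info cap;
* ioctl(fd, PFRU_IOC_QUERY_CAP, &cap); -- query capabilities first
* write(fd, capsule, capsule_size); -- copy capsule into the comm buffer
* ioctl(fd, PFRU_IOC_STAGE_ACTIVATE); -- stage and activate the update
*/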
static ssize_t pfru_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct pfru_device *pfru_dev = to_pfru_dev(file);
struct pfru_update_cap_info cap;
struct pfru_com_buf_info buf_info;
phys_addr_t phy_addr;
struct iov_iter iter;
struct iovec iov;
char *buf_ptr;
int ret;
ret = query_buffer(&buf_info, pfru_dev);
if (ret)
return ret;
if (len > buf_info.buf_size)
return -EINVAL;
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len);
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
if (!buf_ptr)
return -ENOMEM;
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
ret = -EINVAL;
goto unmap;
}
/* check if the capsule header has a valid version number */
ret = query_capability(&cap, pfru_dev);
if (ret)
goto unmap;
if (!applicable_image(buf_ptr, &cap, pfru_dev))
ret = -EINVAL;
unmap:
memunmap(buf_ptr);
return ret ?: len;
}
static const struct file_operations acpi_pfru_fops = {
.owner = THIS_MODULE,
.write = pfru_write,
.unlocked_ioctl = pfru_ioctl,
.llseek = noop_llseek,
};
static int acpi_pfru_remove(struct platform_device *pdev)
{
struct pfru_device *pfru_dev = platform_get_drvdata(pdev);
misc_deregister(&pfru_dev->miscdev);
return 0;
}
static void pfru_put_idx(void *data)
{
struct pfru_device *pfru_dev = data;
ida_free(&pfru_ida, pfru_dev->index);
}
static int acpi_pfru_probe(struct platform_device *pdev)
{
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
struct pfru_device *pfru_dev;
int ret;
if (!acpi_has_method(handle, "_DSM")) {
dev_dbg(&pdev->dev, "Missing _DSM\n");
return -ENODEV;
}
pfru_dev = devm_kzalloc(&pdev->dev, sizeof(*pfru_dev), GFP_KERNEL);
if (!pfru_dev)
return -ENOMEM;
ret = ida_alloc(&pfru_ida, GFP_KERNEL);
if (ret < 0)
return ret;
pfru_dev->index = ret;
ret = devm_add_action_or_reset(&pdev->dev, pfru_put_idx, pfru_dev);
if (ret)
return ret;
pfru_dev->rev_id = PFRU_DEFAULT_REV_ID;
pfru_dev->parent_dev = &pdev->dev;
pfru_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
pfru_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pfru%d", pfru_dev->index);
if (!pfru_dev->miscdev.name)
return -ENOMEM;
pfru_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"acpi_pfr_update%d", pfru_dev->index);
if (!pfru_dev->miscdev.nodename)
return -ENOMEM;
pfru_dev->miscdev.fops = &acpi_pfru_fops;
pfru_dev->miscdev.parent = &pdev->dev;
ret = misc_register(&pfru_dev->miscdev);
if (ret)
return ret;
platform_set_drvdata(pdev, pfru_dev);
return 0;
}
static const struct acpi_device_id acpi_pfru_ids[] = {
{"INTC1080"},
{}
};
MODULE_DEVICE_TABLE(acpi, acpi_pfru_ids);
static struct platform_driver acpi_pfru_driver = {
.driver = {
.name = "pfr_update",
.acpi_match_table = acpi_pfru_ids,
},
.probe = acpi_pfru_probe,
.remove = acpi_pfru_remove,
};
module_platform_driver(acpi_pfru_driver);
MODULE_DESCRIPTION("Platform Firmware Runtime Update device driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/acpi/pfr_update.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_thermal.c - ACPI Thermal Zone Driver ($Revision: 41 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*
* This driver fully implements the ACPI thermal policy as described in the
* ACPI 2.0 Specification.
*
* TBD: 1. Implement passive cooling hysteresis.
* 2. Enhance passive cooling (CPU) states/limit interface to support
* concepts of 'multiple limiters', upper/lower limits, etc.
*/
#define pr_fmt(fmt) "ACPI: thermal: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/device.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/units.h>
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
#define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81
#define ACPI_THERMAL_NOTIFY_DEVICES 0x82
#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
#define ACPI_TRIPS_CRITICAL BIT(0)
#define ACPI_TRIPS_HOT BIT(1)
#define ACPI_TRIPS_PASSIVE BIT(2)
#define ACPI_TRIPS_ACTIVE BIT(3)
#define ACPI_TRIPS_DEVICES BIT(4)
#define ACPI_TRIPS_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
ACPI_TRIPS_DEVICES)
/*
* This exception is raised in two cases:
* 1. A valid trip point becomes invalid, or an invalid trip point
* becomes valid, when the AML code is re-evaluated.
* 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
* We need to re-bind the cooling devices of a thermal zone when this occurs.
*/
#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \
do { \
if (flags != ACPI_TRIPS_INIT) \
acpi_handle_info(tz->device->handle, \
"ACPI thermal trip point %s changed\n" \
"Please report to [email protected]\n", str); \
} while (0)
static int act;
module_param(act, int, 0644);
MODULE_PARM_DESC(act, "Disable or override all lowest active trip points.");
static int crt;
module_param(crt, int, 0644);
MODULE_PARM_DESC(crt, "Disable or lower all critical trip points.");
static int tzp;
module_param(tzp, int, 0444);
MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
static int off;
module_param(off, int, 0);
MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
static int psv;
module_param(psv, int, 0644);
MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static struct workqueue_struct *acpi_thermal_pm_queue;
struct acpi_thermal_trip {
unsigned long temperature;
bool valid;
};
struct acpi_thermal_passive {
struct acpi_thermal_trip trip;
struct acpi_handle_list devices;
unsigned long tc1;
unsigned long tc2;
unsigned long tsp;
};
struct acpi_thermal_active {
struct acpi_thermal_trip trip;
struct acpi_handle_list devices;
};
struct acpi_thermal_trips {
struct acpi_thermal_trip critical;
struct acpi_thermal_trip hot;
struct acpi_thermal_passive passive;
struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE];
};
struct acpi_thermal {
struct acpi_device *device;
acpi_bus_id name;
unsigned long temperature;
unsigned long last_temperature;
unsigned long polling_frequency;
volatile u8 zombie;
struct acpi_thermal_trips trips;
struct thermal_trip *trip_table;
struct acpi_handle_list devices;
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
struct mutex thermal_check_lock;
refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
Thermal Zone Management
-------------------------------------------------------------------------- */
static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
{
acpi_status status = AE_OK;
unsigned long long tmp;
if (!tz)
return -EINVAL;
tz->last_temperature = tz->temperature;
status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status))
return -ENODEV;
tz->temperature = tmp;
acpi_handle_debug(tz->device->handle, "Temperature is %lu dK\n",
tz->temperature);
return 0;
}
static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
{
acpi_status status = AE_OK;
unsigned long long tmp;
if (!tz)
return -EINVAL;
status = acpi_evaluate_integer(tz->device->handle, "_TZP", NULL, &tmp);
if (ACPI_FAILURE(status))
return -ENODEV;
tz->polling_frequency = tmp;
acpi_handle_debug(tz->device->handle, "Polling frequency is %lu dS\n",
tz->polling_frequency);
return 0;
}
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
}
static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
acpi_status status;
unsigned long long tmp;
struct acpi_handle_list devices;
bool valid = false;
int i;
/* Critical Shutdown */
if (flag & ACPI_TRIPS_CRITICAL) {
status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
tz->trips.critical.temperature = tmp;
/*
* Treat freezing temperatures as invalid as well; some
* BIOSes return really low values and cause reboots at startup.
* Below-zero (Celsius) values clearly aren't right,
* so discard those as invalid.
*/
if (ACPI_FAILURE(status)) {
tz->trips.critical.valid = false;
acpi_handle_debug(tz->device->handle,
"No critical threshold\n");
} else if (tmp <= 2732) {
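/* 2732 deci-Kelvin is 273.2 K, i.e. roughly 0 degrees Celsius. */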
pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
tz->trips.critical.valid = false;
} else {
tz->trips.critical.valid = true;
acpi_handle_debug(tz->device->handle,
"Found critical threshold [%lu]\n",
tz->trips.critical.temperature);
}
if (tz->trips.critical.valid) {
if (crt == -1) {
tz->trips.critical.valid = false;
} else if (crt > 0) {
unsigned long crt_k = celsius_to_deci_kelvin(crt);
/*
* Allow the critical threshold to be overridden.
*/
if (crt_k > tz->trips.critical.temperature)
pr_info("Critical threshold %d C\n", crt);
tz->trips.critical.temperature = crt_k;
}
}
}
/* Critical Sleep (optional) */
if (flag & ACPI_TRIPS_HOT) {
status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.valid = false;
acpi_handle_debug(tz->device->handle,
"No hot threshold\n");
} else {
tz->trips.hot.temperature = tmp;
tz->trips.hot.valid = true;
acpi_handle_debug(tz->device->handle,
"Found hot threshold [%lu]\n",
tz->trips.hot.temperature);
}
}
/* Passive (optional) */
if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.trip.valid) ||
flag == ACPI_TRIPS_INIT) {
valid = tz->trips.passive.trip.valid;
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
tmp = celsius_to_deci_kelvin(psv);
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
"_PSV", NULL, &tmp);
}
if (ACPI_FAILURE(status)) {
tz->trips.passive.trip.valid = false;
} else {
tz->trips.passive.trip.temperature = tmp;
tz->trips.passive.trip.valid = true;
if (flag == ACPI_TRIPS_INIT) {
status = acpi_evaluate_integer(tz->device->handle,
"_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tc1 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tc2 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tsp = tmp;
}
}
}
if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.trip.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
tz->trips.passive.trip.valid = false;
} else {
tz->trips.passive.trip.valid = true;
}
if (memcmp(&tz->trips.passive.devices, &devices,
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.trip.valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
valid = tz->trips.active[i].trip.valid;
if (act == -1)
break; /* disable all active trip points */
if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
tz->trips.active[i].trip.valid)) {
status = acpi_evaluate_integer(tz->device->handle,
name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.active[i].trip.valid = false;
if (i == 0)
break;
if (act <= 0)
break;
if (i == 1)
tz->trips.active[0].trip.temperature =
celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
* the next higher trip point
*/
tz->trips.active[i-1].trip.temperature =
min_t(unsigned long,
tz->trips.active[i-2].trip.temperature,
celsius_to_deci_kelvin(act));
break;
} else {
tz->trips.active[i].trip.temperature = tmp;
tz->trips.active[i].trip.valid = true;
}
}
name[2] = 'L';
if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].trip.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
tz->trips.active[i].trip.valid = false;
} else {
tz->trips.active[i].trip.valid = true;
}
if (memcmp(&tz->trips.active[i].devices, &devices,
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
if (valid != tz->trips.active[i].trip.valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
if (!tz->trips.active[i].trip.valid)
break;
}
if (flag & ACPI_TRIPS_DEVICES) {
memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
NULL, &devices);
if (ACPI_SUCCESS(status) &&
memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
}
static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
{
struct acpi_thermal_trip *acpi_trip = trip->priv;
struct acpi_thermal *tz = data;
if (!acpi_trip)
return 0;
if (acpi_trip->valid)
trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
else
trip->temperature = THERMAL_TEMP_INVALID;
return 0;
}
static void acpi_thermal_adjust_thermal_zone(struct thermal_zone_device *thermal,
unsigned long data)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
int flag = data == ACPI_THERMAL_NOTIFY_THRESHOLDS ?
ACPI_TRIPS_THRESHOLDS : ACPI_TRIPS_DEVICES;
__acpi_thermal_trips_update(tz, flag);
for_each_thermal_trip(tz->thermal_zone, acpi_thermal_adjust_trip, tz);
}
static void acpi_queue_thermal_check(struct acpi_thermal *tz)
{
if (!work_pending(&tz->thermal_check_work))
queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
}
static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event)
{
struct acpi_device *adev = tz->device;
/*
* Use thermal_zone_device_exec() to carry out the trip points
* update, so as to protect thermal_get_trend() from getting stale
* trip point temperatures and to prevent thermal_zone_device_update()
* invoked from acpi_thermal_check_fn() from producing inconsistent
* results.
*/
thermal_zone_device_exec(tz->thermal_zone,
acpi_thermal_adjust_thermal_zone, event);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(adev->pnp.device_class,
dev_name(&adev->dev), event, 0);
}
static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
bool valid;
int i;
__acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
valid = tz->trips.critical.valid ||
tz->trips.hot.valid ||
tz->trips.passive.trip.valid;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++)
valid = valid || tz->trips.active[i].trip.valid;
if (!valid) {
pr_warn(FW_BUG "No valid trip found\n");
return -ENODEV;
}
return 0;
}
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
int result;
if (!tz)
return -EINVAL;
result = acpi_thermal_get_temperature(tz);
if (result)
return result;
*temp = deci_kelvin_to_millicelsius_with_offset(tz->temperature,
tz->kelvin_offset);
return 0;
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
const struct thermal_trip *trip,
enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
struct acpi_thermal_trip *acpi_trip;
int t;
if (!tz || !trip)
return -EINVAL;
acpi_trip = trip->priv;
if (!acpi_trip || !acpi_trip->valid)
return -EINVAL;
switch (trip->type) {
case THERMAL_TRIP_PASSIVE:
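/*
* ACPI passive-cooling trend equation:
* t = TC1 * (T - T_last) + TC2 * (T - T_trip)
* with temperatures in deci-Kelvin; t > 0 means the zone is
* heating up, t < 0 means it is cooling down.
*/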
t = tz->trips.passive.tc1 * (tz->temperature -
tz->last_temperature) +
tz->trips.passive.tc2 * (tz->temperature -
acpi_trip->temperature);
if (t > 0)
*trend = THERMAL_TREND_RAISING;
else if (t < 0)
*trend = THERMAL_TREND_DROPPING;
else
*trend = THERMAL_TREND_STABLE;
return 0;
case THERMAL_TRIP_ACTIVE:
t = acpi_thermal_temp(tz, tz->temperature);
if (t <= trip->temperature)
break;
*trend = THERMAL_TREND_RAISING;
return 0;
default:
break;
}
return -EINVAL;
}
static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
dev_name(&tz->device->dev),
ACPI_THERMAL_NOTIFY_HOT, 1);
}
static void acpi_thermal_zone_device_critical(struct thermal_zone_device *thermal)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
dev_name(&tz->device->dev),
ACPI_THERMAL_NOTIFY_CRITICAL, 1);
thermal_zone_device_critical(thermal);
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev,
bool bind)
{
struct acpi_device *device = cdev->devdata;
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
struct acpi_device *dev;
acpi_handle handle;
int i;
int j;
int trip = -1;
int result = 0;
if (tz->trips.critical.valid)
trip++;
if (tz->trips.hot.valid)
trip++;
if (tz->trips.passive.trip.valid) {
trip++;
for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
if (bind)
result = thermal_zone_bind_cooling_device(
thermal, trip, cdev,
THERMAL_NO_LIMIT,
THERMAL_NO_LIMIT,
THERMAL_WEIGHT_DEFAULT);
else
result =
thermal_zone_unbind_cooling_device(
thermal, trip, cdev);
if (result)
goto failed;
}
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].trip.valid)
break;
trip++;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
handle = tz->trips.active[i].devices.handles[j];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
if (bind)
result = thermal_zone_bind_cooling_device(
thermal, trip, cdev,
THERMAL_NO_LIMIT,
THERMAL_NO_LIMIT,
THERMAL_WEIGHT_DEFAULT);
else
result = thermal_zone_unbind_cooling_device(
thermal, trip, cdev);
if (result)
goto failed;
}
}
failed:
return result;
}
static int
acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, true);
}
static int
acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
.get_trend = thermal_get_trend,
.hot = acpi_thermal_zone_device_hot,
.critical = acpi_thermal_zone_device_critical,
};
static int acpi_thermal_zone_sysfs_add(struct acpi_thermal *tz)
{
struct device *tzdev = thermal_zone_device(tz->thermal_zone);
int ret;
ret = sysfs_create_link(&tz->device->dev.kobj,
&tzdev->kobj, "thermal_zone");
if (ret)
return ret;
ret = sysfs_create_link(&tzdev->kobj,
&tz->device->dev.kobj, "device");
if (ret)
sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
return ret;
}
static void acpi_thermal_zone_sysfs_remove(struct acpi_thermal *tz)
{
struct device *tzdev = thermal_zone_device(tz->thermal_zone);
sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
sysfs_remove_link(&tzdev->kobj, "device");
}
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
{
struct acpi_thermal_trip *acpi_trip;
struct thermal_trip *trip;
int passive_delay = 0;
int trip_count = 0;
int result;
int i;
if (tz->trips.critical.valid)
trip_count++;
if (tz->trips.hot.valid)
trip_count++;
if (tz->trips.passive.trip.valid) {
trip_count++;
passive_delay = tz->trips.passive.tsp * 100;
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].trip.valid; i++)
trip_count++;
trip = kcalloc(trip_count, sizeof(*trip), GFP_KERNEL);
if (!trip)
return -ENOMEM;
tz->trip_table = trip;
if (tz->trips.critical.valid) {
trip->type = THERMAL_TRIP_CRITICAL;
trip->temperature = acpi_thermal_temp(tz, tz->trips.critical.temperature);
trip++;
}
if (tz->trips.hot.valid) {
trip->type = THERMAL_TRIP_HOT;
trip->temperature = acpi_thermal_temp(tz, tz->trips.hot.temperature);
trip++;
}
acpi_trip = &tz->trips.passive.trip;
if (acpi_trip->valid) {
trip->type = THERMAL_TRIP_PASSIVE;
trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
trip->priv = acpi_trip;
trip++;
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
acpi_trip = &tz->trips.active[i].trip;
if (!acpi_trip->valid)
break;
trip->type = THERMAL_TRIP_ACTIVE;
trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
trip->priv = acpi_trip;
trip++;
}
tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
tz->trip_table,
trip_count,
0, tz,
&acpi_thermal_zone_ops,
NULL,
passive_delay,
tz->polling_frequency * 100);
if (IS_ERR(tz->thermal_zone)) {
result = PTR_ERR(tz->thermal_zone);
goto free_trip_table;
}
result = acpi_thermal_zone_sysfs_add(tz);
if (result)
goto unregister_tzd;
result = thermal_zone_device_enable(tz->thermal_zone);
if (result)
goto remove_links;
dev_info(&tz->device->dev, "registered as thermal_zone%d\n",
thermal_zone_device_id(tz->thermal_zone));
return 0;
remove_links:
acpi_thermal_zone_sysfs_remove(tz);
unregister_tzd:
thermal_zone_device_unregister(tz->thermal_zone);
free_trip_table:
kfree(tz->trip_table);
return result;
}
static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
{
acpi_thermal_zone_sysfs_remove(tz);
thermal_zone_device_unregister(tz->thermal_zone);
kfree(tz->trip_table);
tz->thermal_zone = NULL;
}
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_thermal *tz = acpi_driver_data(device);
if (!tz)
return;
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, event);
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
break;
}
}
/*
* On some platforms, the AML code depends on the evaluation order
* of _TMP and _CRT/_HOT/_PSV/_ACx.
* 1. On HP Pavilion G4-1016tx, _TMP must be invoked after
* _CRT/_HOT/_PSV/_ACx, or else the system will power off.
* 2. On HP Compaq 6715b/6715s, the return value of _PSV is 0
* if _TMP has never been evaluated.
*
* As this dependency is totally transparent to the OS, evaluate
* all of them once, in the order of _CRT/_HOT/_PSV/_ACx,
* _TMP, before they are actually used.
*/
static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
{
acpi_handle handle = tz->device->handle;
unsigned long long value;
int i;
acpi_evaluate_integer(handle, "_CRT", NULL, &value);
acpi_evaluate_integer(handle, "_HOT", NULL, &value);
acpi_evaluate_integer(handle, "_PSV", NULL, &value);
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
acpi_status status;
status = acpi_evaluate_integer(handle, name, NULL, &value);
if (status == AE_NOT_FOUND)
break;
}
acpi_evaluate_integer(handle, "_TMP", NULL, &value);
}
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
int result;
if (!tz)
return -EINVAL;
acpi_thermal_aml_dependency_fix(tz);
/* Get trip points [_CRT, _PSV, etc.] (required) */
result = acpi_thermal_get_trip_points(tz);
if (result)
return result;
/* Get temperature [_TMP] (required) */
result = acpi_thermal_get_temperature(tz);
if (result)
return result;
/* Set the cooling mode [_SCP] to active cooling (default) */
acpi_execute_simple_method(tz->device->handle, "_SCP",
ACPI_THERMAL_MODE_ACTIVE);
/* Get default polling frequency [_TZP] (optional) */
if (tzp)
tz->polling_frequency = tzp;
else
acpi_thermal_get_polling_frequency(tz);
return 0;
}
/*
* The exact offset between Kelvin and degree Celsius is 273.15. However, ACPI
* handles temperature values with a single decimal place. As a consequence,
* some implementations use an offset of 273.1 and others use an offset of
* 273.2. Try to find out which one is being used, to present the most
* accurate and visually appealing number.
*
* The heuristic below should work for all ACPI thermal zones which have a
* critical trip point with a value being a multiple of 0.5 degree Celsius.
*/
static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
{
if (tz->trips.critical.valid &&
(tz->trips.critical.temperature % 5) == 1)
tz->kelvin_offset = 273100;
else
tz->kelvin_offset = 273200;
}
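/*
* Worked example: a _CRT of 3731 dK gives 3731 % 5 == 1, so the 273.1
* offset is chosen and the trip reads back as exactly 100.0 C; with a
* _CRT of 3732 dK the 273.2 offset is used and the result is again 100.0 C.
*/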
static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
/*
* In general, it is not sufficient to check the pending bit, because
* subsequent instances of this function may be queued after one of them
* has started running (e.g. if _TMP sleeps). Avoid bailing out if just
* one of them is running, though, because it may have done the actual
* check some time ago, so allow at least one of them to block on the
* mutex while another one is running the update.
*/
if (!refcount_dec_not_one(&tz->thermal_check_count))
return;
mutex_lock(&tz->thermal_check_lock);
thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
refcount_inc(&tz->thermal_check_count);
mutex_unlock(&tz->thermal_check_lock);
}
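/*
* Note: thermal_check_count is initialized to 3 in acpi_thermal_add(), so
* the refcount_dec_not_one() above lets one instance run the update and at
* most one more block on the mutex; any further queued instances bail out.
*/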
static int acpi_thermal_add(struct acpi_device *device)
{
struct acpi_thermal *tz;
int result;
if (!device)
return -EINVAL;
tz = kzalloc(sizeof(struct acpi_thermal), GFP_KERNEL);
if (!tz)
return -ENOMEM;
tz->device = device;
strcpy(tz->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
device->driver_data = tz;
result = acpi_thermal_get_info(tz);
if (result)
goto free_memory;
acpi_thermal_guess_offset(tz);
result = acpi_thermal_register_thermal_zone(tz);
if (result)
goto free_memory;
refcount_set(&tz->thermal_check_count, 3);
mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info("%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
result = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_thermal_notify);
if (result)
goto flush_wq;
return 0;
flush_wq:
flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
free_memory:
kfree(tz);
return result;
}
static void acpi_thermal_remove(struct acpi_device *device)
{
struct acpi_thermal *tz;
if (!device || !acpi_driver_data(device))
return;
tz = acpi_driver_data(device);
acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_thermal_notify);
flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
kfree(tz);
}
#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_suspend(struct device *dev)
{
/* Make sure the previously queued thermal check work has been done */
flush_workqueue(acpi_thermal_pm_queue);
return 0;
}
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
int i, j, power_state;
if (!dev)
return -EINVAL;
tz = acpi_driver_data(to_acpi_device(dev));
if (!tz)
return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].trip.valid)
break;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
acpi_bus_update_power(tz->trips.active[i].devices.handles[j],
&power_state);
}
}
acpi_queue_thermal_check(tz);
return 0;
}
#else
#define acpi_thermal_suspend NULL
#define acpi_thermal_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
static const struct acpi_device_id thermal_device_ids[] = {
{ACPI_THERMAL_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
.class = ACPI_THERMAL_CLASS,
.ids = thermal_device_ids,
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
},
.drv.pm = &acpi_thermal_pm,
};
static int thermal_act(const struct dmi_system_id *d)
{
if (act == 0) {
pr_notice("%s detected: disabling all active thermal trip points\n",
d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d)
{
pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
d->ident);
crt = -1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d)
{
if (tzp == 0) {
pr_notice("%s detected: enabling thermal zone polling\n",
d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
}
static int thermal_psv(const struct dmi_system_id *d)
{
if (psv == 0) {
pr_notice("%s detected: disabling all passive thermal trip points\n",
d->ident);
psv = -1;
}
return 0;
}
static const struct dmi_system_id thermal_dmi_table[] __initconst = {
/*
* Award BIOS on this AOpen makes thermal control almost worthless.
* http://bugzilla.kernel.org/show_bug.cgi?id=8842
*/
{
.callback = thermal_act,
.ident = "AOpen i915GMm-HFS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
{
.callback = thermal_psv,
.ident = "AOpen i915GMm-HFS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
{
.callback = thermal_tzp,
.ident = "AOpen i915GMm-HFS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
{
.callback = thermal_nocrt,
.ident = "Gigabyte GA-7ZX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "7ZX"),
},
},
{}
};
static int __init acpi_thermal_init(void)
{
int result;
dmi_check_system(thermal_dmi_table);
if (off) {
pr_notice("thermal control disabled\n");
return -ENODEV;
}
acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
result = acpi_bus_register_driver(&acpi_thermal_driver);
if (result < 0) {
destroy_workqueue(acpi_thermal_pm_queue);
return -ENODEV;
}
return 0;
}
static void __exit acpi_thermal_exit(void)
{
acpi_bus_unregister_driver(&acpi_thermal_driver);
destroy_workqueue(acpi_thermal_pm_queue);
}
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/thermal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Author: Sudeep Holla <[email protected]>
* Copyright 2021 Arm Limited
*
* The PCC Address Space, also referred to as the PCC Operation Region,
* pertains to the region of PCC subspace that succeeds the PCC signature.
* The PCC Operation Region works in conjunction with the PCC Table (Platform
* Communications Channel Table). PCC subspaces that are marked for use as
* PCC Operation Regions must not be used as PCC subspaces for the standard
* ACPI features such as CPPC, RASF, PDTT and MPST. These standard features
* must always use the PCC Table instead.
*
* This driver sets up the PCC Address Space and installs a handler to enable
* handling of the PCC OpRegion in the firmware.
*
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <acpi/pcc.h>
/*
* Arbitrary retries in case the remote processor is slow to respond
* to PCC commands
*/
#define PCC_CMD_WAIT_RETRIES_NUM 500ULL
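/*
* For example (illustrative numbers), with a nominal channel latency of
* 100 us the command wait below is capped at 500 * 100 us = 50 ms.
*/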
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
};
static struct acpi_pcc_info pcc_ctx;
static void pcc_rx_callback(struct mbox_client *cl, void *m)
{
struct pcc_data *data = container_of(cl, struct pcc_data, cl);
complete(&data->done);
}
static acpi_status
acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
void *handler_context, void **region_context)
{
struct pcc_data *data;
struct acpi_pcc_info *ctx = handler_context;
struct pcc_mbox_chan *pcc_chan;
acpi_status ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return AE_NO_MEMORY;
data->cl.rx_callback = pcc_rx_callback;
data->cl.knows_txdone = true;
data->ctx.length = ctx->length;
data->ctx.subspace_id = ctx->subspace_id;
data->ctx.internal_buffer = ctx->internal_buffer;
init_completion(&data->done);
data->pcc_chan = pcc_mbox_request_channel(&data->cl, ctx->subspace_id);
if (IS_ERR(data->pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
ctx->subspace_id);
ret = AE_NOT_FOUND;
goto err_free_data;
}
pcc_chan = data->pcc_chan;
if (!pcc_chan->mchan->mbox->txdone_irq) {
pr_err("This channel-%d does not support interrupt.\n",
ctx->subspace_id);
ret = AE_SUPPORT;
goto err_free_channel;
}
data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
pcc_chan->shmem_size);
if (!data->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
ctx->subspace_id);
ret = AE_NO_MEMORY;
goto err_free_channel;
}
*region_context = data;
return AE_OK;
err_free_channel:
pcc_mbox_free_channel(data->pcc_chan);
err_free_data:
kfree(data);
return ret;
}
static acpi_status
acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
u32 bits, acpi_integer *value,
void *handler_context, void *region_context)
{
int ret;
struct pcc_data *data = region_context;
u64 usecs_lat;
reinit_completion(&data->done);
/* Write to Shared Memory */
memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
return AE_ERROR;
/*
* pcc_chan->latency is just a nominal value. In reality the remote
* processor could be much slower to reply, so add an arbitrary
* amount of wait time on top of the nominal latency.
*/
usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency;
ret = wait_for_completion_timeout(&data->done,
usecs_to_jiffies(usecs_lat));
if (ret == 0) {
pr_err("PCC command executed timeout!\n");
return AE_TIME;
}
mbox_chan_txdone(data->pcc_chan->mchan, ret);
memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
return AE_OK;
}
void __init acpi_init_pcc(void)
{
acpi_status status;
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_COMM,
&acpi_pcc_address_space_handler,
&acpi_pcc_address_space_setup,
&pcc_ctx);
if (ACPI_FAILURE(status))
pr_alert("OperationRegion handler could not be installed\n");
}
| linux-master | drivers/acpi/acpi_pcc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD ACPI support for ACPI-to-platform devices.
*
* Copyright (c) 2014,2015 AMD Corporation.
* Authors: Ken Xue <[email protected]>
* Wu, Jeff <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/clk-fch.h>
#include <linux/platform_device.h>
#include "internal.h"
struct apd_private_data;
/**
* struct apd_device_desc - a descriptor for apd device
* @fixed_clk_rate: fixed rate input clock source for acpi device;
* 0 means no fixed rate input clock source
* @properties: built-in properties of the device such as UART
* @setup: a hook routine to set up device resources when creating the
* platform device
*
* Device description defined as acpi_device_id.driver_data
*/
struct apd_device_desc {
unsigned int fixed_clk_rate;
struct property_entry *properties;
int (*setup)(struct apd_private_data *pdata);
};
struct apd_private_data {
struct clk *clk;
struct acpi_device *adev;
const struct apd_device_desc *dev_desc;
};
#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64)
#define APD_ADDR(desc) ((unsigned long)&desc)
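/* APD_ADDR() stashes a pointer to a descriptor in acpi_device_id.driver_data. */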
static int acpi_apd_setup(struct apd_private_data *pdata)
{
const struct apd_device_desc *dev_desc = pdata->dev_desc;
struct clk *clk;
if (dev_desc->fixed_clk_rate) {
clk = clk_register_fixed_rate(&pdata->adev->dev,
dev_name(&pdata->adev->dev),
NULL, 0, dev_desc->fixed_clk_rate);
clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
pdata->clk = clk;
}
return 0;
}
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
static int fch_misc_setup(struct apd_private_data *pdata)
{
struct acpi_device *adev = pdata->adev;
const union acpi_object *obj;
struct platform_device *clkdev;
struct fch_clk_data *clk_data;
struct resource_entry *rentry;
struct list_head resource_list;
int ret;
clk_data = devm_kzalloc(&adev->dev, sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
return -ENOENT;
if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
GFP_KERNEL);
if (!clk_data->name)
return -ENOMEM;
strcpy(clk_data->name, obj->string.pointer);
} else {
/* Set default name to mclk if entry missing in firmware */
clk_data->name = "mclk";
}
list_for_each_entry(rentry, &resource_list, node) {
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
resource_size(rentry->res));
break;
}
if (!clk_data->base)
return -ENOMEM;
acpi_dev_free_resource_list(&resource_list);
clkdev = platform_device_register_data(&adev->dev, "clk-fch",
PLATFORM_DEVID_NONE, clk_data,
sizeof(*clk_data));
return PTR_ERR_OR_ZERO(clkdev);
}
static const struct apd_device_desc cz_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 133000000,
};
static const struct apd_device_desc wt_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 150000000,
};
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
{ },
};
static const struct apd_device_desc cz_uart_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 48000000,
.properties = uart_properties,
};
static const struct apd_device_desc fch_misc_desc = {
.setup = fch_misc_setup,
};
#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
#ifdef CONFIG_ARM64
static const struct apd_device_desc xgene_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 100000000,
};
static const struct apd_device_desc vulcan_spi_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 133000000,
};
static const struct apd_device_desc hip07_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 200000000,
};
static const struct apd_device_desc hip08_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
static const struct apd_device_desc hip08_lite_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 125000000,
};
static const struct apd_device_desc thunderx2_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 125000000,
};
static const struct apd_device_desc nxp_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 350000000,
};
static const struct apd_device_desc hip08_spi_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
#endif /* CONFIG_ARM64 */
#endif
/*
* Create a platform device during the ACPI scan attach phase.
* Returns a value > 0 on successful device creation.
*/
static int acpi_apd_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
const struct apd_device_desc *dev_desc = (void *)id->driver_data;
struct apd_private_data *pdata;
struct platform_device *pdev;
int ret;
if (!dev_desc) {
pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->adev = adev;
pdata->dev_desc = dev_desc;
if (dev_desc->setup) {
ret = dev_desc->setup(pdata);
if (ret)
goto err_out;
}
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev))
return 1;
ret = PTR_ERR(pdev);
adev->driver_data = NULL;
err_out:
kfree(pdata);
return ret;
}
static const struct acpi_device_id acpi_apd_device_ids[] = {
/* Generic apd devices */
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
{ "AMD0010", APD_ADDR(cz_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
{ "AMD0040", APD_ADDR(fch_misc_desc)},
{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
{ "AMDI0019", APD_ADDR(wt_i2c_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0022", APD_ADDR(cz_uart_desc) },
{ "HYGO0010", APD_ADDR(wt_i2c_desc) },
#endif
#ifdef CONFIG_ARM64
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV9007", APD_ADDR(thunderx2_i2c_desc) },
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
{ "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) },
{ "HISI0173", APD_ADDR(hip08_spi_desc) },
{ "NXP0001", APD_ADDR(nxp_i2c_desc) },
#endif
{ }
};
static struct acpi_scan_handler apd_handler = {
.ids = acpi_apd_device_ids,
.attach = acpi_apd_create_device,
};
void __init acpi_apd_init(void)
{
acpi_scan_add_handler(&apd_handler);
}
| linux-master | drivers/acpi/acpi_apd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic Event Device for ACPI.
*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* The Generic Event Device allows platforms to handle interrupts in ACPI
* ASL statements. It follows an approach very similar to the _EVT method
* used for GPIO events. All interrupts are listed in _CRS and the handler
* is written in the _EVT method. Here is an example.
*
* Device (GED0)
* {
*
* Name (_HID, "ACPI0013")
* Name (_UID, 0)
* Method (_CRS, 0x0, Serialized)
* {
* Name (RBUF, ResourceTemplate ()
* {
* Interrupt(ResourceConsumer, Edge, ActiveHigh, Shared, , , )
* {123}
* }
* })
*
* Method (_EVT, 1) {
* if (Lequal(123, Arg0))
* {
* }
* }
* }
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#define MODULE_NAME "acpi-ged"
struct acpi_ged_device {
struct device *dev;
struct list_head event_list;
};
struct acpi_ged_event {
struct list_head node;
struct device *dev;
unsigned int gsi;
unsigned int irq;
acpi_handle handle;
};
static irqreturn_t acpi_ged_irq_handler(int irq, void *data)
{
struct acpi_ged_event *event = data;
acpi_status acpi_ret;
acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);
if (ACPI_FAILURE(acpi_ret))
dev_err_once(event->dev, "IRQ method execution failed\n");
return IRQ_HANDLED;
}
static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
void *context)
{
struct acpi_ged_event *event;
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
struct acpi_ged_device *geddev = context;
struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
char ev_name[5];
u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
if (!acpi_dev_resource_interrupt(ares, 0, &r)) {
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
trigger = p->triggering;
} else {
gsi = pext->interrupts[0];
trigger = pext->triggering;
}
irq = r.start;
switch (gsi) {
case 0 ... 255:
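/*
* For GSIs 0-255 first try a per-GSI method named _Exx (edge) or
* _Lxx (level), e.g. an edge-triggered GSI 123 maps to _E7B;
* fall back to the generic _EVT method otherwise.
*/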
sprintf(ev_name, "_%c%02X",
trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
break;
fallthrough;
default:
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
break;
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
event->gsi = gsi;
event->dev = dev;
event->irq = irq;
event->handle = evt_handle;
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
struct acpi_ged_device *geddev;
acpi_status acpi_ret;
geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
if (!geddev)
return -ENOMEM;
geddev->dev = &pdev->dev;
INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
platform_set_drvdata(pdev, geddev);
return 0;
}
static void ged_shutdown(struct platform_device *pdev)
{
struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
struct acpi_ged_event *event, *next;
list_for_each_entry_safe(event, next, &geddev->event_list, node) {
free_irq(event->irq, event);
list_del(&event->node);
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
}
static int ged_remove(struct platform_device *pdev)
{
ged_shutdown(pdev);
return 0;
}
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
};
static struct platform_driver ged_driver = {
.probe = ged_probe,
.remove = ged_remove,
.shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
},
};
builtin_platform_driver(ged_driver);
| linux-master | drivers/acpi/evged.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/reboot.h>
#include <linux/delay.h>
#ifdef CONFIG_PCI
static void acpi_pci_reboot(struct acpi_generic_address *rr, u8 reset_value)
{
unsigned int devfn;
struct pci_bus *bus0;
/* The reset register can only live on bus 0. */
bus0 = pci_find_bus(0, 0);
if (!bus0)
return;
/* Form PCI device/function pair. */
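/*
* Per the ACPI spec, a generic address in PCI config space encodes the
* device in bits [47:32], the function in bits [31:16] and the register
* offset in bits [15:0], which is exactly how rr->address is unpacked here.
*/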
devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
(rr->address >> 16) & 0xffff);
pr_debug("Resetting with ACPI PCI RESET_REG.\n");
/* Write the value that resets us. */
pci_bus_write_config_byte(bus0, devfn,
(rr->address & 0xffff), reset_value);
}
#else
static inline void acpi_pci_reboot(struct acpi_generic_address *rr,
u8 reset_value)
{
pr_warn_once("PCI configuration space access is not supported\n");
}
#endif
void acpi_reboot(void)
{
struct acpi_generic_address *rr;
u8 reset_value;
if (acpi_disabled)
return;
rr = &acpi_gbl_FADT.reset_register;
/* ACPI reset register was only introduced with v2 of the FADT */
if (acpi_gbl_FADT.header.revision < 2)
return;
/* Is the reset register supported? The spec says we should be
* checking the bit width and bit offset, but Windows ignores
* these fields */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
return;
reset_value = acpi_gbl_FADT.reset_value;
/* The reset register can only exist in I/O, Memory or PCI config space
* on a device on bus 0. */
switch (rr->space_id) {
case ACPI_ADR_SPACE_PCI_CONFIG:
acpi_pci_reboot(rr, reset_value);
break;
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
case ACPI_ADR_SPACE_SYSTEM_IO:
pr_debug("ACPI MEMORY or I/O RESET_REG.\n");
acpi_reset();
break;
}
/*
* Some platforms do not shut down immediately after writing to the
* ACPI reset register, and this results in racing with the
* subsequent reboot mechanism.
*
* The 15ms delay has been found to be long enough for the system
* to reboot on the affected platforms.
*/
mdelay(15);
}
| linux-master | drivers/acpi/reboot.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* processor_throttling.c - Throttling submodule of the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* - Added processor hotplug support
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <linux/uaccess.h>
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
* 1 -> acpi processor driver ignores _TPC values
*/
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
struct throttling_tstate {
unsigned int cpu; /* cpu nr */
int target_state; /* target T-state */
};
struct acpi_processor_throttling_arg {
struct acpi_processor *pr;
int target_state;
bool force;
};
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force, bool direct);
static int acpi_processor_update_tsd_coord(void)
{
int count_target;
int retval = 0;
unsigned int i, j;
cpumask_var_t covered_cpus;
struct acpi_processor *pr, *match_pr;
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
/*
* Now that we have _TSD data from all CPUs, let's set up T-state
* coordination between all CPUs.
*/
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
/* Basic validity check for domain info */
pthrottling = &(pr->throttling);
/*
* If the _TSD package for one CPU is invalid, the coordination
* among all CPUs is treated as invalid.
* This may be overly strict, but it is safe.
*/
if (!pthrottling->tsd_valid_flag) {
retval = -EINVAL;
break;
}
}
if (retval)
goto err_ret;
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
if (cpumask_test_cpu(i, covered_cpus))
continue;
pthrottling = &pr->throttling;
pdomain = &(pthrottling->domain_info);
cpumask_set_cpu(i, pthrottling->shared_cpu_map);
cpumask_set_cpu(i, covered_cpus);
/*
* If the number of processors in the _TSD domain is 1, it is
* unnecessary to parse the coordination for this CPU.
*/
if (pdomain->num_processors <= 1)
continue;
/* Validate the Domain info */
count_target = pdomain->num_processors;
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = per_cpu(processors, j);
if (!match_pr)
continue;
match_pthrottling = &(match_pr->throttling);
match_pdomain = &(match_pthrottling->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/* Here i and j are in the same domain.
* If two _TSD packages have the same domain, they
* should have the same num_processors and
* coordination type. Otherwise it will be regarded
* as illegal.
*/
if (match_pdomain->num_processors != count_target) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != match_pdomain->coord_type) {
retval = -EINVAL;
goto err_ret;
}
cpumask_set_cpu(j, covered_cpus);
cpumask_set_cpu(j, pthrottling->shared_cpu_map);
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = per_cpu(processors, j);
if (!match_pr)
continue;
match_pthrottling = &(match_pr->throttling);
match_pdomain = &(match_pthrottling->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/*
* CPUs that share a domain also share the
* same shared_cpu_map.
*/
cpumask_copy(match_pthrottling->shared_cpu_map,
pthrottling->shared_cpu_map);
}
}
err_ret:
free_cpumask_var(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
/*
* Assume no coordination on any error parsing domain info.
* The coordination type will be forced as SW_ALL.
*/
if (retval) {
pthrottling = &(pr->throttling);
cpumask_clear(pthrottling->shared_cpu_map);
cpumask_set_cpu(i, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
}
return retval;
}
/*
* Update the T-state coordination after the _TSD
* data for all cpus is obtained.
*/
void acpi_processor_throttling_init(void)
{
if (acpi_processor_update_tsd_coord())
pr_debug("Assume no T-state coordination\n");
}
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
struct throttling_tstate *p_tstate = data;
struct acpi_processor *pr;
unsigned int cpu;
int target_state;
struct acpi_processor_limit *p_limit;
struct acpi_processor_throttling *p_throttling;
cpu = p_tstate->cpu;
pr = per_cpu(processors, cpu);
if (!pr) {
pr_debug("Invalid pr pointer\n");
return 0;
}
if (!pr->flags.throttling) {
acpi_handle_debug(pr->handle,
"Throttling control unsupported on CPU %d\n",
cpu);
return 0;
}
target_state = p_tstate->target_state;
p_throttling = &(pr->throttling);
switch (event) {
case THROTTLING_PRECHANGE:
/*
* The prechange event is used to choose a proper T-state
* that meets the thermal, user and _TPC limits.
*/
p_limit = &pr->limit;
if (p_limit->thermal.tx > target_state)
target_state = p_limit->thermal.tx;
if (p_limit->user.tx > target_state)
target_state = p_limit->user.tx;
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
pr_warn("Exceeded the T-state limit\n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
acpi_handle_debug(pr->handle,
"PreChange Event: target T-state of CPU %d is T%d\n",
cpu, target_state);
break;
case THROTTLING_POSTCHANGE:
/*
* Postchange event is only used to update the
* T-state flag of acpi_processor_throttling.
*/
p_throttling->state = target_state;
acpi_handle_debug(pr->handle,
"PostChange Event: CPU %d is switched to T%d\n",
cpu, target_state);
break;
default:
pr_warn("Unsupported Throttling notifier event\n");
break;
}
return 0;
}
/*
* _TPC - Throttling Present Capabilities
*/
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long tpc = 0;
if (!pr)
return -EINVAL;
if (ignore_tpc)
goto end;
status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
acpi_evaluation_failure_warn(pr->handle, "_TPC", status);
return -ENODEV;
}
end:
pr->throttling_platform_limit = (int)tpc;
return 0;
}
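/*
 * Example of the _TPC semantics as used by this driver (values are
 * illustrative): if _TPC evaluates to 2, throttling_platform_limit
 * becomes 2, so T0 and T1 may not be used and OSPM must pick T2 or a
 * deeper throttling state (shallower requests fail with -EPERM below).
 */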
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
int result = 0;
int throttling_limit;
int current_state;
struct acpi_processor_limit *limit;
int target_state;
if (ignore_tpc)
return 0;
result = acpi_processor_get_platform_limit(pr);
if (result) {
/* Throttling Limit is unsupported */
return result;
}
throttling_limit = pr->throttling_platform_limit;
if (throttling_limit >= pr->throttling.state_count) {
/* Incorrect throttling limit */
return -EINVAL;
}
current_state = pr->throttling.state;
if (current_state > throttling_limit) {
/*
* The current state already meets the _TPC limit, but it
* is reasonable for OSPM to move to a shallower T-state
* for better performance, provided the thermal and user
* limits are still honored.
*/
limit = &pr->limit;
target_state = throttling_limit;
if (limit->thermal.tx > target_state)
target_state = limit->thermal.tx;
if (limit->user.tx > target_state)
target_state = limit->user.tx;
} else if (current_state == throttling_limit) {
/*
* Unnecessary to change the throttling state
*/
return 0;
} else {
/*
* If the current state is shallower than the _TPC limit, it
* will be forced to switch to the throttling state defined
* by throttling_platform_limit.
* Because the previous state already met the thermal and
* user limits, there is no need to check them again.
*/
target_state = throttling_limit;
}
return acpi_processor_set_throttling(pr, target_state, false);
}
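/*
 * Worked example of the selection above (illustrative numbers): with
 * throttling_limit = 2, current_state = 4, limit->thermal.tx = 3 and
 * limit->user.tx = 1, target_state starts at 2, is raised to 3 by the
 * thermal limit, is left unchanged by the user limit, and the CPU is
 * switched to T3.
 */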
/*
* This function reevaluates whether the T-state is valid after a CPU
* is onlined/offlined.
* Note that it does not reevaluate the following T-state properties:
* 1. The control method.
* 2. The number of supported T-states.
* 3. The _TSD domain.
*/
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
bool is_dead)
{
int result = 0;
if (is_dead) {
/* When one CPU is offline, the T-state throttling
* will be invalidated.
*/
pr->flags.throttling = 0;
return;
}
/* the following is to recheck whether the T-state is valid for
* the online CPU
*/
if (!pr->throttling.state_count) {
/* If the number of T-states is invalid, throttling is
* invalidated.
*/
pr->flags.throttling = 0;
return;
}
pr->flags.throttling = 1;
/* Disable throttling (if enabled). We'll let subsequent
* policy (e.g. thermal) decide to lower performance if it
* so chooses, but for now we'll crank up the speed.
*/
result = acpi_processor_get_throttling(pr);
if (result)
goto end;
if (pr->throttling.state) {
result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
end:
if (result)
pr->flags.throttling = 0;
}
/*
* _PTC - Processor Throttling Control (and status) register location
*/
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *ptc = NULL;
union acpi_object obj;
struct acpi_processor_throttling *throttling;
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
acpi_evaluation_failure_warn(pr->handle, "_PTC", status);
return -ENODEV;
}
ptc = (union acpi_object *)buffer.pointer;
if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
|| (ptc->package.count != 2)) {
pr_err("Invalid _PTC data\n");
result = -EFAULT;
goto end;
}
/*
* control_register
*/
obj = ptc->package.elements[0];
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
pr_err("Invalid _PTC data (control_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->throttling.control_register, obj.buffer.pointer,
sizeof(struct acpi_ptc_register));
/*
* status_register
*/
obj = ptc->package.elements[1];
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
pr_err("Invalid _PTC data (status_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->throttling.status_register, obj.buffer.pointer,
sizeof(struct acpi_ptc_register));
throttling = &pr->throttling;
if ((throttling->control_register.bit_width +
throttling->control_register.bit_offset) > 32) {
pr_err("Invalid _PTC control register\n");
result = -EFAULT;
goto end;
}
if ((throttling->status_register.bit_width +
throttling->status_register.bit_offset) > 32) {
pr_err("Invalid _PTC status register\n");
result = -EFAULT;
goto end;
}
end:
kfree(buffer.pointer);
return result;
}
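/*
 * A hypothetical _PTC object of the shape this parser accepts (sketch
 * only; the SystemIO addresses and widths are made up):
 *
 *   Name (_PTC, Package () {
 *       ResourceTemplate () { Register (SystemIO, 4, 0, 0x810) }, // control
 *       ResourceTemplate () { Register (SystemIO, 4, 4, 0x810) }  // status
 *   })
 *
 * Both registers must fit in 32 bits (bit_width + bit_offset <= 32).
 */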
/*
* _TSS - Throttling Supported States
*/
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
struct acpi_buffer state = { 0, NULL };
union acpi_object *tss = NULL;
int i;
status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
acpi_evaluation_failure_warn(pr->handle, "_TSS", status);
return -ENODEV;
}
tss = buffer.pointer;
if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
pr_err("Invalid _TSS data\n");
result = -EFAULT;
goto end;
}
acpi_handle_debug(pr->handle, "Found %d throttling states\n",
tss->package.count);
pr->throttling.state_count = tss->package.count;
pr->throttling.states_tss =
kmalloc_array(tss->package.count,
sizeof(struct acpi_processor_tx_tss),
GFP_KERNEL);
if (!pr->throttling.states_tss) {
result = -ENOMEM;
goto end;
}
for (i = 0; i < pr->throttling.state_count; i++) {
struct acpi_processor_tx_tss *tx =
(struct acpi_processor_tx_tss *)&(pr->throttling.
states_tss[i]);
state.length = sizeof(struct acpi_processor_tx_tss);
state.pointer = tx;
acpi_handle_debug(pr->handle, "Extracting state %d\n", i);
status = acpi_extract_package(&(tss->package.elements[i]),
&format, &state);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n",
acpi_format_exception(status));
result = -EFAULT;
kfree(pr->throttling.states_tss);
goto end;
}
if (!tx->freqpercentage) {
pr_err("Invalid _TSS data: freq is zero\n");
result = -EFAULT;
kfree(pr->throttling.states_tss);
goto end;
}
}
end:
kfree(buffer.pointer);
return result;
}
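/*
 * A hypothetical _TSS package matching the "NNNNN" format used above
 * (sketch only; every number is made up). Each entry is
 * {FreqPercent, Power, TransitionLatency, Control, Status}:
 *
 *   Name (_TSS, Package () {
 *       Package () { 100, 1000, 0, 0x00, 0x00 }, // T0: no throttling
 *       Package () {  75,  750, 0, 0x01, 0x01 }, // T1
 *       Package () {  50,  500, 0, 0x02, 0x02 }  // T2
 *   })
 *
 * An entry with freqpercentage == 0 is rejected by the check above.
 */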
/*
* _TSD - T-State Dependencies
*/
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
struct acpi_buffer state = { 0, NULL };
union acpi_object *tsd = NULL;
struct acpi_tsd_package *pdomain;
struct acpi_processor_throttling *pthrottling;
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0;
status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
acpi_evaluation_failure_warn(pr->handle, "_TSD", status);
return -ENODEV;
}
tsd = buffer.pointer;
if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (tsd->package.count != 1) {
pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
pdomain = &(pr->throttling.domain_info);
state.length = sizeof(struct acpi_tsd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(tsd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
pr_err("Unknown _TSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
pr_err("Unknown _TSD:revision\n");
result = -EFAULT;
goto end;
}
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 1;
pthrottling->shared_type = pdomain->coord_type;
cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
/*
* If the coordination type is not defined in the ACPI spec,
* the tsd_valid_flag will be cleared and the coordination type
* will be forced to DOMAIN_COORD_TYPE_SW_ALL.
*/
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
pthrottling->tsd_valid_flag = 0;
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
end:
kfree(buffer.pointer);
return result;
}
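/*
 * A hypothetical _TSD package of the only shape accepted above (sketch
 * only): a single 5-entry Rev0 package of
 * {NumEntries, Revision, Domain, CoordType, NumProcessors}:
 *
 *   Name (_TSD, Package () {
 *       Package () { 5, 0, 0, 0xFC, 2 } // SW_ALL domain 0 covering 2 CPUs
 *   })
 */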
/* --------------------------------------------------------------------------
Throttling Control
-------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
int state = 0;
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
/*
* We don't care about error returns - we just try to mark
* these reserved so that nobody else is confused into thinking
* that this region might be unused..
*
* (In particular, allocating the IO range for Cardbus)
*/
request_region(pr->throttling.address, 6, "ACPI CPU throttle");
pr->throttling.state = 0;
duty_mask = pr->throttling.state_count - 1;
duty_mask <<= pr->throttling.duty_offset;
local_irq_disable();
value = inl(pr->throttling.address);
/*
* Compute the current throttling state when throttling is enabled
* (bit 4 is on).
*/
if (value & 0x10) {
duty_value = value & duty_mask;
duty_value >>= pr->throttling.duty_offset;
if (duty_value)
state = pr->throttling.state_count - duty_value;
}
pr->throttling.state = state;
local_irq_enable();
acpi_handle_debug(pr->handle,
"Throttling state is T%d (%d%% throttling applied)\n",
state, pr->throttling.states[state].performance);
return 0;
}
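/*
 * Worked decode example for the FADT path (illustrative values):
 * duty_width = 3 gives state_count = 8, and duty_offset = 1 gives
 * duty_mask = 0x0E. Reading value = 0x16 from the P_CNT port means
 * throttling is enabled (bit 4 set) with
 * duty_value = (0x16 & 0x0E) >> 1 = 3, so state = 8 - 3 = T5.
 */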
#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
u64 msr_high, msr_low;
u64 msr = 0;
int ret = -1;
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
pr_err("HARDWARE addr space, NOT supported yet\n");
} else {
msr_low = 0;
msr_high = 0;
rdmsr_safe(MSR_IA32_THERM_CONTROL,
(u32 *)&msr_low, (u32 *) &msr_high);
msr = (msr_high << 32) | msr_low;
*value = (u64) msr;
ret = 0;
}
return ret;
}
static int acpi_throttling_wrmsr(u64 value)
{
int ret = -1;
u64 msr;
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
pr_err("HARDWARE addr space, NOT supported yet\n");
} else {
msr = value;
wrmsr_safe(MSR_IA32_THERM_CONTROL,
msr & 0xffffffff, msr >> 32);
ret = 0;
}
return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
pr_err("HARDWARE addr space, NOT supported yet\n");
return -1;
}
static int acpi_throttling_wrmsr(u64 value)
{
pr_err("HARDWARE addr space, NOT supported yet\n");
return -1;
}
#endif
static int acpi_read_throttling_status(struct acpi_processor *pr,
u64 *value)
{
u32 bit_width, bit_offset;
u32 ptc_value;
u64 ptc_mask;
struct acpi_processor_throttling *throttling;
int ret = -1;
throttling = &pr->throttling;
switch (throttling->status_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
bit_width = throttling->status_register.bit_width;
bit_offset = throttling->status_register.bit_offset;
acpi_os_read_port((acpi_io_address) throttling->status_register.
address, &ptc_value,
(u32) (bit_width + bit_offset));
ptc_mask = ((u64)1 << bit_width) - 1;
*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
ret = 0;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
ret = acpi_throttling_rdmsr(value);
break;
default:
pr_err("Unknown addr space %d\n",
(u32) (throttling->status_register.space_id));
}
return ret;
}
static int acpi_write_throttling_state(struct acpi_processor *pr,
u64 value)
{
u32 bit_width, bit_offset;
u64 ptc_value;
u64 ptc_mask;
struct acpi_processor_throttling *throttling;
int ret = -1;
throttling = &pr->throttling;
switch (throttling->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
bit_width = throttling->control_register.bit_width;
bit_offset = throttling->control_register.bit_offset;
ptc_mask = ((u64)1 << bit_width) - 1;
ptc_value = value & ptc_mask;
acpi_os_write_port((acpi_io_address) throttling->
control_register.address,
(u32) (ptc_value << bit_offset),
(u32) (bit_width + bit_offset));
ret = 0;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
ret = acpi_throttling_wrmsr(value);
break;
default:
pr_err("Unknown addr space %d\n",
(u32) (throttling->control_register.space_id));
}
return ret;
}
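/*
 * Illustrative, compiled-out sketch of the SYSTEM_IO round trip above.
 * The register layout (bit_offset = 4, bit_width = 4) and the control
 * value are hypothetical.
 */
#if 0
static void ptc_io_roundtrip_example(struct acpi_processor *pr)
{
	u64 raw = 0x3;	/* _TSS control value for the target T-state */

	/* Writes (0x3 & 0xF) << 4 to the port with an 8-bit access. */
	acpi_write_throttling_state(pr, raw);
	/* Reads the port back; yields ((value >> 4) & 0xF) == 0x3. */
	acpi_read_throttling_status(pr, &raw);
}
#endif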
static int acpi_get_throttling_state(struct acpi_processor *pr,
u64 value)
{
int i;
for (i = 0; i < pr->throttling.state_count; i++) {
struct acpi_processor_tx_tss *tx =
(struct acpi_processor_tx_tss *)&(pr->throttling.
states_tss[i]);
if (tx->control == value)
return i;
}
return -1;
}
static int acpi_get_throttling_value(struct acpi_processor *pr,
int state, u64 *value)
{
int ret = -1;
if (state >= 0 && state < pr->throttling.state_count) {
struct acpi_processor_tx_tss *tx =
(struct acpi_processor_tx_tss *)&(pr->throttling.
states_tss[state]);
*value = tx->control;
ret = 0;
}
return ret;
}
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
int state = 0;
int ret;
u64 value;
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
pr->throttling.state = 0;
value = 0;
ret = acpi_read_throttling_status(pr, &value);
if (ret >= 0) {
state = acpi_get_throttling_state(pr, value);
if (state == -1) {
acpi_handle_debug(pr->handle,
"Invalid throttling state, reset\n");
state = 0;
ret = __acpi_processor_set_throttling(pr, state, true,
true);
if (ret)
return ret;
}
pr->throttling.state = state;
}
return 0;
}
static long __acpi_processor_get_throttling(void *data)
{
struct acpi_processor *pr = data;
return pr->throttling.acpi_processor_get_throttling(pr);
}
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
/*
* This is either called from the CPU hotplug callback of
* processor_driver or via the ACPI probe function. In the latter
* case the CPU is not guaranteed to be online. Both call sites are
* protected against CPU hotplug.
*/
if (!cpu_online(pr->id))
return -ENODEV;
return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
int i, step;
if (!pr->throttling.address) {
acpi_handle_debug(pr->handle, "No throttling register\n");
return -EINVAL;
} else if (!pr->throttling.duty_width) {
acpi_handle_debug(pr->handle, "No throttling states\n");
return -EINVAL;
}
/* TBD: Support duty_cycle values that span bit 4. */
else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
pr_warn("duty_cycle spans bit 4\n");
return -EINVAL;
}
pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
/*
* Compute state values. Note that throttling displays a linear power
* performance relationship (at 50% performance the CPU will consume
* 50% power). Values are in 1/10th of a percent to preserve accuracy.
*/
step = (1000 / pr->throttling.state_count);
for (i = 0; i < pr->throttling.state_count; i++) {
pr->throttling.states[i].performance = 1000 - step * i;
pr->throttling.states[i].power = 1000 - step * i;
}
return 0;
}
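/*
 * Worked example of the table built above (illustrative): duty_width = 3
 * yields state_count = 8 and step = 125, so T0 maps to 1000 (100.0%
 * performance/power), T2 to 750 (75.0%) and T7 to 125 (12.5%).
 */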
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
int state, bool force)
{
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
if (!pr)
return -EINVAL;
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
return -EPERM;
/*
* Calculate the duty_value and duty_mask.
*/
if (state) {
duty_value = pr->throttling.state_count - state;
duty_value <<= pr->throttling.duty_offset;
/* Used to clear all duty_value bits */
duty_mask = pr->throttling.state_count - 1;
duty_mask <<= acpi_gbl_FADT.duty_offset;
duty_mask = ~duty_mask;
}
local_irq_disable();
/*
* Disable throttling by writing a 0 to bit 4. Note that throttling
* must be turned off before the duty_value can be changed.
*/
value = inl(pr->throttling.address);
if (value & 0x10) {
value &= 0xFFFFFFEF;
outl(value, pr->throttling.address);
}
/*
* Write the new duty_value and then enable throttling. Note
* that a state value of 0 leaves throttling disabled.
*/
if (state) {
value &= duty_mask;
value |= duty_value;
outl(value, pr->throttling.address);
value |= 0x00000010;
outl(value, pr->throttling.address);
}
pr->throttling.state = state;
local_irq_enable();
acpi_handle_debug(pr->handle,
"Throttling state set to T%d (%d%%)\n", state,
(pr->throttling.states[state].performance ? pr->
throttling.states[state].performance / 10 : 0));
return 0;
}
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int state, bool force)
{
int ret;
u64 value;
if (!pr)
return -EINVAL;
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
return -EPERM;
value = 0;
ret = acpi_get_throttling_value(pr, state, &value);
if (ret >= 0) {
acpi_write_throttling_state(pr, value);
pr->throttling.state = state;
}
return 0;
}
static long acpi_processor_throttling_fn(void *data)
{
struct acpi_processor_throttling_arg *arg = data;
struct acpi_processor *pr = arg->pr;
return pr->throttling.acpi_processor_set_throttling(pr,
arg->target_state, arg->force);
}
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force, bool direct)
{
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
if (cpu_is_offline(pr->id)) {
/*
* The CPU referenced by pr->id is offline, so there is no need
* to change the throttling state.
*/
return -ENODEV;
}
t_state.target_state = state;
p_throttling = &(pr->throttling);
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
}
/*
* acpi_processor_set_throttling() will be called to switch the
* T-state. If the coordination type is SW_ALL or HW_ALL, it must
* be called for every affected CPU. Otherwise calling it only for
* the CPU referenced by pr is sufficient.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
arg.pr = pr;
arg.target_state = state;
arg.force = force;
ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
direct);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* the T-state must be set for every affected CPU.
*/
for_each_cpu_and(i, cpu_online_mask,
p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
* error message and continue.
*/
if (!match_pr) {
acpi_handle_debug(pr->handle,
"Invalid Pointer for CPU %d\n", i);
continue;
}
/*
* If the throttling control is unsupported on CPU i,
* we will report the error message and continue.
*/
if (!match_pr->flags.throttling) {
acpi_handle_debug(pr->handle,
"Throttling Control unsupported on CPU %d\n", i);
continue;
}
arg.pr = match_pr;
arg.target_state = state;
arg.force = force;
ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
&arg, direct);
}
}
/*
* After the set_throttling is called, the
* throttling notifier is called for every
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
return ret;
}
int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
bool force)
{
return __acpi_processor_set_throttling(pr, state, force, false);
}
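/*
 * Illustrative call flow, assuming CPUs 0 and 1 share an SW_ALL _TSD
 * domain and acpi_processor_set_throttling(pr0, 2, false) is called:
 *
 *   THROTTLING_PRECHANGE notifier for CPU 0 and CPU 1
 *       (may deepen the target beyond T2 to honor thermal/user/_TPC)
 *   acpi_processor_throttling_fn() via call_on_cpu() on CPU 0 and CPU 1
 *   THROTTLING_POSTCHANGE notifier for CPU 0 and CPU 1
 *       (records the final T-state in each CPU's throttling data)
 *
 * With SW_ANY coordination only the calling CPU's pr is used for the
 * actual switch.
 */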
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
struct acpi_processor_throttling *pthrottling;
acpi_handle_debug(pr->handle,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
pr->throttling.address,
pr->throttling.duty_offset,
pr->throttling.duty_width);
/*
* Evaluate _PTC, _TSS and _TPC
* They must all be present or none of them can be used.
*/
if (acpi_processor_get_throttling_control(pr) ||
acpi_processor_get_throttling_states(pr) ||
acpi_processor_get_platform_limit(pr)) {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
&acpi_processor_set_throttling_fadt;
if (acpi_processor_get_fadt_info(pr))
return 0;
} else {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_ptc;
pr->throttling.acpi_processor_set_throttling =
&acpi_processor_set_throttling_ptc;
}
/*
* If the _TSD package for a CPU can't be parsed successfully, that
* CPU has no coordination with the other CPUs.
*/
if (acpi_processor_get_tsd(pr)) {
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0;
cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
/*
* PIIX4 Errata: We don't support throttling on the original PIIX4.
* This shouldn't be an issue as few (if any) mobile systems ever
* used this part.
*/
if (errata.piix4.throttle) {
acpi_handle_debug(pr->handle,
"Throttling not supported on PIIX4 A- or B-step\n");
return 0;
}
acpi_handle_debug(pr->handle, "Found %d throttling states\n",
pr->throttling.state_count);
pr->flags.throttling = 1;
/*
* Disable throttling (if enabled). We'll let subsequent policy (e.g.
* thermal) decide to lower performance if it so chooses, but for now
* we'll crank up the speed.
*/
result = acpi_processor_get_throttling(pr);
if (result)
goto end;
if (pr->throttling.state) {
acpi_handle_debug(pr->handle,
"Disabling throttling (was T%d)\n",
pr->throttling.state);
result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
end:
if (result)
pr->flags.throttling = 0;
return result;
}
| linux-master | drivers/acpi/processor_throttling.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* video.c - ACPI Video Driver
*
* Copyright (C) 2004 Luming Yu <[email protected]>
* Copyright (C) 2004 Bruno Ducrot <[email protected]>
* Copyright (C) 2006 Thomas Tuttle <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: video: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/input.h>
#include <linux/backlight.h>
#include <linux/thermal.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <acpi/video.h>
#include <linux/uaccess.h>
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define MAX_NAME_LEN 20
MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
static bool brightness_switch_enabled = true;
module_param(brightness_switch_enabled, bool, 0644);
/*
* By default, we don't allow duplicate ACPI video bus devices
* under the same VGA controller
*/
static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
#define REPORT_OUTPUT_KEY_EVENTS 0x01
#define REPORT_BRIGHTNESS_KEY_EVENTS 0x02
static int report_key_events = -1;
module_param(report_key_events, int, 0644);
MODULE_PARM_DESC(report_key_events,
"0: none, 1: output changes, 2: brightness changes, 3: all");
static int hw_changes_brightness = -1;
module_param(hw_changes_brightness, int, 0644);
MODULE_PARM_DESC(hw_changes_brightness,
"Set this to 1 on buggy hw which changes the brightness itself when "
"a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness");
/*
* Whether the struct acpi_video_device_attrib::device_id_scheme bit should be
* assumed even if not actually set.
*/
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
static DEFINE_MUTEX(video_list_lock);
static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device);
static void acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data);
/*
* Indices in the _BCL method response: the first two items are special,
* the rest are all supported levels.
*
* See page 575 of the ACPI spec 3.0
*/
enum acpi_video_level_idx {
ACPI_VIDEO_AC_LEVEL, /* level when machine has full power */
ACPI_VIDEO_BATTERY_LEVEL, /* level when machine is on batteries */
ACPI_VIDEO_FIRST_LEVEL, /* actual supported levels begin here */
};
static const struct acpi_device_id video_device_ids[] = {
{ACPI_VIDEO_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, video_device_ids);
static struct acpi_driver acpi_video_bus = {
.name = "video",
.class = ACPI_VIDEO_CLASS,
.ids = video_device_ids,
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
},
};
struct acpi_video_bus_flags {
u8 multihead:1; /* can switch video heads */
u8 rom:1; /* can retrieve a video rom */
u8 post:1; /* can configure the head to */
u8 reserved:5;
};
struct acpi_video_bus_cap {
u8 _DOS:1; /* Enable/Disable output switching */
u8 _DOD:1; /* Enumerate all devices attached to display adapter */
u8 _ROM:1; /* Get ROM Data */
u8 _GPD:1; /* Get POST Device */
u8 _SPD:1; /* Set POST Device */
u8 _VPO:1; /* Video POST Options */
u8 reserved:2;
};
struct acpi_video_device_attrib {
u32 display_index:4; /* A zero-based instance of the Display */
u32 display_port_attachment:4; /* This field differentiates the display type */
u32 display_type:4; /* Describe the specific type in use */
u32 vendor_specific:4; /* Chipset Vendor Specific */
u32 bios_can_detect:1; /* BIOS can detect the device */
u32 depend_on_vga:1; /* Non-VGA output device whose power is related to
the VGA device. */
u32 pipe_id:3; /* For VGA multiple-head devices. */
u32 reserved:10; /* Must be 0 */
/*
* The device ID might not actually follow the scheme described by this
* struct acpi_video_device_attrib. If it does, then this bit
* device_id_scheme is set; otherwise, other fields should be ignored.
*
* (but also see the global flag device_id_scheme)
*/
u32 device_id_scheme:1;
};
struct acpi_video_enumerated_device {
union {
u32 int_val;
struct acpi_video_device_attrib attrib;
} value;
struct acpi_video_device *bind_info;
};
struct acpi_video_bus {
struct acpi_device *device;
bool backlight_registered;
u8 dos_setting;
struct acpi_video_enumerated_device *attached_array;
u8 attached_count;
u8 child_count;
struct acpi_video_bus_cap cap;
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
struct list_head entry;
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
};
struct acpi_video_device_flags {
u8 crt:1;
u8 lcd:1;
u8 tvout:1;
u8 dvi:1;
u8 bios:1;
u8 unknown:1;
u8 notify:1;
u8 reserved:1;
};
struct acpi_video_device_cap {
u8 _ADR:1; /* Return the unique ID */
u8 _BCL:1; /* Query list of brightness control levels supported */
u8 _BCM:1; /* Set the brightness level */
u8 _BQC:1; /* Get current brightness level */
u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */
u8 _DDC:1; /* Return the EDID for this device */
};
struct acpi_video_device {
unsigned long device_id;
struct acpi_video_device_flags flags;
struct acpi_video_device_cap cap;
struct list_head entry;
struct delayed_work switch_brightness_work;
int switch_brightness_event;
struct acpi_video_bus *video;
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
struct thermal_cooling_device *cooling_dev;
};
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data);
static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device);
static int acpi_video_device_enumerate(struct acpi_video_bus *video);
static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
struct acpi_video_device *device,
unsigned long long *level, bool raw);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static void acpi_video_switch_brightness(struct work_struct *work);
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long long cur_level;
int i;
struct acpi_video_device *vd = bl_get_data(bd);
if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
return -EINVAL;
for (i = ACPI_VIDEO_FIRST_LEVEL; i < vd->brightness->count; i++) {
if (vd->brightness->levels[i] == cur_level)
return i - ACPI_VIDEO_FIRST_LEVEL;
}
return 0;
}
static int acpi_video_set_brightness(struct backlight_device *bd)
{
int request_level = bd->props.brightness + ACPI_VIDEO_FIRST_LEVEL;
struct acpi_video_device *vd = bl_get_data(bd);
cancel_delayed_work(&vd->switch_brightness_work);
return acpi_video_device_lcd_set_level(vd,
vd->brightness->levels[request_level]);
}
static const struct backlight_ops acpi_backlight_ops = {
.get_brightness = acpi_video_get_brightness,
.update_status = acpi_video_set_brightness,
};
/* thermal cooling device callbacks */
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
}
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
unsigned long long level;
int offset;
if (acpi_video_device_lcd_get_level_current(video, &level, false))
return -EINVAL;
for (offset = ACPI_VIDEO_FIRST_LEVEL; offset < video->brightness->count;
offset++)
if (level == video->brightness->levels[offset]) {
*state = video->brightness->count - offset - 1;
return 0;
}
return -EINVAL;
}
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
int level;
if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
return -EINVAL;
state = video->brightness->count - state;
level = video->brightness->levels[state - 1];
return acpi_video_device_lcd_set_level(video, level);
}
static const struct thermal_cooling_device_ops video_cooling_ops = {
.get_max_state = video_get_max_state,
.get_cur_state = video_get_cur_state,
.set_cur_state = video_set_cur_state,
};
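/*
 * Worked example of the cooling-state mapping above (illustrative):
 * with 5 usable levels the _BCL-derived count is 7 (2 special entries
 * plus 5 levels), so max_state = 4. set_cur_state(0) picks
 * levels[7 - 0 - 1] = levels[6], the brightest usable level, while
 * set_cur_state(4) picks levels[2], the dimmest one.
 */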
/*
* --------------------------------------------------------------------------
* Video Management
* --------------------------------------------------------------------------
*/
static int
acpi_video_device_lcd_query_levels(acpi_handle handle,
union acpi_object **levels)
{
int status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
*levels = NULL;
status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
acpi_handle_info(handle, "Invalid _BCL data\n");
status = -EFAULT;
goto err;
}
*levels = obj;
return 0;
err:
kfree(buffer.pointer);
return status;
}
static int
acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
{
int status;
int state;
status = acpi_execute_simple_method(device->dev->handle,
"_BCM", level);
if (ACPI_FAILURE(status)) {
acpi_handle_info(device->dev->handle, "_BCM evaluation failed\n");
return -EIO;
}
device->brightness->curr = level;
for (state = ACPI_VIDEO_FIRST_LEVEL; state < device->brightness->count;
state++)
if (level == device->brightness->levels[state]) {
if (device->backlight)
device->backlight->props.brightness =
state - ACPI_VIDEO_FIRST_LEVEL;
return 0;
}
acpi_handle_info(device->dev->handle, "Current brightness invalid\n");
return -EINVAL;
}
/*
* For some buggy _BQC methods, we need to add a constant value to
* the _BQC return value to get the actual current brightness level
*/
static int bqc_offset_aml_bug_workaround;
static int video_set_bqc_offset(const struct dmi_system_id *d)
{
bqc_offset_aml_bug_workaround = 9;
return 0;
}
static int video_set_device_id_scheme(const struct dmi_system_id *d)
{
device_id_scheme = true;
return 0;
}
static int video_enable_only_lcd(const struct dmi_system_id *d)
{
only_lcd = true;
return 0;
}
static int video_set_report_key_events(const struct dmi_system_id *id)
{
if (report_key_events == -1)
report_key_events = (uintptr_t)id->driver_data;
return 0;
}
static int video_hw_changes_brightness(
const struct dmi_system_id *d)
{
if (hw_changes_brightness == -1)
hw_changes_brightness = 1;
return 0;
}
static const struct dmi_system_id video_dmi_table[] = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
*/
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5720",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5710Z",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "eMachines E510",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 5315",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
},
},
{
.callback = video_set_bqc_offset,
.ident = "Acer Aspire 7720",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
*/
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=104121 */
.callback = video_set_device_id_scheme,
.ident = "ESPRIMO Mobile M9410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"),
},
},
/*
* Some machines have multiple video output devices, but only the one
* of LCD type can do the backlight control, so we should not
* register a backlight interface for the other video output devices.
*/
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=104121 */
.callback = video_enable_only_lcd,
.ident = "ESPRIMO Mobile M9410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"),
},
},
/*
* Some machines report wrong key events on the acpi-bus, suppress
* key event reporting on these. Note this is only intended to work
* around events which are plain wrong. In some cases we get double
* events, in this case acpi-video is considered the canonical source
* and the events from the other source should be filtered. E.g.
* by calling acpi_video_handles_brightness_key_presses() from the
* vendor acpi/wmi driver or by using /lib/udev/hwdb.d/60-keyboard.hwdb
*/
{
.callback = video_set_report_key_events,
.driver_data = (void *)((uintptr_t)REPORT_OUTPUT_KEY_EVENTS),
.ident = "Dell Vostro V131",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
},
{
.callback = video_set_report_key_events,
.driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
.ident = "Dell Vostro 3350",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
},
/*
* Some machines change the brightness themselves when a brightness
* hotkey gets pressed, despite us telling them not to. In this case
* acpi_video_device_notify() should only call backlight_force_update(
* BACKLIGHT_UPDATE_HOTKEY) and not do anything else.
*/
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */
.callback = video_hw_changes_brightness,
.ident = "Packard Bell EasyNote MZ35",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"),
},
},
{}
};
static unsigned long long
acpi_video_bqc_value_to_level(struct acpi_video_device *device,
unsigned long long bqc_value)
{
unsigned long long level;
if (device->brightness->flags._BQC_use_index) {
/*
* _BQC returns an index that doesn't account for the first 2
* items with special meaning (see enum acpi_video_level_idx),
* so we need to compensate for that by offsetting ourselves
*/
if (device->brightness->flags._BCL_reversed)
bqc_value = device->brightness->count -
ACPI_VIDEO_FIRST_LEVEL - 1 - bqc_value;
level = device->brightness->levels[bqc_value +
ACPI_VIDEO_FIRST_LEVEL];
} else {
level = bqc_value;
}
level += bqc_offset_aml_bug_workaround;
return level;
}
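/*
 * Worked example of the conversion above (illustrative): with
 * count = 7 (5 usable levels at indices 2..6) and _BQC_use_index set,
 * bqc_value = 1 maps to levels[1 + 2] = levels[3]; if _BCL_reversed is
 * also set, bqc_value is first remapped to 7 - 2 - 1 - 1 = 3, giving
 * levels[5] instead.
 */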
static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long long *level, bool raw)
{
acpi_status status = AE_OK;
int i;
if (device->cap._BQC || device->cap._BCQ) {
char *buf = device->cap._BQC ? "_BQC" : "_BCQ";
status = acpi_evaluate_integer(device->dev->handle, buf,
NULL, level);
if (ACPI_SUCCESS(status)) {
if (raw) {
/*
* The caller wants the raw value returned
* by _BQC, so don't mess with it any
* further.
*/
return 0;
}
*level = acpi_video_bqc_value_to_level(device, *level);
for (i = ACPI_VIDEO_FIRST_LEVEL;
i < device->brightness->count; i++)
if (device->brightness->levels[i] == *level) {
device->brightness->curr = *level;
return 0;
}
/*
* BQC returned an invalid level.
* Stop using it.
*/
acpi_handle_info(device->dev->handle,
"%s returned an invalid level", buf);
device->cap._BQC = device->cap._BCQ = 0;
} else {
/*
* Fixme:
* should we return an error or ignore this failure?
* dev->brightness->curr is a cached value which stores
* the correct current backlight level in most cases.
* ACPI video backlight still works w/ buggy _BQC.
* http://bugzilla.kernel.org/show_bug.cgi?id=12233
*/
acpi_handle_info(device->dev->handle,
"%s evaluation failed", buf);
device->cap._BQC = device->cap._BCQ = 0;
}
}
*level = device->brightness->curr;
return 0;
}
static int
acpi_video_device_EDID(struct acpi_video_device *device,
union acpi_object **edid, ssize_t length)
{
int status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
*edid = NULL;
if (!device)
return -ENODEV;
if (length == 128)
arg0.integer.value = 1;
else if (length == 256)
arg0.integer.value = 2;
else
return -EINVAL;
status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
obj = buffer.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
acpi_handle_info(device->dev->handle, "Invalid _DDC data\n");
status = -EFAULT;
kfree(obj);
}
return status;
}
/* bus */
/*
* Arg:
* video : video bus device pointer
* bios_flag :
* 0. The system BIOS should NOT automatically switch (toggle)
* the active display output.
* 1. The system BIOS should automatically switch (toggle) the
* active display output. No switch event.
* 2. The _DGS value should be locked.
* 3. The system BIOS should not automatically switch (toggle) the
* active display output, but instead generate the display switch
* event notify code.
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
* of the LCD when:
* - the power changes from AC to DC (ACPI appendix B)
* - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* 1. The system BIOS should NOT automatically control the brightness
* level of the LCD when:
* - the power changes from AC to DC (ACPI appendix B)
* - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* Return Value:
* -EINVAL wrong arg.
*/
static int
acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
{
acpi_status status;
if (!video->cap._DOS)
return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
video->dos_setting = (lcd_flag << 2) | bios_flag;
status = acpi_execute_simple_method(video->device->handle, "_DOS",
(lcd_flag << 2) | bios_flag);
if (ACPI_FAILURE(status))
return -EIO;
return 0;
}
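/*
 * Encoding example for the _DOS argument built above (illustrative):
 * bios_flag = 3 (generate a display switch event instead of toggling)
 * with lcd_flag = 1 (OS controls brightness) yields
 * (1 << 2) | 3 = 0x7, which is also cached in video->dos_setting.
 */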
/*
* Simple comparison function used to sort backlight levels.
*/
static int
acpi_video_cmp_level(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
/*
* Decides if _BQC/_BCQ for this system is usable
*
* We do this by setting a level first and then reading back the current
* brightness level. If the values do not match, find out whether _BQC is
* returning an index. If not, clear the _BQC/_BCQ capability.
*/
static int acpi_video_bqc_quirk(struct acpi_video_device *device,
int max_level, int current_level)
{
struct acpi_video_device_brightness *br = device->brightness;
int result;
unsigned long long level;
int test_level;
/* don't mess with existing known broken systems */
if (bqc_offset_aml_bug_workaround)
return 0;
/*
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them. However,
* there is a subtlety:
*
* If the _BCL package ordering is descending, the first level
* (br->levels[2]) is likely to be 0, and if the number of levels
* matches the number of steps, we might confuse a returned level to
* mean the index.
*
* For example:
*
* current_level = max_level = 100
* test_level = 0
* returned level = 100
*
* In this case 100 means the level, not the index, and _BCM failed.
* Still, if the _BCL package ordering is descending, the index of
* level 0 is also 100, so we assume _BQC is indexed, when it's not.
*
* This causes all _BQC calls to return bogus values causing weird
* behavior from the user's perspective. For example:
*
* xbacklight -set 10; xbacklight -set 20;
*
* would flash to 90% and then slowly down to the desired level (20).
*
* The solution is simple; test anything other than the first level
* (e.g. 1).
*/
test_level = current_level == max_level
? br->levels[ACPI_VIDEO_FIRST_LEVEL + 1]
: max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)
return result;
result = acpi_video_device_lcd_get_level_current(device, &level, true);
if (result)
return result;
if (level != test_level) {
/* buggy _BQC found, need to find out if it uses index */
if (level < br->count) {
if (br->flags._BCL_reversed)
level = br->count - ACPI_VIDEO_FIRST_LEVEL - 1 - level;
if (br->levels[level + ACPI_VIDEO_FIRST_LEVEL] == test_level)
br->flags._BQC_use_index = 1;
}
if (!br->flags._BQC_use_index)
device->cap._BQC = device->cap._BCQ = 0;
}
return 0;
}
int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = 0;
u32 value;
if (ACPI_FAILURE(acpi_video_device_lcd_query_levels(device->handle, &obj))) {
acpi_handle_debug(device->handle,
"Could not query available LCD brightness level\n");
result = -ENODEV;
goto out;
}
if (obj->package.count < ACPI_VIDEO_FIRST_LEVEL) {
result = -EINVAL;
goto out;
}
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
result = -ENOMEM;
goto out;
}
/*
* Note that we have to reserve 2 extra items (ACPI_VIDEO_FIRST_LEVEL),
* in order to account for buggy BIOSes that don't export the first two
* special levels (see below).
*/
br->levels = kmalloc_array(obj->package.count + ACPI_VIDEO_FIRST_LEVEL,
sizeof(*br->levels),
GFP_KERNEL);
if (!br->levels) {
result = -ENOMEM;
goto out_free;
}
for (i = 0; i < obj->package.count; i++) {
o = (union acpi_object *)&obj->package.elements[i];
if (o->type != ACPI_TYPE_INTEGER) {
acpi_handle_info(device->handle, "Invalid data\n");
continue;
}
value = (u32) o->integer.value;
/* Skip duplicate entries */
if (count > ACPI_VIDEO_FIRST_LEVEL
&& br->levels[count - 1] == value)
continue;
br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
count++;
}
/*
* Some buggy BIOSes don't export the AC/battery levels in the
* _BCL package. In this case, the first two elements of the
* _BCL package are also ordinary supported brightness levels
* that the OS should take care of.
*/
for (i = ACPI_VIDEO_FIRST_LEVEL; i < count; i++) {
if (br->levels[i] == br->levels[ACPI_VIDEO_AC_LEVEL])
level_ac_battery++;
if (br->levels[i] == br->levels[ACPI_VIDEO_BATTERY_LEVEL])
level_ac_battery++;
}
if (level_ac_battery < ACPI_VIDEO_FIRST_LEVEL) {
level_ac_battery = ACPI_VIDEO_FIRST_LEVEL - level_ac_battery;
br->flags._BCL_no_ac_battery_levels = 1;
for (i = (count - 1 + level_ac_battery);
i >= ACPI_VIDEO_FIRST_LEVEL; i--)
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > ACPI_VIDEO_FIRST_LEVEL)
acpi_handle_info(device->handle,
"Too many duplicates in _BCL package");
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[ACPI_VIDEO_FIRST_LEVEL]) {
br->flags._BCL_reversed = 1;
sort(&br->levels[ACPI_VIDEO_FIRST_LEVEL],
count - ACPI_VIDEO_FIRST_LEVEL,
sizeof(br->levels[ACPI_VIDEO_FIRST_LEVEL]),
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
acpi_handle_info(device->handle,
"Found unordered _BCL package");
br->count = count;
*dev_br = br;
if (pmax_level)
*pmax_level = max_level;
out:
kfree(obj);
return result;
out_free:
kfree(br);
goto out;
}
EXPORT_SYMBOL(acpi_video_get_levels);
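/*
 * Worked example of the parsing above (illustrative packages):
 * _BCL = {80, 20, 20, 40, 60, 80, 100} gives AC level 80, battery
 * level 20 and supported levels {20, 40, 60, 80, 100} (count = 7,
 * max_level = 100, already ascending). A buggy
 * _BCL = {20, 40, 60, 80, 100} yields level_ac_battery = 0, so the
 * array is shifted up by two and the first two values double as the
 * AC/battery entries.
 */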
/*
* Arg:
* device : video output device (LCD, CRT, ..)
*
* Return Value:
* Maximum brightness level
*
* Allocate and initialize device->brightness.
*/
static int
acpi_video_init_brightness(struct acpi_video_device *device)
{
int i, max_level = 0;
unsigned long long level, level_old;
struct acpi_video_device_brightness *br = NULL;
int result;
result = acpi_video_get_levels(device->dev, &br, &max_level);
if (result)
return result;
device->brightness = br;
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
br->curr = level = max_level;
if (!device->cap._BQC)
goto set_level;
result = acpi_video_device_lcd_get_level_current(device,
&level_old, true);
if (result)
goto out_free_levels;
result = acpi_video_bqc_quirk(device, max_level, level_old);
if (result)
goto out_free_levels;
/*
* cap._BQC may have been cleared because _BQC was found to be
* broken in acpi_video_bqc_quirk(), so check again here.
*/
if (!device->cap._BQC)
goto set_level;
level = acpi_video_bqc_value_to_level(device, level_old);
/*
* On some buggy laptops, _BQC returns an uninitialized
* value when invoked for the first time, i.e.
* level_old is invalid (no matter whether it's a level
* or an index). Set the backlight to max_level in this case.
*/
for (i = ACPI_VIDEO_FIRST_LEVEL; i < br->count; i++)
if (level == br->levels[i])
break;
if (i == br->count || !level)
level = max_level;
set_level:
result = acpi_video_device_lcd_set_level(device, level);
if (result)
goto out_free_levels;
acpi_handle_debug(device->dev->handle, "found %d brightness levels\n",
br->count - ACPI_VIDEO_FIRST_LEVEL);
return 0;
out_free_levels:
kfree(br->levels);
kfree(br);
device->brightness = NULL;
return result;
}
/*
* Arg:
* device : video output device (LCD, CRT, ..)
*
* Return Value:
* None
*
* Find out all required AML methods defined under the output
* device.
*/
static void acpi_video_device_find_cap(struct acpi_video_device *device)
{
if (acpi_has_method(device->dev->handle, "_ADR"))
device->cap._ADR = 1;
if (acpi_has_method(device->dev->handle, "_BCL"))
device->cap._BCL = 1;
if (acpi_has_method(device->dev->handle, "_BCM"))
device->cap._BCM = 1;
if (acpi_has_method(device->dev->handle, "_BQC")) {
device->cap._BQC = 1;
} else if (acpi_has_method(device->dev->handle, "_BCQ")) {
acpi_handle_info(device->dev->handle,
"_BCQ is used instead of _BQC\n");
device->cap._BCQ = 1;
}
if (acpi_has_method(device->dev->handle, "_DDC"))
device->cap._DDC = 1;
}
/*
* Arg:
* device : video output device (VGA)
*
* Return Value:
* None
*
* Find out all required AML methods defined under the video bus device.
*/
static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
{
if (acpi_has_method(video->device->handle, "_DOS"))
video->cap._DOS = 1;
if (acpi_has_method(video->device->handle, "_DOD"))
video->cap._DOD = 1;
if (acpi_has_method(video->device->handle, "_ROM"))
video->cap._ROM = 1;
if (acpi_has_method(video->device->handle, "_GPD"))
video->cap._GPD = 1;
if (acpi_has_method(video->device->handle, "_SPD"))
video->cap._SPD = 1;
if (acpi_has_method(video->device->handle, "_VPO"))
video->cap._VPO = 1;
}
/*
* Check whether the video bus device has required AML method to
* support the desired features
*/
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
int status = -ENOENT;
struct pci_dev *dev;
if (!video)
return -EINVAL;
dev = acpi_get_pci_dev(video->device->handle);
if (!dev)
return -ENODEV;
pci_dev_put(dev);
/*
* Since there is no HID, CID, etc. for the VGA driver, we have
* to check for well-known required nodes.
*/
/* Does this device support video switching? */
if (video->cap._DOS || video->cap._DOD) {
if (!video->cap._DOS) {
pr_info(FW_BUG "ACPI(%s) defines _DOD but not _DOS\n",
acpi_device_bid(video->device));
}
video->flags.multihead = 1;
status = 0;
}
/* Does this device support retrieving a video ROM? */
if (video->cap._ROM) {
video->flags.rom = 1;
status = 0;
}
/* Does this device support configuring which video device to POST? */
if (video->cap._GPD && video->cap._SPD && video->cap._VPO) {
video->flags.post = 1;
status = 0;
}
return status;
}
/*
* --------------------------------------------------------------------------
* Driver Interface
* --------------------------------------------------------------------------
*/
/* device interface */
static struct acpi_video_device_attrib *
acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if ((ids->value.int_val & 0xffff) == device_id)
return &ids->value.attrib;
}
return NULL;
}
static int
acpi_video_get_device_type(struct acpi_video_bus *video,
unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if ((ids->value.int_val & 0xffff) == device_id)
return ids->value.int_val;
}
return 0;
}
static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
{
struct acpi_video_bus *video = arg;
struct acpi_video_device_attrib *attribute;
struct acpi_video_device *data;
unsigned long long device_id;
acpi_status status;
int device_type;
status = acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
/* Skip devices without _ADR instead of failing. */
if (ACPI_FAILURE(status))
goto exit;
data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
if (!data) {
dev_dbg(&device->dev, "Cannot attach\n");
return -ENOMEM;
}
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = data;
data->device_id = device_id;
data->video = video;
data->dev = device;
INIT_DELAYED_WORK(&data->switch_brightness_work,
acpi_video_switch_brightness);
attribute = acpi_video_get_device_attr(video, device_id);
if (attribute && (attribute->device_id_scheme || device_id_scheme)) {
switch (attribute->display_type) {
case ACPI_VIDEO_DISPLAY_CRT:
data->flags.crt = 1;
break;
case ACPI_VIDEO_DISPLAY_TV:
data->flags.tvout = 1;
break;
case ACPI_VIDEO_DISPLAY_DVI:
data->flags.dvi = 1;
break;
case ACPI_VIDEO_DISPLAY_LCD:
data->flags.lcd = 1;
break;
default:
data->flags.unknown = 1;
break;
}
if (attribute->bios_can_detect)
data->flags.bios = 1;
} else {
/* Check for legacy IDs */
device_type = acpi_video_get_device_type(video, device_id);
/* Ignore bits 16 and 18-20 */
switch (device_type & 0xffe2ffff) {
case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
data->flags.crt = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
data->flags.lcd = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_TV:
data->flags.tvout = 1;
break;
default:
data->flags.unknown = 1;
}
}
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
if (data->cap._BCM && data->cap._BCL)
may_report_brightness_keys = true;
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
exit:
video->child_count++;
return 0;
}
/*
* Arg:
* video : video bus device
*
* Return:
* none
*
* Enumerate the video device list of the video bus,
* bind the ids with the corresponding video devices
* under the video bus.
*/
static void acpi_video_device_rebind(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
acpi_video_device_bind(video, dev);
mutex_unlock(&video->device_list_lock);
}
/*
* Arg:
* video : video bus device
* device : video output device under the video
* bus
*
* Return:
* none
*
* Bind the ids with the corresponding video devices
* under the video bus.
*/
static void
acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if (device->device_id == (ids->value.int_val & 0xffff)) {
ids->bind_info = device;
acpi_handle_debug(video->device->handle, "%s: %d\n",
__func__, i);
}
}
}
static bool acpi_video_device_in_dod(struct acpi_video_device *device)
{
struct acpi_video_bus *video = device->video;
int i;
/*
* If we have a broken _DOD, or we have more than 8 output devices
* under the graphics controller node (which the operation region
* code currently can't properly deal with), there is no need to test.
*/
if (!video->attached_count || video->child_count > 8)
return true;
for (i = 0; i < video->attached_count; i++) {
if ((video->attached_array[i].value.int_val & 0xfff) ==
(device->device_id & 0xfff))
return true;
}
return false;
}
/*
* Arg:
* video : video bus device
*
* Return:
* < 0 : error
*
* Call _DOD to enumerate all devices attached to display adapter
*
*/
static int acpi_video_device_enumerate(struct acpi_video_bus *video)
{
int status;
int count;
int i;
struct acpi_video_enumerated_device *active_list;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *dod = NULL;
union acpi_object *obj;
if (!video->cap._DOD)
return AE_NOT_EXIST;
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_info(video->device->handle,
"_DOD evaluation failed: %s\n",
acpi_format_exception(status));
return status;
}
dod = buffer.pointer;
if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
acpi_handle_info(video->device->handle, "Invalid _DOD data\n");
status = -EFAULT;
goto out;
}
acpi_handle_debug(video->device->handle, "Found %d video heads in _DOD\n",
dod->package.count);
active_list = kcalloc(1 + dod->package.count,
sizeof(struct acpi_video_enumerated_device),
GFP_KERNEL);
if (!active_list) {
status = -ENOMEM;
goto out;
}
count = 0;
for (i = 0; i < dod->package.count; i++) {
obj = &dod->package.elements[i];
if (obj->type != ACPI_TYPE_INTEGER) {
acpi_handle_info(video->device->handle,
"Invalid _DOD data in element %d\n", i);
continue;
}
active_list[count].value.int_val = obj->integer.value;
active_list[count].bind_info = NULL;
acpi_handle_debug(video->device->handle,
"_DOD element[%d] = %d\n", i,
(int)obj->integer.value);
count++;
}
kfree(video->attached_array);
video->attached_array = active_list;
video->attached_count = count;
out:
kfree(buffer.pointer);
return status;
}
static int
acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event)
{
int min, max, min_above, max_below, i, l, delta = 255;
max = max_below = 0;
min = min_above = 255;
/* Find closest level to level_current */
for (i = ACPI_VIDEO_FIRST_LEVEL; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (abs(l - level_current) < abs(delta)) {
delta = l - level_current;
if (!delta)
break;
}
}
/* Adjust level_current to closest available level */
level_current += delta;
for (i = ACPI_VIDEO_FIRST_LEVEL; i < device->brightness->count; i++) {
l = device->brightness->levels[i];
if (l < min)
min = l;
if (l > max)
max = l;
if (l < min_above && l > level_current)
min_above = l;
if (l > max_below && l < level_current)
max_below = l;
}
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:
return (level_current < max) ? min_above : min;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS:
return (level_current < max) ? min_above : max;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS:
return (level_current > min) ? max_below : min;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS:
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF:
return 0;
default:
return level_current;
}
}
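/*
* Worked example (illustrative values, not from any real _BCL): with
* brightness levels { 20, 40, 60, 80, 100 } and level_current == 45, the
* first loop finds 40 as the closest level (delta == -5), so level_current
* is snapped to 40. The second loop then yields min == 20, max == 100,
* min_above == 60 and max_below == 20, so an INC event returns 60, a DEC
* event returns 20, and a CYCLE event returns 60, wrapping back to 20 only
* once level_current reaches max.
*/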
static void
acpi_video_switch_brightness(struct work_struct *work)
{
struct acpi_video_device *device = container_of(to_delayed_work(work),
struct acpi_video_device, switch_brightness_work);
unsigned long long level_current, level_next;
int event = device->switch_brightness_event;
int result = -EINVAL;
/* no warning message if acpi_backlight=vendor or a quirk is used */
if (!device->backlight)
return;
if (!device->brightness)
goto out;
result = acpi_video_device_lcd_get_level_current(device,
&level_current,
false);
if (result)
goto out;
level_next = acpi_video_get_next_level(device, level_current, event);
result = acpi_video_device_lcd_set_level(device, level_next);
if (!result)
backlight_force_update(device->backlight,
BACKLIGHT_UPDATE_HOTKEY);
out:
if (result)
acpi_handle_info(device->dev->handle,
"Failed to switch brightness\n");
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
void **edid)
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
union acpi_object *buffer = NULL;
acpi_status status;
int i, length;
if (!device || !acpi_driver_data(device))
return -EINVAL;
video = acpi_driver_data(device);
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
length = 256;
if (!video_device)
continue;
if (!video_device->cap._DDC)
continue;
if (type) {
switch (type) {
case ACPI_VIDEO_DISPLAY_CRT:
if (!video_device->flags.crt)
continue;
break;
case ACPI_VIDEO_DISPLAY_TV:
if (!video_device->flags.tvout)
continue;
break;
case ACPI_VIDEO_DISPLAY_DVI:
if (!video_device->flags.dvi)
continue;
break;
case ACPI_VIDEO_DISPLAY_LCD:
if (!video_device->flags.lcd)
continue;
break;
}
} else if (video_device->device_id != device_id) {
continue;
}
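/*
* Try a 256-byte (two-block) EDID first; if that fails, retry below
* with a single 128-byte block.
*/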
status = acpi_video_device_EDID(video_device, &buffer, length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
length = 128;
status = acpi_video_device_EDID(video_device, &buffer,
length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
continue;
}
}
*edid = buffer->buffer.pointer;
return length;
}
return -ENODEV;
}
EXPORT_SYMBOL(acpi_video_get_edid);
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
{
/*
* There are systems where the video module is known to work fine
* regardless of a broken _DOD, and ignoring the returned value here
* doesn't cause any issues later.
*/
acpi_video_device_enumerate(video);
return acpi_dev_for_each_child(device, acpi_video_bus_get_one_device, video);
}
/* acpi_video interface */
/*
* Win8 requires setting bit2 of _DOS to let firmware know it shouldn't
* perform any automatic brightness change on receiving a notification.
*/
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
acpi_osi_is_win8() ? 1 : 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
acpi_osi_is_win8() ? 0 : 1);
}
static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_video_bus *video = acpi_driver_data(device);
struct input_dev *input;
int keycode = 0;
if (!video || !video->input)
return;
input = video->input;
switch (event) {
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
* connector. */
acpi_video_device_enumerate(video);
acpi_video_device_rebind(video);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
keycode = KEY_VIDEO_NEXT;
break;
case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
keycode = KEY_VIDEO_PREV;
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
break;
}
if (acpi_notifier_call_chain(device, event, 0))
/* Something vetoed the keypress. */
keycode = 0;
if (keycode && (report_key_events & REPORT_OUTPUT_KEY_EVENTS)) {
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
}
}
static void brightness_switch_event(struct acpi_video_device *video_device,
u32 event)
{
if (!brightness_switch_enabled)
return;
video_device->switch_brightness_event = event;
schedule_delayed_work(&video_device->switch_brightness_work, HZ / 10);
}
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_video_device *video_device = data;
struct acpi_device *device = NULL;
struct acpi_video_bus *bus;
struct input_dev *input;
int keycode = 0;
if (!video_device)
return;
device = video_device->dev;
bus = video_device->video;
input = bus->input;
if (hw_changes_brightness > 0) {
if (video_device->backlight)
backlight_force_update(video_device->backlight,
BACKLIGHT_UPDATE_HOTKEY);
acpi_notifier_call_chain(device, event, 0);
return;
}
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
brightness_switch_event(video_device, event);
keycode = KEY_DISPLAY_OFF;
break;
default:
acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
break;
}
if (keycode)
may_report_brightness_keys = true;
acpi_notifier_call_chain(device, event, 0);
if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
}
}
static int acpi_video_resume(struct notifier_block *nb,
unsigned long val, void *ign)
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
int i;
switch (val) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
case PM_POST_RESTORE:
video = container_of(nb, struct acpi_video_bus, pm_nb);
dev_info(&video->device->dev, "Restoring backlight state\n");
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
if (video_device && video_device->brightness)
acpi_video_device_lcd_set_level(video_device,
video_device->brightness->curr);
}
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
static acpi_status
acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
void **return_value)
{
struct acpi_device *device = context;
struct acpi_device *sibling;
if (handle == device->handle)
return AE_CTRL_TERMINATE;
sibling = acpi_fetch_acpi_dev(handle);
if (!sibling)
return AE_OK;
if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
return AE_ALREADY_EXISTS;
return AE_OK;
}
static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
{
struct backlight_properties props;
struct pci_dev *pdev;
acpi_handle acpi_parent;
struct device *parent = NULL;
int result;
static int count;
char *name;
result = acpi_video_init_brightness(device);
if (result)
return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
count++;
acpi_get_parent(device->dev->handle, &acpi_parent);
pdev = acpi_get_pci_dev(acpi_parent);
if (pdev) {
parent = &pdev->dev;
pci_dev_put(pdev);
}
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_FIRMWARE;
props.max_brightness =
device->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
device->backlight = backlight_device_register(name,
parent,
device,
&acpi_backlight_ops,
&props);
kfree(name);
if (IS_ERR(device->backlight)) {
device->backlight = NULL;
return;
}
/*
* Save current brightness level in case we have to restore it
* before acpi_video_device_lcd_set_level() is called next time.
*/
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.
* Also, why the hell are we returning early instead of attempting
* to register the video output if cooling device registration failed?
* -- dtor
*/
device->cooling_dev = NULL;
return;
}
dev_info(&device->dev->dev, "registered as cooling_device%d\n",
device->cooling_dev->id);
result = sysfs_create_link(&device->dev->dev.kobj,
&device->cooling_dev->device.kobj,
"thermal_cooling");
if (result)
pr_info("sysfs link creation failed\n");
result = sysfs_create_link(&device->cooling_dev->device.kobj,
&device->dev->dev.kobj, "device");
if (result)
pr_info("Reverse sysfs link creation failed\n");
}
static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
union acpi_object *levels;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
kfree(levels);
}
mutex_unlock(&video->device_list_lock);
}
static bool acpi_video_should_register_backlight(struct acpi_video_device *dev)
{
/*
* Do not create a backlight device for a video output
* device that is not in the list enumerated by _DOD.
*/
if (!acpi_video_device_in_dod(dev)) {
dev_dbg(&dev->dev->dev, "not in _DOD list, ignore\n");
return false;
}
if (only_lcd)
return dev->flags.lcd;
return true;
}
static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
if (video->backlight_registered)
return 0;
if (acpi_video_get_backlight_type() != acpi_backlight_video)
return 0;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
if (acpi_video_should_register_backlight(dev))
acpi_video_dev_register_backlight(dev);
}
mutex_unlock(&video->device_list_lock);
video->backlight_registered = true;
video->pm_nb.notifier_call = acpi_video_resume;
video->pm_nb.priority = 0;
return register_pm_notifier(&video->pm_nb);
}
static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
{
if (device->backlight) {
backlight_device_unregister(device->backlight);
device->backlight = NULL;
}
if (device->brightness) {
kfree(device->brightness->levels);
kfree(device->brightness);
device->brightness = NULL;
}
if (device->cooling_dev) {
sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
thermal_cooling_device_unregister(device->cooling_dev);
device->cooling_dev = NULL;
}
}
static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
int error;
if (!video->backlight_registered)
return 0;
error = unregister_pm_notifier(&video->pm_nb);
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
acpi_video_dev_unregister_backlight(dev);
mutex_unlock(&video->device_list_lock);
video->backlight_registered = false;
return error;
}
static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
{
acpi_status status;
struct acpi_device *adev = device->dev;
status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
acpi_video_device_notify, device);
if (ACPI_FAILURE(status))
dev_err(&adev->dev, "Error installing notify handler\n");
else
device->flags.notify = 1;
}
static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
{
struct input_dev *input;
struct acpi_video_device *dev;
int error;
video->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto out;
}
error = acpi_video_bus_start_devices(video);
if (error)
goto err_free_input;
snprintf(video->phys, sizeof(video->phys),
"%s/video/input0", acpi_device_hid(video->device));
input->name = acpi_device_name(video->device);
input->phys = video->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &video->device->dev;
input->evbit[0] = BIT(EV_KEY);
set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
set_bit(KEY_VIDEO_NEXT, input->keybit);
set_bit(KEY_VIDEO_PREV, input->keybit);
set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
set_bit(KEY_BRIGHTNESSUP, input->keybit);
set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
set_bit(KEY_DISPLAY_OFF, input->keybit);
error = input_register_device(input);
if (error)
goto err_stop_dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
acpi_video_dev_add_notify_handler(dev);
mutex_unlock(&video->device_list_lock);
return 0;
err_stop_dev:
acpi_video_bus_stop_devices(video);
err_free_input:
input_free_device(input);
video->input = NULL;
out:
return error;
}
static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
{
if (dev->flags.notify) {
acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
dev->flags.notify = 0;
}
}
static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
{
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
acpi_video_dev_remove_notify_handler(dev);
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
input_unregister_device(video->input);
video->input = NULL;
}
static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
{
struct acpi_video_device *dev, *next;
mutex_lock(&video->device_list_lock);
list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
list_del(&dev->entry);
kfree(dev);
}
mutex_unlock(&video->device_list_lock);
return 0;
}
static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
bool auto_detect;
int error;
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
acpi_dev_parent(device)->handle, 1,
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
pr_info(FW_BUG
"Duplicate ACPI video bus devices for the same VGA controller, please try module parameter \"video.allow_duplicates=1\" if the current driver doesn't work.\n");
if (!allow_duplicates)
return -ENODEV;
}
video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
if (!video)
return -ENOMEM;
/* a hack to fix the duplicate name "VID" problem on T61 */
if (!strcmp(device->pnp.bus_id, "VID")) {
if (instance)
device->pnp.bus_id[3] = '0' + instance;
instance++;
}
/* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
if (!strcmp(device->pnp.bus_id, "VGA")) {
if (instance)
device->pnp.bus_id[3] = '0' + instance;
instance++;
}
video->device = device;
strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = video;
acpi_video_bus_find_cap(video);
error = acpi_video_bus_check(video);
if (error)
goto err_free_video;
mutex_init(&video->device_list_lock);
INIT_LIST_HEAD(&video->video_device_list);
error = acpi_video_bus_get_devices(video, device);
if (error)
goto err_put_video;
/*
* HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
* evaluated to have functional panel brightness control.
*/
acpi_device_fix_up_power_extended(device);
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
video->flags.post ? "yes" : "no");
mutex_lock(&video_list_lock);
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
/*
* If backlight-type auto-detection is used then a native backlight may
* show up later and this may change the result from video to native.
* Therefore normally the userspace-visible /sys/class/backlight device
* gets registered separately by the GPU driver calling
* acpi_video_register_backlight() when an internal panel is detected.
* Register the backlight now when not using auto-detection, so that
* when the kernel cmdline or DMI-quirks are used the backlight will
* get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
!auto_detect)
acpi_video_bus_register_backlight(video);
acpi_video_bus_add_notify_handler(video);
error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
if (error)
goto err_remove;
return 0;
err_remove:
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
err_free_video:
kfree(video);
device->driver_data = NULL;
return error;
}
static void acpi_video_bus_remove(struct acpi_device *device)
{
struct acpi_video_bus *video = NULL;
if (!device || !acpi_driver_data(device))
return;
video = acpi_driver_data(device);
acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
kfree(video);
}
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
return 1;
if (dev->device == 0x7000)
return 1;
return 0;
}
static int __init intel_opregion_present(void)
{
int opregion = 0;
struct pci_dev *dev = NULL;
u32 address;
for_each_pci_dev(dev) {
if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
continue;
if (dev->vendor != PCI_VENDOR_ID_INTEL)
continue;
/* We don't want to poke around undefined i740 registers */
if (is_i740(dev))
continue;
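/*
* Config offset 0xfc is the ASLS register, which holds the
* physical address of the OpRegion; a zero value means no
* OpRegion is present.
*/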
pci_read_config_dword(dev, 0xfc, &address);
if (!address)
continue;
opregion = 1;
}
return opregion;
}
/* Check if the chassis-type indicates there is no builtin LCD panel */
static bool dmi_is_desktop(void)
{
const char *chassis_type;
unsigned long type;
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
if (!chassis_type)
return false;
if (kstrtoul(chassis_type, 10, &type) != 0)
return false;
switch (type) {
case 0x03: /* Desktop */
case 0x04: /* Low Profile Desktop */
case 0x05: /* Pizza Box */
case 0x06: /* Mini Tower */
case 0x07: /* Tower */
case 0x10: /* Lunch Box */
case 0x11: /* Main Server Chassis */
return true;
}
return false;
}
/*
* We're seeing a lot of bogus backlight interfaces on newer machines
* without an LCD such as desktops, servers and HDMI sticks. Checking the
* lcd flag fixes this, so enable it by default on any machines which are:
* 1. Win8 ready (where we also prefer the native backlight driver, so
*    normally the acpi_video code should not register there anyway); *and*
* 2.1 Report a desktop/server DMI chassis-type, or
* 2.2 Are an ACPI-reduced-hardware platform (and thus won't use the EC for
*     backlight control)
*/
static bool should_check_lcd_flag(void)
{
if (!acpi_osi_is_win8())
return false;
if (dmi_is_desktop())
return true;
if (acpi_reduced_hardware())
return true;
return false;
}
int acpi_video_register(void)
{
int ret = 0;
mutex_lock(®ister_count_mutex);
if (register_count) {
/*
* If acpi_video_register() has already been called, don't
* register the acpi_video_bus again and return no error.
*/
goto leave;
}
if (only_lcd == -1)
only_lcd = should_check_lcd_flag();
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
if (ret)
goto leave;
/*
* When the acpi_video_bus driver is loaded successfully, increment
* the reference counter.
*/
register_count = 1;
leave:
mutex_unlock(®ister_count_mutex);
return ret;
}
EXPORT_SYMBOL(acpi_video_register);
void acpi_video_unregister(void)
{
mutex_lock(®ister_count_mutex);
if (register_count) {
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
may_report_brightness_keys = false;
}
mutex_unlock(®ister_count_mutex);
}
EXPORT_SYMBOL(acpi_video_unregister);
void acpi_video_register_backlight(void)
{
struct acpi_video_bus *video;
mutex_lock(&video_list_lock);
list_for_each_entry(video, &video_bus_head, entry)
acpi_video_bus_register_backlight(video);
mutex_unlock(&video_list_lock);
}
EXPORT_SYMBOL(acpi_video_register_backlight);
bool acpi_video_handles_brightness_key_presses(void)
{
return may_report_brightness_keys &&
(report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
}
EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);
/*
* This is kind of nasty. Hardware using Intel chipsets may require
* the video opregion code to be run first in order to initialise
* state before any ACPI video calls are made. To handle this we defer
* registration of the video class until the opregion code has run.
*/
static int __init acpi_video_init(void)
{
/*
* Let the module load even if ACPI is disabled (e.g. due to
* a broken BIOS) so that i915.ko can still be loaded on such
* old systems without an AcpiOpRegion.
*
* acpi_video_register() will report -ENODEV later as well due
* to acpi_disabled when i915.ko tries to register itself afterwards.
*/
if (acpi_disabled)
return 0;
if (intel_opregion_present())
return 0;
return acpi_video_register();
}
static void __exit acpi_video_exit(void)
{
acpi_video_unregister();
}
module_init(acpi_video_init);
module_exit(acpi_video_exit);
| linux-master | drivers/acpi/acpi_video.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Extended Error Log driver
*
* Copyright (C) 2013 Intel Corp.
* Author: Chen, Gong <[email protected]>
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
#define EXT_ELOG_ENTRY_MASK GENMASK_ULL(51, 0) /* elog entry address mask */
#define EXTLOG_DSM_REV 0x0
#define EXTLOG_FN_ADDR 0x1
#define FLAG_OS_OPTIN BIT(0)
#define ELOG_ENTRY_VALID (1ULL<<63)
#define ELOG_ENTRY_LEN 0x1000
#define EMCA_BUG \
"Can not request iomem region <0x%016llx-0x%016llx> - eMCA disabled\n"
struct extlog_l1_head {
u32 ver; /* Header Version */
u32 hdr_len; /* Header Length */
u64 total_len; /* entire L1 Directory length including this header */
u64 elog_base; /* MCA Error Log Directory base address */
u64 elog_len; /* MCA Error Log Directory length */
u32 flags; /* bit 0 - OS/VMM Opt-in */
u8 rev0[12];
u32 entries; /* Valid L1 Directory entries per logical processor */
u8 rev1[12];
};
static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295";
/* L1 table related physical address */
static u64 elog_base;
static size_t elog_size;
static u64 l1_dirbase;
static size_t l1_size;
/* L1 table related virtual address */
static void __iomem *extlog_l1_addr;
static void __iomem *elog_addr;
static void *elog_buf;
static u64 *l1_entry_base;
static u32 l1_percpu_entry;
#define ELOG_IDX(cpu, bank) \
(cpu_physical_id(cpu) * l1_percpu_entry + (bank))
#define ELOG_ENTRY_DATA(idx) \
(*(l1_entry_base + (idx)))
#define ELOG_ENTRY_ADDR(phyaddr) \
(phyaddr - elog_base + (u8 *)elog_addr)
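/*
* Index arithmetic example (illustrative values): with l1_percpu_entry == 24,
* MCA bank 5 of a CPU whose physical id is 2 maps to L1 directory entry
* 2 * 24 + 5 == 53, and ELOG_ENTRY_DATA(53) then yields the 64-bit entry
* descriptor for that (cpu, bank) pair.
*/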
static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
{
int idx;
u64 data;
struct acpi_hest_generic_status *estatus;
WARN_ON(cpu < 0);
idx = ELOG_IDX(cpu, bank);
data = ELOG_ENTRY_DATA(idx);
if ((data & ELOG_ENTRY_VALID) == 0)
return NULL;
data &= EXT_ELOG_ENTRY_MASK;
estatus = (struct acpi_hest_generic_status *)ELOG_ENTRY_ADDR(data);
/* If there is no valid data in the elog entry, just return. */
if (estatus->block_status == 0)
return NULL;
return estatus;
}
static void __print_extlog_rcd(const char *pfx,
struct acpi_hest_generic_status *estatus, int cpu)
{
static atomic_t seqno;
unsigned int curr_seqno;
char pfx_seq[64];
if (!pfx) {
if (estatus->error_severity <= CPER_SEV_CORRECTED)
pfx = KERN_INFO;
else
pfx = KERN_ERR;
}
curr_seqno = atomic_inc_return(&seqno);
snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}", pfx, curr_seqno);
printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
cper_estatus_print(pfx_seq, estatus);
}
static int print_extlog_rcd(const char *pfx,
struct acpi_hest_generic_status *estatus, int cpu)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
struct ratelimit_state *ratelimit;
if (estatus->error_severity == CPER_SEV_CORRECTED ||
(estatus->error_severity == CPER_SEV_INFORMATIONAL))
ratelimit = &ratelimit_corrected;
else
ratelimit = &ratelimit_uncorrected;
if (__ratelimit(ratelimit)) {
__print_extlog_rcd(pfx, estatus, cpu);
return 0;
}
return 1;
}
static int extlog_print(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
int bank = mce->bank;
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const guid_t *fru_id;
char *fru_text;
guid_t *sec_type;
static u32 err_seq;
estatus = extlog_elog_entry_check(cpu, bank);
if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
return NOTIFY_DONE;
memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
/* clear record status to enable BIOS to update it again */
estatus->block_status = 0;
tmp = (struct acpi_hest_generic_status *)elog_buf;
if (!ras_userspace_consumers()) {
print_extlog_rcd(NULL, tmp, cpu);
goto out;
}
/* log event via trace */
err_seq++;
apei_estatus_for_each_section(tmp, gdata) {
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
else
fru_id = &guid_null;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
else
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = acpi_hest_get_payload(gdata);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
}
}
out:
mce->kflags |= MCE_HANDLED_EXTLOG;
return NOTIFY_OK;
}
static bool __init extlog_get_l1addr(void)
{
guid_t guid;
acpi_handle handle;
union acpi_object *obj;
if (guid_parse(extlog_dsm_uuid, &guid))
return false;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return false;
if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
return false;
obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
if (!obj) {
return false;
} else {
l1_dirbase = obj->integer.value;
ACPI_FREE(obj);
}
/* Spec says L1 directory must be 4K aligned, bail out if it isn't */
if (l1_dirbase & ((1 << 12) - 1)) {
pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
l1_dirbase);
return false;
}
return true;
}
static struct notifier_block extlog_mce_dec = {
.notifier_call = extlog_print,
.priority = MCE_PRIO_EXTLOG,
};
static int __init extlog_init(void)
{
struct extlog_l1_head *l1_head;
void __iomem *extlog_l1_hdr;
size_t l1_hdr_size;
struct resource *r;
u64 cap;
int rc;
if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
rc = -EINVAL;
/* get L1 header to fetch necessary information */
l1_hdr_size = sizeof(struct extlog_l1_head);
r = request_mem_region(l1_dirbase, l1_hdr_size, "L1 DIR HDR");
if (!r) {
pr_warn(FW_BUG EMCA_BUG,
(unsigned long long)l1_dirbase,
(unsigned long long)l1_dirbase + l1_hdr_size);
goto err;
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
elog_base = l1_head->elog_base;
elog_size = l1_head->elog_len;
acpi_os_unmap_iomem(extlog_l1_hdr, l1_hdr_size);
release_mem_region(l1_dirbase, l1_hdr_size);
/* remap L1 header again based on completed information */
r = request_mem_region(l1_dirbase, l1_size, "L1 Table");
if (!r) {
pr_warn(FW_BUG EMCA_BUG,
(unsigned long long)l1_dirbase,
(unsigned long long)l1_dirbase + l1_size);
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
r = request_mem_region(elog_base, elog_size, "Elog Table");
if (!r) {
pr_warn(FW_BUG EMCA_BUG,
(unsigned long long)elog_base,
(unsigned long long)elog_base + elog_size);
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
rc = -ENOMEM;
/* allocate buffer to save elog record */
elog_buf = kmalloc(ELOG_ENTRY_LEN, GFP_KERNEL);
if (elog_buf == NULL)
goto err_release_elog;
mce_register_decode_chain(&extlog_mce_dec);
/* opt in, allowing the OS to take over error-log management from the BIOS */
((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
return 0;
err_release_elog:
if (elog_addr)
acpi_os_unmap_iomem(elog_addr, elog_size);
release_mem_region(elog_base, elog_size);
err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
}
static void __exit extlog_exit(void)
{
mce_unregister_decode_chain(&extlog_mce_dec);
((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
if (elog_addr)
acpi_os_unmap_iomem(elog_addr, elog_size);
release_mem_region(elog_base, elog_size);
release_mem_region(l1_dirbase, l1_size);
kfree(elog_buf);
}
module_init(extlog_init);
module_exit(extlog_exit);
MODULE_AUTHOR("Chen, Gong <[email protected]>");
MODULE_DESCRIPTION("Extended MCA Error Log Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/acpi_extlog.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* battery.c - ACPI Battery Driver (Revision: 2.0)
*
* Copyright (C) 2007 Alexey Starikovskiy <[email protected]>
* Copyright (C) 2004-2007 Vladimir Lebedev <[email protected]>
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: battery: " fmt
#include <linux/async.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <linux/acpi.h>
#include <linux/power_supply.h>
#include <acpi/battery.h>
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
#define ACPI_BATTERY_DEVICE_NAME "Battery"
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
#define ACPI_BATTERY_STATE_DISCHARGING 0x1
#define ACPI_BATTERY_STATE_CHARGING 0x2
#define ACPI_BATTERY_STATE_CRITICAL 0x4
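/* The state field is a bitmask, e.g. 0x5 means discharging and critical low. */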
#define MAX_STRING_LENGTH 64
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
/* Microsoft Surface Go 3 */
{"MSHW0146", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, battery_device_ids);
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
/* On Lenovo Thinkpad models from 2010 and 2011, the power unit
* switches between mWh and mAh depending on whether the system
* is running on battery or not. When mAh is the unit, most
* reported values are incorrect and need to be adjusted by
* 10000/design_voltage. Verified on x201, t410, t410s, and x220.
* Pre-2010 and 2012 models appear to always report in mWh and
* are thus unaffected (tested with t42, t61, t500, x200, x300,
* and x230). Also, in mid-2012 Lenovo issued a BIOS update for
* the 2011 models that fixes the issue (tested on x220 with a
* post-1.29 BIOS), but as of Nov. 2012, no such update is
* available for the 2010 models. (A worked example of the adjustment
* follows this enum.)
*/
ACPI_BATTERY_QUIRK_THINKPAD_MAH,
/* For batteries that report the design capacity as the current
* capacity on a full charge, while still showing degradation in the
* full charge capacity.
*/
ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE,
};
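/*
* Example of the ACPI_BATTERY_QUIRK_THINKPAD_MAH adjustment (illustrative
* numbers): for a 57 Wh pack with design_voltage == 10800 mV, the affected
* firmware reports a capacity of 5700 in its bogus mAh mode (the mWh value
* divided by 10, which is what find_battery() below checks against the DMI
* data). The driver then computes 5700 * 10000 / 10800 ~= 5277 mAh, which
* matches the true 57000 mWh / 10.8 V.
*/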
struct acpi_battery {
struct mutex lock;
struct mutex sysfs_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
struct notifier_block pm_nb;
struct list_head list;
unsigned long update_time;
int revision;
int rate_now;
int capacity_now;
int voltage_now;
int design_capacity;
int full_charge_capacity;
int technology;
int design_voltage;
int design_capacity_warning;
int design_capacity_low;
int cycle_count;
int measurement_accuracy;
int max_sampling_time;
int min_sampling_time;
int max_averaging_interval;
int min_averaging_interval;
int capacity_granularity_1;
int capacity_granularity_2;
int alarm;
char model_number[MAX_STRING_LENGTH];
char serial_number[MAX_STRING_LENGTH];
char type[MAX_STRING_LENGTH];
char oem_info[MAX_STRING_LENGTH];
int state;
int power_unit;
unsigned long flags;
};
#define to_acpi_battery(x) power_supply_get_drvdata(x)
static inline int acpi_battery_present(struct acpi_battery *battery)
{
return battery->device->status.battery_present;
}
static int acpi_battery_technology(struct acpi_battery *battery)
{
if (!strcasecmp("NiCd", battery->type))
return POWER_SUPPLY_TECHNOLOGY_NiCd;
if (!strcasecmp("NiMH", battery->type))
return POWER_SUPPLY_TECHNOLOGY_NiMH;
if (!strcasecmp("LION", battery->type))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strncasecmp("LI-ION", battery->type, 6))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strcasecmp("LiP", battery->type))
return POWER_SUPPLY_TECHNOLOGY_LIPO;
return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
}
static int acpi_battery_get_state(struct acpi_battery *battery);
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
/* charging, discharging or critical low */
if (battery->state != 0)
return 0;
/* battery not reporting charge */
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
battery->capacity_now == 0)
return 0;
/* good batteries update full_charge as the batteries degrade */
if (battery->full_charge_capacity == battery->capacity_now)
return 1;
/* fallback to using design values for broken batteries */
if (battery->design_capacity <= battery->capacity_now)
return 1;
/* we don't do any sort of metric based on percentages */
return 0;
}
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
static int acpi_battery_handle_discharging(struct acpi_battery *battery)
{
/*
* Some devices wrongly report discharging if the battery's charge
* level was above the device's start-charging threshold at the moment
* the AC adapter was plugged in, so the device did not start a new
* charge cycle.
*/
if ((battery_ac_is_broken || power_supply_is_system_supplied()) &&
battery->rate_now == 0)
return POWER_SUPPLY_STATUS_NOT_CHARGING;
return POWER_SUPPLY_STATUS_DISCHARGING;
}
static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
/* run battery update only if it is present */
acpi_battery_get_state(battery);
} else if (psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
val->intval = acpi_battery_handle_discharging(battery);
else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = acpi_battery_present(battery);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = acpi_battery_technology(battery);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
val->intval = battery->cycle_count;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->design_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->voltage_now * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_ENERGY_NOW:
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
full_capacity = battery->full_charge_capacity;
else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_capacity = battery->design_capacity;
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->capacity_now * 100/
full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
else if (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
(battery->capacity_now <= battery->alarm))
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
else
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
break;
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = battery->oem_info;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = battery->serial_number;
break;
default:
ret = -EINVAL;
}
return ret;
}
static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
/* Battery Management */
struct acpi_offsets {
size_t offset; /* offset inside struct acpi_battery */
u8 mode; /* int or string? */
};
static const struct acpi_offsets state_offsets[] = {
{offsetof(struct acpi_battery, state), 0},
{offsetof(struct acpi_battery, rate_now), 0},
{offsetof(struct acpi_battery, capacity_now), 0},
{offsetof(struct acpi_battery, voltage_now), 0},
};
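/*
* For example, a _BST return package of { 2, 1500, 43000, 12000 } fills in,
* via state_offsets, state == 0x2 (charging), rate_now == 1500,
* capacity_now == 43000 and voltage_now == 12000 (illustrative values; the
* units depend on power_unit).
*/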
static const struct acpi_offsets info_offsets[] = {
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
{offsetof(struct acpi_battery, technology), 0},
{offsetof(struct acpi_battery, design_voltage), 0},
{offsetof(struct acpi_battery, design_capacity_warning), 0},
{offsetof(struct acpi_battery, design_capacity_low), 0},
{offsetof(struct acpi_battery, capacity_granularity_1), 0},
{offsetof(struct acpi_battery, capacity_granularity_2), 0},
{offsetof(struct acpi_battery, model_number), 1},
{offsetof(struct acpi_battery, serial_number), 1},
{offsetof(struct acpi_battery, type), 1},
{offsetof(struct acpi_battery, oem_info), 1},
};
static const struct acpi_offsets extended_info_offsets[] = {
{offsetof(struct acpi_battery, revision), 0},
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
{offsetof(struct acpi_battery, technology), 0},
{offsetof(struct acpi_battery, design_voltage), 0},
{offsetof(struct acpi_battery, design_capacity_warning), 0},
{offsetof(struct acpi_battery, design_capacity_low), 0},
{offsetof(struct acpi_battery, cycle_count), 0},
{offsetof(struct acpi_battery, measurement_accuracy), 0},
{offsetof(struct acpi_battery, max_sampling_time), 0},
{offsetof(struct acpi_battery, min_sampling_time), 0},
{offsetof(struct acpi_battery, max_averaging_interval), 0},
{offsetof(struct acpi_battery, min_averaging_interval), 0},
{offsetof(struct acpi_battery, capacity_granularity_1), 0},
{offsetof(struct acpi_battery, capacity_granularity_2), 0},
{offsetof(struct acpi_battery, model_number), 1},
{offsetof(struct acpi_battery, serial_number), 1},
{offsetof(struct acpi_battery, type), 1},
{offsetof(struct acpi_battery, oem_info), 1},
};
static int extract_package(struct acpi_battery *battery,
union acpi_object *package,
const struct acpi_offsets *offsets, int num)
{
int i;
union acpi_object *element;
if (package->type != ACPI_TYPE_PACKAGE)
return -EFAULT;
for (i = 0; i < num; ++i) {
if (package->package.count <= i)
return -EFAULT;
element = &package->package.elements[i];
if (offsets[i].mode) {
u8 *ptr = (u8 *)battery + offsets[i].offset;
u32 len = MAX_STRING_LENGTH;
switch (element->type) {
case ACPI_TYPE_BUFFER:
if (len > element->buffer.length + 1)
len = element->buffer.length + 1;
fallthrough;
case ACPI_TYPE_STRING:
strscpy(ptr, element->string.pointer, len);
break;
case ACPI_TYPE_INTEGER:
strscpy(ptr, (u8 *)&element->integer.value, sizeof(u64) + 1);
break;
default:
*ptr = 0; /* no value available */
}
} else {
int *x = (int *)((u8 *)battery + offsets[i].offset);
*x = (element->type == ACPI_TYPE_INTEGER) ?
element->integer.value : -1;
}
}
return 0;
}
static int acpi_battery_get_status(struct acpi_battery *battery)
{
if (acpi_bus_get_status(battery->device)) {
acpi_handle_info(battery->device->handle,
"_STA evaluation failed\n");
return -ENODEV;
}
return 0;
}
static int extract_battery_info(const int use_bix,
struct acpi_battery *battery,
const struct acpi_buffer *buffer)
{
int result = -EFAULT;
if (use_bix && battery_bix_broken_package)
result = extract_package(battery, buffer->pointer,
extended_info_offsets + 1,
ARRAY_SIZE(extended_info_offsets) - 1);
else if (use_bix)
result = extract_package(battery, buffer->pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
else
result = extract_package(battery, buffer->pointer,
info_offsets, ARRAY_SIZE(info_offsets));
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
battery->power_unit && battery->design_voltage) {
battery->design_capacity = battery->design_capacity *
10000 / battery->design_voltage;
battery->full_charge_capacity = battery->full_charge_capacity *
10000 / battery->design_voltage;
battery->design_capacity_warning =
battery->design_capacity_warning *
10000 / battery->design_voltage;
/* Curiously, design_capacity_low, unlike the rest of them,
* is correct.
*/
/* capacity_granularity_* equal 1 on the systems tested, so
* it's impossible to tell if they would need an adjustment
* or not if their values were higher.
*/
}
if (test_bit(ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE, &battery->flags) &&
battery->capacity_now > battery->full_charge_capacity)
battery->capacity_now = battery->full_charge_capacity;
return result;
}
static int acpi_battery_get_info(struct acpi_battery *battery)
{
const int xinfo = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
int use_bix;
int result = -ENODEV;
if (!acpi_battery_present(battery))
return 0;
for (use_bix = xinfo ? 1 : 0; use_bix >= 0; use_bix--) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status = AE_ERROR;
mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle,
use_bix ? "_BIX":"_BIF",
NULL, &buffer);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
"%s evaluation failed: %s\n",
use_bix ? "_BIX":"_BIF",
acpi_format_exception(status));
} else {
result = extract_battery_info(use_bix,
battery,
&buffer);
kfree(buffer.pointer);
break;
}
}
if (!result && !use_bix && xinfo)
pr_warn(FW_BUG "The _BIX method is broken, using _BIF.\n");
return result;
}
static int acpi_battery_get_state(struct acpi_battery *battery)
{
int result = 0;
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
if (!acpi_battery_present(battery))
return 0;
if (battery->update_time &&
time_before(jiffies, battery->update_time +
msecs_to_jiffies(cache_time)))
return 0;
mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BST",
NULL, &buffer);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
"_BST evaluation failed: %s",
acpi_format_exception(status));
return -ENODEV;
}
result = extract_package(battery, buffer.pointer,
state_offsets, ARRAY_SIZE(state_offsets));
battery->update_time = jiffies;
kfree(buffer.pointer);
/* For buggy DSDTs that report negative 16-bit values for either
* charging or discharging current and/or report 0 as 65536
* due to bad math.
*/
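/*
* E.g. a reported rate of 64536 (0xFC18) is really the 16-bit value
* -1000, so abs((s16)battery->rate_now) recovers a 1000 mA
* (dis)charge rate (illustrative numbers).
*/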
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
pr_warn_once(FW_BUG "(dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
battery->power_unit && battery->design_voltage) {
battery->capacity_now = battery->capacity_now *
10000 / battery->design_voltage;
}
if (test_bit(ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE, &battery->flags) &&
battery->capacity_now > battery->full_charge_capacity)
battery->capacity_now = battery->full_charge_capacity;
return result;
}
static int acpi_battery_set_alarm(struct acpi_battery *battery)
{
acpi_status status = 0;
if (!acpi_battery_present(battery) ||
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
mutex_lock(&battery->lock);
status = acpi_execute_simple_method(battery->device->handle, "_BTP",
battery->alarm);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status))
return -ENODEV;
acpi_handle_debug(battery->device->handle, "Alarm set to %d\n",
battery->alarm);
return 0;
}
static int acpi_battery_init_alarm(struct acpi_battery *battery)
{
/* See if alarms are supported, and if so, set default */
if (!acpi_has_method(battery->device->handle, "_BTP")) {
clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
return 0;
}
set_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
if (!battery->alarm)
battery->alarm = battery->design_capacity_warning;
return acpi_battery_set_alarm(battery);
}
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", battery->alarm * 1000);
}
static ssize_t acpi_battery_alarm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
return count;
}
static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
/*
* The Battery Hooking API
*
* This API is used inside other drivers that need to expose
* platform-specific behaviour within the generic driver in a
* generic way.
*
*/
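/*
* A minimal usage sketch (illustrative only; foo_attr and the foo_*
* callbacks are hypothetical): a consumer supplies add/remove callbacks
* and registers the hook once, after which the callbacks run for every
* battery, both already-present and hotplugged ones.
*
*	static int foo_add_battery(struct power_supply *battery,
*				   struct acpi_battery_hook *hook)
*	{
*		return device_create_file(&battery->dev, &foo_attr);
*	}
*
*	static int foo_remove_battery(struct power_supply *battery,
*				      struct acpi_battery_hook *hook)
*	{
*		device_remove_file(&battery->dev, &foo_attr);
*		return 0;
*	}
*
*	static struct acpi_battery_hook foo_hook = {
*		.name = "Foo Battery Extension",
*		.add_battery = foo_add_battery,
*		.remove_battery = foo_remove_battery,
*	};
*
*	battery_hook_register(&foo_hook);
*/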
static LIST_HEAD(acpi_battery_list);
static LIST_HEAD(battery_hook_list);
static DEFINE_MUTEX(hook_mutex);
static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
{
struct acpi_battery *battery;
/*
* In order to remove a hook, we first need to de-register it
* from all the batteries that are currently registered.
*/
if (lock)
mutex_lock(&hook_mutex);
list_for_each_entry(battery, &acpi_battery_list, list) {
if (!hook->remove_battery(battery->bat, hook))
power_supply_changed(battery->bat);
}
list_del(&hook->list);
if (lock)
mutex_unlock(&hook_mutex);
pr_info("extension unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
{
__battery_hook_unregister(hook, 1);
}
EXPORT_SYMBOL_GPL(battery_hook_unregister);
void battery_hook_register(struct acpi_battery_hook *hook)
{
struct acpi_battery *battery;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&hook->list);
list_add(&hook->list, &battery_hook_list);
/*
* Now that the hook is registered, we need to notify it of
* each battery that is already present, so that it may add
* its attributes to them.
*/
list_for_each_entry(battery, &acpi_battery_list, list) {
if (hook->add_battery(battery->bat, hook)) {
/*
* If add_battery() returns non-zero, the
* registration of the extension has failed,
* and it is removed from the list of loaded
* hooks.
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
goto end;
}
power_supply_changed(battery->bat);
}
pr_info("new extension: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_register);
/*
* This function gets called right after the battery sysfs
* attributes have been added, so that the drivers that
* define custom sysfs attributes can add their own.
*/
static void battery_hook_add_battery(struct acpi_battery *battery)
{
struct acpi_battery_hook *hook_node, *tmp;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&battery->list);
list_add(&battery->list, &acpi_battery_list);
/*
* Since we added a new battery to the list, we need to
* iterate over the hooks and call add_battery for each
* hook that was registered. This usually happens
* when a battery gets hotplugged or initialized
* during the battery module initialization.
*/
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
* Notifying the extension has failed; to prevent
* further errors we unload the extension.
*/
pr_err("error in extension, unloading: %s",
hook_node->name);
__battery_hook_unregister(hook_node, 0);
}
}
mutex_unlock(&hook_mutex);
}
static void battery_hook_remove_battery(struct acpi_battery *battery)
{
struct acpi_battery_hook *hook;
mutex_lock(&hook_mutex);
/*
	 * Before removing the battery from the list, let each
	 * registered hook remove its custom attributes from it.
*/
list_for_each_entry(hook, &battery_hook_list, list) {
hook->remove_battery(battery->bat, hook);
}
/* Then, just remove the battery from the list */
list_del(&battery->list);
mutex_unlock(&hook_mutex);
}
static void __exit battery_hook_exit(void)
{
struct acpi_battery_hook *hook;
struct acpi_battery_hook *ptr;
/*
* At this point, the acpi_bus_unregister_driver()
* has called remove for all batteries. We just
* need to remove the hooks.
*/
list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
__battery_hook_unregister(hook, 1);
}
mutex_destroy(&hook_mutex);
}
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
bool full_cap_broken = false;
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
if (full_cap_broken) {
battery->bat_desc.properties =
charge_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
}
} else {
if (full_cap_broken) {
battery->bat_desc.properties =
energy_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(energy_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = energy_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(energy_battery_props);
}
}
battery->bat_desc.name = acpi_device_bid(battery->device);
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat_desc.get_property = acpi_battery_get_property;
battery->bat = power_supply_register_no_ws(&battery->device->dev,
&battery->bat_desc, &psy_cfg);
if (IS_ERR(battery->bat)) {
int result = PTR_ERR(battery->bat);
battery->bat = NULL;
return result;
}
battery_hook_add_battery(battery);
return device_create_file(&battery->bat->dev, &alarm_attr);
}
static void sysfs_remove_battery(struct acpi_battery *battery)
{
mutex_lock(&battery->sysfs_lock);
if (!battery->bat) {
mutex_unlock(&battery->sysfs_lock);
return;
}
battery_hook_remove_battery(battery);
device_remove_file(&battery->bat->dev, &alarm_attr);
power_supply_unregister(battery->bat);
battery->bat = NULL;
mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
{
struct acpi_battery *battery = (struct acpi_battery *)private;
/* Note: the hardcoded offsets below have been extracted from
* the source code of dmidecode.
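	 * In SMBIOS terms this is the Portable Battery (Type 22) structure:
	 * dmi_data + 6 is the Design Capacity word and dmi_data[17] is the
	 * Design Capacity Multiplier.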
*/
if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
const u8 *dmi_data = (const u8 *)(dm + 1);
int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
if (dm->length >= 18)
dmi_capacity *= dmi_data[17];
if (battery->design_capacity * battery->design_voltage / 1000
!= dmi_capacity &&
battery->design_capacity * 10 == dmi_capacity)
set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
&battery->flags);
}
}
/*
 * According to the ACPI spec, some kinds of primary batteries can
 * report the percentage of remaining capacity directly to the OS.
 * In this case, the Last Full Charged Capacity is reported as 100
 * and BatteryPresentRate as 0xFFFFFFFF.
 *
 * However, some batteries have been found to report percentage
 * remaining capacity even though they are rechargeable:
 * https://bugzilla.kernel.org/show_bug.cgi?id=15979
 *
 * Handle this correctly so that they won't break userspace.
*/
static void acpi_battery_quirks(struct acpi_battery *battery)
{
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
return;
if (battery->full_charge_capacity == 100 &&
battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
battery->capacity_now >= 0 && battery->capacity_now <= 100) {
set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
battery->full_charge_capacity = battery->design_capacity;
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
}
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
return;
if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
const char *s;
s = dmi_get_system_info(DMI_PRODUCT_VERSION);
if (s && !strncasecmp(s, "ThinkPad", 8)) {
dmi_walk(find_battery, battery);
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
&battery->flags) &&
battery->design_voltage) {
battery->design_capacity =
battery->design_capacity *
10000 / battery->design_voltage;
battery->full_charge_capacity =
battery->full_charge_capacity *
10000 / battery->design_voltage;
battery->design_capacity_warning =
battery->design_capacity_warning *
10000 / battery->design_voltage;
battery->capacity_now = battery->capacity_now *
10000 / battery->design_voltage;
}
}
}
if (test_bit(ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE, &battery->flags))
return;
if (acpi_battery_is_degraded(battery) &&
battery->capacity_now > battery->full_charge_capacity) {
set_bit(ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE, &battery->flags);
battery->capacity_now = battery->full_charge_capacity;
}
}
static int acpi_battery_update(struct acpi_battery *battery, bool resume)
{
int result = acpi_battery_get_status(battery);
if (result)
return result;
if (!acpi_battery_present(battery)) {
sysfs_remove_battery(battery);
battery->update_time = 0;
return 0;
}
if (resume)
return 0;
if (!battery->update_time) {
result = acpi_battery_get_info(battery);
if (result)
return result;
acpi_battery_init_alarm(battery);
}
result = acpi_battery_get_state(battery);
if (result)
return result;
acpi_battery_quirks(battery);
if (!battery->bat) {
result = sysfs_add_battery(battery);
if (result)
return result;
}
/*
	 * Wake up the system if the battery is critically low
	 * or below the alarm level.
*/
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
(battery->capacity_now <= battery->alarm)))
acpi_pm_wakeup_event(&battery->device->dev);
return result;
}
static void acpi_battery_refresh(struct acpi_battery *battery)
{
int power_unit;
if (!battery->bat)
return;
power_unit = battery->power_unit;
acpi_battery_get_info(battery);
if (power_unit == battery->power_unit)
return;
/* The battery has changed its reporting units. */
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
/* Driver Interface */
static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_battery *battery = acpi_driver_data(device);
struct power_supply *old;
if (!battery)
return;
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
* early. For example, when AC is unplugged and notification is
* triggered, battery state is still reported as "Full", and changes to
* "Discharging" only after short delay, without any notification.
*/
if (battery_notification_delay_ms > 0)
msleep(battery_notification_delay_ms);
if (event == ACPI_BATTERY_NOTIFY_INFO)
acpi_battery_refresh(battery);
acpi_battery_update(battery, false);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
acpi_notifier_call_chain(device, event, acpi_battery_present(battery));
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat)
power_supply_changed(battery->bat);
}
static int battery_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
int result;
switch (mode) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
result = acpi_battery_get_info(battery);
if (result)
return result;
result = sysfs_add_battery(battery);
if (result)
return result;
}
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
break;
}
return 0;
}
static int __init
battery_bix_broken_package_quirk(const struct dmi_system_id *d)
{
battery_bix_broken_package = 1;
return 0;
}
static int __init
battery_notification_delay_quirk(const struct dmi_system_id *d)
{
battery_notification_delay_ms = 1000;
return 0;
}
static int __init
battery_ac_is_broken_quirk(const struct dmi_system_id *d)
{
battery_ac_is_broken = 1;
return 0;
}
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
/* NEC LZ750/LS */
.callback = battery_bix_broken_package_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
},
},
{
/* Acer Aspire V5-573G */
.callback = battery_notification_delay_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
{
/* Point of View mobii wintab p800w */
.callback = battery_ac_is_broken_quirk,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
},
},
{
/* Microsoft Surface Go 3 */
.callback = battery_notification_delay_quirk,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
{},
};
/*
 * Some machines' (e.g. Lenovo Z480) ECs are not stable during
 * boot-up, which sometimes causes the battery driver to fail
 * probing because battery information cannot be retrieved from
 * the EC. After several retries the operation may succeed, so
 * retry here with a 20ms sleep between attempts.
*/
static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
break;
msleep(20);
}
return ret;
}
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
struct acpi_battery *battery = NULL;
if (!device)
return -EINVAL;
if (device->dep_unmet)
return -EPROBE_DEFER;
battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
mutex_init(&battery->sysfs_lock);
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
result = acpi_battery_update_retry(battery);
if (result)
goto fail;
pr_info("Slot [%s] (battery %s)\n", acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
device_init_wakeup(&device->dev, 1);
result = acpi_dev_install_notify_handler(device, ACPI_ALL_NOTIFY,
acpi_battery_notify);
if (result)
goto fail_pm;
return 0;
fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return result;
}
static void acpi_battery_remove(struct acpi_device *device)
{
struct acpi_battery *battery = NULL;
if (!device || !acpi_driver_data(device))
return;
battery = acpi_driver_data(device);
acpi_dev_remove_notify_handler(device, ACPI_ALL_NOTIFY,
acpi_battery_notify);
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
}
#ifdef CONFIG_PM_SLEEP
/* This is needed to learn about changes made while the system was suspended. */
static int acpi_battery_resume(struct device *dev)
{
struct acpi_battery *battery;
if (!dev)
return -EINVAL;
battery = acpi_driver_data(to_acpi_device(dev));
if (!battery)
return -EINVAL;
battery->update_time = 0;
acpi_battery_update(battery, true);
return 0;
}
#else
#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
.ops = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
},
.drv.pm = &acpi_battery_pm,
};
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
int result;
if (acpi_quirk_skip_acpi_ac_and_battery())
return;
dmi_check_system(bat_dmi_table);
result = acpi_bus_register_driver(&acpi_battery_driver);
battery_driver_registered = (result == 0);
}
static int __init acpi_battery_init(void)
{
if (acpi_disabled)
return -ENODEV;
async_cookie = async_schedule(acpi_battery_init_async, NULL);
return 0;
}
static void __exit acpi_battery_exit(void)
{
async_synchronize_cookie(async_cookie + 1);
if (battery_driver_registered) {
acpi_bus_unregister_driver(&acpi_battery_driver);
battery_hook_exit();
}
}
module_init(acpi_battery_init);
module_exit(acpi_battery_exit);
| linux-master | drivers/acpi/battery.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Author: Sudeep Holla <[email protected]>
* Copyright 2022 Arm Limited
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/io.h>
static struct acpi_ffh_info ffh_ctx;
int __weak acpi_ffh_address_space_arch_setup(void *handler_ctxt,
void **region_ctxt)
{
return -EOPNOTSUPP;
}
int __weak acpi_ffh_address_space_arch_handler(acpi_integer *value,
void *region_context)
{
return -EOPNOTSUPP;
}
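/*
 * The __weak stubs above return -EOPNOTSUPP, so installing the address
 * space handler below is harmless on architectures without fixed
 * hardware support; an architecture that does support it (for example
 * arm64, via SMCCC) supplies strong definitions that actually service
 * FFH region accesses.
 */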
static acpi_status
acpi_ffh_address_space_setup(acpi_handle region_handle, u32 function,
void *handler_context, void **region_context)
{
return acpi_ffh_address_space_arch_setup(handler_context,
region_context);
}
static acpi_status
acpi_ffh_address_space_handler(u32 function, acpi_physical_address addr,
u32 bits, acpi_integer *value,
void *handler_context, void *region_context)
{
return acpi_ffh_address_space_arch_handler(value, region_context);
}
void __init acpi_init_ffh(void)
{
acpi_status status;
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_FIXED_HARDWARE,
&acpi_ffh_address_space_handler,
&acpi_ffh_address_space_setup,
&ffh_ctx);
if (ACPI_FAILURE(status))
pr_alert("OperationRegion handler could not be installed\n");
}
| linux-master | drivers/acpi/acpi_ffh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/acpi/power.c - ACPI Power Resources management.
*
* Copyright (C) 2001 - 2015 Intel Corp.
* Author: Andy Grover <[email protected]>
* Author: Paul Diefenbaugh <[email protected]>
* Author: Rafael J. Wysocki <[email protected]>
*/
/*
* ACPI power-managed devices may be controlled in two ways:
* 1. via "Device Specific (D-State) Control"
* 2. via "Power Resource Control".
* The code below deals with ACPI Power Resources control.
*
* An ACPI "power resource object" represents a software controllable power
* plane, clock plane, or other resource depended on by a device.
*
* A device may rely on multiple power resources, and a power resource
* may be shared by multiple devices.
*/
#define pr_fmt(fmt) "ACPI: PM: " fmt
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/acpi.h>
#include "sleep.h"
#include "internal.h"
#define ACPI_POWER_CLASS "power_resource"
#define ACPI_POWER_DEVICE_NAME "Power Resource"
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
struct acpi_power_dependent_device {
struct device *dev;
struct list_head node;
};
struct acpi_power_resource {
struct acpi_device device;
struct list_head list_node;
u32 system_level;
u32 order;
unsigned int ref_count;
u8 state;
struct mutex resource_lock;
struct list_head dependents;
};
struct acpi_power_resource_entry {
struct list_head node;
struct acpi_power_resource *resource;
};
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
/* --------------------------------------------------------------------------
Power Resource Management
-------------------------------------------------------------------------- */
static inline const char *resource_dev_name(struct acpi_power_resource *pr)
{
return dev_name(&pr->device.dev);
}
static inline
struct acpi_power_resource *to_power_resource(struct acpi_device *device)
{
return container_of(device, struct acpi_power_resource, device);
}
static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (!device)
return NULL;
return to_power_resource(device);
}
static int acpi_power_resources_list_add(acpi_handle handle,
struct list_head *list)
{
struct acpi_power_resource *resource = acpi_power_get_context(handle);
struct acpi_power_resource_entry *entry;
if (!resource || !list)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->resource = resource;
if (!list_empty(list)) {
struct acpi_power_resource_entry *e;
list_for_each_entry(e, list, node)
if (e->resource->order > resource->order) {
list_add_tail(&entry->node, &e->node);
return 0;
}
}
list_add_tail(&entry->node, list);
return 0;
}
void acpi_power_resources_list_free(struct list_head *list)
{
struct acpi_power_resource_entry *entry, *e;
list_for_each_entry_safe(entry, e, list, node) {
list_del(&entry->node);
kfree(entry);
}
}
static bool acpi_power_resource_is_dup(union acpi_object *package,
unsigned int start, unsigned int i)
{
acpi_handle rhandle, dup;
unsigned int j;
/* The caller is expected to check the package element types */
rhandle = package->package.elements[i].reference.handle;
for (j = start; j < i; j++) {
dup = package->package.elements[j].reference.handle;
if (dup == rhandle)
return true;
}
return false;
}
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list)
{
unsigned int i;
int err = 0;
for (i = start; i < package->package.count; i++) {
union acpi_object *element = &package->package.elements[i];
struct acpi_device *rdev;
acpi_handle rhandle;
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
err = -ENODATA;
break;
}
rhandle = element->reference.handle;
if (!rhandle) {
err = -ENODEV;
break;
}
/* Some ACPI tables contain duplicate power resource references */
if (acpi_power_resource_is_dup(package, start, i))
continue;
rdev = acpi_add_power_resource(rhandle);
if (!rdev) {
err = -ENODEV;
break;
}
err = acpi_power_resources_list_add(rhandle, list);
if (err)
break;
}
if (err)
acpi_power_resources_list_free(list);
return err;
}
static int __get_state(acpi_handle handle, u8 *state)
{
acpi_status status = AE_OK;
unsigned long long sta = 0;
u8 cur_state;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
cur_state ? "on" : "off");
*state = cur_state;
return 0;
}
static int acpi_power_get_state(struct acpi_power_resource *resource, u8 *state)
{
if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN) {
int ret;
ret = __get_state(resource->device.handle, &resource->state);
if (ret)
return ret;
}
*state = resource->state;
return 0;
}
static int acpi_power_get_list_state(struct list_head *list, u8 *state)
{
struct acpi_power_resource_entry *entry;
u8 cur_state = ACPI_POWER_RESOURCE_STATE_OFF;
if (!list || !state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
int result;
mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(resource, &cur_state);
mutex_unlock(&resource->resource_lock);
if (result)
return result;
if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
break;
}
pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
*state = cur_state;
return 0;
}
static int
acpi_power_resource_add_dependent(struct acpi_power_resource *resource,
struct device *dev)
{
struct acpi_power_dependent_device *dep;
int ret = 0;
mutex_lock(&resource->resource_lock);
list_for_each_entry(dep, &resource->dependents, node) {
/* Only add it once */
if (dep->dev == dev)
goto unlock;
}
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep) {
ret = -ENOMEM;
goto unlock;
}
dep->dev = dev;
list_add_tail(&dep->node, &resource->dependents);
dev_dbg(dev, "added power dependency to [%s]\n",
resource_dev_name(resource));
unlock:
mutex_unlock(&resource->resource_lock);
return ret;
}
static void
acpi_power_resource_remove_dependent(struct acpi_power_resource *resource,
struct device *dev)
{
struct acpi_power_dependent_device *dep;
mutex_lock(&resource->resource_lock);
list_for_each_entry(dep, &resource->dependents, node) {
if (dep->dev == dev) {
list_del(&dep->node);
kfree(dep);
dev_dbg(dev, "removed power dependency to [%s]\n",
resource_dev_name(resource));
break;
}
}
mutex_unlock(&resource->resource_lock);
}
/**
* acpi_device_power_add_dependent - Add dependent device of this ACPI device
* @adev: ACPI device pointer
* @dev: Dependent device
*
 * If @adev has a non-empty _PR0, @dev is added as a dependent device to all
 * power resources returned by it. This means that whenever these power
 * resources are turned _ON the dependent devices get runtime resumed. This
 * is needed for devices such as PCI devices, so that their drivers can
 * re-initialize them after they went back to D0uninitialized.
*
* If @adev does not have _PR0 this does nothing.
*
* Returns %0 in case of success and negative errno otherwise.
*/
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev)
{
struct acpi_power_resource_entry *entry;
struct list_head *resources;
int ret;
if (!adev->flags.power_manageable)
return 0;
resources = &adev->power.states[ACPI_STATE_D0].resources;
list_for_each_entry(entry, resources, node) {
ret = acpi_power_resource_add_dependent(entry->resource, dev);
if (ret)
goto err;
}
return 0;
err:
list_for_each_entry(entry, resources, node)
acpi_power_resource_remove_dependent(entry->resource, dev);
return ret;
}
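/*
 * Illustrative (hypothetical) call site: a driver whose device must be
 * runtime resumed whenever its ACPI power resources are turned back on
 * could do something like the following from its probe path, with
 * acpi_device_power_remove_dependent() as the cleanup counterpart:
 *
 *	struct acpi_device *adev = ACPI_COMPANION(dev);
 *
 *	if (adev)
 *		acpi_device_power_add_dependent(adev, dev);
 */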
/**
* acpi_device_power_remove_dependent - Remove dependent device
* @adev: ACPI device pointer
* @dev: Dependent device
*
* Does the opposite of acpi_device_power_add_dependent() and removes the
 * dependent device if it is found. It may also be called for an @adev that
 * does not have _PR0.
*/
void acpi_device_power_remove_dependent(struct acpi_device *adev,
struct device *dev)
{
struct acpi_power_resource_entry *entry;
struct list_head *resources;
if (!adev->flags.power_manageable)
return;
resources = &adev->power.states[ACPI_STATE_D0].resources;
list_for_each_entry_reverse(entry, resources, node)
acpi_power_resource_remove_dependent(entry->resource, dev);
}
static int __acpi_power_on(struct acpi_power_resource *resource)
{
acpi_handle handle = resource->device.handle;
struct acpi_power_dependent_device *dep;
acpi_status status = AE_OK;
status = acpi_evaluate_object(handle, "_ON", NULL, NULL);
if (ACPI_FAILURE(status)) {
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
}
resource->state = ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource turned on\n");
/*
* If there are other dependents on this power resource we need to
* resume them now so that their drivers can re-initialize the
* hardware properly after it went back to D0.
*/
if (list_empty(&resource->dependents) ||
list_is_singular(&resource->dependents))
return 0;
list_for_each_entry(dep, &resource->dependents, node) {
dev_dbg(dep->dev, "runtime resuming because [%s] turned on\n",
resource_dev_name(resource));
pm_request_resume(dep->dev);
}
return 0;
}
static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
{
int result = 0;
if (resource->ref_count++) {
acpi_handle_debug(resource->device.handle,
"Power resource already on\n");
} else {
result = __acpi_power_on(resource);
if (result)
resource->ref_count--;
}
return result;
}
static int acpi_power_on(struct acpi_power_resource *resource)
{
int result;
mutex_lock(&resource->resource_lock);
result = acpi_power_on_unlocked(resource);
mutex_unlock(&resource->resource_lock);
return result;
}
static int __acpi_power_off(struct acpi_power_resource *resource)
{
acpi_handle handle = resource->device.handle;
acpi_status status;
status = acpi_evaluate_object(handle, "_OFF", NULL, NULL);
if (ACPI_FAILURE(status)) {
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
}
resource->state = ACPI_POWER_RESOURCE_STATE_OFF;
acpi_handle_debug(handle, "Power resource turned off\n");
return 0;
}
static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
{
int result = 0;
if (!resource->ref_count) {
acpi_handle_debug(resource->device.handle,
"Power resource already off\n");
return 0;
}
if (--resource->ref_count) {
acpi_handle_debug(resource->device.handle,
"Power resource still in use\n");
} else {
result = __acpi_power_off(resource);
if (result)
resource->ref_count++;
}
return result;
}
static int acpi_power_off(struct acpi_power_resource *resource)
{
int result;
mutex_lock(&resource->resource_lock);
result = acpi_power_off_unlocked(resource);
mutex_unlock(&resource->resource_lock);
return result;
}
static int acpi_power_off_list(struct list_head *list)
{
struct acpi_power_resource_entry *entry;
int result = 0;
list_for_each_entry_reverse(entry, list, node) {
result = acpi_power_off(entry->resource);
if (result)
goto err;
}
return 0;
err:
list_for_each_entry_continue(entry, list, node)
acpi_power_on(entry->resource);
return result;
}
static int acpi_power_on_list(struct list_head *list)
{
struct acpi_power_resource_entry *entry;
int result = 0;
list_for_each_entry(entry, list, node) {
result = acpi_power_on(entry->resource);
if (result)
goto err;
}
return 0;
err:
list_for_each_entry_continue_reverse(entry, list, node)
acpi_power_off(entry->resource);
return result;
}
static struct attribute *attrs[] = {
NULL,
};
static const struct attribute_group attr_groups[] = {
[ACPI_STATE_D0] = {
.name = "power_resources_D0",
.attrs = attrs,
},
[ACPI_STATE_D1] = {
.name = "power_resources_D1",
.attrs = attrs,
},
[ACPI_STATE_D2] = {
.name = "power_resources_D2",
.attrs = attrs,
},
[ACPI_STATE_D3_HOT] = {
.name = "power_resources_D3hot",
.attrs = attrs,
},
};
static const struct attribute_group wakeup_attr_group = {
.name = "power_resources_wakeup",
.attrs = attrs,
};
static void acpi_power_hide_list(struct acpi_device *adev,
struct list_head *resources,
const struct attribute_group *attr_group)
{
struct acpi_power_resource_entry *entry;
if (list_empty(resources))
return;
list_for_each_entry_reverse(entry, resources, node) {
struct acpi_device *res_dev = &entry->resource->device;
sysfs_remove_link_from_group(&adev->dev.kobj,
attr_group->name,
dev_name(&res_dev->dev));
}
sysfs_remove_group(&adev->dev.kobj, attr_group);
}
static void acpi_power_expose_list(struct acpi_device *adev,
struct list_head *resources,
const struct attribute_group *attr_group)
{
struct acpi_power_resource_entry *entry;
int ret;
if (list_empty(resources))
return;
ret = sysfs_create_group(&adev->dev.kobj, attr_group);
if (ret)
return;
list_for_each_entry(entry, resources, node) {
struct acpi_device *res_dev = &entry->resource->device;
ret = sysfs_add_link_to_group(&adev->dev.kobj,
attr_group->name,
&res_dev->dev.kobj,
dev_name(&res_dev->dev));
if (ret) {
acpi_power_hide_list(adev, resources, attr_group);
break;
}
}
}
static void acpi_power_expose_hide(struct acpi_device *adev,
struct list_head *resources,
const struct attribute_group *attr_group,
bool expose)
{
if (expose)
acpi_power_expose_list(adev, resources, attr_group);
else
acpi_power_hide_list(adev, resources, attr_group);
}
void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
{
int state;
if (adev->wakeup.flags.valid)
acpi_power_expose_hide(adev, &adev->wakeup.resources,
&wakeup_attr_group, add);
if (!adev->power.flags.power_resources)
return;
for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
acpi_power_expose_hide(adev,
&adev->power.states[state].resources,
&attr_groups[state], add);
}
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
{
struct acpi_power_resource_entry *entry;
int system_level = 5;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
u8 state;
mutex_lock(&resource->resource_lock);
/*
* Make sure that the power resource state and its reference
* counter value are consistent with each other.
*/
if (!resource->ref_count &&
!acpi_power_get_state(resource, &state) &&
state == ACPI_POWER_RESOURCE_STATE_ON)
__acpi_power_off(resource);
if (system_level > resource->system_level)
system_level = resource->system_level;
mutex_unlock(&resource->resource_lock);
}
*system_level_p = system_level;
return 0;
}
/* --------------------------------------------------------------------------
Device Power Management
-------------------------------------------------------------------------- */
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
* ACPI 3.0) _PSW (Power State Wake)
* @dev: Device to handle.
* @enable: 0 - disable, 1 - enable the wake capabilities of the device.
* @sleep_state: Target sleep state of the system.
* @dev_state: Target power state of the device.
*
* Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present. On failure reset the device's
* wakeup.flags.valid flag.
*
* RETURN VALUE:
* 0 if either _DSW or _PSW has been successfully executed
* 0 if neither _DSW nor _PSW has been found
* -ENODEV if the execution of either _DSW or _PSW has failed
*/
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state)
{
union acpi_object in_arg[3];
struct acpi_object_list arg_list = { 3, in_arg };
acpi_status status = AE_OK;
/*
* Try to execute _DSW first.
*
* Three arguments are needed for the _DSW object:
* Argument 0: enable/disable the wake capabilities
* Argument 1: target system state
* Argument 2: target device state
	 * When the _DSW object is called to disable the wake capabilities,
	 * only the first argument matters; the values of the other two
	 * arguments are ignored.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
in_arg[0].integer.value = enable;
in_arg[1].type = ACPI_TYPE_INTEGER;
in_arg[1].integer.value = sleep_state;
in_arg[2].type = ACPI_TYPE_INTEGER;
in_arg[2].integer.value = dev_state;
status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
if (ACPI_SUCCESS(status)) {
return 0;
} else if (status != AE_NOT_FOUND) {
acpi_handle_info(dev->handle, "_DSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
/* Execute _PSW */
status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
acpi_handle_info(dev->handle, "_PSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
return 0;
}
/*
* Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
* 1. Power on the power resources required for the wakeup device
* 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
mutex_lock(&acpi_device_lock);
dev_dbg(&dev->dev, "Enabling wakeup power (count %d)\n",
dev->wakeup.prepare_count);
if (dev->wakeup.prepare_count++)
goto out;
err = acpi_power_on_list(&dev->wakeup.resources);
if (err) {
dev_err(&dev->dev, "Cannot turn on wakeup power resources\n");
dev->wakeup.flags.valid = 0;
goto out;
}
/*
* Passing 3 as the third argument below means the device may be
* put into arbitrary power state afterward.
*/
err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
if (err) {
acpi_power_off_list(&dev->wakeup.resources);
dev->wakeup.prepare_count = 0;
goto out;
}
dev_dbg(&dev->dev, "Wakeup power enabled\n");
out:
mutex_unlock(&acpi_device_lock);
return err;
}
/*
 * Shut down a wakeup device, the counterpart of the function above:
 * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
 *    State Wake) for the device, if present
 * 2. Shut down the power resources
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
mutex_lock(&acpi_device_lock);
dev_dbg(&dev->dev, "Disabling wakeup power (count %d)\n",
dev->wakeup.prepare_count);
/* Do nothing if wakeup power has not been enabled for this device. */
if (dev->wakeup.prepare_count <= 0)
goto out;
if (--dev->wakeup.prepare_count > 0)
goto out;
err = acpi_device_sleep_wake(dev, 0, 0, 0);
if (err)
goto out;
/*
* All of the power resources in the list need to be turned off even if
* there are errors.
*/
list_for_each_entry(entry, &dev->wakeup.resources, node) {
int ret;
ret = acpi_power_off(entry->resource);
if (ret && !err)
err = ret;
}
if (err) {
dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
dev->wakeup.flags.valid = 0;
goto out;
}
dev_dbg(&dev->dev, "Wakeup power disabled\n");
out:
mutex_unlock(&acpi_device_lock);
return err;
}
int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
u8 list_state = ACPI_POWER_RESOURCE_STATE_OFF;
int result = 0;
int i = 0;
if (!device || !state)
return -EINVAL;
/*
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
*/
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
struct list_head *list = &device->power.states[i].resources;
if (list_empty(list))
continue;
result = acpi_power_get_list_state(list, &list_state);
if (result)
return result;
if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
*state = i;
return 0;
}
}
*state = device->power.states[ACPI_STATE_D3_COLD].flags.valid ?
ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
return 0;
}
int acpi_power_on_resources(struct acpi_device *device, int state)
{
if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3_HOT)
return -EINVAL;
return acpi_power_on_list(&device->power.states[state].resources);
}
int acpi_power_transition(struct acpi_device *device, int state)
{
int result = 0;
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
if (device->power.state == state || !device->flags.power_manageable)
return 0;
if ((device->power.state < ACPI_STATE_D0)
|| (device->power.state > ACPI_STATE_D3_COLD))
return -ENODEV;
/*
* First we reference all power resources required in the target list
* (e.g. so the device doesn't lose power while transitioning). Then,
* we dereference all power resources used in the current list.
*/
if (state < ACPI_STATE_D3_COLD)
result = acpi_power_on_list(
&device->power.states[state].resources);
if (!result && device->power.state < ACPI_STATE_D3_COLD)
acpi_power_off_list(
&device->power.states[device->power.state].resources);
/* We shouldn't change the state unless the above operations succeed. */
device->power.state = result ? ACPI_STATE_UNKNOWN : state;
return result;
}
static void acpi_release_power_resource(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct acpi_power_resource *resource;
resource = container_of(device, struct acpi_power_resource, device);
mutex_lock(&power_resource_list_lock);
list_del(&resource->list_node);
mutex_unlock(&power_resource_list_lock);
acpi_free_pnp_ids(&device->pnp);
kfree(resource);
}
static ssize_t resource_in_use_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_power_resource *resource;
resource = to_power_resource(to_acpi_device(dev));
return sprintf(buf, "%u\n", !!resource->ref_count);
}
static DEVICE_ATTR_RO(resource_in_use);
static void acpi_power_sysfs_remove(struct acpi_device *device)
{
device_remove_file(&device->dev, &dev_attr_resource_in_use);
}
static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource)
{
mutex_lock(&power_resource_list_lock);
if (!list_empty(&acpi_power_resource_list)) {
struct acpi_power_resource *r;
list_for_each_entry(r, &acpi_power_resource_list, list_node)
if (r->order > resource->order) {
list_add_tail(&resource->list_node, &r->list_node);
goto out;
}
}
list_add_tail(&resource->list_node, &acpi_power_resource_list);
out:
mutex_unlock(&power_resource_list_lock);
}
struct acpi_device *acpi_add_power_resource(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_power_resource *resource;
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
u8 state_dummy;
int result;
if (device)
return device;
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
return NULL;
device = &resource->device;
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
acpi_release_power_resource);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
device->flags.match_driver = true;
/* Evaluate the object to get the system level and resource order. */
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status))
goto err;
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
/* Get the initial state or just flip it on if that fails. */
if (acpi_power_get_state(resource, &state_dummy))
__acpi_power_on(resource);
acpi_handle_info(handle, "New power resource\n");
result = acpi_tie_acpi_dev(device);
if (result)
goto err;
result = acpi_device_add(device);
if (result)
goto err;
if (!device_create_file(&device->dev, &dev_attr_resource_in_use))
device->remove = acpi_power_sysfs_remove;
acpi_power_add_resource_to_list(resource);
acpi_device_add_finalize(device);
return device;
err:
acpi_release_power_resource(&device->dev);
return NULL;
}
#ifdef CONFIG_ACPI_SLEEP
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
mutex_lock(&power_resource_list_lock);
list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
int result;
u8 state;
mutex_lock(&resource->resource_lock);
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
result = acpi_power_get_state(resource, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
continue;
}
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
acpi_handle_debug(resource->device.handle, "Turning ON\n");
__acpi_power_on(resource);
}
mutex_unlock(&resource->resource_lock);
}
mutex_unlock(&power_resource_list_lock);
}
#endif
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
* The Toshiba Click Mini has a CPR3 power-resource which must
* be on for the touchscreen to work, but which is not in any
* _PR? lists. The other 2 affected power-resources are no-ops.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"),
},
},
{}
};
/**
* acpi_turn_off_unused_power_resources - Turn off power resources not in use.
*/
void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
if (dmi_check_system(dmi_leave_unused_power_resources_on))
return;
mutex_lock(&power_resource_list_lock);
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
if (!resource->ref_count &&
resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
acpi_handle_debug(resource->device.handle, "Turning OFF\n");
__acpi_power_off(resource);
}
mutex_unlock(&resource->resource_lock);
}
mutex_unlock(&power_resource_list_lock);
}
| linux-master | drivers/acpi/power.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/acpi.h>
#include <acpi/button.h>
MODULE_AUTHOR("Josh Triplett");
MODULE_DESCRIPTION("ACPI Tiny Power Button Driver");
MODULE_LICENSE("GPL");
static int power_signal __read_mostly = CONFIG_ACPI_TINY_POWER_BUTTON_SIGNAL;
module_param(power_signal, int, 0644);
MODULE_PARM_DESC(power_signal, "Power button sends this signal to init");
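/*
 * Example (assumed values): booting with "tiny_power_button.power_signal=2"
 * on the kernel command line, or loading with "modprobe tiny_power_button
 * power_signal=2", makes a power-button press deliver SIGINT (2) to the
 * init process via kill_cad_pid().
 */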
static const struct acpi_device_id tiny_power_button_device_ids[] = {
{ ACPI_BUTTON_HID_POWER, 0 },
{ ACPI_BUTTON_HID_POWERF, 0 },
{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids);
static void acpi_tiny_power_button_notify(acpi_handle handle, u32 event, void *data)
{
kill_cad_pid(power_signal, 1);
}
static void acpi_tiny_power_button_notify_run(void *not_used)
{
acpi_tiny_power_button_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, NULL);
}
static u32 acpi_tiny_power_button_event(void *not_used)
{
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_tiny_power_button_notify_run, NULL);
return ACPI_INTERRUPT_HANDLED;
}
static int acpi_tiny_power_button_add(struct acpi_device *device)
{
acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_tiny_power_button_event,
NULL);
} else {
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
acpi_tiny_power_button_notify,
NULL);
}
if (ACPI_FAILURE(status))
return -ENODEV;
return 0;
}
static void acpi_tiny_power_button_remove(struct acpi_device *device)
{
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_tiny_power_button_event);
} else {
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
acpi_tiny_power_button_notify);
}
acpi_os_wait_events_complete();
}
static struct acpi_driver acpi_tiny_power_button_driver = {
.name = "tiny-power-button",
.class = "tiny-power-button",
.ids = tiny_power_button_device_ids,
.ops = {
.add = acpi_tiny_power_button_add,
.remove = acpi_tiny_power_button_remove,
},
};
module_acpi_driver(acpi_tiny_power_button_driver);
| linux-master | drivers/acpi/tiny-power-button.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sleep.c - ACPI sleep support.
*
* Copyright (c) 2005 Alexey Starikovskiy <[email protected]>
* Copyright (c) 2004 David Shaohua Li <[email protected]>
* Copyright (c) 2000-2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*/
#define pr_fmt(fmt) "ACPI: PM: " fmt
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>
#include "internal.h"
#include "sleep.h"
/*
* Some HW-full platforms do not have _S5, so they may need
 * to leverage EFI power off for a shutdown.
*/
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];
static void acpi_sleep_tts_switch(u32 acpi_state)
{
acpi_status status;
status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
/*
		 * The OS can't evaluate the _TTS object correctly. A warning
		 * message will be printed, but it won't break anything.
*/
pr_notice("Failure in evaluating _TTS object\n");
}
}
static int tts_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
acpi_sleep_tts_switch(ACPI_STATE_S5);
return NOTIFY_DONE;
}
static struct notifier_block tts_notifier = {
.notifier_call = tts_notify_reboot,
.next = NULL,
.priority = 0,
};
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
unsigned long acpi_wakeup_address;
	/* Do we have a wakeup address for S3? */
if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
acpi_set_waking_vector(acpi_wakeup_address);
}
#endif
pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
acpi_enable_wakeup_devices(acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
}
bool acpi_sleep_state_supported(u8 sleep_state)
{
acpi_status status;
u8 type_a, type_b;
status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
|| (acpi_gbl_FADT.sleep_control.address
&& acpi_gbl_FADT.sleep_status.address));
}
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
u32 acpi_target_system_state(void)
{
return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);
static bool pwr_btn_event_pending;
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* and to restore them during the subsequent resume. Windows does that also for
* suspend to RAM. However, it is known that this mechanism does not work on
* all machines, so we allow the user to disable it with the help of the
* 'acpi_sleep=nonvs' kernel command line option.
*/
static bool nvs_nosave;
void __init acpi_nvs_nosave(void)
{
nvs_nosave = true;
}
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* but says nothing about saving NVS during S3. Not all versions of Windows
* save NVS on S3 suspend either, and it is clear that not all systems need
* NVS to be saved at S3 time. To improve suspend/resume time, allow the
* user to disable saving NVS on S3 if their system does not require it, but
* continue to save/restore NVS for S4 as specified.
*/
static bool nvs_nosave_s3;
void __init acpi_nvs_nosave_s3(void)
{
nvs_nosave_s3 = true;
}
static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
nvs_nosave_s3 = false;
return 0;
}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
*/
static bool old_suspend_ordering;
void __init acpi_old_suspend_ordering(void)
{
old_suspend_ordering = true;
}
static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
acpi_old_suspend_ordering();
return 0;
}
static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
acpi_nvs_nosave();
return 0;
}
bool acpi_sleep_default_s3;
static int __init init_default_s3(const struct dmi_system_id *d)
{
acpi_sleep_default_s3 = true;
return 0;
}
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
.ident = "Abit KN9 (nForce4 variant)",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "HP xw4600 Workstation",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR,
"Matsushita Electric Industrial Co.,Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW41E_H",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB17FX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR11M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Everex StepNote Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB1Z1E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-NW130D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCCW29FX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Averatec AV1020-ED2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Asus A8N-SLI DELUXE",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Asus A8N-SLI Premium",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR26GN_P",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB1S1E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW520F",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Asus K54C",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Asus K54HR",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
{
.callback = init_nvs_save_s3,
.ident = "Asus 1025C",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
},
},
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
* saving during S3.
*/
{
.callback = init_nvs_save_s3,
.ident = "Lenovo G50-45",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
{
.callback = init_nvs_save_s3,
.ident = "Lenovo G40-45",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
},
},
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
* https://bugzilla.kernel.org/show_bug.cgi?id=199057).
*/
{
.callback = init_default_s3,
.ident = "ThinkPad X1 Tablet(2016)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
/*
* ASUS B1400CEAE hangs on resume from suspend (see
* https://bugzilla.kernel.org/show_bug.cgi?id=215742).
*/
{
.callback = init_default_s3,
.ident = "ASUS B1400CEAE",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
},
},
{},
};
static bool ignore_blacklist;
void __init acpi_sleep_no_blacklist(void)
{
ignore_blacklist = true;
}
static void __init acpi_sleep_dmi_check(void)
{
if (ignore_blacklist)
return;
if (dmi_get_bios_year() >= 2012)
acpi_nvs_nosave_s3();
dmi_check_system(acpisleep_dmi_table);
}
/**
* acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
*/
static int acpi_pm_freeze(void)
{
acpi_disable_all_gpes();
acpi_os_wait_events_complete();
acpi_ec_block_transactions();
return 0;
}
/**
* acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
*/
static int acpi_pm_pre_suspend(void)
{
acpi_pm_freeze();
return suspend_nvs_save();
}
/**
* __acpi_pm_prepare - Prepare the platform to enter the target state.
*
* If necessary, set the firmware waking vector and do arch-specific
* nastiness to get the wakeup code to the waking vector.
*/
static int __acpi_pm_prepare(void)
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
if (error)
acpi_target_sleep_state = ACPI_STATE_S0;
return error;
}
/**
* acpi_pm_prepare - Prepare the platform to enter the target sleep
* state and disable the GPEs.
*/
static int acpi_pm_prepare(void)
{
int error = __acpi_pm_prepare();
if (!error)
error = acpi_pm_pre_suspend();
return error;
}
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
static void acpi_pm_finish(void)
{
struct acpi_device *pwr_btn_adev;
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
suspend_nvs_free();
if (acpi_state == ACPI_STATE_S0)
return;
pr_info("Waking up from system sleep state S%d\n", acpi_state);
acpi_disable_wakeup_devices(acpi_state);
acpi_leave_sleep_state(acpi_state);
/* reset firmware waking vector */
acpi_set_waking_vector(0);
acpi_target_sleep_state = ACPI_STATE_S0;
acpi_resume_power_resources();
/* If we were woken with the fixed power button, provide a small
* hint to userspace in the form of a wakeup event on the fixed power
* button device (if it can be found).
*
	 * We delay the event generation until now, as the PM layer requires
* timekeeping to be running before we generate events. */
if (!pwr_btn_event_pending)
return;
pwr_btn_event_pending = false;
pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
NULL, -1);
if (pwr_btn_adev) {
pm_wakeup_event(&pwr_btn_adev->dev, 0);
acpi_dev_put(pwr_btn_adev);
}
}
/**
* acpi_pm_start - Start system PM transition.
*/
static void acpi_pm_start(u32 acpi_state)
{
acpi_target_sleep_state = acpi_state;
acpi_sleep_tts_switch(acpi_target_sleep_state);
acpi_scan_lock_acquire();
}
/**
* acpi_pm_end - Finish up system PM transition.
*/
static void acpi_pm_end(void)
{
acpi_turn_off_unused_power_resources();
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
*/
acpi_target_sleep_state = ACPI_STATE_S0;
acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0 (1)
#define acpi_target_sleep_state ACPI_STATE_S0
#define acpi_sleep_default_s3 (1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
static u32 acpi_suspend_states[] = {
[PM_SUSPEND_ON] = ACPI_STATE_S0,
[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
[PM_SUSPEND_MEM] = ACPI_STATE_S3,
[PM_SUSPEND_MAX] = ACPI_STATE_S5
};
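/*
 * For example, writing "mem" to /sys/power/state selects PM_SUSPEND_MEM,
 * which the table above maps to ACPI S3 (suspend-to-RAM).
 */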
/**
* acpi_suspend_begin - Set the target system sleep state to the state
* associated with given @pm_state, if supported.
*/
static int acpi_suspend_begin(suspend_state_t pm_state)
{
u32 acpi_state = acpi_suspend_states[pm_state];
int error;
error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
if (error)
return error;
if (!sleep_states[acpi_state]) {
pr_err("ACPI does not support sleep state S%u\n", acpi_state);
return -ENOSYS;
}
if (acpi_state > ACPI_STATE_S1)
pm_set_suspend_via_firmware();
acpi_pm_start(acpi_state);
return 0;
}
/**
* acpi_suspend_enter - Actually enter a sleep state.
* @pm_state: ignored
*
* Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
* It's unfortunate, but it works. Please fix if you're feeling frisky.
*/
static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
u32 acpi_state = acpi_target_sleep_state;
int error;
trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
status = acpi_enter_sleep_state(acpi_state);
break;
case ACPI_STATE_S3:
if (!acpi_suspend_lowlevel)
return -ENOSYS;
error = acpi_suspend_lowlevel();
if (error)
return error;
pr_info("Low-level resume complete\n");
pm_set_resume_via_firmware();
break;
}
trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers */
acpi_leave_sleep_state_prep(acpi_state);
	/* The ACPI 3.0 spec (p. 62) says that it's the responsibility
	 * of the OSPM to clear the status bit [implying that the
	 * POWER_BUTTON event should not reach userspace].
*
* However, we do generate a small hint for userspace in the form of
* a wakeup event. We flag this condition for now and generate the
* event later, as we're currently too early in resume to be able to
* generate wakeup events.
*/
if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
/* Flag for later */
pwr_btn_event_pending = true;
}
}
/*
* Disable all GPE and clear their status bits before interrupts are
* enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
* prevent them from producing spurious interrupts.
*
* acpi_leave_sleep_state() will reenable specific GPEs later.
*
* Because this code runs on one CPU with disabled interrupts (all of
* the other CPUs are offline at this time), it need not acquire any
* sleeping locks which may trigger an implicit preemption point even
* if there is no contention, so avoid doing that by using a low-level
* library routine here.
*/
acpi_hw_disable_all_gpes();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions();
suspend_nvs_restore();
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
u32 acpi_state;
switch (pm_state) {
case PM_SUSPEND_ON:
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
acpi_state = acpi_suspend_states[pm_state];
return sleep_states[acpi_state];
default:
return 0;
}
}
static const struct platform_suspend_ops acpi_suspend_ops = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin,
.prepare_late = acpi_pm_prepare,
.enter = acpi_suspend_enter,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
};
/**
* acpi_suspend_begin_old - Set the target system sleep state to the
* state associated with given @pm_state, if supported, and
* execute the _PTS control method. This function is used if the
* pre-ACPI 2.0 suspend ordering has been requested.
*/
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
int error = acpi_suspend_begin(pm_state);
if (!error)
error = __acpi_pm_prepare();
return error;
}
/*
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
static const struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
.prepare_late = acpi_pm_pre_suspend,
.enter = acpi_suspend_enter,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
static bool s2idle_wakeup;
int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
return 0;
}
int acpi_s2idle_prepare(void)
{
if (acpi_sci_irq_valid()) {
int error;
error = enable_irq_wake(acpi_sci_irq);
if (error)
pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
acpi_sci_irq, error);
acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
}
acpi_enable_wakeup_devices(ACPI_STATE_S0);
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
s2idle_wakeup = true;
return 0;
}
bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())
return pm_wakeup_pending();
while (pm_wakeup_pending()) {
/*
* If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
* SCI has not triggered while suspended, so bail out (the
* wakeup is pending anyway and the SCI is not the source of
* it).
*/
if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
return true;
}
/*
* If the status bit of any enabled fixed event is set, the
* wakeup is regarded as valid.
*/
if (acpi_any_fixed_event_status_set()) {
pm_pr_dbg("ACPI fixed event wakeup\n");
return true;
}
/* Check wakeups from drivers sharing the SCI. */
if (acpi_check_wakeup_handlers()) {
pm_pr_dbg("ACPI custom handler wakeup\n");
return true;
}
/*
* Check non-EC GPE wakeups and if there are none, cancel the
* SCI-related wakeup and dispatch the EC GPE.
*/
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}
acpi_os_wait_events_complete();
/*
* The SCI is in the "suspended" state now and it cannot produce
* new wakeup events till the rearming below, so if any of them
* are pending here, they must be resulting from the processing
* of EC events above or coming from somewhere else.
*/
if (pm_wakeup_pending()) {
pm_pr_dbg("Wakeup after ACPI Notify sync\n");
return true;
}
pm_pr_dbg("Rearming ACPI SCI for wakeup\n");
pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
return false;
}
void acpi_s2idle_restore(void)
{
/*
* Drain pending events before restoring the working-state configuration
* of GPEs.
*/
acpi_os_wait_events_complete(); /* synchronize GPE processing */
acpi_ec_flush_work(); /* flush the EC driver's workqueues */
acpi_os_wait_events_complete(); /* synchronize Notify handling */
s2idle_wakeup = false;
acpi_enable_all_runtime_gpes();
acpi_disable_wakeup_devices(ACPI_STATE_S0);
if (acpi_sci_irq_valid()) {
acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
disable_irq_wake(acpi_sci_irq);
}
}
void acpi_s2idle_end(void)
{
acpi_scan_lock_release();
}
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
.wake = acpi_s2idle_wake,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
void __weak acpi_s2idle_setup(void)
{
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
pr_info("Efficient low-power S0 idle declared\n");
s2idle_set_ops(&acpi_s2idle_ops);
}
static void __init acpi_sleep_suspend_setup(void)
{
bool suspend_ops_needed = false;
int i;
for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
if (acpi_sleep_state_supported(i)) {
sleep_states[i] = 1;
suspend_ops_needed = true;
}
if (suspend_ops_needed)
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_s2idle_setup();
}
#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup (false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
bool acpi_s2idle_wakeup(void)
{
return s2idle_wakeup;
}
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
static int acpi_save_bm_rld(void)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
return 0;
}
static void acpi_restore_bm_rld(void)
{
u32 resumed_bm_rld = 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
if (resumed_bm_rld == saved_bm_rld)
return;
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
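/*
* Background on the save/restore above: BM_RLD is a PM1 control bit that
* lets bus-master requests bring the CPU out of C3. Firmware may flip it
* across a sleep state, so the pre-sleep value saved at syscore suspend
* time is written back on resume, and only if it actually changed, which
* avoids a needless register write on the common path.
*/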
static struct syscore_ops acpi_sleep_syscore_ops = {
.suspend = acpi_save_bm_rld,
.resume = acpi_restore_bm_rld,
};
static void acpi_sleep_syscore_init(void)
{
register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
static int acpi_hibernation_begin(pm_message_t stage)
{
if (!nvs_nosave) {
int error = suspend_nvs_alloc();
if (error)
return error;
}
if (stage.event == PM_EVENT_HIBERNATE)
pm_set_suspend_via_firmware();
acpi_pm_start(ACPI_STATE_S4);
return 0;
}
static int acpi_hibernation_enter(void)
{
acpi_status status = AE_OK;
/* This shouldn't return. If it returns, we have a problem */
status = acpi_enter_sleep_state(ACPI_STATE_S4);
/* Reprogram control registers */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_hibernation_leave(void)
{
pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
*/
acpi_enable();
/* Reprogram control registers */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature)
pr_crit("Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions();
}
static void acpi_pm_thaw(void)
{
acpi_ec_unblock_transactions();
acpi_enable_all_runtime_gpes();
}
static const struct platform_hibernation_ops acpi_hibernation_ops = {
.begin = acpi_hibernation_begin,
.end = acpi_pm_end,
.pre_snapshot = acpi_pm_prepare,
.finish = acpi_pm_finish,
.prepare = acpi_pm_prepare,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
.pre_restore = acpi_pm_freeze,
.restore_cleanup = acpi_pm_thaw,
};
/**
* acpi_hibernation_begin_old - Set the target system sleep state to
* ACPI_STATE_S4 and execute the _PTS control method. This
* function is used if the pre-ACPI 2.0 suspend ordering has been
* requested.
*/
static int acpi_hibernation_begin_old(pm_message_t stage)
{
int error;
/*
* The _TTS object should always be evaluated before the _PTS object.
* When old_suspend_ordering is true, the _PTS object is
* evaluated in acpi_sleep_prepare().
*/
acpi_sleep_tts_switch(ACPI_STATE_S4);
error = acpi_sleep_prepare(ACPI_STATE_S4);
if (error)
return error;
if (!nvs_nosave) {
error = suspend_nvs_alloc();
if (error)
return error;
}
if (stage.event == PM_EVENT_HIBERNATE)
pm_set_suspend_via_firmware();
acpi_target_sleep_state = ACPI_STATE_S4;
acpi_scan_lock_acquire();
return 0;
}
/*
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
.pre_snapshot = acpi_pm_pre_suspend,
.prepare = acpi_pm_freeze,
.finish = acpi_pm_finish,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
.pre_restore = acpi_pm_freeze,
.restore_cleanup = acpi_pm_thaw,
.recover = acpi_pm_finish,
};
static void acpi_sleep_hibernate_setup(void)
{
if (!acpi_sleep_state_supported(ACPI_STATE_S4))
return;
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
if (!acpi_check_s4_hw_signature)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
if (facs) {
/*
* s4_hardware_signature is a local copy used only to warn
* about a signature mismatch after we attempt to resume
* (in violation of the ACPI specification).
*/
s4_hardware_signature = facs->hardware_signature;
if (acpi_check_s4_hw_signature > 0) {
/*
* If we're actually obeying the ACPI specification
* then the signature is written out as part of the
* swsusp header, in order to allow the boot kernel
* to gracefully decline to resume.
*/
swsusp_hardware_signature = facs->hardware_signature;
}
}
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */
static int acpi_power_off_prepare(struct sys_off_data *data)
{
/* Prepare to power off the system */
acpi_sleep_prepare(ACPI_STATE_S5);
acpi_disable_all_gpes();
acpi_os_wait_events_complete();
return NOTIFY_DONE;
}
static int acpi_power_off(struct sys_off_data *data)
{
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
pr_debug("%s called\n", __func__);
local_irq_disable();
acpi_enter_sleep_state(ACPI_STATE_S5);
return NOTIFY_DONE;
}
int __init acpi_sleep_init(void)
{
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
acpi_sleep_syscore_init();
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off_prepare, NULL);
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off, NULL);
/*
* Windows uses S5 for reboot, so some BIOSes depend on it to
* perform proper reboot.
*/
register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off_prepare, NULL);
} else {
acpi_no_s5 = true;
}
supported[0] = 0;
for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
if (sleep_states[i])
pos += sprintf(pos, " S%d", i);
}
pr_info("(supports%s)\n", supported);
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
*/
register_reboot_notifier(&tts_notifier);
return 0;
}
| linux-master | drivers/acpi/sleep.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_bus.c - ACPI Bus Driver ($Revision: 80 $)
*
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/regulator/machine.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#include <linux/dmi.h>
#endif
#include <linux/acpi_viot.h>
#include <linux/pci.h>
#include <acpi/apei.h>
#include <linux/suspend.h>
#include <linux/prmt.h>
#include "internal.h"
struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
#ifdef CONFIG_X86
#ifdef CONFIG_ACPI_CUSTOM_DSDT
static inline int set_copy_dsdt(const struct dmi_system_id *id)
{
return 0;
}
#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
pr_notice("%s detected - force copy of DSDT to local memory\n", id->ident);
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
#endif
static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
.callback = set_copy_dsdt,
.ident = "TOSHIBA Satellite",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
},
},
{}
};
#endif
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta)
{
acpi_status status;
status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
if (ACPI_SUCCESS(status))
return AE_OK;
if (status == AE_NOT_FOUND) {
*sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
return AE_OK;
}
return status;
}
EXPORT_SYMBOL_GPL(acpi_bus_get_status_handle);
int acpi_bus_get_status(struct acpi_device *device)
{
acpi_status status;
unsigned long long sta;
if (acpi_device_override_status(device, &sta)) {
acpi_set_device_status(device, sta);
return 0;
}
/* Battery devices must have their deps met before calling _STA */
if (acpi_device_is_battery(device) && device->dep_unmet) {
acpi_set_device_status(device, 0);
return 0;
}
status = acpi_bus_get_status_handle(device->handle, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
pr_debug("Device [%s] status [%08x]: functional but not present\n",
device->pnp.bus_id, (u32)sta);
}
pr_debug("Device [%s] status [%08x]\n", device->pnp.bus_id, (u32)sta);
return 0;
}
EXPORT_SYMBOL(acpi_bus_get_status);
void acpi_bus_private_data_handler(acpi_handle handle,
void *context)
{
return;
}
EXPORT_SYMBOL(acpi_bus_private_data_handler);
int acpi_bus_attach_private_data(acpi_handle handle, void *data)
{
acpi_status status;
status = acpi_attach_data(handle,
acpi_bus_private_data_handler, data);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "Error attaching device data\n");
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(acpi_bus_attach_private_data);
int acpi_bus_get_private_data(acpi_handle handle, void **data)
{
acpi_status status;
if (!data)
return -EINVAL;
status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "No context for object\n");
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(acpi_bus_get_private_data);
void acpi_bus_detach_private_data(acpi_handle handle)
{
acpi_detach_data(handle, acpi_bus_private_data_handler);
}
EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
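/*
* Illustrative use of the private-data helpers above (a sketch, not taken
* from an in-tree caller; "my_ctx" is a hypothetical structure):
*
*	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
*	void *data;
*
*	if (!acpi_bus_attach_private_data(handle, ctx) &&
*	    !acpi_bus_get_private_data(handle, &data))
*		WARN_ON(data != ctx);	/* get returns what attach stored */
*	acpi_bus_detach_private_data(handle);
*/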
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
int i;
acpi_handle_debug(handle, "(%s): %s\n", context->uuid_str, error);
pr_debug("_OSC request data:");
for (i = 0; i < context->cap.length; i += sizeof(u32))
pr_debug(" %x", *((u32 *)(context->cap.pointer + i)));
pr_debug("\n");
}
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
{
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[4];
union acpi_object *out_obj;
guid_t guid;
u32 errors;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
if (!context)
return AE_ERROR;
if (guid_parse(context->uuid_str, &guid))
return AE_ERROR;
context->ret.length = ACPI_ALLOCATE_BUFFER;
context->ret.pointer = NULL;
/* Setting up input parameters */
input.count = 4;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = (u8 *)&guid;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = context->rev;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = context->cap.length/sizeof(u32);
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = context->cap.length;
in_params[3].buffer.pointer = context->cap.pointer;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return status;
if (!output.length)
return AE_NULL_OBJECT;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER
|| out_obj->buffer.length != context->cap.length) {
acpi_print_osc_error(handle, context,
"_OSC evaluation returned wrong type");
status = AE_TYPE;
goto out_kfree;
}
/* Need to ignore bit 0 in the result code */
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) {
if (errors & OSC_REQUEST_ERROR)
acpi_print_osc_error(handle, context,
"_OSC request failed");
if (errors & OSC_INVALID_UUID_ERROR)
acpi_print_osc_error(handle, context,
"_OSC invalid UUID");
if (errors & OSC_INVALID_REVISION_ERROR)
acpi_print_osc_error(handle, context,
"_OSC invalid revision");
if (errors & OSC_CAPABILITIES_MASK_ERROR) {
if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
& OSC_QUERY_ENABLE)
goto out_success;
status = AE_SUPPORT;
goto out_kfree;
}
status = AE_ERROR;
goto out_kfree;
}
out_success:
context->ret.length = out_obj->buffer.length;
context->ret.pointer = kmemdup(out_obj->buffer.pointer,
context->ret.length, GFP_KERNEL);
if (!context->ret.pointer) {
status = AE_NO_MEMORY;
goto out_kfree;
}
status = AE_OK;
out_kfree:
kfree(output.pointer);
return status;
}
EXPORT_SYMBOL(acpi_run_osc);
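/*
* Minimal sketch of an acpi_run_osc() caller (hypothetical two-DWORD
* capability buffer; the real users are the negotiation helpers below):
*
*	u32 capbuf[2] = { [OSC_QUERY_DWORD] = OSC_QUERY_ENABLE };
*	struct acpi_osc_context ctx = {
*		.uuid_str = sb_uuid_str,	/* platform-wide _OSC UUID */
*		.rev = 1,
*		.cap.length = sizeof(capbuf),
*		.cap.pointer = capbuf,
*	};
*
*	if (ACPI_SUCCESS(acpi_run_osc(handle, &ctx)))
*		kfree(ctx.ret.pointer);	/* caller owns the returned buffer */
*/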
bool osc_sb_apei_support_acked;
/*
* ACPI 6.0 Section 8.4.4.2 Idle State Coordination
* OSPM supports platform coordinated low power idle(LPI) states
*/
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
/*
* ACPI 6.2 Section 6.2.11.2 'Platform-Wide OSPM Capabilities':
* Starting with ACPI Specification 6.2, all _CPC registers can be in
* PCC, System Memory, System IO, or Functional Fixed Hardware address
* spaces. OSPM support for this more flexible register space scheme is
* indicated by the “Flexible Address Space for CPPC Registers” _OSC bit.
*
* Otherwise (cf ACPI 6.1, s8.4.7.1.1.X), _CPC registers must be in:
* - PCC or Functional Fixed Hardware address space if defined
* - SystemMemory address space (NULL register) if not defined
*/
bool osc_cpc_flexible_adr_space_confirmed;
EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);
/*
* ACPI 6.4 Operating System Capabilities for USB.
*/
bool osc_sb_native_usb4_support_confirmed;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
bool osc_sb_cppc2_support_acked;
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_negotiate_platform_control(void)
{
u32 capbuf[2], *capbuf_ret;
struct acpi_osc_context context = {
.uuid_str = sb_uuid_str,
.rev = 1,
.cap.length = 8,
.cap.pointer = capbuf,
};
acpi_handle handle;
capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PRMT))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_FFH))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FFH_OPR_SUPPORT;
#ifdef CONFIG_ARM64
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
#endif
#ifdef CONFIG_X86
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
#endif
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_FLEXIBLE_ADR_SPACE;
if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
if (IS_ENABLED(CONFIG_USB4))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_NATIVE_USB4_SUPPORT;
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
if (context.ret.length <= OSC_SUPPORT_DWORD) {
kfree(context.ret.pointer);
return;
}
/*
* Now run _OSC again with query flag clear and with the caps
* supported by both the OS and the platform.
*/
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
if (context.ret.length > OSC_SUPPORT_DWORD) {
#ifdef CONFIG_ACPI_CPPC_LIB
osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT;
#endif
osc_sb_apei_support_acked =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
osc_pc_lpi_support_confirmed =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
osc_sb_native_usb4_support_confirmed =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
osc_cpc_flexible_adr_space_confirmed =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPC_FLEXIBLE_ADR_SPACE;
}
kfree(context.ret.pointer);
}
/*
* Native control of USB4 capabilities. If any of the tunneling bits is
* set it means OS is in control and we use software based connection
* manager.
*/
u32 osc_sb_native_usb4_control;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);
static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
{
pr_info("%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
(bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
(bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
(bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
(bits & OSC_USB_XDOMAIN) ? '+' : '-');
}
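/*
* Example of the resulting log line (illustrative values):
*   "USB4 _OSC: OS supports USB3+ DisplayPort+ PCIe- XDomain+"
* where '+' marks a tunneling capability present in @bits and '-' one
* that is absent.
*/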
static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
static void acpi_bus_osc_negotiate_usb_control(void)
{
u32 capbuf[3];
struct acpi_osc_context context = {
.uuid_str = sb_usb_uuid_str,
.rev = 1,
.cap.length = sizeof(capbuf),
.cap.pointer = capbuf,
};
acpi_handle handle;
acpi_status status;
u32 control;
if (!osc_sb_native_usb4_support_confirmed)
return;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = 0;
capbuf[OSC_CONTROL_DWORD] = control;
status = acpi_run_osc(handle, &context);
if (ACPI_FAILURE(status))
return;
if (context.ret.length != sizeof(capbuf)) {
pr_info("USB4 _OSC: returned invalid length buffer\n");
goto out_free;
}
osc_sb_native_usb4_control =
control & acpi_osc_ctx_get_pci_control(&context);
acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
osc_sb_native_usb4_control);
out_free:
kfree(context.ret.pointer);
}
/* --------------------------------------------------------------------------
Notification Handling
-------------------------------------------------------------------------- */
/**
* acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
* @handle: Target ACPI object.
* @type: Notification type.
* @data: Ignored.
*
* This only handles notifications related to device hotplug.
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
return;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
return;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
return;
default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
return;
}
adev = acpi_get_acpi_dev(handle);
if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
acpi_put_acpi_dev(adev);
acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *device = data;
struct acpi_driver *acpi_drv = to_acpi_driver(device->dev.driver);
acpi_drv->ops.notify(device, event);
}
static int acpi_device_install_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
acpi_status status;
status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device, device);
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
acpi_os_wait_events_complete();
}
int acpi_dev_install_notify_handler(struct acpi_device *adev,
u32 handler_type,
acpi_notify_handler handler)
{
acpi_status status;
status = acpi_install_notify_handler(adev->handle, handler_type,
handler, adev);
if (ACPI_FAILURE(status))
return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_dev_install_notify_handler);
void acpi_dev_remove_notify_handler(struct acpi_device *adev,
u32 handler_type,
acpi_notify_handler handler)
{
acpi_remove_notify_handler(adev->handle, handler_type, handler);
acpi_os_wait_events_complete();
}
EXPORT_SYMBOL_GPL(acpi_dev_remove_notify_handler);
/* Handle events targeting \_SB device (at present only graceful shutdown) */
#define ACPI_SB_NOTIFY_SHUTDOWN_REQUEST 0x81
#define ACPI_SB_INDICATE_INTERVAL 10000
static void sb_notify_work(struct work_struct *dummy)
{
acpi_handle sb_handle;
orderly_poweroff(true);
/*
* After initiating graceful shutdown, the ACPI spec requires OSPM
* to evaluate _OST method once every 10seconds to indicate that
* the shutdown is in progress
*/
acpi_get_handle(NULL, "\\_SB", &sb_handle);
while (1) {
pr_info("Graceful shutdown in progress.\n");
acpi_evaluate_ost(sb_handle, ACPI_OST_EC_OSPM_SHUTDOWN,
ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS, NULL);
msleep(ACPI_SB_INDICATE_INTERVAL);
}
}
static void acpi_sb_notify(acpi_handle handle, u32 event, void *data)
{
static DECLARE_WORK(acpi_sb_work, sb_notify_work);
if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) {
if (!work_busy(&acpi_sb_work))
schedule_work(&acpi_sb_work);
} else {
pr_warn("event %x is not supported by \\_SB device\n", event);
}
}
static int __init acpi_setup_sb_notify_handler(void)
{
acpi_handle sb_handle;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &sb_handle)))
return -ENXIO;
if (ACPI_FAILURE(acpi_install_notify_handler(sb_handle, ACPI_DEVICE_NOTIFY,
acpi_sb_notify, NULL)))
return -EINVAL;
return 0;
}
/* --------------------------------------------------------------------------
Device Matching
-------------------------------------------------------------------------- */
/**
* acpi_get_first_physical_node - Get first physical node of an ACPI device
* @adev: ACPI device in question
*
* Return: First physical node of ACPI device @adev
*/
struct device *acpi_get_first_physical_node(struct acpi_device *adev)
{
struct mutex *physical_node_lock = &adev->physical_node_lock;
struct device *phys_dev;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
phys_dev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
phys_dev = node->dev;
}
mutex_unlock(physical_node_lock);
return phys_dev;
}
EXPORT_SYMBOL_GPL(acpi_get_first_physical_node);
static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
const struct device *dev)
{
const struct device *phys_dev = acpi_get_first_physical_node(adev);
return phys_dev && phys_dev == dev ? adev : NULL;
}
/**
* acpi_device_is_first_physical_node - Is given dev first physical node
* @adev: ACPI companion device
* @dev: Physical device to check
*
* Function checks if given @dev is the first physical device attached to
* the ACPI companion device. This distinction is needed in some cases
* where the same companion device is shared between many physical devices.
*
* Note that the caller has to provide a valid @adev pointer.
*/
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev)
{
return !!acpi_primary_dev_companion(adev, dev);
}
/*
* acpi_companion_match() - Can we match via ACPI companion device
* @dev: Device in question
*
* Check if the given device has an ACPI companion and if that companion has
* a valid list of PNP IDs, and if the device is the first (primary) physical
* device associated with it. Return the companion pointer if that's the case
* or NULL otherwise.
*
* If multiple physical devices are attached to a single ACPI companion, we need
* to be careful. The usage scenario for this kind of relationship is that all
* of the physical devices in question use resources provided by the ACPI
* companion. A typical case is an MFD device where all the sub-devices share
* the parent's ACPI companion. In such cases we can only allow the primary
* (first) physical device to be matched with the help of the companion's PNP
* IDs.
*
* Additional physical devices sharing the ACPI companion can still use
* resources available from it but they will be matched normally using functions
* provided by their bus types (and analogously for their modalias).
*/
const struct acpi_device *acpi_companion_match(const struct device *dev)
{
struct acpi_device *adev;
adev = ACPI_COMPANION(dev);
if (!adev)
return NULL;
if (list_empty(&adev->pnp.ids))
return NULL;
return acpi_primary_dev_companion(adev, dev);
}
/**
* acpi_of_match_device - Match device object using the "compatible" property.
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
* @of_id: OF ID if matched
*
* If @adev has ACPI_DT_NAMESPACE_HID in its list of identifiers and a _DSD
* object with the "compatible" property, use that property to match against
* the given list of identifiers.
*/
static bool acpi_of_match_device(const struct acpi_device *adev,
const struct of_device_id *of_match_table,
const struct of_device_id **of_id)
{
const union acpi_object *of_compatible, *obj;
int i, nval;
if (!adev)
return false;
of_compatible = adev->data.of_compatible;
if (!of_match_table || !of_compatible)
return false;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
/* Now we can look for the driver DT compatible strings */
for (i = 0; i < nval; i++, obj++) {
const struct of_device_id *id;
for (id = of_match_table; id->compatible[0]; id++)
if (!strcasecmp(obj->string.pointer, id->compatible)) {
if (of_id)
*of_id = id;
return true;
}
}
return false;
}
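/*
* Sketch of the data this walks (hypothetical values): a firmware node
* with _HID "PRP0001" (ACPI_DT_NAMESPACE_HID) and a _DSD "compatible"
* property of "vendor,chip" matches a driver of_match_table containing
*   { .compatible = "vendor,chip" },
* exactly as the equivalent DT match would.
*/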
static bool acpi_of_modalias(struct acpi_device *adev,
char *modalias, size_t len)
{
const union acpi_object *of_compatible;
const union acpi_object *obj;
const char *str, *chr;
of_compatible = adev->data.of_compatible;
if (!of_compatible)
return false;
if (of_compatible->type == ACPI_TYPE_PACKAGE)
obj = of_compatible->package.elements;
else /* Must be ACPI_TYPE_STRING. */
obj = of_compatible;
str = obj->string.pointer;
chr = strchr(str, ',');
strscpy(modalias, chr ? chr + 1 : str, len);
return true;
}
/**
* acpi_set_modalias - Set modalias using "compatible" property or supplied ID
* @adev: ACPI device object to match
* @default_id: ID string to use as default if no compatible string found
* @modalias: Pointer to buffer that modalias value will be copied into
* @len: Length of modalias buffer
*
* This is a counterpart of of_alias_from_compatible() for struct acpi_device
* objects. If there is a compatible string for @adev, it will be copied to
* @modalias with the vendor prefix stripped; otherwise, @default_id will be
* used.
*/
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len)
{
if (!acpi_of_modalias(adev, modalias, len))
strscpy(modalias, default_id, len);
}
EXPORT_SYMBOL_GPL(acpi_set_modalias);
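/*
* Example (illustrative): a compatible string of "vendor,chip" yields a
* modalias of "chip", since everything up to and including the first ','
* is treated as the vendor prefix; with no compatible property at all,
* @default_id is copied verbatim.
*/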
static bool __acpi_match_device_cls(const struct acpi_device_id *id,
struct acpi_hardware_id *hwid)
{
int i, msk, byte_shift;
char buf[3];
if (!id->cls)
return false;
/* Apply class-code bitmask, before checking each class-code byte */
for (i = 1; i <= 3; i++) {
byte_shift = 8 * (3 - i);
msk = (id->cls_msk >> byte_shift) & 0xFF;
if (!msk)
continue;
sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
return false;
}
return true;
}
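/*
* Worked example (hypothetical IDs): with id->cls == 0x0c0330 and
* id->cls_msk == 0xffffff, the three class-code bytes are formatted as
* "0c", "03" and "30" and compared against hwid->id[0..1], [2..3] and
* [4..5] respectively; a mask byte of 0x00 skips that byte entirely.
*/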
static bool __acpi_match_device(const struct acpi_device *device,
const struct acpi_device_id *acpi_ids,
const struct of_device_id *of_ids,
const struct acpi_device_id **acpi_id,
const struct of_device_id **of_id)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
/*
* If the device is not present, there is no need to load a device
* driver for it.
*/
if (!device || !device->status.present)
return false;
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
if (acpi_ids) {
for (id = acpi_ids; id->id[0] || id->cls; id++) {
if (id->id[0] && !strcmp((char *)id->id, hwid->id))
goto out_acpi_match;
if (id->cls && __acpi_match_device_cls(id, hwid))
goto out_acpi_match;
}
}
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
*/
if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
return acpi_of_match_device(device, of_ids, of_id);
}
return false;
out_acpi_match:
if (acpi_id)
*acpi_id = id;
return true;
}
/**
* acpi_match_acpi_device - Match an ACPI device against a given list of ACPI IDs
* @ids: Array of struct acpi_device_id objects to match against.
* @adev: The ACPI device pointer to match.
*
* Match the ACPI device @adev against a given list of ACPI IDs @ids.
*
* Return:
* a pointer to the first matching ACPI ID on success or %NULL on failure.
*/
const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
const struct acpi_device *adev)
{
const struct acpi_device_id *id = NULL;
__acpi_match_device(adev, ids, NULL, &id, NULL);
return id;
}
EXPORT_SYMBOL_GPL(acpi_match_acpi_device);
/**
* acpi_match_device - Match a struct device against a given list of ACPI IDs
* @ids: Array of struct acpi_device_id object to match against.
* @dev: The device structure to match.
*
* Check if @dev has a valid ACPI handle and if there is a struct acpi_device
* object for that handle and use that object to match against a given list of
* device IDs.
*
* Return a pointer to the first matching ID on success or %NULL on failure.
*/
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
return acpi_match_acpi_device(ids, acpi_companion_match(dev));
}
EXPORT_SYMBOL_GPL(acpi_match_device);
static const void *acpi_of_device_get_match_data(const struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
const struct of_device_id *match = NULL;
if (!acpi_of_match_device(adev, dev->driver->of_match_table, &match))
return NULL;
return match->data;
}
const void *acpi_device_get_match_data(const struct device *dev)
{
const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table;
const struct acpi_device_id *match;
if (!acpi_ids)
return acpi_of_device_get_match_data(dev);
match = acpi_match_device(acpi_ids, dev);
if (!match)
return NULL;
return (const void *)match->driver_data;
}
EXPORT_SYMBOL_GPL(acpi_device_get_match_data);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
const struct acpi_device_id *acpi_ids = drv->acpi_match_table;
const struct of_device_id *of_ids = drv->of_match_table;
if (!acpi_ids)
return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL);
return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
/* --------------------------------------------------------------------------
ACPI Driver Management
-------------------------------------------------------------------------- */
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
* @driver: driver to unregister
*
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
/* --------------------------------------------------------------------------
ACPI Bus operations
-------------------------------------------------------------------------- */
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
return acpi_dev->flags.match_driver
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
static int acpi_device_probe(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
int ret;
if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
return -EINVAL;
if (!acpi_drv->ops.add)
return -ENOSYS;
ret = acpi_drv->ops.add(acpi_dev);
if (ret) {
acpi_dev->driver_data = NULL;
return ret;
}
pr_debug("Driver [%s] successfully bound to device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
acpi_dev->driver_data = NULL;
return ret;
}
}
pr_debug("Found driver [%s] for device [%s]\n", acpi_drv->name,
acpi_dev->pnp.bus_id);
get_device(dev);
return 0;
}
static void acpi_device_remove(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
acpi_dev->driver_data = NULL;
put_device(dev);
}
struct bus_type acpi_bus_type = {
.name = "acpi",
.match = acpi_bus_match,
.probe = acpi_device_probe,
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data)
{
return bus_for_each_dev(&acpi_bus_type, NULL, data, fn);
}
EXPORT_SYMBOL_GPL(acpi_bus_for_each_dev);
struct acpi_dev_walk_context {
int (*fn)(struct acpi_device *, void *);
void *data;
};
static int acpi_dev_for_one_check(struct device *dev, void *context)
{
struct acpi_dev_walk_context *adwc = context;
if (dev->bus != &acpi_bus_type)
return 0;
return adwc->fn(to_acpi_device(dev), adwc->data);
}
int acpi_dev_for_each_child(struct acpi_device *adev,
int (*fn)(struct acpi_device *, void *), void *data)
{
struct acpi_dev_walk_context adwc = {
.fn = fn,
.data = data,
};
return device_for_each_child(&adev->dev, &adwc, acpi_dev_for_one_check);
}
EXPORT_SYMBOL_GPL(acpi_dev_for_each_child);
int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
int (*fn)(struct acpi_device *, void *),
void *data)
{
struct acpi_dev_walk_context adwc = {
.fn = fn,
.data = data,
};
return device_for_each_child_reverse(&adev->dev, &adwc, acpi_dev_for_one_check);
}
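/*
* Usage sketch (hypothetical callback): count the children of @adev.
*
*	static int count_one(struct acpi_device *child, void *data)
*	{
*		(*(int *)data)++;
*		return 0;	/* nonzero would stop the walk */
*	}
*
*	int n = 0;
*	acpi_dev_for_each_child(adev, count_one, &n);
*/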
/* --------------------------------------------------------------------------
Initialization/Cleanup
-------------------------------------------------------------------------- */
static int __init acpi_bus_init_irq(void)
{
acpi_status status;
char *message = NULL;
/*
* Let the system know what interrupt model we are using by
* evaluating the \_PIC object, if it exists.
*/
switch (acpi_irq_model) {
case ACPI_IRQ_MODEL_PIC:
message = "PIC";
break;
case ACPI_IRQ_MODEL_IOAPIC:
message = "IOAPIC";
break;
case ACPI_IRQ_MODEL_IOSAPIC:
message = "IOSAPIC";
break;
case ACPI_IRQ_MODEL_GIC:
message = "GIC";
break;
case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model";
break;
case ACPI_IRQ_MODEL_LPIC:
message = "LPIC";
break;
default:
pr_info("Unknown interrupt routing model\n");
return -ENODEV;
}
pr_info("Using %s for interrupt routing\n", message);
status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
pr_info("_PIC evaluation failed: %s\n", acpi_format_exception(status));
return -ENODEV;
}
return 0;
}
/**
* acpi_early_init - Initialize ACPICA and populate the ACPI namespace.
*
* The ACPI tables are accessible after this, but the handling of events has not
* been initialized and the global lock is not available yet, so AML should not
* be executed at this point.
*
* Doing this before switching the EFI runtime services to virtual mode allows
* the EfiBootServices memory to be freed slightly earlier on boot.
*/
void __init acpi_early_init(void)
{
acpi_status status;
if (acpi_disabled)
return;
pr_info("Core revision %08x\n", ACPI_CA_VERSION);
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
acpi_permanent_mmap = true;
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
* DSDT will be copied to memory.
* Note that calling dmi_check_system() here on other architectures
* would not be OK because only x86 initializes dmi early enough.
* Thankfully only x86 systems need such quirks for now.
*/
dmi_check_system(dsdt_dmi_table);
#endif
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
pr_err("Unable to reallocate ACPI tables\n");
goto error0;
}
status = acpi_initialize_subsystem();
if (ACPI_FAILURE(status)) {
pr_err("Unable to initialize the ACPI Interpreter\n");
goto error0;
}
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
}
/* Set PIC-mode SCI trigger type */
acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
} else {
/*
* now that acpi_gbl_FADT is initialized,
* update it with result from INT_SRC_OVR parsing
*/
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
}
#endif
return;
error0:
disable_acpi();
}
/**
* acpi_subsystem_init - Finalize the early initialization of ACPI.
*
* Switch over the platform to the ACPI mode (if possible).
*
* Doing this too early is generally unsafe, but at the same time it needs to be
* done before all things that really depend on ACPI. The right spot appears to
* be before finalizing the EFI initialization.
*/
void __init acpi_subsystem_init(void)
{
acpi_status status;
if (acpi_disabled)
return;
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
pr_err("Unable to enable ACPI\n");
disable_acpi();
} else {
/*
* If the system is using ACPI then we can be reasonably
* confident that any regulators are managed by the firmware
* so tell the regulator core it has everything it needs to
* know.
*/
regulator_has_full_constraints();
}
}
static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
{
if (event == ACPI_TABLE_EVENT_LOAD)
acpi_scan_table_notify();
return acpi_sysfs_table_handler(event, table, context);
}
static int __init acpi_bus_init(void)
{
int result;
acpi_status status;
acpi_os_initialize1();
status = acpi_load_tables();
if (ACPI_FAILURE(status)) {
pr_err("Unable to load the System Description Tables\n");
goto error1;
}
/*
* ACPI 2.0 requires the EC driver to be loaded and working before the EC
* device is found in the namespace.
*
* This is accomplished by looking for the ECDT table and getting the EC
* parameters out of that.
*
* Do that before calling acpi_initialize_objects() which may trigger EC
* address space accesses.
*/
acpi_ec_ecdt_probe();
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
pr_err("Unable to start the ACPI Interpreter\n");
goto error1;
}
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
pr_err("Unable to initialize ACPI objects\n");
goto error1;
}
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
*/
acpi_bus_osc_negotiate_platform_control();
acpi_bus_osc_negotiate_usb_control();
/*
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
status = acpi_install_table_handler(acpi_bus_table_handler, NULL);
acpi_sysfs_init();
acpi_early_processor_control_setup();
/*
* The EC region may be needed during bus_scan/acpi_get_devices, so
* it is necessary to enable it as early as possible.
*/
acpi_ec_dsdt_probe();
pr_info("Interpreter enabled\n");
/* Initialize sleep structures */
acpi_sleep_init();
/*
* Get the system interrupt model and evaluate \_PIC.
*/
result = acpi_bus_init_irq();
if (result)
goto error1;
/*
* Register for all standard device notifications.
*/
status =
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
&acpi_bus_notify, NULL);
if (ACPI_FAILURE(status)) {
pr_err("Unable to register for system notifications\n");
goto error1;
}
/*
* Create the top ACPI proc directory
*/
acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
result = bus_register(&acpi_bus_type);
if (!result)
return 0;
/* Mimic structured exception handling */
error1:
acpi_terminate();
return -ENODEV;
}
struct kobject *acpi_kobj;
EXPORT_SYMBOL_GPL(acpi_kobj);
static int __init acpi_init(void)
{
int result;
if (acpi_disabled) {
pr_info("Interpreter disabled.\n");
return -ENODEV;
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
if (!acpi_kobj)
pr_debug("%s: kset create error\n", __func__);
init_prmt();
acpi_init_pcc();
result = acpi_bus_init();
if (result) {
kobject_put(acpi_kobj);
disable_acpi();
return result;
}
acpi_init_ffh();
pci_mmcfg_late_init();
acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
acpi_debugger_init();
acpi_setup_sb_notify_handler();
acpi_viot_init();
return 0;
}
subsys_initcall(acpi_init);
| linux-master | drivers/acpi/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Author: Erik Kaneda <[email protected]>
* Copyright 2020 Intel Corporation
*
* prmt.c
*
* Each PRM service is an executable that runs in a restricted environment
* and is invoked by writing to the PlatformRtMechanism OperationRegion
* from AML bytecode.
*
* init_prmt initializes the Platform Runtime Mechanism (PRM) services by
* processing data in the PRMT as well as registering an ACPI OperationRegion
* handler for the PlatformRtMechanism subtype.
*
*/
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/prmt.h>
#include <asm/efi.h>
#pragma pack(1)
struct prm_mmio_addr_range {
u64 phys_addr;
u64 virt_addr;
u32 length;
};
struct prm_mmio_info {
u64 mmio_count;
struct prm_mmio_addr_range addr_ranges[];
};
struct prm_buffer {
u8 prm_status;
u64 efi_status;
u8 prm_cmd;
guid_t handler_guid;
};
struct prm_context_buffer {
char signature[ACPI_NAMESEG_SIZE];
u16 revision;
u16 reserved;
guid_t identifier;
u64 static_data_buffer;
struct prm_mmio_info *mmio_ranges;
};
#pragma pack()
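/*
* The structures above are byte-packed because their layout is dictated
* by the other side: prm_buffer is the message buffer handed over from
* AML via the PlatformRtMechanism opregion, and prm_context_buffer is
* consumed by the PRM handler itself, so no compiler-inserted padding
* can be tolerated.
*/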
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
guid_t guid;
efi_status_t (__efiapi *handler_addr)(u64, void *);
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
struct list_head handler_list;
};
struct prm_module_info {
guid_t guid;
u16 major_rev;
u16 minor_rev;
u16 handler_count;
struct prm_mmio_info *mmio_info;
bool updatable;
struct list_head module_list;
struct prm_handler_info handlers[];
};
static u64 efi_pa_va_lookup(u64 pa)
{
efi_memory_desc_t *md;
u64 pa_offset = pa & ~PAGE_MASK;
u64 page = pa & PAGE_MASK;
for_each_efi_memory_desc(md) {
if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
return pa_offset + md->virt_addr + page - md->phys_addr;
}
return 0;
}
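/*
* Worked example (illustrative numbers): with a descriptor covering
* phys 0x10000000..0x10400000 mapped at virt 0xffff800000000000,
* pa == 0x10002345 splits into page 0x10002000 plus offset 0x345 and
* yields 0xffff800000002345. A PA outside every descriptor (and one
* exactly equal to md->phys_addr, since the comparison is strict)
* returns 0.
*/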
#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + (a)->handler_info_offset))
#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) (a)))
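/*
* These two macros linearly walk the variable-length handler array that
* follows each acpi_prmt_module_info entry, e.g. (sketch):
*
*	handler_info = get_first_handler(module_info);
*	for (i = 0; i < module_info->handler_info_count; i++,
*	     handler_info = get_next_handler(handler_info))
*		...;
*/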
static int __init
acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_prmt_module_info *module_info;
struct acpi_prmt_handler_info *handler_info;
struct prm_handler_info *th;
struct prm_module_info *tm;
u64 *mmio_count;
u64 cur_handler = 0;
u32 module_info_size = 0;
u64 mmio_range_size = 0;
void *temp_mmio;
module_info = (struct acpi_prmt_module_info *) header;
module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
tm = kmalloc(module_info_size, GFP_KERNEL);
if (!tm)
goto parse_prmt_out1;
guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
tm->major_rev = module_info->major_rev;
tm->minor_rev = module_info->minor_rev;
tm->handler_count = module_info->handler_info_count;
tm->updatable = true;
if (module_info->mmio_list_pointer) {
/*
* Each module is associated with a list of address
* ranges that it can use during the service.
*/
mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
if (!mmio_count)
goto parse_prmt_out2;
mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count);
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
if (!tm->mmio_info)
goto parse_prmt_out3;
temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
if (!temp_mmio)
goto parse_prmt_out4;
memmove(tm->mmio_info, temp_mmio, mmio_range_size);
} else {
tm->mmio_info = kmalloc(sizeof(*tm->mmio_info), GFP_KERNEL);
if (!tm->mmio_info)
goto parse_prmt_out2;
tm->mmio_info->mmio_count = 0;
}
INIT_LIST_HEAD(&tm->module_list);
list_add(&tm->module_list, &prm_module_list);
handler_info = get_first_handler(module_info);
do {
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
parse_prmt_out4:
kfree(tm->mmio_info);
parse_prmt_out3:
memunmap(mmio_count);
parse_prmt_out2:
kfree(tm);
parse_prmt_out1:
return -ENOMEM;
}
#define GET_MODULE 0
#define GET_HANDLER 1
static void *find_guid_info(const guid_t *guid, u8 mode)
{
struct prm_handler_info *cur_handler;
struct prm_module_info *cur_module;
int i = 0;
list_for_each_entry(cur_module, &prm_module_list, module_list) {
for (i = 0; i < cur_module->handler_count; ++i) {
cur_handler = &cur_module->handlers[i];
if (guid_equal(guid, &cur_handler->guid)) {
if (mode == GET_MODULE)
return (void *)cur_module;
else
return (void *)cur_handler;
}
}
}
return NULL;
}
static struct prm_module_info *find_prm_module(const guid_t *guid)
{
return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
}
static struct prm_handler_info *find_prm_handler(const guid_t *guid)
{
return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
}
/* In-coming PRM commands */
#define PRM_CMD_RUN_SERVICE 0
#define PRM_CMD_START_TRANSACTION 1
#define PRM_CMD_END_TRANSACTION 2
/* statuses that can be passed back to ASL */
#define PRM_HANDLER_SUCCESS 0
#define PRM_HANDLER_ERROR 1
#define INVALID_PRM_COMMAND 2
#define PRM_HANDLER_GUID_NOT_FOUND 3
#define UPDATE_LOCK_ALREADY_HELD 4
#define UPDATE_UNLOCK_WITHOUT_LOCK 5
/*
* This is the PlatformRtMechanism opregion space handler.
* @function: indicates read or write. Since the PlatformRtMechanism
* message is command driven, only write is meaningful.
*
* @addr: not used
* @bits: not used
* @value: an in/out parameter that points to the PRM message buffer
* @handler_context: not used
*/
static acpi_status acpi_platformrt_space_handler(u32 function,
acpi_physical_address addr,
u32 bits, acpi_integer *value,
void *handler_context,
void *region_context)
{
struct prm_buffer *buffer = ACPI_CAST_PTR(struct prm_buffer, value);
struct prm_handler_info *handler;
struct prm_module_info *module;
efi_status_t status;
struct prm_context_buffer context;
if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
return AE_NO_HANDLER;
}
/*
* The returned acpi_status will always be AE_OK. Error values will be
* saved in the first byte of the PRM message buffer to be used by ASL.
*/
switch (buffer->prm_cmd) {
case PRM_CMD_RUN_SERVICE:
handler = find_prm_handler(&buffer->handler_guid);
module = find_prm_module(&buffer->handler_guid);
if (!handler || !module)
goto invalid_guid;
ACPI_COPY_NAMESEG(context.signature, "PRMC");
context.revision = 0x0;
context.reserved = 0x0;
context.identifier = handler->guid;
context.static_data_buffer = handler->static_data_buffer_addr;
context.mmio_ranges = module->mmio_info;
status = efi_call_acpi_prm_handler(handler->handler_addr,
handler->acpi_param_buffer_addr,
&context);
if (status == EFI_SUCCESS) {
buffer->prm_status = PRM_HANDLER_SUCCESS;
} else {
buffer->prm_status = PRM_HANDLER_ERROR;
buffer->efi_status = status;
}
break;
case PRM_CMD_START_TRANSACTION:
module = find_prm_module(&buffer->handler_guid);
if (!module)
goto invalid_guid;
if (module->updatable)
module->updatable = false;
else
buffer->prm_status = UPDATE_LOCK_ALREADY_HELD;
break;
case PRM_CMD_END_TRANSACTION:
module = find_prm_module(&buffer->handler_guid);
if (!module)
goto invalid_guid;
if (module->updatable)
buffer->prm_status = UPDATE_UNLOCK_WITHOUT_LOCK;
else
module->updatable = true;
break;
default:
buffer->prm_status = INVALID_PRM_COMMAND;
break;
}
return AE_OK;
invalid_guid:
buffer->prm_status = PRM_HANDLER_GUID_NOT_FOUND;
return AE_OK;
}
void __init init_prmt(void)
{
struct acpi_table_header *tbl;
acpi_status status;
int mc;
status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
if (ACPI_FAILURE(status))
return;
mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof(struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
acpi_put_table(tbl);
/*
 * Return immediately if the PRMT table is not present or no PRM modules
 * were found.
 */
if (mc <= 0)
return;
pr_info("PRM: found %u modules\n", mc);
if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
pr_err("PRM: EFI runtime services unavailable\n");
return;
}
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_RT,
&acpi_platformrt_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status))
pr_alert("PRM: OperationRegion handler could not be installed\n");
}
| linux-master | drivers/acpi/prmt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_utils.c - ACPI Utility Functions ($Revision: 10 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: utils: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/acpi.h>
#include <linux/dynamic_debug.h>
#include "internal.h"
#include "sleep.h"
/* --------------------------------------------------------------------------
Object Evaluation Helpers
-------------------------------------------------------------------------- */
static void acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
{
acpi_handle_debug(h, "Evaluate [%s]: %s\n", p, acpi_format_exception(s));
}
acpi_status
acpi_extract_package(union acpi_object *package,
struct acpi_buffer *format, struct acpi_buffer *buffer)
{
u32 size_required = 0;
u32 tail_offset = 0;
char *format_string = NULL;
u32 format_count = 0;
u32 i = 0;
u8 *head = NULL;
u8 *tail = NULL;
if (!package || (package->type != ACPI_TYPE_PACKAGE)
|| (package->package.count < 1)) {
pr_debug("Invalid package argument\n");
return AE_BAD_PARAMETER;
}
if (!format || !format->pointer || (format->length < 1)) {
pr_debug("Invalid format argument\n");
return AE_BAD_PARAMETER;
}
if (!buffer) {
pr_debug("Invalid buffer argument\n");
return AE_BAD_PARAMETER;
}
format_count = (format->length / sizeof(char)) - 1;
if (format_count > package->package.count) {
pr_debug("Format specifies more objects [%d] than present [%d]\n",
format_count, package->package.count);
return AE_BAD_DATA;
}
format_string = format->pointer;
/*
* Calculate size_required.
*/
for (i = 0; i < format_count; i++) {
union acpi_object *element = &(package->package.elements[i]);
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
size_required += sizeof(u64);
tail_offset += sizeof(u64);
break;
case 'S':
size_required +=
sizeof(char *) + sizeof(u64) +
sizeof(char);
tail_offset += sizeof(char *);
break;
default:
pr_debug("Invalid package element [%d]: got number, expected [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
size_required +=
sizeof(char *) +
(element->string.length * sizeof(char)) +
sizeof(char);
tail_offset += sizeof(char *);
break;
case 'B':
size_required +=
sizeof(u8 *) + element->buffer.length;
tail_offset += sizeof(u8 *);
break;
default:
pr_debug("Invalid package element [%d] got string/buffer, expected [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
switch (format_string[i]) {
case 'R':
size_required += sizeof(void *);
tail_offset += sizeof(void *);
break;
default:
pr_debug("Invalid package element [%d] got reference, expected [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
}
break;
case ACPI_TYPE_PACKAGE:
default:
pr_debug("Unsupported element at index=%d\n", i);
/* TBD: handle nested packages... */
return AE_SUPPORT;
}
}
/*
* Validate output buffer.
*/
if (buffer->length == ACPI_ALLOCATE_BUFFER) {
buffer->pointer = ACPI_ALLOCATE_ZEROED(size_required);
if (!buffer->pointer)
return AE_NO_MEMORY;
buffer->length = size_required;
} else {
if (buffer->length < size_required) {
buffer->length = size_required;
return AE_BUFFER_OVERFLOW;
} else if (buffer->length != size_required ||
!buffer->pointer) {
return AE_BAD_PARAMETER;
}
}
head = buffer->pointer;
tail = buffer->pointer + tail_offset;
/*
* Extract package data.
*/
for (i = 0; i < format_count; i++) {
u8 **pointer = NULL;
union acpi_object *element = &(package->package.elements[i]);
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
*((u64 *) head) =
element->integer.value;
head += sizeof(u64);
break;
case 'S':
pointer = (u8 **) head;
*pointer = tail;
*((u64 *) tail) =
element->integer.value;
head += sizeof(u64 *);
tail += sizeof(u64);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->string.pointer,
element->string.length);
head += sizeof(char *);
tail += element->string.length * sizeof(char);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
case 'B':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->buffer.pointer,
element->buffer.length);
head += sizeof(u8 *);
tail += element->buffer.length;
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
switch (format_string[i]) {
case 'R':
*(void **)head =
(void *)element->reference.handle;
head += sizeof(void *);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_PACKAGE:
/* TBD: handle nested packages... */
default:
/* Should never get here */
break;
}
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_extract_package);
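/*
 * Illustrative sketch of calling acpi_extract_package() with a "NNS"
 * format (two integers and a string), mirroring the _PSS/_PSD parsing in
 * processor_perflib.c. The my_parse_example() wrapper and the "_FOO"
 * method name are hypothetical, not part of this file.
 */
static acpi_status __maybe_unused my_parse_example(acpi_handle handle)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer format = { sizeof("NNS"), "NNS" };
struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
status = acpi_evaluate_object(handle, "_FOO", NULL, &output);
if (ACPI_FAILURE(status))
return status;
/* Let acpi_extract_package() size and allocate the result buffer. */
status = acpi_extract_package(output.pointer, &format, &result);
if (ACPI_SUCCESS(status)) {
/* result now holds two u64 values followed by a char * into tail data. */
ACPI_FREE(result.pointer);
}
kfree(output.pointer);
return status;
}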
acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data)
{
acpi_status status = AE_OK;
union acpi_object element;
struct acpi_buffer buffer = { 0, NULL };
if (!data)
return AE_BAD_PARAMETER;
buffer.length = sizeof(union acpi_object);
buffer.pointer = &element;
status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
if (ACPI_FAILURE(status)) {
acpi_util_eval_error(handle, pathname, status);
return status;
}
if (element.type != ACPI_TYPE_INTEGER) {
acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
return AE_BAD_DATA;
}
*data = element.integer.value;
acpi_handle_debug(handle, "Return value [%llu]\n", *data);
return AE_OK;
}
EXPORT_SYMBOL(acpi_evaluate_integer);
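/*
 * Illustrative sketch: reading a plain integer object such as _STA with
 * acpi_evaluate_integer(). The my_read_sta() wrapper is hypothetical and
 * not part of this file.
 */
static int __maybe_unused my_read_sta(acpi_handle handle)
{
unsigned long long sta;
acpi_status status;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
return (sta & 0x01) ? 0 : -ENODEV; /* bit 0: device present */
}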
int acpi_get_local_address(acpi_handle handle, u32 *addr)
{
unsigned long long adr;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
if (ACPI_FAILURE(status))
return -ENODATA;
*addr = (u32)adr;
return 0;
}
EXPORT_SYMBOL(acpi_get_local_address);
#define ACPI_MAX_SUB_BUF_SIZE 9
const char *acpi_get_subsystem_id(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
const char *sub;
size_t len;
status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status);
return ERR_PTR(-ENODATA);
}
obj = buffer.pointer;
if (obj->type == ACPI_TYPE_STRING) {
len = strlen(obj->string.pointer);
if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) {
sub = kstrdup(obj->string.pointer, GFP_KERNEL);
if (!sub)
sub = ERR_PTR(-ENOMEM);
} else {
acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len);
sub = ERR_PTR(-ENODATA);
}
} else {
acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n");
sub = ERR_PTR(-ENODATA);
}
acpi_os_free(buffer.pointer);
return sub;
}
EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments,
struct acpi_handle_list *list)
{
acpi_status status = AE_OK;
union acpi_object *package = NULL;
union acpi_object *element = NULL;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u32 i = 0;
if (!list) {
return AE_BAD_PARAMETER;
}
/* Evaluate object. */
status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
if (ACPI_FAILURE(status))
goto end;
package = buffer.pointer;
if ((buffer.length == 0) || !package) {
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (package->type != ACPI_TYPE_PACKAGE) {
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (!package->package.count) {
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (package->package.count > ACPI_MAX_HANDLES) {
kfree(package);
return AE_NO_MEMORY;
}
list->count = package->package.count;
/* Extract package data. */
for (i = 0; i < list->count; i++) {
element = &(package->package.elements[i]);
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
break;
}
if (!element->reference.handle) {
status = AE_NULL_ENTRY;
acpi_util_eval_error(handle, pathname, status);
break;
}
/* Get the acpi_handle. */
list->handles[i] = element->reference.handle;
acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
end:
if (ACPI_FAILURE(status)) {
list->count = 0;
//kfree(list->handles);
}
kfree(buffer.pointer);
return status;
}
EXPORT_SYMBOL(acpi_evaluate_reference);
acpi_status
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
if (ACPI_FAILURE(status))
return status;
output = buffer.pointer;
if (!output || output->type != ACPI_TYPE_PACKAGE
|| !output->package.count
|| output->package.elements[0].type != ACPI_TYPE_BUFFER
|| output->package.elements[0].buffer.length < ACPI_PLD_REV1_BUFFER_SIZE) {
status = AE_TYPE;
goto out;
}
status = acpi_decode_pld_buffer(
output->package.elements[0].buffer.pointer,
output->package.elements[0].buffer.length,
pld);
out:
kfree(buffer.pointer);
return status;
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
/**
* acpi_evaluate_ost: Evaluate _OST for hotplug operations
* @handle: ACPI device handle
* @source_event: source event code
* @status_code: status code
* @status_buf: optional detailed information (NULL if none)
*
* Evaluate _OST for hotplug operations. All ACPI hotplug handlers
* must call this function when evaluating _OST for hotplug operations.
* When the platform does not support _OST, this function has no effect.
*/
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf)
{
union acpi_object params[3] = {
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_BUFFER,}
};
struct acpi_object_list arg_list = {3, params};
params[0].integer.value = source_event;
params[1].integer.value = status_code;
if (status_buf != NULL) {
params[2].buffer.pointer = status_buf->pointer;
params[2].buffer.length = status_buf->length;
} else {
params[2].buffer.pointer = NULL;
params[2].buffer.length = 0;
}
return acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}
EXPORT_SYMBOL(acpi_evaluate_ost);
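/*
 * Illustrative sketch: reporting a successful hotplug ejection via _OST.
 * The wrapper is hypothetical, not part of this file; the constant names
 * are assumed to match the ones used by the ACPI hotplug code.
 */
static void __maybe_unused my_report_eject_ok(acpi_handle handle)
{
acpi_evaluate_ost(handle, ACPI_OST_EC_OSPM_EJECT, ACPI_OST_SC_SUCCESS, NULL);
}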
/**
* acpi_handle_path: Return the object path of handle
* @handle: ACPI device handle
*
* Caller must free the returned buffer
*/
static char *acpi_handle_path(acpi_handle handle)
{
struct acpi_buffer buffer = {
.length = ACPI_ALLOCATE_BUFFER,
.pointer = NULL
};
if (in_interrupt() ||
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
return NULL;
return buffer.pointer;
}
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
* @level: log level
* @handle: ACPI device handle
* @fmt: format string
*
* This function is called through acpi_handle_<level> macros and prints
* a message with ACPI prefix and object path. This function acquires
* the global namespace mutex to obtain an object path. In interrupt
* context, it shows the object path as <n/a>.
*/
void
acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
const char *path;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
path = acpi_handle_path(handle);
printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf);
va_end(args);
kfree(path);
}
EXPORT_SYMBOL(acpi_handle_printk);
#if defined(CONFIG_DYNAMIC_DEBUG)
/**
* __acpi_handle_debug: pr_debug with ACPI prefix and object path
* @descriptor: Dynamic Debug descriptor
* @handle: ACPI device handle
* @fmt: format string
*
* This function is called through acpi_handle_debug macro and debug
* prints a message with ACPI prefix and object path. This function
* acquires the global namespace mutex to obtain an object path. In
* interrupt context, it shows the object path as <n/a>.
*/
void
__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
const char *path;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
path = acpi_handle_path(handle);
__dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
va_end(args);
kfree(path);
}
EXPORT_SYMBOL(__acpi_handle_debug);
#endif
/**
* acpi_evaluation_failure_warn - Log evaluation failure warning.
* @handle: Parent object handle.
* @name: Name of the object whose evaluation has failed.
* @status: Status value returned by the failing object evaluation.
*/
void acpi_evaluation_failure_warn(acpi_handle handle, const char *name,
acpi_status status)
{
acpi_handle_warn(handle, "%s evaluation failed: %s\n", name,
acpi_format_exception(status));
}
EXPORT_SYMBOL_GPL(acpi_evaluation_failure_warn);
/**
* acpi_has_method: Check whether @handle has a method named @name
* @handle: ACPI device handle
* @name: name of object or method
*
* Check whether @handle has a method named @name.
*/
bool acpi_has_method(acpi_handle handle, char *name)
{
acpi_handle tmp;
return ACPI_SUCCESS(acpi_get_handle(handle, name, &tmp));
}
EXPORT_SYMBOL(acpi_has_method);
acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
u64 arg)
{
union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, };
obj.integer.value = arg;
return acpi_evaluate_object(handle, method, &arg_list, NULL);
}
EXPORT_SYMBOL(acpi_execute_simple_method);
/**
* acpi_evaluate_ej0: Evaluate _EJ0 method for hotplug operations
* @handle: ACPI device handle
*
* Evaluate device's _EJ0 method for hotplug operations.
*/
acpi_status acpi_evaluate_ej0(acpi_handle handle)
{
acpi_status status;
status = acpi_execute_simple_method(handle, "_EJ0", 1);
if (status == AE_NOT_FOUND)
acpi_handle_warn(handle, "No _EJ0 support for device\n");
else if (ACPI_FAILURE(status))
acpi_handle_warn(handle, "Eject failed (0x%x)\n", status);
return status;
}
/**
* acpi_evaluate_lck: Evaluate _LCK method to lock/unlock device
* @handle: ACPI device handle
* @lock: lock device if non-zero, otherwise unlock device
*
* Evaluate device's _LCK method if present to lock/unlock device
*/
acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
{
acpi_status status;
status = acpi_execute_simple_method(handle, "_LCK", !!lock);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
if (lock)
acpi_handle_warn(handle,
"Locking device failed (0x%x)\n", status);
else
acpi_handle_warn(handle,
"Unlocking device failed (0x%x)\n", status);
}
return status;
}
/**
* acpi_evaluate_reg: Evaluate _REG method to register OpRegion presence
* @handle: ACPI device handle
* @space_id: ACPI address space id to register OpRegion presence for
* @function: Parameter to pass to _REG one of ACPI_REG_CONNECT or
* ACPI_REG_DISCONNECT
*
* Evaluate device's _REG method to register OpRegion presence.
*/
acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function)
{
struct acpi_object_list arg_list;
union acpi_object params[2];
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = space_id;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = function;
arg_list.count = 2;
arg_list.pointer = params;
return acpi_evaluate_object(handle, "_REG", &arg_list, NULL);
}
EXPORT_SYMBOL(acpi_evaluate_reg);
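/*
 * Illustrative sketch: telling AML that the EC address space handler is
 * now available. The wrapper is hypothetical, not part of this file;
 * ACPI_REG_CONNECT is the value documented above.
 */
static void __maybe_unused my_announce_ec(acpi_handle handle)
{
acpi_evaluate_reg(handle, ACPI_ADR_SPACE_EC, ACPI_REG_CONNECT);
}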
/**
* acpi_evaluate_dsm - evaluate device's _DSM method
* @handle: ACPI device handle
* @guid: GUID of requested functions, should be 16 bytes
* @rev: revision number of requested function
* @func: requested function number
* @argv4: the function specific parameter
*
* Evaluate device's _DSM method with specified GUID, revision id and
* function number. Caller needs to free the returned object.
*
* Though ACPI defines that the fourth parameter to _DSM should be a package,
* some old BIOSes expect a buffer or an integer instead.
*/
union acpi_object *
acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4)
{
acpi_status ret;
struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object params[4];
struct acpi_object_list input = {
.count = 4,
.pointer = params,
};
params[0].type = ACPI_TYPE_BUFFER;
params[0].buffer.length = 16;
params[0].buffer.pointer = (u8 *)guid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = rev;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
if (argv4) {
params[3] = *argv4;
} else {
params[3].type = ACPI_TYPE_PACKAGE;
params[3].package.count = 0;
params[3].package.elements = NULL;
}
ret = acpi_evaluate_object(handle, "_DSM", &input, &buf);
if (ACPI_SUCCESS(ret))
return (union acpi_object *)buf.pointer;
if (ret != AE_NOT_FOUND)
acpi_handle_warn(handle,
"failed to evaluate _DSM %pUb (0x%x)\n", guid, ret);
return NULL;
}
EXPORT_SYMBOL(acpi_evaluate_dsm);
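/*
 * Illustrative sketch: invoking function 1, revision 1 of a driver's _DSM
 * and releasing the result. The GUID value and the wrapper are
 * hypothetical, not part of this file.
 */
static int __maybe_unused my_dsm_call(acpi_handle handle)
{
static const guid_t my_guid =
GUID_INIT(0x12345678, 0x1234, 0x1234,
0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78);
union acpi_object *obj;
obj = acpi_evaluate_dsm(handle, &my_guid, 1, 1, NULL);
if (!obj)
return -EIO;
/* Inspect obj->type / obj->integer.value / obj->buffer here. */
ACPI_FREE(obj);
return 0;
}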
/**
* acpi_check_dsm - check if _DSM method supports requested functions.
* @handle: ACPI device handle
* @guid: GUID of requested functions, should be 16 bytes at least
* @rev: revision number of requested functions
* @funcs: bitmap of requested functions
*
* Evaluate device's _DSM method to check whether it supports requested
* functions. At most 64 functions are currently supported, which should be
* enough for now.
*/
bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
{
int i;
u64 mask = 0;
union acpi_object *obj;
if (funcs == 0)
return false;
obj = acpi_evaluate_dsm(handle, guid, rev, 0, NULL);
if (!obj)
return false;
/* For compatibility, old BIOSes may return an integer */
if (obj->type == ACPI_TYPE_INTEGER)
mask = obj->integer.value;
else if (obj->type == ACPI_TYPE_BUFFER)
for (i = 0; i < obj->buffer.length && i < 8; i++)
mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
/*
* Bit 0 indicates whether there's support for any functions other than
* function 0 for the specified GUID and revision.
*/
if ((mask & 0x1) && (mask & funcs) == funcs)
return true;
return false;
}
EXPORT_SYMBOL(acpi_check_dsm);
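/*
 * Illustrative sketch: probing for _DSM function 2 before calling it, as
 * drivers typically do. The wrapper is hypothetical, not part of this file.
 */
static bool __maybe_unused my_dsm_func2_supported(acpi_handle handle,
const guid_t *guid)
{
/* BIT(2) of the returned mask corresponds to function 2. */
return acpi_check_dsm(handle, guid, 1, BIT(2));
}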
/**
* acpi_dev_hid_uid_match - Match device by supplied HID and UID
* @adev: ACPI device to match.
* @hid2: Hardware ID of the device.
* @uid2: Unique ID of the device, pass NULL to not check _UID.
*
* Matches HID and UID in @adev with given @hid2 and @uid2.
* Returns true if matches.
*/
bool acpi_dev_hid_uid_match(struct acpi_device *adev,
const char *hid2, const char *uid2)
{
const char *hid1 = acpi_device_hid(adev);
const char *uid1 = acpi_device_uid(adev);
if (strcmp(hid1, hid2))
return false;
if (!uid2)
return true;
return uid1 && !strcmp(uid1, uid2);
}
EXPORT_SYMBOL(acpi_dev_hid_uid_match);
/**
* acpi_dev_uid_to_integer - treat ACPI device _UID as integer
* @adev: ACPI device to get _UID from
* @integer: output buffer for integer
*
* Treats _UID as an integer and converts it into @integer.
*
* Returns 0 on success, or negative error code otherwise.
*/
int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
const char *uid;
if (!adev)
return -ENODEV;
uid = acpi_device_uid(adev);
if (!uid)
return -ENODATA;
return kstrtou64(uid, 0, integer);
}
EXPORT_SYMBOL(acpi_dev_uid_to_integer);
/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
* Return %true if the device was present at the moment of invocation.
* Note that if the device is pluggable, it may since have disappeared.
*
* For this function to work, acpi_bus_scan() must have been executed
* which happens in the subsys_initcall() subsection. Hence, do not
* call from a subsys_initcall() or earlier (use acpi_get_devices()
* instead). Calling from module_init() is fine (which is synonymous
* with device_initcall()).
*/
bool acpi_dev_found(const char *hid)
{
struct acpi_device_bus_id *acpi_device_bus_id;
bool found = false;
mutex_lock(&acpi_device_lock);
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id, hid)) {
found = true;
break;
}
mutex_unlock(&acpi_device_lock);
return found;
}
EXPORT_SYMBOL(acpi_dev_found);
struct acpi_dev_match_info {
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
static int acpi_dev_match_cb(struct device *dev, const void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
const struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
if (acpi_match_device_ids(adev, match->hid))
return 0;
if (match->uid && (!adev->pnp.unique_id ||
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
if (match->hrv == -1)
return 1;
status = acpi_evaluate_integer(adev->handle, "_HRV", NULL, &hrv);
if (ACPI_FAILURE(status))
return 0;
return hrv == match->hrv;
}
/**
* acpi_dev_present - Detect that a given ACPI device is present
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* Return %true if a matching device was present at the moment of invocation.
* Note that if the device is pluggable, it may since have disappeared.
*
* Note that unlike acpi_dev_found() this function checks the status
* of the device. So for devices which are present in the DSDT, but
* which are disabled (their _STA callback returns 0) this function
* will return false.
*
* For this function to work, acpi_bus_scan() must have been executed
* which happens in the subsys_initcall() subsection. Hence, do not
* call from a subsys_initcall() or earlier (use acpi_get_devices()
* instead). Calling from module_init() is fine (which is synonymous
* with device_initcall()).
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
struct acpi_dev_match_info match = {};
struct device *dev;
strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
put_device(dev);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
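/*
 * Illustrative sketch: bailing out of a module init when no matching
 * device is present. The "XXXX0000" HID and the function are hypothetical,
 * not part of this file.
 */
static int __maybe_unused my_driver_init(void)
{
if (!acpi_dev_present("XXXX0000", NULL, -1))
return -ENODEV;
/* ... register the driver ... */
return 0;
}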
/**
* acpi_dev_get_next_match_dev - Return the next match of ACPI device
* @adev: Pointer to the previous ACPI device matching this @hid, @uid and @hrv
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
* On the other hand the function invokes acpi_dev_put() on the given @adev
* assuming that its reference counter had been increased beforehand.
*
* See additional information in acpi_dev_present() as well.
*/
struct acpi_device *
acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv)
{
struct device *start = adev ? &adev->dev : NULL;
struct acpi_dev_match_info match = {};
struct device *dev;
strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
acpi_dev_put(adev);
return dev ? to_acpi_device(dev) : NULL;
}
EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
/**
* acpi_dev_get_first_match_dev - Return the first match of ACPI device
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* Return the first match of ACPI device if a matching device was present
* at the moment of invocation, or NULL otherwise.
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
*
* See additional information in acpi_dev_present() as well.
*/
struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
return acpi_dev_get_next_match_dev(NULL, hid, uid, hrv);
}
EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
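/*
 * Illustrative sketch: looking up a matching device and dropping the
 * reference when done. The "XXXX0000" HID and the function are
 * hypothetical, not part of this file.
 */
static void __maybe_unused my_find_match(void)
{
struct acpi_device *adev;
adev = acpi_dev_get_first_match_dev("XXXX0000", NULL, -1);
if (!adev)
return;
/* ... use adev ... */
acpi_dev_put(adev);
}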
/**
* acpi_reduced_hardware - Return if this is an ACPI-reduced-hw machine
*
* Return true when running on an ACPI-reduced-hw machine, false otherwise.
*/
bool acpi_reduced_hardware(void)
{
return acpi_gbl_reduced_hardware;
}
EXPORT_SYMBOL_GPL(acpi_reduced_hardware);
/*
* acpi_backlight= handling is done here rather than in video_detect.c
* because __setup cannot be used in modules.
*/
char acpi_video_backlight_string[16];
EXPORT_SYMBOL(acpi_video_backlight_string);
static int __init acpi_backlight(char *str)
{
strscpy(acpi_video_backlight_string, str,
sizeof(acpi_video_backlight_string));
return 1;
}
__setup("acpi_backlight=", acpi_backlight);
/**
* acpi_match_platform_list - Check if the system matches with a given list
* @plat: pointer to acpi_platform_list table terminated by a NULL entry
*
* Return the matched index if the system is found in the platform list.
* Otherwise, return a negative error code.
*/
int acpi_match_platform_list(const struct acpi_platform_list *plat)
{
struct acpi_table_header hdr;
int idx = 0;
if (acpi_disabled)
return -ENODEV;
for (; plat->oem_id[0]; plat++, idx++) {
if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr)))
continue;
if (strncmp(plat->oem_id, hdr.oem_id, ACPI_OEM_ID_SIZE))
continue;
if (strncmp(plat->oem_table_id, hdr.oem_table_id, ACPI_OEM_TABLE_ID_SIZE))
continue;
if ((plat->pred == all_versions) ||
(plat->pred == less_than_or_equal && hdr.oem_revision <= plat->oem_revision) ||
(plat->pred == greater_than_or_equal && hdr.oem_revision >= plat->oem_revision) ||
(plat->pred == equal && hdr.oem_revision == plat->oem_revision))
return idx;
}
return -ENODEV;
}
EXPORT_SYMBOL(acpi_match_platform_list);
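/*
 * Illustrative sketch: matching the running system against a one-entry
 * platform list, in the same way blacklist.c uses this helper. The OEM
 * strings and the function are hypothetical, not part of this file.
 */
static const struct acpi_platform_list my_plat[] = {
{"VENDOR", "TABLE ID", 0x1000, ACPI_SIG_DSDT, less_than_or_equal,
"Example quirk"},
{ }
};
static bool __maybe_unused my_platform_matches(void)
{
return acpi_match_platform_list(my_plat) >= 0;
}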
| linux-master | drivers/acpi/utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* nvs.c - Routines for saving and restoring ACPI NVS memory region
*
* Copyright (C) 2008-2011 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#define pr_fmt(fmt) "ACPI: PM: " fmt
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include "internal.h"
/* ACPI NVS regions, APEI may use it */
struct nvs_region {
__u64 phys_start;
__u64 size;
struct list_head node;
};
static LIST_HEAD(nvs_region_list);
#ifdef CONFIG_ACPI_SLEEP
static int suspend_nvs_register(unsigned long start, unsigned long size);
#else
static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
return 0;
}
#endif
int acpi_nvs_register(__u64 start, __u64 size)
{
struct nvs_region *region;
region = kmalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
region->phys_start = start;
region->size = size;
list_add_tail(®ion->node, &nvs_region_list);
return suspend_nvs_register(start, size);
}
int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
void *data)
{
int rc;
struct nvs_region *region;
list_for_each_entry(region, &nvs_region_list, node) {
rc = func(region->phys_start, region->size, data);
if (rc)
return rc;
}
return 0;
}
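/*
 * Illustrative sketch: summing the size of all registered NVS regions
 * with the iterator above. Both functions are hypothetical, not part of
 * this file.
 */
static int __maybe_unused my_sum_region(__u64 start, __u64 size, void *data)
{
*(u64 *)data += size;
return 0; /* returning non-zero would stop the iteration */
}
static u64 __maybe_unused my_total_nvs_bytes(void)
{
u64 total = 0;
acpi_nvs_for_each_region(my_sum_region, &total);
return total;
}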
#ifdef CONFIG_ACPI_SLEEP
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* suspend and to restore the contents of this memory during the subsequent
* resume. The code below implements a mechanism allowing us to do that.
*/
struct nvs_page {
unsigned long phys_start;
unsigned int size;
void *kaddr;
void *data;
bool unmap;
struct list_head node;
};
static LIST_HEAD(nvs_list);
/**
* suspend_nvs_register - register platform NVS memory region to save
* @start: Physical address of the region.
* @size: Size of the region.
*
* The NVS region need not be page-aligned (both ends) and we arrange
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
pr_info("Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
start, start + size - 1, size);
while (size > 0) {
unsigned int nr_bytes;
entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
if (!entry)
goto Error;
list_add_tail(&entry->node, &nvs_list);
entry->phys_start = start;
nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
entry->size = (size < nr_bytes) ? size : nr_bytes;
start += entry->size;
size -= entry->size;
}
return 0;
Error:
list_for_each_entry_safe(entry, next, &nvs_list, node) {
list_del(&entry->node);
kfree(entry);
}
return -ENOMEM;
}
/**
* suspend_nvs_free - free data pages allocated for saving NVS regions
*/
void suspend_nvs_free(void)
{
struct nvs_page *entry;
list_for_each_entry(entry, &nvs_list, node)
if (entry->data) {
free_page((unsigned long)entry->data);
entry->data = NULL;
if (entry->kaddr) {
if (entry->unmap) {
iounmap(entry->kaddr);
entry->unmap = false;
} else {
acpi_os_unmap_iomem(entry->kaddr,
entry->size);
}
entry->kaddr = NULL;
}
}
}
/**
* suspend_nvs_alloc - allocate memory necessary for saving NVS regions
*/
int suspend_nvs_alloc(void)
{
struct nvs_page *entry;
list_for_each_entry(entry, &nvs_list, node) {
entry->data = (void *)__get_free_page(GFP_KERNEL);
if (!entry->data) {
suspend_nvs_free();
return -ENOMEM;
}
}
return 0;
}
/**
* suspend_nvs_save - save NVS memory regions
*/
int suspend_nvs_save(void)
{
struct nvs_page *entry;
pr_info("Saving platform NVS memory\n");
list_for_each_entry(entry, &nvs_list, node)
if (entry->data) {
unsigned long phys = entry->phys_start;
unsigned int size = entry->size;
entry->kaddr = acpi_os_get_iomem(phys, size);
if (!entry->kaddr) {
entry->kaddr = acpi_os_ioremap(phys, size);
entry->unmap = !!entry->kaddr;
}
if (!entry->kaddr) {
suspend_nvs_free();
return -ENOMEM;
}
memcpy(entry->data, entry->kaddr, entry->size);
}
return 0;
}
/**
* suspend_nvs_restore - restore NVS memory regions
*
* This function is going to be called with interrupts disabled, so it
* cannot iounmap the virtual addresses used to access the NVS region.
*/
void suspend_nvs_restore(void)
{
struct nvs_page *entry;
pr_info("Restoring platform NVS memory\n");
list_for_each_entry(entry, &nvs_list, node)
if (entry->data)
memcpy(entry->kaddr, entry->data, entry->size);
}
#endif
| linux-master | drivers/acpi/nvs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* event.c - exporting ACPI events via procfs
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include "internal.h"
/* ACPI notifier chain */
static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
struct acpi_bus_event event;
strcpy(event.device_class, dev->pnp.device_class);
strcpy(event.bus_id, dev->pnp.bus_id);
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
== NOTIFY_BAD) ? -EINVAL : 0;
}
EXPORT_SYMBOL(acpi_notifier_call_chain);
int register_acpi_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&acpi_chain_head, nb);
}
EXPORT_SYMBOL(register_acpi_notifier);
int unregister_acpi_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&acpi_chain_head, nb);
}
EXPORT_SYMBOL(unregister_acpi_notifier);
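/*
 * Illustrative sketch: a minimal listener on the ACPI notifier chain.
 * The callback, notifier_block and registration calls are hypothetical,
 * not part of this file.
 */
static int __maybe_unused my_acpi_event(struct notifier_block *nb,
unsigned long action, void *data)
{
struct acpi_bus_event *event = data;
pr_info("event: %s %s %u %u\n", event->device_class,
event->bus_id, event->type, event->data);
return NOTIFY_OK;
}
static struct notifier_block my_acpi_nb __maybe_unused = {
.notifier_call = my_acpi_event,
};
/* register_acpi_notifier(&my_acpi_nb); ... unregister_acpi_notifier(&my_acpi_nb); */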
#ifdef CONFIG_NET
static unsigned int acpi_event_seqnum;
struct acpi_genl_event {
acpi_device_class device_class;
char bus_id[15];
u32 type;
u32 data;
};
/* attributes of acpi_genl_family */
enum {
ACPI_GENL_ATTR_UNSPEC,
ACPI_GENL_ATTR_EVENT, /* ACPI event info needed by user space */
__ACPI_GENL_ATTR_MAX,
};
#define ACPI_GENL_ATTR_MAX (__ACPI_GENL_ATTR_MAX - 1)
/* commands supported by the acpi_genl_family */
enum {
ACPI_GENL_CMD_UNSPEC,
ACPI_GENL_CMD_EVENT, /* kernel->user notifications for ACPI events */
__ACPI_GENL_CMD_MAX,
};
#define ACPI_GENL_CMD_MAX (__ACPI_GENL_CMD_MAX - 1)
#define ACPI_GENL_FAMILY_NAME "acpi_event"
#define ACPI_GENL_VERSION 0x01
#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
static const struct genl_multicast_group acpi_event_mcgrps[] = {
{ .name = ACPI_GENL_MCAST_GROUP_NAME, },
};
static struct genl_family acpi_event_genl_family __ro_after_init = {
.module = THIS_MODULE,
.name = ACPI_GENL_FAMILY_NAME,
.version = ACPI_GENL_VERSION,
.maxattr = ACPI_GENL_ATTR_MAX,
.mcgrps = acpi_event_mcgrps,
.n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps),
};
int acpi_bus_generate_netlink_event(const char *device_class,
const char *bus_id,
u8 type, int data)
{
struct sk_buff *skb;
struct nlattr *attr;
struct acpi_genl_event *event;
void *msg_header;
int size;
/* allocate memory */
size = nla_total_size(sizeof(struct acpi_genl_event)) +
nla_total_size(0);
skb = genlmsg_new(size, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* add the genetlink message header */
msg_header = genlmsg_put(skb, 0, acpi_event_seqnum++,
&acpi_event_genl_family, 0,
ACPI_GENL_CMD_EVENT);
if (!msg_header) {
nlmsg_free(skb);
return -ENOMEM;
}
/* fill the data */
attr =
nla_reserve(skb, ACPI_GENL_ATTR_EVENT,
sizeof(struct acpi_genl_event));
if (!attr) {
nlmsg_free(skb);
return -EINVAL;
}
event = nla_data(attr);
memset(event, 0, sizeof(struct acpi_genl_event));
strscpy(event->device_class, device_class, sizeof(event->device_class));
strscpy(event->bus_id, bus_id, sizeof(event->bus_id));
event->type = type;
event->data = data;
/* send multicast genetlink message */
genlmsg_end(skb, msg_header);
genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
return 0;
}
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
static int __init acpi_event_genetlink_init(void)
{
return genl_register_family(&acpi_event_genl_family);
}
#else
int acpi_bus_generate_netlink_event(const char *device_class,
const char *bus_id,
u8 type, int data)
{
return 0;
}
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
static int acpi_event_genetlink_init(void)
{
return -ENODEV;
}
#endif
static int __init acpi_event_init(void)
{
int error;
if (acpi_disabled)
return 0;
/* create genetlink for acpi event */
error = acpi_event_genetlink_init();
if (error)
pr_warn("Failed to create genetlink family for ACPI event\n");
return 0;
}
fs_initcall(acpi_event_init);
| linux-master | drivers/acpi/event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* - Added processor hotplug support
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
static DEFINE_MUTEX(performance_mutex);
/*
* _PPC support is implemented as a CPUfreq policy notifier:
* each time a CPUfreq driver that is also registered with the ACPI
* core is asked to change the speed policy, the maximum value is
* adjusted so that it stays within the platform limit.
*
* Also, when a new platform limit value is detected, the CPUfreq
* policy is adjusted accordingly.
*/
/* ignore_ppc:
* -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
* ignore _PPC
* 0 -> cpufreq low level drivers initialized -> consider _PPC values
* 1 -> ignore _PPC totally -> forced by user through boot param
*/
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
"limited by BIOS, this should help");
static bool acpi_processor_ppc_in_use;
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
s32 qos_value;
int index;
int ret;
if (!pr)
return -EINVAL;
/*
* _PPC indicates the maximum state currently supported by the platform
* (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
*/
status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
if (status != AE_NOT_FOUND) {
acpi_processor_ppc_in_use = true;
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
return -ENODEV;
}
}
index = ppc;
if (pr->performance_platform_limit == index ||
ppc >= pr->performance->state_count)
return 0;
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
index, index ? "is" : "is not");
pr->performance_platform_limit = index;
if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
/*
* If _PPC returns 0, it means that all of the available states can be
* used ("no limit").
*/
if (index == 0)
qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
else
qos_value = pr->performance->states[index].core_frequency * 1000;
ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
}
return 0;
}
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
/*
* acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
* @handle: ACPI processor handle
* @status: the status code of _PPC evaluation
* 0: success. OSPM is now using the performance state specified.
* 1: failure. OSPM has not changed the number of P-states in use
*/
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
if (acpi_has_method(handle, "_OST"))
acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
status, NULL);
}
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
if (ignore_ppc || !pr->performance) {
/*
* Only when it is notification event, the _OST object
* will be evaluated. Otherwise it is skipped.
*/
if (event_flag)
acpi_processor_ppc_ost(pr->handle, 1);
return;
}
ret = acpi_processor_get_platform_limit(pr);
/*
* Only when it is notification event, the _OST object
* will be evaluated. Otherwise it is skipped.
*/
if (event_flag) {
if (ret < 0)
acpi_processor_ppc_ost(pr->handle, 1);
else
acpi_processor_ppc_ost(pr->handle, 0);
}
if (ret >= 0)
cpufreq_update_limits(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
struct acpi_processor *pr;
pr = per_cpu(processors, cpu);
if (!pr || !pr->performance || !pr->performance->state_count)
return -ENODEV;
*limit = pr->performance->states[pr->performance_platform_limit].
core_frequency * 1000;
return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
void acpi_processor_ignore_ppc_init(void)
{
if (ignore_ppc < 0)
ignore_ppc = 0;
}
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
if (!pr)
continue;
/*
* Reset performance_platform_limit in case there is a stale
* value in it, so as to make it match the "no limit" QoS value
* below.
*/
pr->performance_platform_limit = 0;
ret = freq_qos_add_request(&policy->constraints,
&pr->perflib_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
}
}
void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->perflib_req);
}
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *pct = NULL;
union acpi_object obj = { 0 };
status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
return -ENODEV;
}
pct = (union acpi_object *)buffer.pointer;
if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) {
pr_err("Invalid _PCT data\n");
result = -EFAULT;
goto end;
}
/*
* control_register
*/
obj = pct->package.elements[0];
if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
obj.buffer.length < sizeof(struct acpi_pct_register)) {
pr_err("Invalid _PCT data (control_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->performance->control_register, obj.buffer.pointer,
sizeof(struct acpi_pct_register));
/*
* status_register
*/
obj = pct->package.elements[1];
if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
obj.buffer.length < sizeof(struct acpi_pct_register)) {
pr_err("Invalid _PCT data (status_register)\n");
result = -EFAULT;
goto end;
}
memcpy(&pr->performance->status_register, obj.buffer.pointer,
sizeof(struct acpi_pct_register));
end:
kfree(buffer.pointer);
return result;
}
#ifdef CONFIG_X86
/*
* Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
* in their ACPI data. Calculate the real values and fix up the _PSS data.
*/
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
u32 hi, lo, fid, did;
int index = px->control & 0x00000007;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return;
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) ||
boot_cpu_data.x86 == 0x11) {
rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
/*
* MSR C001_0064+:
* Bit 63: PstateEn. Read-write. If set, the P-state is valid.
*/
if (!(hi & BIT(31)))
return;
fid = lo & 0x3f;
did = (lo >> 6) & 7;
if (boot_cpu_data.x86 == 0x10)
px->core_frequency = (100 * (fid + 0x10)) >> did;
else
px->core_frequency = (100 * (fid + 8)) >> did;
}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
struct acpi_buffer state = { 0, NULL };
union acpi_object *pss = NULL;
int i;
int last_invalid = -1;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
return -ENODEV;
}
pss = buffer.pointer;
if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
pr_err("Invalid _PSS data\n");
result = -EFAULT;
goto end;
}
acpi_handle_debug(pr->handle, "Found %d performance states\n",
pss->package.count);
pr->performance->state_count = pss->package.count;
pr->performance->states =
kmalloc_array(pss->package.count,
sizeof(struct acpi_processor_px),
GFP_KERNEL);
if (!pr->performance->states) {
result = -ENOMEM;
goto end;
}
for (i = 0; i < pr->performance->state_count; i++) {
struct acpi_processor_px *px = &(pr->performance->states[i]);
state.length = sizeof(struct acpi_processor_px);
state.pointer = px;
acpi_handle_debug(pr->handle, "Extracting state %d\n", i);
status = acpi_extract_package(&(pss->package.elements[i]),
&format, &state);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
acpi_format_exception(status));
result = -EFAULT;
kfree(pr->performance->states);
goto end;
}
amd_fixup_frequency(px, i);
acpi_handle_debug(pr->handle,
"State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
i,
(u32) px->core_frequency,
(u32) px->power,
(u32) px->transition_latency,
(u32) px->bus_master_latency,
(u32) px->control, (u32) px->status);
/*
* Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
*/
if (!px->core_frequency ||
(u32)(px->core_frequency * 1000) != px->core_frequency * 1000) {
pr_err(FW_BUG
"Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
pr->id, px->core_frequency);
if (last_invalid == -1)
last_invalid = i;
} else {
if (last_invalid != -1) {
/*
* Copy this valid entry over last_invalid entry
*/
memcpy(&(pr->performance->states[last_invalid]),
px, sizeof(struct acpi_processor_px));
++last_invalid;
}
}
}
if (last_invalid == 0) {
pr_err(FW_BUG
"No valid BIOS _PSS frequency found for processor %d\n", pr->id);
result = -EFAULT;
kfree(pr->performance->states);
pr->performance->states = NULL;
}
if (last_invalid > 0)
pr->performance->state_count = last_invalid;
end:
kfree(buffer.pointer);
return result;
}
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
int result = 0;
if (!pr || !pr->performance || !pr->handle)
return -EINVAL;
if (!acpi_has_method(pr->handle, "_PCT")) {
acpi_handle_debug(pr->handle,
"ACPI-based processor performance control unavailable\n");
return -ENODEV;
}
result = acpi_processor_get_performance_control(pr);
if (result)
goto update_bios;
result = acpi_processor_get_performance_states(pr);
if (result)
goto update_bios;
/* We need to call _PPC once when cpufreq starts */
if (ignore_ppc != 1)
result = acpi_processor_get_platform_limit(pr);
return result;
/*
* Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
* the BIOS is older than the CPU and does not know its frequencies
*/
update_bios:
#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if (boot_cpu_has(X86_FEATURE_EST))
pr_warn(FW_BUG "BIOS needs update for CPU frequency support\n");
}
#endif
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
int acpi_processor_pstate_control(void)
{
acpi_status status;
if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
return 0;
pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);
status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.pstate_control, 8);
if (ACPI_SUCCESS(status))
return 1;
pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
acpi_format_exception(status));
return -EIO;
}
int acpi_processor_notify_smm(struct module *calling_module)
{
static int is_done;
int result = 0;
if (!acpi_processor_cpufreq_init)
return -EBUSY;
if (!try_module_get(calling_module))
return -EINVAL;
/*
* is_done is set to a negative value if an error occurs and to 1 if
* SMM has already been notified without an error. This avoids repeated
* notifications, which might lead to unexpected results.
*/
if (is_done != 0) {
if (is_done < 0)
result = is_done;
goto out_put;
}
result = acpi_processor_pstate_control();
if (result <= 0) {
if (result) {
is_done = result;
} else {
pr_debug("No SMI port or pstate_control\n");
is_done = 1;
}
goto out_put;
}
is_done = 1;
/*
* Success. If _PPC is in use, unloading the cpufreq driver would be risky,
* so disallow it in that case.
*/
if (acpi_processor_ppc_in_use)
return 0;
out_put:
module_put(calling_module);
return result;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
struct acpi_buffer state = {0, NULL};
union acpi_object *psd = NULL;
status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
return -ENODEV;
}
psd = buffer.pointer;
if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (psd->package.count != 1) {
pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
state.length = sizeof(struct acpi_psd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(psd->package.elements[0]), &format, &state);
if (ACPI_FAILURE(status)) {
pr_err("Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
pr_err("Unknown _PSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
pr_err("Unknown _PSD:revision\n");
result = -EFAULT;
goto end;
}
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
pr_err("Invalid _PSD:coord_type\n");
result = -EFAULT;
goto end;
}
end:
kfree(buffer.pointer);
return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
int acpi_processor_preregister_performance(
struct acpi_processor_performance __percpu *performance)
{
int count_target;
int retval = 0;
unsigned int i, j;
cpumask_var_t covered_cpus;
struct acpi_processor *pr;
struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&performance_mutex);
/*
* Check if another driver has already registered, and abort before
* changing pr->performance if it has. Check input data as well.
*/
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr) {
/* Look only at processors in ACPI namespace */
continue;
}
if (pr->performance) {
retval = -EBUSY;
goto err_out;
}
if (!performance || !per_cpu_ptr(performance, i)) {
retval = -EINVAL;
goto err_out;
}
}
/* Call _PSD for all CPUs */
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
pr->performance = per_cpu_ptr(performance, i);
pdomain = &(pr->performance->domain_info);
if (acpi_processor_get_psd(pr->handle, pdomain)) {
retval = -EINVAL;
continue;
}
}
if (retval)
goto err_ret;
/*
* Now that we have _PSD data from all CPUs, let's set up P-state
* domain info.
*/
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
if (cpumask_test_cpu(i, covered_cpus))
continue;
pdomain = &(pr->performance->domain_info);
cpumask_set_cpu(i, pr->performance->shared_cpu_map);
cpumask_set_cpu(i, covered_cpus);
if (pdomain->num_processors <= 1)
continue;
/* Validate the Domain info */
count_target = pdomain->num_processors;
if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = per_cpu(processors, j);
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/* Here i and j are in the same domain */
if (match_pdomain->num_processors != count_target) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != match_pdomain->coord_type) {
retval = -EINVAL;
goto err_ret;
}
cpumask_set_cpu(j, covered_cpus);
cpumask_set_cpu(j, pr->performance->shared_cpu_map);
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = per_cpu(processors, j);
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
match_pr->performance->shared_type =
pr->performance->shared_type;
cpumask_copy(match_pr->performance->shared_cpu_map,
pr->performance->shared_cpu_map);
}
}
err_ret:
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr || !pr->performance)
continue;
/* Assume no coordination on any error parsing domain info */
if (retval) {
cpumask_clear(pr->performance->shared_cpu_map);
cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
}
pr->performance = NULL; /* Will be set for real in register */
}
err_out:
mutex_unlock(&performance_mutex);
free_cpumask_var(covered_cpus);
return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu)
{
struct acpi_processor *pr;
if (!acpi_processor_cpufreq_init)
return -EINVAL;
mutex_lock(&performance_mutex);
pr = per_cpu(processors, cpu);
if (!pr) {
mutex_unlock(&performance_mutex);
return -ENODEV;
}
if (pr->performance) {
mutex_unlock(&performance_mutex);
return -EBUSY;
}
WARN_ON(!performance);
pr->performance = performance;
if (acpi_processor_get_performance_info(pr)) {
pr->performance = NULL;
mutex_unlock(&performance_mutex);
return -EIO;
}
mutex_unlock(&performance_mutex);
return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);
void acpi_processor_unregister_performance(unsigned int cpu)
{
struct acpi_processor *pr;
mutex_lock(&performance_mutex);
pr = per_cpu(processors, cpu);
if (!pr)
goto unlock;
if (pr->performance)
kfree(pr->performance->states);
pr->performance = NULL;
unlock:
mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
| linux-master | drivers/acpi/processor_perflib.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* acpi_lpit.c - LPIT table processing functions
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*/
#include <linux/cpu.h>
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include "internal.h"
struct lpit_residency_info {
struct acpi_generic_address gaddr;
u64 frequency;
void __iomem *iomem_addr;
};
/* Storage for the memory-mapped and FFH-based entries */
static struct lpit_residency_info residency_info_mem;
static struct lpit_residency_info residency_info_ffh;
static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
{
int err;
if (io_mem) {
u64 count = 0;
int error;
error = acpi_os_read_iomem(residency_info_mem.iomem_addr, &count,
residency_info_mem.gaddr.bit_width);
if (error)
return error;
*counter = div64_u64(count * 1000000ULL, residency_info_mem.frequency);
return 0;
}
err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr.bit_width - 1,
residency_info_ffh.gaddr.bit_offset);
*counter &= mask;
*counter >>= residency_info_ffh.gaddr.bit_offset;
*counter = div64_u64(*counter * 1000000ULL, residency_info_ffh.frequency);
return 0;
}
return -ENODATA;
}
static ssize_t low_power_idle_system_residency_us_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 counter;
int ret;
ret = lpit_read_residency_counter_us(&counter, true);
if (ret)
return ret;
return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_system_residency_us);
static ssize_t low_power_idle_cpu_residency_us_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 counter;
int ret;
ret = lpit_read_residency_counter_us(&counter, false);
if (ret)
return ret;
return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_cpu_residency_us);
int lpit_read_residency_count_address(u64 *address)
{
if (!residency_info_mem.gaddr.address)
return -EINVAL;
*address = residency_info_mem.gaddr.address;
return 0;
}
EXPORT_SYMBOL_GPL(lpit_read_residency_count_address);
static void lpit_update_residency(struct lpit_residency_info *info,
struct acpi_lpit_native *lpit_native)
{
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
/* Silently fail if the cpuidle attribute group is not present */
if (!dev_root)
return;
info->frequency = lpit_native->counter_frequency ?
lpit_native->counter_frequency : tsc_khz * 1000;
if (!info->frequency)
info->frequency = 1;
info->gaddr = lpit_native->residency_counter;
if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
info->iomem_addr = ioremap(info->gaddr.address,
info->gaddr.bit_width / 8);
if (!info->iomem_addr)
goto exit;
sysfs_add_file_to_group(&dev_root->kobj,
&dev_attr_low_power_idle_system_residency_us.attr,
"cpuidle");
} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
sysfs_add_file_to_group(&dev_root->kobj,
&dev_attr_low_power_idle_cpu_residency_us.attr,
"cpuidle");
}
exit:
put_device(dev_root);
}
static void lpit_process(u64 begin, u64 end)
{
while (begin + sizeof(struct acpi_lpit_native) <= end) {
struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;
if (!lpit_native->header.type && !lpit_native->header.flags) {
if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY &&
!residency_info_mem.gaddr.address) {
lpit_update_residency(&residency_info_mem, lpit_native);
} else if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
!residency_info_ffh.gaddr.address) {
lpit_update_residency(&residency_info_ffh, lpit_native);
}
}
begin += lpit_native->header.length;
}
}
void acpi_init_lpit(void)
{
acpi_status status;
struct acpi_table_lpit *lpit;
status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
if (ACPI_FAILURE(status))
return;
lpit_process((u64)lpit + sizeof(*lpit),
(u64)lpit + lpit->header.length);
acpi_put_table((struct acpi_table_header *)lpit);
}
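/*
 * Usage sketch (paths assumed from the cpu_subsys registration above,
 * not part of the original driver): once a residency counter has been
 * registered, it is exposed under the cpuidle attribute group, e.g.:
 *
 *	# cat /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
 *	# cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
 */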
| linux-master | drivers/acpi/acpi_lpit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* blacklist.c
*
* Check to see if the given machine has a known bad ACPI BIOS
* or if the BIOS is too old.
* Check given machine against acpi_rev_dmi_table[].
*
* Copyright (C) 2004 Len Brown <[email protected]>
* Copyright (C) 2002 Andy Grover <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "internal.h"
#ifdef CONFIG_DMI
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
#endif
/*
 * POLICY: If *anything* doesn't work, put it on the blacklist.
 * If the error is critical, mark it critical and abort driver load.
*/
static struct acpi_platform_list acpi_blacklist[] __initdata = {
/* Compaq Presario 1700 */
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
"Multiple problems", 1},
/* Sony FX120, FX140, FX150? */
{"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
"ACPI driver problem", 1},
/* Compaq Presario 800, Insyde BIOS */
{"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
"Does not use _REG to protect EC OpRegions", 1},
/* IBM 600E - _ADR should return 7, but it returns 1 */
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
{ }
};
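/*
 * Illustrative sketch (hypothetical vendor/IDs, not a real entry): a new
 * blacklist entry matches on the DSDT OEM fields, compares oem_revision
 * with the given predicate, and uses data == 1 to mark the problem
 * non-recoverable:
 *
 *	{"VENDOR", "TABLEID ", 0x00001000, ACPI_SIG_DSDT, less_than_or_equal,
 *	 "Example reason", 1},
 */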
int __init acpi_blacklisted(void)
{
int i;
int blacklisted = 0;
i = acpi_match_platform_list(acpi_blacklist);
if (i >= 0) {
pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
acpi_blacklist[i].oem_id,
acpi_blacklist[i].oem_table_id,
acpi_blacklist[i].oem_revision);
pr_err("Reason: %s. This is a %s error\n",
acpi_blacklist[i].reason,
(acpi_blacklist[i].data ?
"non-recoverable" : "recoverable"));
blacklisted = acpi_blacklist[i].data;
}
(void)early_acpi_osi_init();
#ifdef CONFIG_DMI
dmi_check_system(acpi_rev_dmi_table);
#endif
return blacklisted;
}
#ifdef CONFIG_DMI
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
{
pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident);
acpi_rev_override_setup(NULL);
return 0;
}
#endif
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = {
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/*
* DELL XPS 13 (2015) switches sound between HDA and I2S
* depending on the ACPI _REV callback. If userspace supports
* I2S sufficiently (or if you do not care about sound), you
* can safely disable this quirk.
*/
{
.callback = dmi_enable_rev_override,
.ident = "DELL XPS 13 (2015)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
},
},
{
.callback = dmi_enable_rev_override,
.ident = "DELL Precision 5520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
},
},
{
.callback = dmi_enable_rev_override,
.ident = "DELL Precision 3520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
},
},
/*
* Resolves a quirk with the Dell Latitude 3350 that
* causes the ethernet adapter to not function.
*/
{
.callback = dmi_enable_rev_override,
.ident = "DELL Latitude 3350",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
},
},
{
.callback = dmi_enable_rev_override,
.ident = "DELL Inspiron 7537",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
},
},
#endif
{}
};
#endif /* CONFIG_DMI */
| linux-master | drivers/acpi/blacklist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* pptt.c - parsing of Processor Properties Topology Table (PPTT)
*
* Copyright (C) 2018, ARM
*
* This file implements parsing of the Processor Properties Topology Table
* which is optionally used to describe the processor and cache topology.
* Due to the relative pointers used throughout the table, this doesn't
* leverage the existing subtable parsing in the kernel.
*
* The PPTT structure is an inverted tree, with each node potentially
* holding one or two inverted tree data structures describing
* the caches available at that level. Each cache structure optionally
* contains properties describing the cache at a given level which can be
* used to override hardware probed values.
*/
#define pr_fmt(fmt) "ACPI PPTT: " fmt
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <acpi/processor.h>
static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
struct acpi_subtable_header *entry;
/* there isn't a subtable at reference 0 */
if (pptt_ref < sizeof(struct acpi_subtable_header))
return NULL;
if (pptt_ref + sizeof(struct acpi_subtable_header) > table_hdr->length)
return NULL;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, pptt_ref);
if (entry->length == 0)
return NULL;
if (pptt_ref + entry->length > table_hdr->length)
return NULL;
return entry;
}
static struct acpi_pptt_processor *fetch_pptt_node(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
return (struct acpi_pptt_processor *)fetch_pptt_subtable(table_hdr, pptt_ref);
}
static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref);
}
static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *node,
int resource)
{
u32 *ref;
if (resource >= node->number_of_priv_resources)
return NULL;
ref = ACPI_ADD_PTR(u32, node, sizeof(struct acpi_pptt_processor));
ref += resource;
return fetch_pptt_subtable(table_hdr, *ref);
}
static inline bool acpi_pptt_match_type(int table_type, int type)
{
return ((table_type & ACPI_PPTT_MASK_CACHE_TYPE) == type ||
table_type & ACPI_PPTT_CACHE_TYPE_UNIFIED & type);
}
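/*
 * Worked example (bit values assumed from the ACPICA encoding, where
 * DATA = 0x0, INSTR = 0x4, UNIFIED = 0x8 and the alternate unified
 * encoding is 0xC): the second clause above makes both unified forms
 * match a UNIFIED query, since 0xC & 0x8 & 0x8 is non-zero:
 *
 *	acpi_pptt_match_type(0x8, ACPI_PPTT_CACHE_TYPE_UNIFIED) -> true
 *	acpi_pptt_match_type(0xC, ACPI_PPTT_CACHE_TYPE_UNIFIED) -> true
 *	acpi_pptt_match_type(0x4, ACPI_PPTT_CACHE_TYPE_UNIFIED) -> false
 */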
/**
* acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
* @table_hdr: Pointer to the head of the PPTT table
* @local_level: passed res reflects this cache level
* @split_levels: Number of split cache levels (data/instruction).
* @res: cache resource in the PPTT we want to walk
* @found: returns a pointer to the requested level if found
* @level: the requested cache level
* @type: the requested cache type
*
* Attempt to find a given cache level, while counting the max number
* of cache levels for the cache node.
*
* Given a pptt resource, verify that it is a cache node, then walk
* down each level of caches, counting how many levels are found
* as well as checking the cache type (icache, dcache, unified). If a
* level & type match, then we set found, and continue the search.
* Once the entire cache branch has been walked return its max
* depth.
*
* Return: The cache structure and the level we terminated with.
*/
static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
unsigned int local_level,
unsigned int *split_levels,
struct acpi_subtable_header *res,
struct acpi_pptt_cache **found,
unsigned int level, int type)
{
struct acpi_pptt_cache *cache;
if (res->type != ACPI_PPTT_TYPE_CACHE)
return 0;
cache = (struct acpi_pptt_cache *) res;
while (cache) {
local_level++;
if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) {
cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
continue;
}
if (split_levels &&
(acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) ||
acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR)))
*split_levels = local_level;
if (local_level == level &&
acpi_pptt_match_type(cache->attributes, type)) {
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
pr_debug("Found cache @ level %u\n", level);
*found = cache;
/*
* continue looking at this node's resource list
* to verify that we don't find a duplicate
* cache node.
*/
}
cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
}
return local_level;
}
static struct acpi_pptt_cache *
acpi_find_cache_level(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node,
unsigned int *starting_level, unsigned int *split_levels,
unsigned int level, int type)
{
struct acpi_subtable_header *res;
unsigned int number_of_levels = *starting_level;
int resource = 0;
struct acpi_pptt_cache *ret = NULL;
unsigned int local_level;
/* walk down from processor node */
while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
resource++;
local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
split_levels, res, &ret,
level, type);
/*
		 * We are looking for the max depth. Since it's potentially
		 * possible for a given node to have resources with differing
		 * depths, verify that the depth we have found is the largest.
*/
if (number_of_levels < local_level)
number_of_levels = local_level;
}
if (number_of_levels > *starting_level)
*starting_level = number_of_levels;
return ret;
}
/**
* acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
* levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
* @levels: Number of levels if success.
* @split_levels: Number of split cache levels (data/instruction) if
 * success. Can be NULL.
*
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
* caches that exist across packages). Count the number of cache levels and
* split cache levels (data/instruction) that exist at each level on the way
* up.
*/
static void acpi_count_levels(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node,
unsigned int *levels, unsigned int *split_levels)
{
do {
acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
}
/**
 * acpi_pptt_leaf_node() - Given a processor node, determine if it's a leaf
 * @table_hdr: Pointer to the head of the PPTT table
 * @node: passed node is checked to see if it's a leaf
*
* Determine if the *node parameter is a leaf node by iterating the
* PPTT table, looking for nodes which reference it.
*
* Return: 0 if we find a node referencing the passed node (or table error),
* or 1 if we don't.
*/
static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *node)
{
struct acpi_subtable_header *entry;
unsigned long table_end;
u32 node_entry;
struct acpi_pptt_processor *cpu_node;
u32 proc_sz;
if (table_hdr->revision > 1)
return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE);
table_end = (unsigned long)table_hdr + table_hdr->length;
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
	proc_sz = sizeof(struct acpi_pptt_processor);
	while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
}
return 1;
}
/**
* acpi_find_processor_node() - Given a PPTT table find the requested processor
* @table_hdr: Pointer to the head of the PPTT table
* @acpi_cpu_id: CPU we are searching for
*
* Find the subtable entry describing the provided processor.
* This is done by iterating the PPTT table looking for processor nodes
* which have an acpi_processor_id that matches the acpi_cpu_id parameter
* passed into the function. If we find a node that matches this criteria
 * we verify that it's a leaf node in the topology rather than depending
* on the valid flag, which doesn't need to be set for leaf nodes.
*
 * Return: NULL, or the processor's acpi_pptt_processor*
*/
static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_header *table_hdr,
u32 acpi_cpu_id)
{
struct acpi_subtable_header *entry;
unsigned long table_end;
struct acpi_pptt_processor *cpu_node;
u32 proc_sz;
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
	proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
	while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
}
return NULL;
}
static u8 acpi_cache_type(enum cache_type type)
{
switch (type) {
case CACHE_TYPE_DATA:
pr_debug("Looking for data cache\n");
return ACPI_PPTT_CACHE_TYPE_DATA;
case CACHE_TYPE_INST:
pr_debug("Looking for instruction cache\n");
return ACPI_PPTT_CACHE_TYPE_INSTR;
default:
case CACHE_TYPE_UNIFIED:
pr_debug("Looking for unified cache\n");
/*
* It is important that ACPI_PPTT_CACHE_TYPE_UNIFIED
* contains the bit pattern that will match both
* ACPI unified bit patterns because we use it later
* to match both cases.
*/
return ACPI_PPTT_CACHE_TYPE_UNIFIED;
}
}
static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *table_hdr,
u32 acpi_cpu_id,
enum cache_type type,
unsigned int level,
struct acpi_pptt_processor **node)
{
unsigned int total_levels = 0;
struct acpi_pptt_cache *found = NULL;
struct acpi_pptt_processor *cpu_node;
u8 acpi_type = acpi_cache_type(type);
pr_debug("Looking for CPU %d's level %u cache type %d\n",
acpi_cpu_id, level, acpi_type);
cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
while (cpu_node && !found) {
found = acpi_find_cache_level(table_hdr, cpu_node,
&total_levels, NULL, level, acpi_type);
*node = cpu_node;
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
}
return found;
}
/**
* update_cache_properties() - Update cacheinfo for the given processor
* @this_leaf: Kernel cache info structure being updated
* @found_cache: The PPTT node describing this cache instance
* @cpu_node: A unique reference to describe this cache instance
* @revision: The revision of the PPTT table
*
* The ACPI spec implies that the fields in the cache structures are used to
 * extend and correct the information probed from the hardware. Let's only
* set fields that we determine are VALID.
*
* Return: nothing. Side effect of updating the global cacheinfo
*/
static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
struct acpi_pptt_processor *cpu_node,
u8 revision)
{
	struct acpi_pptt_cache_v1 *found_cache_v1;
this_leaf->fw_token = cpu_node;
if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
this_leaf->size = found_cache->size;
if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID)
this_leaf->coherency_line_size = found_cache->line_size;
if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID)
this_leaf->number_of_sets = found_cache->number_of_sets;
if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID)
this_leaf->ways_of_associativity = found_cache->associativity;
if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) {
switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) {
case ACPI_PPTT_CACHE_POLICY_WT:
this_leaf->attributes = CACHE_WRITE_THROUGH;
break;
case ACPI_PPTT_CACHE_POLICY_WB:
this_leaf->attributes = CACHE_WRITE_BACK;
break;
}
}
if (found_cache->flags & ACPI_PPTT_ALLOCATION_TYPE_VALID) {
switch (found_cache->attributes & ACPI_PPTT_MASK_ALLOCATION_TYPE) {
case ACPI_PPTT_CACHE_READ_ALLOCATE:
this_leaf->attributes |= CACHE_READ_ALLOCATE;
break;
case ACPI_PPTT_CACHE_WRITE_ALLOCATE:
this_leaf->attributes |= CACHE_WRITE_ALLOCATE;
break;
case ACPI_PPTT_CACHE_RW_ALLOCATE:
case ACPI_PPTT_CACHE_RW_ALLOCATE_ALT:
this_leaf->attributes |=
CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE;
break;
}
}
/*
* If cache type is NOCACHE, then the cache hasn't been specified
* via other mechanisms. Update the type if a cache type has been
* provided.
*
* Note, we assume such caches are unified based on conventional system
* design and known examples. Significant work is required elsewhere to
* fully support data/instruction only type caches which are only
* specified in PPTT.
*/
if (this_leaf->type == CACHE_TYPE_NOCACHE &&
found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
found_cache, sizeof(struct acpi_pptt_cache));
this_leaf->id = found_cache_v1->cache_id;
this_leaf->attributes |= CACHE_ID;
}
}
static void cache_setup_acpi_cpu(struct acpi_table_header *table,
unsigned int cpu)
{
struct acpi_pptt_cache *found_cache;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
struct cacheinfo *this_leaf;
unsigned int index = 0;
struct acpi_pptt_processor *cpu_node = NULL;
while (index < get_cpu_cacheinfo(cpu)->num_leaves) {
this_leaf = this_cpu_ci->info_list + index;
found_cache = acpi_find_cache_node(table, acpi_cpu_id,
this_leaf->type,
this_leaf->level,
&cpu_node);
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
update_cache_properties(this_leaf, found_cache,
ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)),
table->revision);
index++;
}
}
static bool flag_identical(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu)
{
struct acpi_pptt_processor *next;
/* heterogeneous machines must use PPTT revision > 1 */
if (table_hdr->revision < 2)
return false;
/* Locate the last node in the tree with IDENTICAL set */
if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
next = fetch_pptt_node(table_hdr, cpu->parent);
if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL))
return true;
}
return false;
}
/* Passing level values greater than this will result in search termination */
#define PPTT_ABORT_PACKAGE 0xFF
static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu,
int level, int flag)
{
struct acpi_pptt_processor *prev_node;
while (cpu && level) {
/* special case the identical flag to find last identical */
if (flag == ACPI_PPTT_ACPI_IDENTICAL) {
if (flag_identical(table_hdr, cpu))
break;
} else if (cpu->flags & flag)
break;
pr_debug("level %d\n", level);
prev_node = fetch_pptt_node(table_hdr, cpu->parent);
if (prev_node == NULL)
break;
cpu = prev_node;
level--;
}
return cpu;
}
static void acpi_pptt_warn_missing(void)
{
pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
}
/**
* topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
* @table: Pointer to the head of the PPTT table
* @cpu: Kernel logical CPU number
* @level: A level that terminates the search
* @flag: A flag which terminates the search
*
* Get a unique value given a CPU, and a topology level, that can be
* matched to determine which cpus share common topological features
* at that level.
*
* Return: Unique value, or -ENOENT if unable to locate CPU
*/
static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
unsigned int cpu, int level, int flag)
{
struct acpi_pptt_processor *cpu_node;
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (cpu_node) {
cpu_node = acpi_find_processor_tag(table, cpu_node,
level, flag);
/*
		 * As per the specification, if the processor structure
		 * represents an actual processor, then the ACPI processor ID
		 * must be valid. For processor containers,
		 * ACPI_PPTT_ACPI_PROCESSOR_ID_VALID should be set if the UID
		 * is valid.
*/
if (level == 0 ||
cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
return cpu_node->acpi_processor_id;
return ACPI_PTR_DIFF(cpu_node, table);
}
pr_warn_once("PPTT table found, but unable to locate core %d (%d)\n",
cpu, acpi_cpu_id);
return -ENOENT;
}
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
static bool is_pptt_checked;
acpi_status status;
/*
	 * The PPTT will be used at runtime in every CPU hotplug-in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();
is_pptt_checked = true;
}
return pptt;
}
static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
{
struct acpi_table_header *table;
int retval;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
cpu, level, retval);
return retval;
}
/**
* check_acpi_cpu_flag() - Determine if CPU node has a flag set
* @cpu: Kernel logical CPU number
* @rev: The minimum PPTT revision defining the flag
* @flag: The flag itself
*
* Check the node representing a CPU for a given flag.
*
* Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
* the table revision isn't new enough.
* 1, any passed flag set
* 0, flag unset
*/
static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
{
struct acpi_table_header *table;
u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
struct acpi_pptt_processor *cpu_node = NULL;
int ret = -ENOENT;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
if (table->revision >= rev)
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (cpu_node)
ret = (cpu_node->flags & flag) != 0;
return ret;
}
/**
* acpi_get_cache_info() - Determine the number of cache levels and
 * split cache levels (data/instruction) for a PE.
* @cpu: Kernel logical CPU number
* @levels: Number of levels if success.
* @split_levels: Number of levels being split (i.e. data/instruction)
 * if success. Can be NULL.
*
 * Given a logical CPU number, returns the number of levels of cache represented
 * in the PPTT. A missing PPTT table, or any other failure, leaves *levels at 0,
 * indicating that no cache levels were found.
*
* Return: -ENOENT if no PPTT table or no PPTT processor struct found.
* 0 on success.
*/
int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
unsigned int *split_levels)
{
struct acpi_pptt_processor *cpu_node;
struct acpi_table_header *table;
u32 acpi_cpu_id;
*levels = 0;
if (split_levels)
*split_levels = 0;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (!cpu_node)
return -ENOENT;
acpi_count_levels(table, cpu_node, levels, split_levels);
pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
*levels, split_levels ? *split_levels : -1);
return 0;
}
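/*
 * Caller sketch (hypothetical consumer, not part of this file): arch
 * cacheinfo code might size its leaf array from the PPTT, on the
 * assumption that each split level contributes two leaves:
 *
 *	unsigned int levels, split;
 *
 *	if (!acpi_get_cache_info(cpu, &levels, &split))
 *		nr_leaves = levels + split;
 */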
/**
* cache_setup_acpi() - Override CPU cache topology with data from the PPTT
* @cpu: Kernel logical CPU number
*
* Updates the global cache info provided by cpu_get_cacheinfo()
* when there are valid properties in the acpi_pptt_cache nodes. A
* successful parse may not result in any updates if none of the
* cache levels have any valid flags set. Further, a unique value is
* associated with each known CPU cache entry. This unique value
* can be used to determine whether caches are shared between CPUs.
*
* Return: -ENOENT on failure to find table, or 0 on success
*/
int cache_setup_acpi(unsigned int cpu)
{
struct acpi_table_header *table;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
pr_debug("Cache Setup ACPI CPU %d\n", cpu);
cache_setup_acpi_cpu(table, cpu);
return 0;
}
/**
* acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
* @cpu: Kernel logical CPU number
*
* Return: 1, a thread
* 0, not a thread
 * -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
* the table revision isn't new enough.
*/
int acpi_pptt_cpu_is_thread(unsigned int cpu)
{
return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
}
/**
* find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
* @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
*
* Determine a topology unique ID for each thread/core/cluster/mc_grouping
* /socket/etc. This ID can then be used to group peers, which will have
* matching ids.
*
* The search terminates when either the requested level is found or
* we reach a root node. Levels beyond the termination point will return the
* same unique ID. The unique id for level 0 is the acpi processor id. All
* other levels beyond this use a generated value to uniquely identify
* a topological feature.
*
* Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_topology(unsigned int cpu, int level)
{
return find_acpi_cpu_topology_tag(cpu, level, 0);
}
/**
* find_acpi_cpu_topology_package() - Determine a unique CPU package value
* @cpu: Kernel logical CPU number
*
* Determine a topology unique package ID for the given CPU.
* This ID can then be used to group peers, which will have matching ids.
*
* The search terminates when either a level is found with the PHYSICAL_PACKAGE
* flag set or we reach a root node.
*
* Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents the package for this CPU.
*/
int find_acpi_cpu_topology_package(unsigned int cpu)
{
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_PHYSICAL_PACKAGE);
}
/**
* find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value
* @cpu: Kernel logical CPU number
*
* Determine a topology unique cluster ID for the given CPU/thread.
* This ID can then be used to group peers, which will have matching ids.
*
 * The cluster, if present, is the level of topology above CPUs. In a
* multi-thread CPU, it will be the level above the CPU, not the thread.
* It may not exist in single CPU systems. In simple multi-CPU systems,
* it may be equal to the package topology level.
*
* Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found
 * or there is no topology level above the CPU.
 * Otherwise returns a value which represents the cluster for this CPU.
*/
int find_acpi_cpu_topology_cluster(unsigned int cpu)
{
struct acpi_table_header *table;
struct acpi_pptt_processor *cpu_node, *cluster_node;
u32 acpi_cpu_id;
int retval;
int is_thread;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (!cpu_node || !cpu_node->parent)
return -ENOENT;
is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
cluster_node = fetch_pptt_node(table, cpu_node->parent);
if (!cluster_node)
return -ENOENT;
if (is_thread) {
if (!cluster_node->parent)
return -ENOENT;
cluster_node = fetch_pptt_node(table, cluster_node->parent);
if (!cluster_node)
return -ENOENT;
}
if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
retval = cluster_node->acpi_processor_id;
else
retval = ACPI_PTR_DIFF(cluster_node, table);
return retval;
}
/**
* find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
* @cpu: Kernel logical CPU number
*
* Determine a unique heterogeneous tag for the given CPU. CPUs with the same
* implementation should have matching tags.
*
* The returned tag can be used to group peers with identical implementation.
*
* The search terminates when a level is found with the identical implementation
* flag set or we reach a root node.
*
* Due to limitations in the PPTT data structure, there may be rare situations
* where two cores in a heterogeneous machine may be identical, but won't have
* the same tag.
*
* Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a group of identical cores
* similar to this CPU.
*/
int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_ACPI_IDENTICAL);
}
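/*
 * Usage sketch (hypothetical caller, not part of this file): the tags
 * returned by these helpers are only meaningful for equality tests,
 * e.g. collecting the CPUs that share a package:
 *
 *	int tag = find_acpi_cpu_topology_package(cpu);
 *
 *	for_each_possible_cpu(i)
 *		if (find_acpi_cpu_topology_package(i) == tag)
 *			cpumask_set_cpu(i, shared_mask);
 */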
| linux-master | drivers/acpi/pptt.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
#include "sleep.h"
#include "internal.h"
/*
* this file provides support for:
* /proc/acpi/wakeup
*/
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_device *dev, *tmp;
seq_printf(seq, "Device\tS-state\t Status Sysfs node\n");
mutex_lock(&acpi_device_lock);
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
struct acpi_device_physical_node *entry;
if (!dev->wakeup.flags.valid)
continue;
seq_printf(seq, "%s\t S%d\t",
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.valid ? '*' : ' ',
device_may_wakeup(&dev->dev) ?
"enabled" : "disabled");
} else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
node) {
ldev = get_device(entry->dev);
if (!ldev)
continue;
if (&entry->node !=
dev->physical_node_list.next)
seq_printf(seq, "\t\t");
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.valid ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
put_device(ldev);
}
}
mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
}
static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct acpi_device_physical_node *entry;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
mutex_unlock(&adev->physical_node_lock);
}
static ssize_t
acpi_system_write_wakeup_device(struct file *file,
const char __user * buffer,
size_t count, loff_t * ppos)
{
struct acpi_device *dev, *tmp;
char strbuf[5];
char str[5] = "";
if (count > 4)
count = 4;
if (copy_from_user(strbuf, buffer, count))
return -EFAULT;
strbuf[count] = '\0';
sscanf(strbuf, "%s", str);
mutex_lock(&acpi_device_lock);
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid)
continue;
if (!strncmp(dev->pnp.bus_id, str, 4)) {
if (device_can_wakeup(&dev->dev)) {
bool enable = !device_may_wakeup(&dev->dev);
device_set_wakeup_enable(&dev->dev, enable);
} else {
physical_device_enable_wakeup(dev);
}
break;
}
}
mutex_unlock(&acpi_device_lock);
return count;
}
static int
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
{
return single_open(file, acpi_system_wakeup_device_seq_show,
pde_data(inode));
}
static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
.proc_open = acpi_system_wakeup_device_open_fs,
.proc_read = seq_read,
.proc_write = acpi_system_write_wakeup_device,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
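/*
 * Usage sketch (the bus_id "LID0" is hypothetical): reading the file
 * lists wakeup-capable devices; writing a bus_id toggles whether that
 * device may wake the system:
 *
 *	# cat /proc/acpi/wakeup
 *	# echo LID0 > /proc/acpi/wakeup
 */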
| linux-master | drivers/acpi/proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* processor_thermal.c - Passive cooling submodule of the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* - Added processor hotplug support
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
* offers (in most cases) voltage scaling in addition to frequency scaling, and
* thus a cubic (instead of linear) reduction of energy. Also, we allow for
* _any_ cpufreq driver and not only the acpi-cpufreq driver.
*/
#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
#define reduction_pctg(cpu) \
per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
/*
* Emulate "per package data" using per cpu data (which should really be
* provided elsewhere)
*
 * Note we can lose a CPU on cpu hotunplug; in this case we forget the state
* temporarily. Fortunately that's not a big issue here (I hope)
*/
static int phys_package_first_cpu(int cpu)
{
int i;
int id = topology_physical_package_id(cpu);
for_each_online_cpu(i)
if (topology_physical_package_id(i) == id)
return i;
return 0;
}
static int cpu_has_cpufreq(unsigned int cpu)
{
struct cpufreq_policy *policy;
if (!acpi_processor_cpufreq_init)
return 0;
policy = cpufreq_cpu_get(cpu);
if (policy) {
cpufreq_cpu_put(policy);
return 1;
}
return 0;
}
static int cpufreq_get_max_state(unsigned int cpu)
{
if (!cpu_has_cpufreq(cpu))
return 0;
return CPUFREQ_THERMAL_MAX_STEP;
}
static int cpufreq_get_cur_state(unsigned int cpu)
{
if (!cpu_has_cpufreq(cpu))
return 0;
return reduction_pctg(cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
struct cpufreq_policy *policy;
struct acpi_processor *pr;
unsigned long max_freq;
int i, ret;
if (!cpu_has_cpufreq(cpu))
return 0;
reduction_pctg(cpu) = state;
/*
* Update all the CPUs in the same package because they all
* contribute to the temperature and often share the same
* frequency.
*/
for_each_online_cpu(i) {
if (topology_physical_package_id(i) !=
topology_physical_package_id(cpu))
continue;
pr = per_cpu(processors, i);
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
policy = cpufreq_cpu_get(i);
if (!policy)
return -EINVAL;
max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
cpufreq_cpu_put(policy);
ret = freq_qos_update_request(&pr->thermal_req, max_freq);
if (ret < 0) {
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
pr->id, ret);
}
}
return 0;
}
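/*
 * Worked example (illustrative numbers): with the 20%-per-step scaling
 * above, a policy whose cpuinfo.max_freq is 2000000 kHz is capped to:
 *
 *	state 0: 2000000 kHz (no reduction)
 *	state 1: 1600000 kHz (20% cut)
 *	state 2: 1200000 kHz (40% cut)
 *	state 3:  800000 kHz (60% cut, CPUFREQ_THERMAL_MAX_STEP)
 */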
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
if (!pr)
continue;
ret = freq_qos_add_request(&policy->constraints,
&pr->thermal_req,
FREQ_QOS_MAX, INT_MAX);
if (ret < 0) {
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
continue;
}
thermal_cooling_device_update(pr->cdev);
}
}
void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
if (!pr)
continue;
freq_qos_remove_request(&pr->thermal_req);
thermal_cooling_device_update(pr->cdev);
}
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
return 0;
}
static int cpufreq_get_cur_state(unsigned int cpu)
{
return 0;
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
return 0;
}
#endif
/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
int max_state = 0;
/*
	 * There exist four states according to
	 * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
*/
max_state += cpufreq_get_max_state(pr->id);
if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);
return max_state;
}
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_processor *pr;
if (!device)
return -EINVAL;
pr = acpi_driver_data(device);
if (!pr)
return -EINVAL;
*state = acpi_processor_max_state(pr);
return 0;
}
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *cur_state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_processor *pr;
if (!device)
return -EINVAL;
pr = acpi_driver_data(device);
if (!pr)
return -EINVAL;
*cur_state = cpufreq_get_cur_state(pr->id);
if (pr->flags.throttling)
*cur_state += pr->throttling.state;
return 0;
}
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct acpi_device *device = cdev->devdata;
struct acpi_processor *pr;
int result = 0;
int max_pstate;
if (!device)
return -EINVAL;
pr = acpi_driver_data(device);
if (!pr)
return -EINVAL;
max_pstate = cpufreq_get_max_state(pr->id);
if (state > acpi_processor_max_state(pr))
return -EINVAL;
if (state <= max_pstate) {
if (pr->flags.throttling && pr->throttling.state)
result = acpi_processor_set_throttling(pr, 0, false);
cpufreq_set_cur_state(pr->id, state);
} else {
cpufreq_set_cur_state(pr->id, max_pstate);
result = acpi_processor_set_throttling(pr,
state - max_pstate, false);
}
return result;
}
const struct thermal_cooling_device_ops processor_cooling_ops = {
.get_max_state = processor_get_max_state,
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
};
int acpi_processor_thermal_init(struct acpi_processor *pr,
struct acpi_device *device)
{
int result = 0;
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
if (IS_ERR(pr->cdev)) {
result = PTR_ERR(pr->cdev);
return result;
}
dev_dbg(&device->dev, "registered as cooling_device%d\n",
pr->cdev->id);
result = sysfs_create_link(&device->dev.kobj,
&pr->cdev->device.kobj,
"thermal_cooling");
if (result) {
dev_err(&device->dev,
"Failed to create sysfs link 'thermal_cooling'\n");
goto err_thermal_unregister;
}
result = sysfs_create_link(&pr->cdev->device.kobj,
&device->dev.kobj,
"device");
if (result) {
dev_err(&pr->cdev->device,
"Failed to create sysfs link 'device'\n");
goto err_remove_sysfs_thermal;
}
return 0;
err_remove_sysfs_thermal:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
thermal_cooling_device_unregister(pr->cdev);
return result;
}
void acpi_processor_thermal_exit(struct acpi_processor *pr,
struct acpi_device *device)
{
if (pr->cdev) {
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
sysfs_remove_link(&pr->cdev->device.kobj, "device");
thermal_cooling_device_unregister(pr->cdev);
pr->cdev = NULL;
}
}
| linux-master | drivers/acpi/processor_thermal.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI Platform Firmware Runtime Telemetry driver
*
* Copyright (C) 2021 Intel Corporation
* Author: Chen Yu <[email protected]>
*
* This driver allows user space to fetch telemetry data from the
* firmware with the help of the Platform Firmware Runtime Telemetry
* interface.
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <uapi/linux/pfrut.h>
#define PFRT_LOG_EXEC_IDX 0
#define PFRT_LOG_HISTORY_IDX 1
#define PFRT_LOG_ERR 0
#define PFRT_LOG_WARN 1
#define PFRT_LOG_INFO 2
#define PFRT_LOG_VERB 4
#define PFRT_FUNC_SET_LEV 1
#define PFRT_FUNC_GET_LEV 2
#define PFRT_FUNC_GET_DATA 3
#define PFRT_REVID_1 1
#define PFRT_REVID_2 2
#define PFRT_DEFAULT_REV_ID PFRT_REVID_1
enum log_index {
LOG_STATUS_IDX = 0,
LOG_EXT_STATUS_IDX = 1,
LOG_MAX_SZ_IDX = 2,
LOG_CHUNK1_LO_IDX = 3,
LOG_CHUNK1_HI_IDX = 4,
LOG_CHUNK1_SZ_IDX = 5,
LOG_CHUNK2_LO_IDX = 6,
LOG_CHUNK2_HI_IDX = 7,
LOG_CHUNK2_SZ_IDX = 8,
LOG_ROLLOVER_CNT_IDX = 9,
LOG_RESET_CNT_IDX = 10,
LOG_NR_IDX
};
struct pfrt_log_device {
int index;
struct pfrt_log_info info;
struct device *parent_dev;
struct miscdevice miscdev;
};
/* pfrt_guid is the parameter for _DSM method */
static const guid_t pfrt_log_guid =
GUID_INIT(0x75191659, 0x8178, 0x4D9D, 0xB8, 0x8F, 0xAC, 0x5E,
0x5E, 0x93, 0xE8, 0xBF);
static DEFINE_IDA(pfrt_log_ida);
static inline struct pfrt_log_device *to_pfrt_log_dev(struct file *file)
{
return container_of(file->private_data, struct pfrt_log_device, miscdev);
}
static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info,
struct pfrt_log_device *pfrt_log_dev)
{
acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
union acpi_object *out_obj, in_obj, in_buf;
int ret = -EBUSY;
memset(data_info, 0, sizeof(*data_info));
memset(&in_obj, 0, sizeof(in_obj));
memset(&in_buf, 0, sizeof(in_buf));
in_obj.type = ACPI_TYPE_PACKAGE;
in_obj.package.count = 1;
in_obj.package.elements = &in_buf;
in_buf.type = ACPI_TYPE_INTEGER;
in_buf.integer.value = pfrt_log_dev->info.log_type;
out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_DATA,
&in_obj, ACPI_TYPE_PACKAGE);
if (!out_obj)
return -EINVAL;
if (out_obj->package.count < LOG_NR_IDX ||
out_obj->package.elements[LOG_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_MAX_SZ_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK1_LO_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK1_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK1_SZ_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK2_LO_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK2_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_CHUNK2_SZ_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[LOG_RESET_CNT_IDX].type != ACPI_TYPE_INTEGER)
goto free_acpi_buffer;
data_info->status = out_obj->package.elements[LOG_STATUS_IDX].integer.value;
data_info->ext_status =
out_obj->package.elements[LOG_EXT_STATUS_IDX].integer.value;
if (data_info->status != DSM_SUCCEED) {
dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", data_info->status);
dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n",
data_info->ext_status);
goto free_acpi_buffer;
}
data_info->max_data_size =
out_obj->package.elements[LOG_MAX_SZ_IDX].integer.value;
data_info->chunk1_addr_lo =
out_obj->package.elements[LOG_CHUNK1_LO_IDX].integer.value;
data_info->chunk1_addr_hi =
out_obj->package.elements[LOG_CHUNK1_HI_IDX].integer.value;
data_info->chunk1_size =
out_obj->package.elements[LOG_CHUNK1_SZ_IDX].integer.value;
data_info->chunk2_addr_lo =
out_obj->package.elements[LOG_CHUNK2_LO_IDX].integer.value;
data_info->chunk2_addr_hi =
out_obj->package.elements[LOG_CHUNK2_HI_IDX].integer.value;
data_info->chunk2_size =
out_obj->package.elements[LOG_CHUNK2_SZ_IDX].integer.value;
data_info->rollover_cnt =
out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].integer.value;
data_info->reset_cnt =
out_obj->package.elements[LOG_RESET_CNT_IDX].integer.value;
ret = 0;
free_acpi_buffer:
ACPI_FREE(out_obj);
return ret;
}
static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev)
{
acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
union acpi_object *out_obj, *obj, in_obj, in_buf;
enum pfru_dsm_status status, ext_status;
int ret = 0;
memset(&in_obj, 0, sizeof(in_obj));
memset(&in_buf, 0, sizeof(in_buf));
in_obj.type = ACPI_TYPE_PACKAGE;
in_obj.package.count = 1;
in_obj.package.elements = &in_buf;
in_buf.type = ACPI_TYPE_INTEGER;
in_buf.integer.value = level;
out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
pfrt_log_dev->info.log_revid, PFRT_FUNC_SET_LEV,
&in_obj, ACPI_TYPE_PACKAGE);
if (!out_obj)
return -EINVAL;
obj = &out_obj->package.elements[0];
status = obj->integer.value;
if (status != DSM_SUCCEED) {
obj = &out_obj->package.elements[1];
ext_status = obj->integer.value;
dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
ret = -EBUSY;
}
ACPI_FREE(out_obj);
return ret;
}
static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev)
{
acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
union acpi_object *out_obj, *obj;
enum pfru_dsm_status status, ext_status;
int ret = -EBUSY;
out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_LEV,
NULL, ACPI_TYPE_PACKAGE);
if (!out_obj)
return -EINVAL;
obj = &out_obj->package.elements[0];
if (obj->type != ACPI_TYPE_INTEGER)
goto free_acpi_buffer;
status = obj->integer.value;
if (status != DSM_SUCCEED) {
obj = &out_obj->package.elements[1];
ext_status = obj->integer.value;
dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
goto free_acpi_buffer;
}
obj = &out_obj->package.elements[2];
if (obj->type != ACPI_TYPE_INTEGER)
goto free_acpi_buffer;
ret = obj->integer.value;
free_acpi_buffer:
ACPI_FREE(out_obj);
return ret;
}
static int valid_log_level(u32 level)
{
return level == PFRT_LOG_ERR || level == PFRT_LOG_WARN ||
level == PFRT_LOG_INFO || level == PFRT_LOG_VERB;
}
static int valid_log_type(u32 type)
{
return type == PFRT_LOG_EXEC_IDX || type == PFRT_LOG_HISTORY_IDX;
}
static inline int valid_log_revid(u32 id)
{
return id == PFRT_REVID_1 || id == PFRT_REVID_2;
}
static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pfrt_log_device *pfrt_log_dev = to_pfrt_log_dev(file);
struct pfrt_log_data_info data_info;
struct pfrt_log_info info;
void __user *p;
int ret = 0;
p = (void __user *)arg;
switch (cmd) {
case PFRT_LOG_IOC_SET_INFO:
if (copy_from_user(&info, p, sizeof(info)))
return -EFAULT;
if (valid_log_revid(info.log_revid))
pfrt_log_dev->info.log_revid = info.log_revid;
if (valid_log_level(info.log_level)) {
ret = set_pfrt_log_level(info.log_level, pfrt_log_dev);
if (ret < 0)
return ret;
pfrt_log_dev->info.log_level = info.log_level;
}
if (valid_log_type(info.log_type))
pfrt_log_dev->info.log_type = info.log_type;
return 0;
case PFRT_LOG_IOC_GET_INFO:
		ret = get_pfrt_log_level(pfrt_log_dev);
		if (ret < 0)
			return ret;
		info.log_level = ret;
info.log_type = pfrt_log_dev->info.log_type;
info.log_revid = pfrt_log_dev->info.log_revid;
if (copy_to_user(p, &info, sizeof(info)))
return -EFAULT;
return 0;
case PFRT_LOG_IOC_GET_DATA_INFO:
ret = get_pfrt_log_data_info(&data_info, pfrt_log_dev);
if (ret)
return ret;
if (copy_to_user(p, &data_info, sizeof(struct pfrt_log_data_info)))
return -EFAULT;
return 0;
default:
return -ENOTTY;
}
}
static int
pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pfrt_log_device *pfrt_log_dev;
struct pfrt_log_data_info info;
unsigned long psize, vsize;
phys_addr_t base_addr;
int ret;
if (vma->vm_flags & VM_WRITE)
return -EROFS;
/* changing from read to write with mprotect is not allowed */
vm_flags_clear(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);
ret = get_pfrt_log_data_info(&info, pfrt_log_dev);
if (ret)
return ret;
	base_addr = (phys_addr_t)(((u64)info.chunk2_addr_hi << 32) | info.chunk2_addr_lo);
/* pfrt update has not been launched yet */
if (!base_addr)
return -ENODEV;
psize = info.max_data_size;
/* base address and total buffer size must be page aligned */
if (!PAGE_ALIGNED(base_addr) || !PAGE_ALIGNED(psize))
return -ENODEV;
vsize = vma->vm_end - vma->vm_start;
if (vsize > psize)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(base_addr),
vsize, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static const struct file_operations acpi_pfrt_log_fops = {
.owner = THIS_MODULE,
.mmap = pfrt_log_mmap,
.unlocked_ioctl = pfrt_log_ioctl,
.llseek = noop_llseek,
};
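/*
 * Userspace sketch (hypothetical program; the device node name follows
 * the probe code below, the ioctl and struct come from
 * <uapi/linux/pfrut.h>): query the telemetry layout, then map the log
 * buffer read-only:
 *
 *	int fd = open("/dev/acpi_pfr_telemetry0", O_RDONLY);
 *	struct pfrt_log_data_info di;
 *
 *	if (ioctl(fd, PFRT_LOG_IOC_GET_DATA_INFO, &di) == 0)
 *		log = mmap(NULL, di.max_data_size, PROT_READ, MAP_SHARED,
 *			   fd, 0);
 */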
static int acpi_pfrt_log_remove(struct platform_device *pdev)
{
struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev);
misc_deregister(&pfrt_log_dev->miscdev);
return 0;
}
static void pfrt_log_put_idx(void *data)
{
struct pfrt_log_device *pfrt_log_dev = data;
ida_free(&pfrt_log_ida, pfrt_log_dev->index);
}
static int acpi_pfrt_log_probe(struct platform_device *pdev)
{
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
struct pfrt_log_device *pfrt_log_dev;
int ret;
if (!acpi_has_method(handle, "_DSM")) {
dev_dbg(&pdev->dev, "Missing _DSM\n");
return -ENODEV;
}
pfrt_log_dev = devm_kzalloc(&pdev->dev, sizeof(*pfrt_log_dev), GFP_KERNEL);
if (!pfrt_log_dev)
return -ENOMEM;
ret = ida_alloc(&pfrt_log_ida, GFP_KERNEL);
if (ret < 0)
return ret;
pfrt_log_dev->index = ret;
ret = devm_add_action_or_reset(&pdev->dev, pfrt_log_put_idx, pfrt_log_dev);
if (ret)
return ret;
pfrt_log_dev->info.log_revid = PFRT_DEFAULT_REV_ID;
pfrt_log_dev->parent_dev = &pdev->dev;
pfrt_log_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
pfrt_log_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pfrt%d",
pfrt_log_dev->index);
if (!pfrt_log_dev->miscdev.name)
return -ENOMEM;
pfrt_log_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"acpi_pfr_telemetry%d",
pfrt_log_dev->index);
if (!pfrt_log_dev->miscdev.nodename)
return -ENOMEM;
pfrt_log_dev->miscdev.fops = &acpi_pfrt_log_fops;
pfrt_log_dev->miscdev.parent = &pdev->dev;
ret = misc_register(&pfrt_log_dev->miscdev);
if (ret)
return ret;
platform_set_drvdata(pdev, pfrt_log_dev);
return 0;
}
static const struct acpi_device_id acpi_pfrt_log_ids[] = {
{"INTC1081"},
{}
};
MODULE_DEVICE_TABLE(acpi, acpi_pfrt_log_ids);
static struct platform_driver acpi_pfrt_log_driver = {
.driver = {
.name = "pfr_telemetry",
.acpi_match_table = acpi_pfrt_log_ids,
},
.probe = acpi_pfrt_log_probe,
.remove = acpi_pfrt_log_remove,
};
module_platform_driver(acpi_pfrt_log_driver);
MODULE_DESCRIPTION("Platform Firmware Runtime Update Telemetry driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/acpi/pfr_telemetry.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, Intel Corporation
* Copyright (c) 2015, Red Hat, Inc.
* Copyright (c) 2015, 2016 Linaro Ltd.
*/
#define pr_fmt(fmt) "ACPI: SPCR: " fmt
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>
/*
* Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
* occasionally getting stuck as 1. To avoid the potential for a hang, check
* TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
* implementations, so only do so if an affected platform is detected in
* acpi_parse_spcr().
*/
bool qdf2400_e44_present;
EXPORT_SYMBOL(qdf2400_e44_present);
/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
* Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
*/
static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
{
if (memcmp(h->oem_id, "QCOM ", ACPI_OEM_ID_SIZE))
return false;
if (!memcmp(h->oem_table_id, "QDF2432 ", ACPI_OEM_TABLE_ID_SIZE))
return true;
if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
h->oem_revision == 1)
return true;
return false;
}
/*
 * APM X-Gene v1 and v2 UART hardware is a 16550-like device but has its
 * registers aligned to 32-bit. In addition, the BIOS also encoded the
 * access width to be 8 bits. This function detects this erratum condition.
*/
static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
{
bool xgene_8250 = false;
if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE)
return false;
if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) &&
memcmp(tb->header.oem_id, "HPE ", ACPI_OEM_ID_SIZE))
return false;
if (!memcmp(tb->header.oem_table_id, "XGENESPC",
ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0)
xgene_8250 = true;
if (!memcmp(tb->header.oem_table_id, "ProLiant",
ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1)
xgene_8250 = true;
return xgene_8250;
}
/**
* acpi_parse_spcr() - parse ACPI SPCR table and add preferred console
* @enable_earlycon: set up earlycon for the console specified by the table
* @enable_console: setup the console specified by the table.
*
* For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be
* defined to parse ACPI SPCR table. As a result of the parsing preferred
* console is registered and if @enable_earlycon is true, earlycon is set up.
* If @enable_console is true the system console is also configured.
*
* When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
* from arch initialization code as soon as the DT/ACPI decision is made.
*/
int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
static char opts[64];
struct acpi_table_spcr *table;
acpi_status status;
char *uart;
char *iotype;
int baud_rate;
int err;
if (acpi_disabled)
return -ENODEV;
status = acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&table);
if (ACPI_FAILURE(status))
return -ENOENT;
if (table->header.revision < 2)
pr_info("SPCR table version %d\n", table->header.revision);
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
u32 bit_width = table->serial_port.access_width;
if (bit_width > ACPI_ACCESS_BIT_MAX) {
pr_err(FW_BUG "Unacceptable wide SPCR Access Width. Defaulting to byte size\n");
bit_width = ACPI_ACCESS_BIT_DEFAULT;
}
		switch (ACPI_ACCESS_BIT_WIDTH(bit_width)) {
default:
pr_err(FW_BUG "Unexpected SPCR Access Width. Defaulting to byte size\n");
fallthrough;
case 8:
iotype = "mmio";
break;
case 16:
iotype = "mmio16";
break;
case 32:
iotype = "mmio32";
break;
}
} else
iotype = "io";
switch (table->interface_type) {
case ACPI_DBG2_ARM_SBSA_32BIT:
iotype = "mmio32";
fallthrough;
case ACPI_DBG2_ARM_PL011:
case ACPI_DBG2_ARM_SBSA_GENERIC:
case ACPI_DBG2_BCM2835:
uart = "pl011";
break;
case ACPI_DBG2_16550_COMPATIBLE:
case ACPI_DBG2_16550_SUBSET:
case ACPI_DBG2_16550_WITH_GAS:
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
default:
err = -ENOENT;
goto done;
}
switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
* Assume firmware or bootloader configures console correctly.
*/
baud_rate = 0;
break;
case 3:
baud_rate = 9600;
break;
case 4:
baud_rate = 19200;
break;
case 6:
baud_rate = 57600;
break;
case 7:
baud_rate = 115200;
break;
default:
err = -ENOENT;
goto done;
}
/*
* If the E44 erratum is required, then we need to tell the pl011
* driver to implement the work-around.
*
* The global variable is used by the probe function when it
* creates the UARTs, whether or not they're used as a console.
*
* If the user specifies "traditional" earlycon, the qdf2400_e44
* console name matches the EARLYCON_DECLARE() statement, and
* SPCR is not used. Parameter "earlycon" is false.
*
* If the user specifies "SPCR" earlycon, then we need to update
* the console name so that it also says "qdf2400_e44". Parameter
* "earlycon" is true.
*
* For consistency, if we change the console name, then we do it
* for everyone, not just earlycon.
*/
if (qdf2400_erratum_44_present(&table->header)) {
qdf2400_e44_present = true;
if (enable_earlycon)
uart = "qdf2400_e44";
}
if (xgene_8250_erratum_present(table)) {
iotype = "mmio32";
/*
* For xgene v1 and v2 we don't know the clock rate of the
		 * UART, so don't attempt to change to the baud rate stated
		 * in the table because the driver cannot calculate the dividers.
*/
baud_rate = 0;
}
if (!baud_rate) {
snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
table->serial_port.address);
} else {
snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
table->serial_port.address, baud_rate);
}
pr_info("console: %s\n", opts);
if (enable_earlycon)
setup_earlycon(opts);
if (enable_console)
err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
else
err = 0;
done:
acpi_put_table((struct acpi_table_header *)table);
return err;
}
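/*
 * Example (illustrative SPCR contents): for a PL011 at 0x9000000 with a
 * 32-bit access width and baud-rate code 7, the code above builds
 *
 *	opts = "pl011,mmio32,0x9000000,115200"
 *
 * and passes "mmio32,0x9000000,115200" to add_preferred_console(). A
 * baud_rate of 0 drops the trailing rate and leaves the UART exactly as
 * the firmware configured it.
 */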
| linux-master | drivers/acpi/spcr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI configfs support
*
* Copyright (c) 2016 Intel Corporation
*/
#define pr_fmt(fmt) "ACPI configfs: " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/acpi.h>
#include <linux/security.h>
static struct config_group *acpi_table_group;
struct acpi_table {
struct config_item cfg;
struct acpi_table_header *header;
u32 index;
};
static ssize_t acpi_table_aml_write(struct config_item *cfg,
const void *data, size_t size)
{
const struct acpi_table_header *header = data;
struct acpi_table *table;
int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
if (ret)
return ret;
table = container_of(cfg, struct acpi_table, cfg);
if (table->header) {
pr_err("table already loaded\n");
return -EBUSY;
}
if (header->length != size) {
pr_err("invalid table length\n");
return -EINVAL;
}
if (memcmp(header->signature, ACPI_SIG_SSDT, 4)) {
pr_err("invalid table signature\n");
return -EINVAL;
}
table->header = kmemdup(header, header->length, GFP_KERNEL);
if (!table->header)
return -ENOMEM;
ret = acpi_load_table(table->header, &table->index);
if (ret) {
kfree(table->header);
table->header = NULL;
}
return ret;
}
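/*
 * A minimal userspace sketch (editor's addition; the mount point is the
 * conventional configfs one and the group names come from the
 * registration code below):
 *
 *	mkdir /sys/kernel/config/acpi/table/my_ssdt
 *	cat my_ssdt.aml > /sys/kernel/config/acpi/table/my_ssdt/aml
 *
 * The write lands in acpi_table_aml_write() above, which validates the
 * SSDT header and hands a copy to acpi_load_table().
 */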
static inline struct acpi_table_header *get_header(struct config_item *cfg)
{
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
if (!table->header)
pr_err("table not loaded\n");
return table->header ?: ERR_PTR(-EINVAL);
}
static ssize_t acpi_table_aml_read(struct config_item *cfg,
void *data, size_t size)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
if (data)
memcpy(data, h, h->length);
return h->length;
}
#define MAX_ACPI_TABLE_SIZE (128 * 1024)
CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
static struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
&acpi_table_attr_aml,
NULL,
};
static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
}
static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%d\n", h->length);
}
static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%d\n", h->revision);
}
static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
}
static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
}
static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%d\n", h->oem_revision);
}
static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
}
static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (IS_ERR(h))
return PTR_ERR(h);
return sysfs_emit(str, "%d\n", h->asl_compiler_revision);
}
CONFIGFS_ATTR_RO(acpi_table_, signature);
CONFIGFS_ATTR_RO(acpi_table_, length);
CONFIGFS_ATTR_RO(acpi_table_, revision);
CONFIGFS_ATTR_RO(acpi_table_, oem_id);
CONFIGFS_ATTR_RO(acpi_table_, oem_table_id);
CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
static struct configfs_attribute *acpi_table_attrs[] = {
&acpi_table_attr_signature,
&acpi_table_attr_length,
&acpi_table_attr_revision,
&acpi_table_attr_oem_id,
&acpi_table_attr_oem_table_id,
&acpi_table_attr_oem_revision,
&acpi_table_attr_asl_compiler_id,
&acpi_table_attr_asl_compiler_revision,
NULL,
};
static const struct config_item_type acpi_table_type = {
.ct_owner = THIS_MODULE,
.ct_bin_attrs = acpi_table_bin_attrs,
.ct_attrs = acpi_table_attrs,
};
static struct config_item *acpi_table_make_item(struct config_group *group,
const char *name)
{
struct acpi_table *table;
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&table->cfg, name, &acpi_table_type);
return &table->cfg;
}
static void acpi_table_drop_item(struct config_group *group,
struct config_item *cfg)
{
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
pr_debug("Host-directed Dynamic ACPI Table Unload\n");
acpi_unload_table(table->index);
config_item_put(cfg);
}
static struct configfs_group_operations acpi_table_group_ops = {
.make_item = acpi_table_make_item,
.drop_item = acpi_table_drop_item,
};
static const struct config_item_type acpi_tables_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &acpi_table_group_ops,
};
static const struct config_item_type acpi_root_group_type = {
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem acpi_configfs = {
.su_group = {
.cg_item = {
.ci_namebuf = "acpi",
.ci_type = &acpi_root_group_type,
},
},
.su_mutex = __MUTEX_INITIALIZER(acpi_configfs.su_mutex),
};
static int __init acpi_configfs_init(void)
{
int ret;
struct config_group *root = &acpi_configfs.su_group;
config_group_init(root);
ret = configfs_register_subsystem(&acpi_configfs);
if (ret)
return ret;
acpi_table_group = configfs_register_default_group(root, "table",
&acpi_tables_type);
if (IS_ERR(acpi_table_group)) {
configfs_unregister_subsystem(&acpi_configfs);
return PTR_ERR(acpi_table_group);
}
return 0;
}
module_init(acpi_configfs_init);
static void __exit acpi_configfs_exit(void)
{
configfs_unregister_default_group(acpi_table_group);
configfs_unregister_subsystem(&acpi_configfs);
}
module_exit(acpi_configfs_exit);
MODULE_AUTHOR("Octavian Purdila <[email protected]>");
MODULE_DESCRIPTION("ACPI configfs support");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/acpi/acpi_configfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sbs.c - ACPI Smart Battery System Driver ($Revision: 2.0 $)
*
* Copyright (c) 2007 Alexey Starikovskiy <[email protected]>
* Copyright (c) 2005-2007 Vladimir Lebedev <[email protected]>
* Copyright (c) 2005 Rich Townsend <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
#include <linux/platform_data/x86/apple.h>
#include <acpi/battery.h>
#include "sbshc.h"
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
#define ACPI_BATTERY_DIR_NAME "BAT%i"
#define ACPI_AC_DIR_NAME "AC0"
#define ACPI_SBS_NOTIFY_STATUS 0x80
#define ACPI_SBS_NOTIFY_INFO 0x81
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("Smart Battery System ACPI interface driver");
MODULE_LICENSE("GPL");
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
static const struct acpi_device_id sbs_device_ids[] = {
{"ACPI0002", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_sbs *sbs;
unsigned long update_time;
char name[8];
char manufacturer_name[ACPI_SBS_BLOCK_MAX];
char device_name[ACPI_SBS_BLOCK_MAX];
char device_chemistry[ACPI_SBS_BLOCK_MAX];
u16 alarm_capacity;
u16 full_charge_capacity;
u16 design_capacity;
u16 design_voltage;
u16 serial_number;
u16 cycle_count;
u16 temp_now;
u16 voltage_now;
s16 rate_now;
s16 rate_avg;
u16 capacity_now;
u16 state_of_charge;
u16 state;
u16 mode;
u16 spec;
u8 id;
u8 present:1;
u8 have_sysfs_alarm:1;
};
#define to_acpi_battery(x) power_supply_get_drvdata(x)
struct acpi_sbs {
struct power_supply *charger;
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
struct acpi_battery battery[MAX_SBS_BAT];
u8 batteries_supported:4;
u8 manager_present:1;
u8 charger_present:1;
u8 charger_exists:1;
};
#define to_acpi_sbs(x) power_supply_get_drvdata(x)
static void acpi_sbs_remove(struct acpi_device *device);
static int acpi_battery_get_state(struct acpi_battery *battery);
static inline int battery_scale(int log)
{
int scale = 1;
while (log--)
scale *= 10;
return scale;
}
static inline int acpi_battery_vscale(struct acpi_battery *battery)
{
return battery_scale((battery->spec & 0x0f00) >> 8);
}
static inline int acpi_battery_ipscale(struct acpi_battery *battery)
{
return battery_scale((battery->spec & 0xf000) >> 12);
}
static inline int acpi_battery_mode(struct acpi_battery *battery)
{
return (battery->mode & 0x8000);
}
static inline int acpi_battery_scale(struct acpi_battery *battery)
{
return (acpi_battery_mode(battery) ? 10 : 1) *
acpi_battery_ipscale(battery);
}
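/*
 * Worked example (editor's addition, spec value assumed): with
 * battery->spec == 0x1310 the voltage scale is
 * battery_scale((0x1310 & 0x0f00) >> 8) == 10^3 == 1000 and the
 * current scale is battery_scale((0x1310 & 0xf000) >> 12) == 10^1 == 10;
 * acpi_battery_scale() further multiplies by 10 when the battery reports
 * in capacity mode (bit 0x8000 set, i.e. mW rather than mA units).
 */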
static int sbs_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct acpi_sbs *sbs = to_acpi_sbs(psy);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = sbs->charger_present;
break;
default:
return -EINVAL;
}
return 0;
}
static int acpi_battery_technology(struct acpi_battery *battery)
{
if (!strcasecmp("NiCd", battery->device_chemistry))
return POWER_SUPPLY_TECHNOLOGY_NiCd;
if (!strcasecmp("NiMH", battery->device_chemistry))
return POWER_SUPPLY_TECHNOLOGY_NiMH;
if (!strcasecmp("LION", battery->device_chemistry))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strcasecmp("LiP", battery->device_chemistry))
return POWER_SUPPLY_TECHNOLOGY_LIPO;
return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
}
static int acpi_sbs_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct acpi_battery *battery = to_acpi_battery(psy);
if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
acpi_battery_get_state(battery);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->rate_now < 0)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
else if (battery->rate_now > 0)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_FULL;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = battery->present;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = acpi_battery_technology(battery);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
val->intval = battery->cycle_count;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = battery->design_voltage *
acpi_battery_vscale(battery) * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = battery->voltage_now *
acpi_battery_vscale(battery) * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
val->intval = abs(battery->rate_now) *
acpi_battery_ipscale(battery) * 1000;
val->intval *= (acpi_battery_mode(battery)) ?
(battery->voltage_now *
acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_POWER_AVG:
val->intval = abs(battery->rate_avg) *
acpi_battery_ipscale(battery) * 1000;
val->intval *= (acpi_battery_mode(battery)) ?
(battery->voltage_now *
acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = battery->state_of_charge;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
val->intval = battery->design_capacity *
acpi_battery_scale(battery) * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
val->intval = battery->full_charge_capacity *
acpi_battery_scale(battery) * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_ENERGY_NOW:
val->intval = battery->capacity_now *
acpi_battery_scale(battery) * 1000;
break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = battery->temp_now - 2730; // dK -> dC
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->device_name;
break;
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = battery->manufacturer_name;
break;
default:
return -EINVAL;
}
return 0;
}
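/*
 * Arithmetic check (editor's addition, readings assumed): in mW mode with
 * rate_now == -500, voltage_now == 12000, ipscale == 1 and vscale == 1,
 * the POWER_SUPPLY_PROP_POWER_NOW case above computes
 * 500 * 1 * 1000 == 500000, then multiplies by 12000 * 1 / 1000 == 12,
 * reporting 6000000 uW (6 W) of discharge.
 */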
static enum power_supply_property sbs_ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
static enum power_supply_property sbs_charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
static enum power_supply_property sbs_energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
static const struct power_supply_desc acpi_sbs_charger_desc = {
.name = "sbs-charger",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = sbs_ac_props,
.num_properties = ARRAY_SIZE(sbs_ac_props),
.get_property = sbs_get_ac_property,
};
/* --------------------------------------------------------------------------
Smart Battery System Management
-------------------------------------------------------------------------- */
struct acpi_battery_reader {
u8 command; /* command for battery */
u8 mode; /* word or block? */
size_t offset; /* offset inside struct acpi_sbs_battery */
};
static struct acpi_battery_reader info_readers[] = {
{0x01, SMBUS_READ_WORD, offsetof(struct acpi_battery, alarm_capacity)},
{0x03, SMBUS_READ_WORD, offsetof(struct acpi_battery, mode)},
{0x10, SMBUS_READ_WORD, offsetof(struct acpi_battery, full_charge_capacity)},
{0x17, SMBUS_READ_WORD, offsetof(struct acpi_battery, cycle_count)},
{0x18, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_capacity)},
{0x19, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_voltage)},
{0x1a, SMBUS_READ_WORD, offsetof(struct acpi_battery, spec)},
{0x1c, SMBUS_READ_WORD, offsetof(struct acpi_battery, serial_number)},
{0x20, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, manufacturer_name)},
{0x21, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_name)},
{0x22, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_chemistry)},
};
static struct acpi_battery_reader state_readers[] = {
{0x08, SMBUS_READ_WORD, offsetof(struct acpi_battery, temp_now)},
{0x09, SMBUS_READ_WORD, offsetof(struct acpi_battery, voltage_now)},
{0x0a, SMBUS_READ_WORD, offsetof(struct acpi_battery, rate_now)},
{0x0b, SMBUS_READ_WORD, offsetof(struct acpi_battery, rate_avg)},
{0x0f, SMBUS_READ_WORD, offsetof(struct acpi_battery, capacity_now)},
{0x0e, SMBUS_READ_WORD, offsetof(struct acpi_battery, state_of_charge)},
{0x16, SMBUS_READ_WORD, offsetof(struct acpi_battery, state)},
};
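/*
 * Editor's note: the command bytes above are standard Smart Battery Data
 * Specification functions, e.g. 0x08 Temperature(), 0x09 Voltage(),
 * 0x0a Current(), 0x0b AverageCurrent() and 0x16 BatteryStatus().
 */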
static int acpi_manager_get_info(struct acpi_sbs *sbs)
{
int result = 0;
u16 battery_system_info;
result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER,
0x04, (u8 *)&battery_system_info);
if (!result)
sbs->batteries_supported = battery_system_info & 0x000f;
return result;
}
static int acpi_battery_get_info(struct acpi_battery *battery)
{
int i, result = 0;
for (i = 0; i < ARRAY_SIZE(info_readers); ++i) {
result = acpi_smbus_read(battery->sbs->hc,
info_readers[i].mode,
ACPI_SBS_BATTERY,
info_readers[i].command,
(u8 *) battery +
info_readers[i].offset);
if (result)
break;
}
return result;
}
static int acpi_battery_get_state(struct acpi_battery *battery)
{
int i, result = 0;
if (battery->update_time &&
time_before(jiffies, battery->update_time +
msecs_to_jiffies(cache_time)))
return 0;
for (i = 0; i < ARRAY_SIZE(state_readers); ++i) {
result = acpi_smbus_read(battery->sbs->hc,
state_readers[i].mode,
ACPI_SBS_BATTERY,
state_readers[i].command,
(u8 *)battery +
state_readers[i].offset);
if (result)
goto end;
}
end:
battery->update_time = jiffies;
return result;
}
static int acpi_battery_get_alarm(struct acpi_battery *battery)
{
return acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
ACPI_SBS_BATTERY, 0x01,
(u8 *)&battery->alarm_capacity);
}
static int acpi_battery_set_alarm(struct acpi_battery *battery)
{
struct acpi_sbs *sbs = battery->sbs;
u16 value, sel = 1 << (battery->id + 12);
int ret;
if (sbs->manager_present) {
ret = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER,
0x01, (u8 *)&value);
if (ret)
goto end;
if ((value & 0xf000) != sel) {
value &= 0x0fff;
value |= sel;
ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD,
ACPI_SBS_MANAGER,
0x01, (u8 *)&value, 2);
if (ret)
goto end;
}
}
ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_BATTERY,
0x01, (u8 *)&battery->alarm_capacity, 2);
end:
return ret;
}
static int acpi_ac_get_present(struct acpi_sbs *sbs)
{
int result;
u16 status;
result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER,
0x13, (u8 *) & status);
if (result)
return result;
/*
* The spec requires that bit 4 always be 1. If it's not set, assume
* that the implementation doesn't support an SBS charger.
*
* And on some MacBooks a status of 0xffff is always returned, no
* matter whether the charger is plugged in or not, which is also
* wrong, so ignore the SBS charger for those too.
*/
if (!((status >> 4) & 0x1) || status == 0xffff)
return -ENODEV;
sbs->charger_present = (status >> 15) & 0x1;
return 0;
}
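/*
 * Example (editor's addition, readings assumed): a charger status of
 * 0x8010 passes the bit-4 sanity check and reports the adapter on-line
 * (bit 15 set); 0x0010 would report it off-line, and 0xffff is rejected
 * as the bogus all-ones pattern seen on some MacBooks.
 */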
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
acpi_battery_get_alarm(battery);
return sprintf(buf, "%d\n", battery->alarm_capacity *
acpi_battery_scale(battery) * 1000);
}
static ssize_t acpi_battery_alarm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
acpi_battery_set_alarm(battery);
return count;
}
static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_battery_read(struct acpi_battery *battery)
{
int result, saved_present = battery->present;
u16 state;
if (battery->sbs->manager_present) {
result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
ACPI_SBS_MANAGER, 0x01, (u8 *)&state);
if (result)
return result;
battery->present = state & (1 << battery->id);
if (!battery->present)
return 0;
/* Masking necessary for Smart Battery Selectors */
state = 0x0fff;
state |= 1 << (battery->id + 12);
acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD,
ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
} else {
if (battery->id == 0) {
battery->present = 1;
} else {
if (!battery->present)
return 0;
}
}
if (saved_present != battery->present) {
battery->update_time = 0;
result = acpi_battery_get_info(battery);
if (result) {
battery->present = 0;
return result;
}
}
result = acpi_battery_get_state(battery);
if (result)
battery->present = 0;
return result;
}
/* Smart Battery */
static int acpi_battery_add(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
struct power_supply_config psy_cfg = { .drv_data = battery, };
int result;
battery->id = id;
battery->sbs = sbs;
result = acpi_battery_read(battery);
if (result)
return result;
sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
battery->bat_desc.name = battery->name;
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
battery->bat_desc.properties = sbs_charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(sbs_charge_battery_props);
} else {
battery->bat_desc.properties = sbs_energy_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(sbs_energy_battery_props);
}
battery->bat_desc.get_property = acpi_sbs_battery_get_property;
battery->bat = power_supply_register(&sbs->device->dev,
&battery->bat_desc, &psy_cfg);
if (IS_ERR(battery->bat)) {
result = PTR_ERR(battery->bat);
battery->bat = NULL;
goto end;
}
result = device_create_file(&battery->bat->dev, &alarm_attr);
if (result)
goto end;
battery->have_sysfs_alarm = 1;
end:
pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
battery->name, battery->present ? "present" : "absent");
return result;
}
static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
struct acpi_battery *battery = &sbs->battery[id];
if (battery->bat) {
if (battery->have_sysfs_alarm)
device_remove_file(&battery->bat->dev, &alarm_attr);
power_supply_unregister(battery->bat);
}
}
static int acpi_charger_add(struct acpi_sbs *sbs)
{
int result;
struct power_supply_config psy_cfg = { .drv_data = sbs, };
result = acpi_ac_get_present(sbs);
if (result)
goto end;
sbs->charger_exists = 1;
sbs->charger = power_supply_register(&sbs->device->dev,
&acpi_sbs_charger_desc, &psy_cfg);
if (IS_ERR(sbs->charger)) {
result = PTR_ERR(sbs->charger);
sbs->charger = NULL;
}
pr_info("%s [%s]: AC Adapter [%s] (%s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
end:
return result;
}
static void acpi_charger_remove(struct acpi_sbs *sbs)
{
if (sbs->charger)
power_supply_unregister(sbs->charger);
}
static void acpi_sbs_callback(void *context)
{
int id;
struct acpi_sbs *sbs = context;
struct acpi_battery *bat;
u8 saved_charger_state = sbs->charger_present;
u8 saved_battery_state;
if (sbs->charger_exists) {
acpi_ac_get_present(sbs);
if (sbs->charger_present != saved_charger_state)
kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
}
if (sbs->manager_present) {
for (id = 0; id < MAX_SBS_BAT; ++id) {
if (!(sbs->batteries_supported & (1 << id)))
continue;
bat = &sbs->battery[id];
saved_battery_state = bat->present;
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
}
}
}
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
goto end;
}
mutex_init(&sbs->lock);
sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
device->driver_data = sbs;
result = acpi_charger_add(sbs);
if (result && result != -ENODEV)
goto end;
result = 0;
if (!x86_apple_machine) {
result = acpi_manager_get_info(sbs);
if (!result) {
sbs->manager_present = 1;
for (id = 0; id < MAX_SBS_BAT; ++id)
if ((sbs->batteries_supported & (1 << id)))
acpi_battery_add(sbs, id);
}
}
if (!sbs->manager_present)
acpi_battery_add(sbs, 0);
acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
end:
if (result)
acpi_sbs_remove(device);
return result;
}
static void acpi_sbs_remove(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int id;
if (!device)
return;
sbs = acpi_driver_data(device);
if (!sbs)
return;
mutex_lock(&sbs->lock);
acpi_smbus_unregister_callback(sbs->hc);
for (id = 0; id < MAX_SBS_BAT; ++id)
acpi_battery_remove(sbs, id);
acpi_charger_remove(sbs);
mutex_unlock(&sbs->lock);
mutex_destroy(&sbs->lock);
kfree(sbs);
}
#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
struct acpi_sbs *sbs;
if (!dev)
return -EINVAL;
sbs = to_acpi_device(dev)->driver_data;
acpi_sbs_callback(sbs);
return 0;
}
#else
#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
static struct acpi_driver acpi_sbs_driver = {
.name = "sbs",
.class = ACPI_SBS_CLASS,
.ids = sbs_device_ids,
.ops = {
.add = acpi_sbs_add,
.remove = acpi_sbs_remove,
},
.drv.pm = &acpi_sbs_pm,
};
module_acpi_driver(acpi_sbs_driver);
| linux-master | drivers/acpi/sbs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 40 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/apple.h>
#include "internal.h"
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_root_remove(struct acpi_device *device);
static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
{
acpiphp_check_host_bridge(adev);
return 0;
}
#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
| OSC_PCI_ASPM_SUPPORT \
| OSC_PCI_CLOCK_PM_SUPPORT \
| OSC_PCI_MSI_SUPPORT)
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
static struct acpi_scan_handler pci_root_handler = {
.ids = root_device_ids,
.attach = acpi_pci_root_add,
.detach = acpi_pci_root_remove,
.hotplug = {
.enabled = true,
.scan_dependent = acpi_pci_root_scan_dependent,
},
};
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle: the ACPI CA node in question.
*
* Note: we could make this API take a struct acpi_device * instead, but
* for now, it's more convenient to operate on an acpi_handle.
*/
int acpi_is_root_bridge(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
int ret;
if (!device)
return 0;
ret = acpi_match_device_ids(device, root_device_ids);
if (ret)
return 0;
else
return 1;
}
EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
struct resource *res = data;
struct acpi_resource_address64 address;
acpi_status status;
status = acpi_resource_to_address64(resource, &address);
if (ACPI_FAILURE(status))
return AE_OK;
if ((address.address.address_length > 0) &&
(address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
res->start = address.address.minimum;
res->end = address.address.minimum + address.address.address_length - 1;
}
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
struct resource *res)
{
acpi_status status;
res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
if (res->start == -1)
return AE_ERROR;
return AE_OK;
}
struct pci_osc_bit_struct {
u32 bit;
char *desc;
};
static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
{ OSC_PCI_ASPM_SUPPORT, "ASPM" },
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
{ OSC_PCI_EDR_SUPPORT, "EDR" },
{ OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
{ OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
{ OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
{ OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" },
};
static struct pci_osc_bit_struct cxl_osc_support_bit[] = {
{ OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT, "CXL11PortRegAccess" },
{ OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT, "CXL20PortDevRegAccess" },
{ OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT, "CXLProtocolErrorReporting" },
{ OSC_CXL_NATIVE_HP_SUPPORT, "CXLNativeHotPlug" },
};
static struct pci_osc_bit_struct cxl_osc_control_bit[] = {
{ OSC_CXL_ERROR_REPORTING_CONTROL, "CXLMemErrorReporting" },
};
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
struct pci_osc_bit_struct *table, int size)
{
char buf[80];
int i, len = 0;
struct pci_osc_bit_struct *entry;
buf[0] = '\0';
for (i = 0, entry = table; i < size; i++, entry++)
if (word & entry->bit)
len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
len ? " " : "", entry->desc);
dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
}
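/*
 * For example (editor's addition, device name assumed), a support word of
 * 0x1f decodes through pci_osc_support_bit into a log line such as:
 *
 *	acpi PNP0A08:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI]
 */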
static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
{
decode_osc_bits(root, msg, word, pci_osc_support_bit,
ARRAY_SIZE(pci_osc_support_bit));
}
static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
{
decode_osc_bits(root, msg, word, pci_osc_control_bit,
ARRAY_SIZE(pci_osc_control_bit));
}
static void decode_cxl_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
{
decode_osc_bits(root, msg, word, cxl_osc_support_bit,
ARRAY_SIZE(cxl_osc_support_bit));
}
static void decode_cxl_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
{
decode_osc_bits(root, msg, word, cxl_osc_control_bit,
ARRAY_SIZE(cxl_osc_control_bit));
}
static inline bool is_pcie(struct acpi_pci_root *root)
{
return root->bridge_type == ACPI_BRIDGE_TYPE_PCIE;
}
static inline bool is_cxl(struct acpi_pci_root *root)
{
return root->bridge_type == ACPI_BRIDGE_TYPE_CXL;
}
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static u8 cxl_osc_uuid_str[] = "68F2D50B-C469-4d8A-BD3D-941A103FD3FC";
static char *to_uuid(struct acpi_pci_root *root)
{
if (is_cxl(root))
return cxl_osc_uuid_str;
return pci_osc_uuid_str;
}
static int cap_length(struct acpi_pci_root *root)
{
if (is_cxl(root))
return sizeof(u32) * OSC_CXL_CAPABILITY_DWORDS;
return sizeof(u32) * OSC_PCI_CAPABILITY_DWORDS;
}
static acpi_status acpi_pci_run_osc(struct acpi_pci_root *root,
const u32 *capbuf, u32 *pci_control,
u32 *cxl_control)
{
struct acpi_osc_context context = {
.uuid_str = to_uuid(root),
.rev = 1,
.cap.length = cap_length(root),
.cap.pointer = (void *)capbuf,
};
acpi_status status;
status = acpi_run_osc(root->device->handle, &context);
if (ACPI_SUCCESS(status)) {
*pci_control = acpi_osc_ctx_get_pci_control(&context);
if (is_cxl(root))
*cxl_control = acpi_osc_ctx_get_cxl_control(&context);
kfree(context.ret.pointer);
}
return status;
}
static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 support,
u32 *control, u32 cxl_support,
u32 *cxl_control)
{
acpi_status status;
u32 pci_result, cxl_result, capbuf[OSC_CXL_CAPABILITY_DWORDS];
support |= root->osc_support_set;
capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_DWORD] = support;
capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
if (is_cxl(root)) {
cxl_support |= root->osc_ext_support_set;
capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support;
capbuf[OSC_EXT_CONTROL_DWORD] = *cxl_control | root->osc_ext_control_set;
}
retry:
status = acpi_pci_run_osc(root, capbuf, &pci_result, &cxl_result);
if (ACPI_SUCCESS(status)) {
root->osc_support_set = support;
*control = pci_result;
if (is_cxl(root)) {
root->osc_ext_support_set = cxl_support;
*cxl_control = cxl_result;
}
} else if (is_cxl(root)) {
/*
* CXL _OSC is optional on CXL 1.1 hosts. Fall back to PCIe _OSC
* upon any failure using CXL _OSC.
*/
root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
goto retry;
}
return status;
}
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_root *root;
if (!device || acpi_match_device_ids(device, root_device_ids))
return NULL;
root = acpi_driver_data(device);
return root;
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);
struct acpi_handle_node {
struct list_head node;
acpi_handle handle;
};
/**
* acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
* @handle: the handle in question
*
* Given an ACPI CA handle, the desired PCI device is located in the
* list of PCI devices.
*
* If the device is found, its reference count is increased and this
* function returns a pointer to its data structure. The caller must
* decrement the reference count by calling pci_dev_put().
* If no device is found, %NULL is returned.
*/
struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct acpi_device_physical_node *pn;
struct pci_dev *pci_dev = NULL;
if (!adev)
return NULL;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn, &adev->physical_node_list, node) {
if (dev_is_pci(pn->dev)) {
get_device(pn->dev);
pci_dev = to_pci_dev(pn->dev);
break;
}
}
mutex_unlock(&adev->physical_node_lock);
return pci_dev;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
/**
* acpi_pci_osc_control_set - Request control of PCI root _OSC features.
* @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
* @mask: Mask of _OSC bits to request control of, place to store control mask.
* @support: _OSC supported capability.
* @cxl_mask: Mask of CXL _OSC control bits, place to store control mask.
* @cxl_support: CXL _OSC supported capability.
*
* Run _OSC query for @mask and if that is successful, compare the returned
* mask of control bits with @req. If all of the @req bits are set in the
* returned mask, run _OSC request for it.
*
* The variable at the @mask address may be modified regardless of whether or
* not the function returns success. On success it will contain the mask of
* _OSC bits the BIOS has granted control of, but its contents are meaningless
* on failure.
**/
static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask,
u32 support, u32 *cxl_mask,
u32 cxl_support)
{
u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL;
struct acpi_pci_root *root;
acpi_status status;
u32 ctrl, cxl_ctrl = 0, capbuf[OSC_CXL_CAPABILITY_DWORDS];
if (!mask)
return AE_BAD_PARAMETER;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
ctrl = *mask;
*mask |= root->osc_control_set;
if (is_cxl(root)) {
cxl_ctrl = *cxl_mask;
*cxl_mask |= root->osc_ext_control_set;
}
/* Need to check the available controls bits before requesting them. */
do {
u32 pci_missing = 0, cxl_missing = 0;
status = acpi_pci_query_osc(root, support, mask, cxl_support,
cxl_mask);
if (ACPI_FAILURE(status))
return status;
if (is_cxl(root)) {
if (ctrl == *mask && cxl_ctrl == *cxl_mask)
break;
pci_missing = ctrl & ~(*mask);
cxl_missing = cxl_ctrl & ~(*cxl_mask);
} else {
if (ctrl == *mask)
break;
pci_missing = ctrl & ~(*mask);
}
if (pci_missing)
decode_osc_control(root, "platform does not support",
pci_missing);
if (cxl_missing)
decode_cxl_osc_control(root, "CXL platform does not support",
cxl_missing);
ctrl = *mask;
cxl_ctrl = *cxl_mask;
} while (*mask || *cxl_mask);
/* No need to request _OSC if the control was already granted. */
if ((root->osc_control_set & ctrl) == ctrl &&
(root->osc_ext_control_set & cxl_ctrl) == cxl_ctrl)
return AE_OK;
if ((ctrl & req) != req) {
decode_osc_control(root, "not requesting control; platform does not support",
req & ~(ctrl));
return AE_SUPPORT;
}
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
capbuf[OSC_CONTROL_DWORD] = ctrl;
if (is_cxl(root)) {
capbuf[OSC_EXT_SUPPORT_DWORD] = root->osc_ext_support_set;
capbuf[OSC_EXT_CONTROL_DWORD] = cxl_ctrl;
}
status = acpi_pci_run_osc(root, capbuf, mask, cxl_mask);
if (ACPI_FAILURE(status))
return status;
root->osc_control_set = *mask;
root->osc_ext_control_set = *cxl_mask;
return AE_OK;
}
static u32 calculate_support(void)
{
u32 support;
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
*/
support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
if (pci_ext_cfg_avail())
support |= OSC_PCI_EXT_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
if (pci_msi_enabled())
support |= OSC_PCI_MSI_SUPPORT;
if (IS_ENABLED(CONFIG_PCIE_EDR))
support |= OSC_PCI_EDR_SUPPORT;
return support;
}
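/*
 * Worked example (editor's addition, kernel config assumed): with
 * extended config space, ASPM, MSI and EDR all available, the word built
 * above is OSC_PCI_SEGMENT_GROUPS_SUPPORT | OSC_PCI_HPX_TYPE_3_SUPPORT |
 * OSC_PCI_EXT_CONFIG_SUPPORT | OSC_PCI_ASPM_SUPPORT |
 * OSC_PCI_CLOCK_PM_SUPPORT | OSC_PCI_MSI_SUPPORT | OSC_PCI_EDR_SUPPORT,
 * i.e. 0x19f with the current macro values.
 */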
/*
* Background on hotplug support, and making it depend on only
* CONFIG_HOTPLUG_PCI_PCIE vs. also considering CONFIG_MEMORY_HOTPLUG:
*
* CONFIG_ACPI_HOTPLUG_MEMORY does depend on CONFIG_MEMORY_HOTPLUG, but
* there is no existing _OSC for memory hotplug support. The reason is that
* ACPI memory hotplug requires the OS to acknowledge / coordinate with
* memory plug events via a scan handler. On the CXL side the equivalent
* would be if Linux supported the Mechanical Retention Lock [1], or
* otherwise had some coordination for the driver of a PCI device
* undergoing hotplug to be consulted on whether the hotplug should
* proceed or not.
*
* The concern is that if Linux says no to supporting CXL hotplug then
* the BIOS may say no to giving the OS hotplug control of any other PCIe
* device. So the question here is not whether hotplug is enabled, it's
* whether it is handled natively by the OS at all, and if
* CONFIG_HOTPLUG_PCI_PCIE is enabled then the answer is "yes".
*
* Otherwise, the plan for CXL coordinated remove, since the kernel does
* not support blocking hotplug, is to require the memory device to be
* disabled before hotplug is attempted. When CONFIG_MEMORY_HOTPLUG is
* disabled that step will fail and the remove attempt cancelled by the
* user. If that is not honored and the card is removed anyway then it
* does not matter if CONFIG_MEMORY_HOTPLUG is enabled or not, it will
* cause a crash and other badness.
*
* Therefore, just say yes to CXL hotplug and require removal to
* be coordinated by userspace unless and until the kernel grows better
* mechanisms for doing "managed" removal of devices in consultation with
* the driver.
*
* [1]: https://lore.kernel.org/all/[email protected]/
*/
static u32 calculate_cxl_support(void)
{
u32 support;
support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT;
support |= OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT;
if (pci_aer_available())
support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT;
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
support |= OSC_CXL_NATIVE_HP_SUPPORT;
return support;
}
static u32 calculate_control(void)
{
u32 control;
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
| OSC_PCI_EXPRESS_PME_CONTROL;
if (IS_ENABLED(CONFIG_PCIEASPM))
control |= OSC_PCI_EXPRESS_LTR_CONTROL;
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
if (pci_aer_available())
control |= OSC_PCI_EXPRESS_AER_CONTROL;
/*
* Per the Downstream Port Containment Related Enhancements ECN to
* the PCI Firmware Spec, r3.2, sec 4.5.1, table 4-5,
* OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC
* and EDR.
*/
if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR))
control |= OSC_PCI_EXPRESS_DPC_CONTROL;
return control;
}
static u32 calculate_cxl_control(void)
{
u32 control = 0;
if (IS_ENABLED(CONFIG_MEMORY_FAILURE))
control |= OSC_CXL_ERROR_REPORTING_CONTROL;
return control;
}
static bool os_control_query_checks(struct acpi_pci_root *root, u32 support)
{
struct acpi_device *device = root->device;
if (pcie_ports_disabled) {
dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
return false;
}
if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
decode_osc_support(root, "not requesting OS control; OS requires",
ACPI_PCIE_REQ_SUPPORT);
return false;
}
return true;
}
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
{
u32 support, control = 0, requested = 0;
u32 cxl_support = 0, cxl_control = 0, cxl_requested = 0;
acpi_status status;
struct acpi_device *device = root->device;
acpi_handle handle = device->handle;
/*
* Apple platforms always return failure on _OSC calls when _OSI("Darwin")
* has been called successfully. We know the feature set supported by the
* platform, so avoid calling _OSC at all.
*/
if (x86_apple_machine) {
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
decode_osc_control(root, "OS assumes control of",
root->osc_control_set);
return;
}
support = calculate_support();
decode_osc_support(root, "OS supports", support);
if (os_control_query_checks(root, support))
requested = control = calculate_control();
if (is_cxl(root)) {
cxl_support = calculate_cxl_support();
decode_cxl_osc_support(root, "OS supports", cxl_support);
cxl_requested = cxl_control = calculate_cxl_control();
}
status = acpi_pci_osc_control_set(handle, &control, support,
&cxl_control, cxl_support);
if (ACPI_SUCCESS(status)) {
if (control)
decode_osc_control(root, "OS now controls", control);
if (cxl_control)
decode_cxl_osc_control(root, "OS now controls",
cxl_control);
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
/*
* We have ASPM control, but the FADT indicates that
* it's unsupported. Leave existing configuration
* intact and prevent the OS from touching it.
*/
dev_info(&device->dev, "FADT indicates ASPM is unsupported, using BIOS configuration\n");
*no_aspm = 1;
}
} else {
/*
* We want to disable ASPM here, but aspm_disabled
* needs to remain in its state from boot so that we
* properly handle PCIe 1.1 devices. So we set this
* flag here, to defer the action until after the ACPI
* root scan.
*/
*no_aspm = 1;
/* _OSC is optional for PCI host bridges */
if (status == AE_NOT_FOUND && !is_pcie(root))
return;
if (control) {
decode_osc_control(root, "OS requested", requested);
decode_osc_control(root, "platform willing to grant", control);
}
if (cxl_control) {
decode_cxl_osc_control(root, "OS requested", cxl_requested);
decode_cxl_osc_control(root, "platform willing to grant",
cxl_control);
}
dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
acpi_format_exception(status));
}
}
static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
unsigned long long segment, bus;
acpi_status status;
int result;
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0;
bool hotadd = system_state == SYSTEM_RUNNING;
const char *acpi_hid;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
segment = 0;
status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
dev_err(&device->dev, "can't evaluate _SEG\n");
result = -ENODEV;
goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
root->secondary.flags = IORESOURCE_BUS;
status = try_get_root_bridge_busnr(handle, &root->secondary);
if (ACPI_FAILURE(status)) {
/*
* We need both the start and end of the downstream bus range
* to interpret _CBA (MMCONFIG base address), so it really is
* supposed to be in _CRS. If we don't find it there, all we
* can do is assume [_BBN-0xFF] or [0-0xFF].
*/
root->secondary.end = 0xFF;
dev_warn(&device->dev,
FW_BUG "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(handle, METHOD_NAME__BBN,
NULL, &bus);
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
root->secondary.start = 0;
else {
dev_err(&device->dev, "can't evaluate _BBN\n");
result = -ENODEV;
goto end;
}
}
root->device = device;
root->segment = segment & 0xFFFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
result = -ENXIO;
goto end;
}
pr_info("%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
acpi_hid = acpi_device_hid(root->device);
if (strcmp(acpi_hid, "PNP0A08") == 0)
root->bridge_type = ACPI_BRIDGE_TYPE_PCIE;
else if (strcmp(acpi_hid, "ACPI0016") == 0)
root->bridge_type = ACPI_BRIDGE_TYPE_CXL;
else
dev_dbg(&device->dev, "Assuming non-PCIe host bridge\n");
negotiate_os_control(root, &no_aspm);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/
/*
* Scan the Root Bridge
* --------------------
* Must do this prior to any attempt to bind the root device, as the
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
dev_err(&device->dev,
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
goto remove_dmar;
}
if (no_aspm)
pcie_no_aspm();
pci_acpi_add_bus_pm_notifier(device);
device_set_wakeup_capable(root->bus->bridge, device->wakeup.flags.valid);
if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
/*
* This is only called for the hotadd case. For the boot-time
* case, we need to wait until after PCI initialization in
* order to deal with IOAPICs mapped in on a PCI BAR.
*
* This is currently x86-specific, because acpi_ioapic_add()
* is an empty function without CONFIG_ACPI_HOTPLUG_IOAPIC.
* And CONFIG_ACPI_HOTPLUG_IOAPIC depends on CONFIG_X86_IO_APIC
* (see drivers/acpi/Kconfig).
*/
acpi_ioapic_add(root->device->handle);
}
pci_lock_rescan_remove();
pci_bus_add_devices(root->bus);
pci_unlock_rescan_remove();
return 1;
remove_dmar:
if (hotadd)
dmar_device_remove(handle);
end:
kfree(root);
return result;
}
static void acpi_pci_root_remove(struct acpi_device *device)
{
struct acpi_pci_root *root = acpi_driver_data(device);
pci_lock_rescan_remove();
pci_stop_root_bus(root->bus);
pci_ioapic_remove(root);
device_set_wakeup_capable(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
pci_remove_root_bus(root->bus);
WARN_ON(acpi_ioapic_remove(root));
dmar_device_remove(device->handle);
pci_unlock_rescan_remove();
kfree(root);
}
/*
* Following code to support acpi_pci_root_create() is copied from
* arch/x86/pci/acpi.c and modified so it could be reused by x86, IA64
* and ARM64.
*/
static void acpi_pci_root_validate_resources(struct device *dev,
struct list_head *resources,
unsigned long type)
{
LIST_HEAD(list);
struct resource *res1, *res2, *root = NULL;
struct resource_entry *tmp, *entry, *entry2;
BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
list_splice_init(resources, &list);
resource_list_for_each_entry_safe(entry, tmp, &list) {
bool free = false;
resource_size_t end;
res1 = entry->res;
if (!(res1->flags & type))
goto next;
/* Exclude non-addressable range or non-addressable portion */
end = min(res1->end, root->end);
if (end <= res1->start) {
dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
res1);
free = true;
goto next;
} else if (res1->end != end) {
dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
res1, (unsigned long long)end + 1,
(unsigned long long)res1->end);
res1->end = end;
}
resource_list_for_each_entry(entry2, resources) {
res2 = entry2->res;
if (!(res2->flags & type))
continue;
/*
* I don't like throwing away windows because then
* our resources no longer match the ACPI _CRS, but
* the kernel resource tree doesn't allow overlaps.
*/
if (resource_union(res1, res2, res2)) {
dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
res2, res1);
free = true;
goto next;
}
}
next:
resource_list_del(entry);
if (free)
resource_list_free_entry(entry);
else
resource_list_add_tail(entry, resources);
}
}
static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
struct resource *res = entry->res;
resource_size_t cpu_addr = res->start;
resource_size_t pci_addr = cpu_addr - entry->offset;
resource_size_t length = resource_size(res);
unsigned long port;
if (pci_register_io_range(fwnode, cpu_addr, length))
goto err;
port = pci_address_to_pio(cpu_addr);
if (port == (unsigned long)-1)
goto err;
res->start = port;
res->end = port + length - 1;
entry->offset = port - pci_addr;
if (pci_remap_iospace(res, cpu_addr) < 0)
goto err;
pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res);
return;
err:
res->flags |= IORESOURCE_DISABLED;
#endif
}
int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
{
int ret;
struct list_head *list = &info->resources;
struct acpi_device *device = info->bridge;
struct resource_entry *entry, *tmp;
unsigned long flags;
flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT;
ret = acpi_dev_get_resources(device, list,
acpi_dev_filter_resource_type_cb,
(void *)flags);
if (ret < 0)
dev_warn(&device->dev,
"failed to parse _CRS method, error code %d\n", ret);
else if (ret == 0)
dev_dbg(&device->dev,
"no IO and memory resources present in _CRS\n");
else {
resource_list_for_each_entry_safe(entry, tmp, list) {
if (entry->res->flags & IORESOURCE_IO)
acpi_pci_root_remap_iospace(&device->fwnode,
entry);
if (entry->res->flags & IORESOURCE_DISABLED)
resource_list_destroy_entry(entry);
else
entry->res->name = info->name;
}
acpi_pci_root_validate_resources(&device->dev, list,
IORESOURCE_MEM);
acpi_pci_root_validate_resources(&device->dev, list,
IORESOURCE_IO);
}
return ret;
}
static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
{
struct resource_entry *entry, *tmp;
struct resource *res, *conflict, *root = NULL;
resource_list_for_each_entry_safe(entry, tmp, &info->resources) {
res = entry->res;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
else if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
continue;
/*
* Some legacy x86 host bridge drivers use iomem_resource and
* ioport_resource as default resource pool, skip it.
*/
if (res == root)
continue;
conflict = insert_resource_conflict(root, res);
if (conflict) {
dev_info(&info->bridge->dev,
"ignoring host bridge window %pR (conflicts with %s %pR)\n",
res, conflict->name, conflict);
resource_list_destroy_entry(entry);
}
}
}
static void __acpi_pci_root_release_info(struct acpi_pci_root_info *info)
{
struct resource *res;
struct resource_entry *entry, *tmp;
if (!info)
return;
resource_list_for_each_entry_safe(entry, tmp, &info->resources) {
res = entry->res;
if (res->parent &&
(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
release_resource(res);
resource_list_destroy_entry(entry);
}
info->ops->release_info(info);
}
static void acpi_pci_root_release_info(struct pci_host_bridge *bridge)
{
struct resource *res;
struct resource_entry *entry;
resource_list_for_each_entry(entry, &bridge->windows) {
res = entry->res;
if (res->flags & IORESOURCE_IO)
pci_unmap_iospace(res);
if (res->parent &&
(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
release_resource(res);
}
__acpi_pci_root_release_info(bridge->release_data);
}
struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
struct acpi_pci_root_ops *ops,
struct acpi_pci_root_info *info,
void *sysdata)
{
int ret, busnum = root->secondary.start;
struct acpi_device *device = root->device;
int node = acpi_get_node(device->handle);
struct pci_bus *bus;
struct pci_host_bridge *host_bridge;
union acpi_object *obj;
info->root = root;
info->bridge = device;
info->ops = ops;
INIT_LIST_HEAD(&info->resources);
snprintf(info->name, sizeof(info->name), "PCI Bus %04x:%02x",
root->segment, busnum);
if (ops->init_info && ops->init_info(info))
goto out_release_info;
if (ops->prepare_resources)
ret = ops->prepare_resources(info);
else
ret = acpi_pci_probe_root_resources(info);
if (ret < 0)
goto out_release_info;
pci_acpi_root_add_resources(info);
pci_add_resource(&info->resources, &root->secondary);
bus = pci_create_root_bus(NULL, busnum, ops->pci_ops,
sysdata, &info->resources);
if (!bus)
goto out_release_info;
host_bridge = to_pci_host_bridge(bus->bridge);
if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
host_bridge->native_pcie_hotplug = 0;
if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
host_bridge->native_shpc_hotplug = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
host_bridge->native_aer = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
host_bridge->native_pme = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
host_bridge->native_ltr = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL))
host_bridge->native_dpc = 0;
if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
host_bridge->native_cxl_error = 0;
/*
* Evaluate the "PCI Boot Configuration" _DSM Function. If it
* exists and returns 0, we must preserve any PCI resource
* assignments made by firmware for this host bridge.
*/
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
DSM_PCI_PRESERVE_BOOT_CONFIG, NULL);
if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0)
host_bridge->preserve_config = 1;
ACPI_FREE(obj);
acpi_dev_power_up_children_with_adr(device);
pci_scan_child_bus(bus);
pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
info);
if (node != NUMA_NO_NODE)
dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
return bus;
out_release_info:
__acpi_pci_root_release_info(info);
return NULL;
}
void __init acpi_pci_root_init(void)
{
if (acpi_pci_disabled)
return;
pci_acpi_crs_quirks();
acpi_scan_add_handler_with_hotplug(&pci_root_handler, "pci_root");
}
| linux-master | drivers/acpi/pci_root.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_osl.c - OS-dependent functions ($Revision: 83 $)
*
* Copyright (C) 2000 Andrew Henroid
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: OSL: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "acpica/accommon.h"
#include "internal.h"
/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
};
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif /*ENABLE_DEBUGGER */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
u32 val_b);
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
*/
struct acpi_ioremap {
struct list_head list;
void __iomem *virt;
acpi_physical_address phys;
acpi_size size;
union {
unsigned long refcount;
struct rcu_work rwork;
} track;
};
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)
static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
u64 addr;
/* Handle possible alignment issues */
memcpy(&addr, &gas->address, sizeof(addr));
if (!addr || !length)
return;
/* Resources are never freed */
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
request_region(addr, length, desc);
else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1b_EVT_BLK");
acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
"ACPI PM1a_CNT_BLK");
acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
"ACPI PM1b_CNT_BLK");
if (acpi_gbl_FADT.pm_timer_length == 4)
acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
"ACPI PM2_CNT_BLK");
/* Length of GPE blocks must be a non-negative multiple of 2 */
if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
return 0;
}
fs_initcall_sync(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
acpi_os_vprintf(fmt, args);
va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);
void acpi_os_vprintf(const char *fmt, va_list args)
{
static char buffer[512];
vsprintf(buffer, fmt, args);
#ifdef ENABLE_DEBUGGER
if (acpi_in_debugger) {
kdb_printf("%s", buffer);
} else {
if (printk_get_level(buffer))
printk("%s", buffer);
else
printk(KERN_CONT "%s", buffer);
}
#else
if (acpi_debugger_write_log(buffer) < 0) {
if (printk_get_level(buffer))
printk("%s", buffer);
else
printk(KERN_CONT "%s", buffer);
}
#endif
}
#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
acpi_physical_address pa;
#ifdef CONFIG_KEXEC
/*
* We may have been provided with an RSDP on the command line,
* but if a malicious user has done so they may be pointing us
* at modified ACPI tables that could alter kernel behaviour -
* so, we check the lockdown status before making use of
* it. If we trust it then also stash it in an architecture
* specific location (if appropriate) so it can be carried
* over further kexec()s.
*/
if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
acpi_arch_set_root_pointer(acpi_rsdp);
return acpi_rsdp;
}
#endif
pa = acpi_arch_get_root_pointer();
if (pa)
return pa;
if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
if (efi.acpi != EFI_INVALID_TABLE_ADDR)
return efi.acpi;
pr_err("System description tables not found\n");
} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
acpi_find_root_pointer(&pa);
}
return pa;
}
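/*
 * Illustrative note (not part of the original file): with CONFIG_KEXEC,
 * a kexec'ed kernel can inherit the RSDP location via the command line,
 * e.g. "acpi_rsdp=0xe0000" (the address here is purely hypothetical).
 * As the code above shows, the value is honored only when ACPI table
 * lockdown is not in effect, and it takes precedence over both the EFI
 * config-table and legacy lookups.
 */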
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
if (map->phys <= phys &&
phys + size <= map->phys + map->size)
return map;
return NULL;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
struct acpi_ioremap *map;
map = acpi_map_lookup(phys, size);
if (map)
return map->virt + (phys - map->phys);
return NULL;
}
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
struct acpi_ioremap *map;
void __iomem *virt = NULL;
mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup(phys, size);
if (map) {
virt = map->virt + (phys - map->phys);
map->track.refcount++;
}
mutex_unlock(&acpi_ioremap_lock);
return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
if (map->virt <= virt &&
virt + size <= map->virt + map->size)
return map;
return NULL;
}
#if defined(CONFIG_IA64) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn) 0
#else
#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
unsigned long pfn;
pfn = pg_off >> PAGE_SHIFT;
if (should_use_kmap(pfn)) {
if (pg_sz > PAGE_SIZE)
return NULL;
return (void __iomem __force *)kmap(pfn_to_page(pfn));
} else
return acpi_os_ioremap(pg_off, pg_sz);
}
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
unsigned long pfn;
pfn = pg_off >> PAGE_SHIFT;
if (should_use_kmap(pfn))
kunmap(pfn_to_page(pfn));
else
iounmap(vaddr);
}
/**
* acpi_os_map_iomem - Get a virtual address for a given physical address range.
* @phys: Start of the physical address range to map.
* @size: Size of the physical address range to map.
*
* Look up the given physical address range in the list of existing ACPI memory
* mappings. If found, get a reference to it and return a pointer to it (its
* virtual address). If not found, map it, add it to that list and return a
* pointer to it.
*
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;
acpi_physical_address pg_off;
acpi_size pg_sz;
if (phys > ULONG_MAX) {
pr_err("Cannot map memory that high: 0x%llx\n", phys);
return NULL;
}
if (!acpi_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
mutex_lock(&acpi_ioremap_lock);
/* Check if there's a suitable mapping already. */
map = acpi_map_lookup(phys, size);
if (map) {
map->track.refcount++;
goto out;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return NULL;
}
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
return NULL;
}
INIT_LIST_HEAD(&map->list);
map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
map->phys = pg_off;
map->size = pg_sz;
map->track.refcount = 1;
list_add_tail_rcu(&map->list, &acpi_ioremaps);
out:
mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
static void acpi_os_map_remove(struct work_struct *work)
{
struct acpi_ioremap *map = container_of(to_rcu_work(work),
struct acpi_ioremap,
track.rwork);
acpi_unmap(map->phys, map->virt);
kfree(map);
}
/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
if (--map->track.refcount)
return;
list_del_rcu(&map->list);
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
queue_rcu_work(system_wq, &map->track.rwork);
}
/**
* acpi_os_unmap_iomem - Drop a memory mapping reference.
* @virt: Start of the address range to drop a reference to.
* @size: Size of the address range to drop a reference to.
*
* Look up the given virtual address range in the list of existing ACPI memory
* mappings, drop a reference to it and if there are no more active references
* to it, queue it up for later removal.
*
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
* __acpi_unmap_table() is an __init function, the __ref annotation is needed
* here.
*/
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
return;
}
acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
/**
* acpi_os_unmap_memory - Drop a memory mapping reference.
* @virt: Start of the address range to drop a reference to.
* @size: Size of the address range to drop a reference to.
*/
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
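/*
 * Illustrative sketch (not part of the original file): a typical
 * map/read/unmap cycle using the reference-counted mapping API above.
 * The helper name and the 4-byte access width are hypothetical.
 */
#if 0
static u32 example_read_phys_dword(acpi_physical_address pa)
{
	void __iomem *va = acpi_os_map_iomem(pa, sizeof(u32));
	u32 val;

	if (!va)
		return 0;
	val = readl(va);			/* read through the mapping */
	acpi_os_unmap_iomem(va, sizeof(u32));	/* drop the reference */
	return val;
}
#endif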
void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return NULL;
/* Handle possible alignment issues */
memcpy(&addr, &gas->address, sizeof(addr));
if (!addr || !gas->bit_width)
return NULL;
return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
struct acpi_ioremap *map;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
/* Handle possible alignment issues */
memcpy(&addr, &gas->address, sizeof(addr));
if (!addr || !gas->bit_width)
return;
mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
}
acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
if (!phys || !virt)
return AE_BAD_PARAMETER;
*phys = virt_to_phys(virt);
return AE_OK;
}
#endif
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;
int __init acpi_rev_override_setup(char *str)
{
acpi_rev_override = true;
return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override false
#endif
#define ACPI_MAX_OVERRIDE_LEN 100
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
acpi_string *new_val)
{
if (!init_val || !new_val)
return AE_BAD_PARAMETER;
*new_val = NULL;
if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
*new_val = acpi_os_name;
}
if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
pr_info("Overriding _REV return value to 5\n");
*new_val = (char *)5;
}
return AE_OK;
}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
u32 handled;
handled = (*acpi_irq_handler) (acpi_irq_context);
if (handled) {
acpi_irq_handled++;
return IRQ_HANDLED;
} else {
acpi_irq_not_handled++;
return IRQ_NONE;
}
}
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
void *context)
{
unsigned int irq;
acpi_irq_stats_init();
/*
* ACPI interrupts different from the SCI in our copy of the FADT are
* not supported.
*/
if (gsi != acpi_gbl_FADT.sci_interrupt)
return AE_BAD_PARAMETER;
if (acpi_irq_handler)
return AE_ALREADY_ACQUIRED;
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
return AE_OK;
}
acpi_irq_handler = handler;
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
pr_err("SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
acpi_sci_irq = irq;
return AE_OK;
}
acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
return AE_BAD_PARAMETER;
free_irq(acpi_sci_irq, acpi_irq);
acpi_irq_handler = NULL;
acpi_sci_irq = INVALID_ACPI_IRQ;
return AE_OK;
}
/*
* Running in interpreter thread context, safe to sleep
*/
void acpi_os_sleep(u64 ms)
{
msleep(ms);
}
void acpi_os_stall(u32 us)
{
while (us) {
u32 delay = 1000;
if (delay > us)
delay = us;
udelay(delay);
touch_nmi_watchdog();
us -= delay;
}
}
/*
* Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
* monotonically increasing timer with 100ns granularity. Do not use
* ktime_get() to implement this function because this function may get
* called after timekeeping has been suspended. Note: calling this function
* after timekeeping has been suspended may lead to unexpected results
* because when timekeeping is suspended the jiffies counter is not
* incremented. See also timekeeping_suspend().
*/
u64 acpi_os_get_timer(void)
{
return (get_jiffies_64() - INITIAL_JIFFIES) *
(ACPI_100NSEC_PER_SEC / HZ);
}
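/*
 * Illustrative sketch (not part of the original file): converting the
 * 100 ns ticks returned by acpi_os_get_timer() into microseconds, e.g.
 * to time an AML method. The helper name is hypothetical.
 */
#if 0
static u64 example_acpi_timer_delta_us(u64 start, u64 end)
{
	/* each tick is 100 ns, so 10 ticks per microsecond */
	return (end - start) / 10;
}
#endif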
acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
if (value)
*value = 0;
else
value = &dummy;
if (width <= 8) {
*value = inb(port);
} else if (width <= 16) {
*value = inw(port);
} else if (width <= 32) {
*value = inl(port);
} else {
pr_debug("%s: Access width %d not supported\n", __func__, width);
return AE_BAD_PARAMETER;
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
if (width <= 8) {
outb(value, port);
} else if (width <= 16) {
outw(value, port);
} else if (width <= 32) {
outl(value, port);
} else {
pr_debug("%s: Access width %d not supported\n", __func__, width);
return AE_BAD_PARAMETER;
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_os_write_port);
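/*
 * Illustrative sketch (not part of the original file): a
 * read-modify-write of an 8-bit I/O port through the OSL accessors
 * above. The helper name is hypothetical.
 */
#if 0
static void example_set_port_bit(acpi_io_address port, unsigned int bit)
{
	u32 val;

	if (acpi_os_read_port(port, &val, 8) == AE_OK)
		acpi_os_write_port(port, val | BIT(bit), 8);
}
#endif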
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
switch (width) {
case 8:
*(u8 *) value = readb(virt_addr);
break;
case 16:
*(u16 *) value = readw(virt_addr);
break;
case 32:
*(u32 *) value = readl(virt_addr);
break;
case 64:
*(u64 *) value = readq(virt_addr);
break;
default:
return -EINVAL;
}
return 0;
}
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
bool unmap = false;
u64 dummy;
int error;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
if (!virt_addr) {
rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
if (!virt_addr)
return AE_BAD_ADDRESS;
unmap = true;
}
if (!value)
value = &dummy;
error = acpi_os_read_iomem(virt_addr, value, width);
BUG_ON(error);
if (unmap)
iounmap(virt_addr);
else
rcu_read_unlock();
return AE_OK;
}
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
bool unmap = false;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
if (!virt_addr) {
rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
if (!virt_addr)
return AE_BAD_ADDRESS;
unmap = true;
}
switch (width) {
case 8:
writeb(value, virt_addr);
break;
case 16:
writew(value, virt_addr);
break;
case 32:
writel(value, virt_addr);
break;
case 64:
writeq(value, virt_addr);
break;
default:
BUG();
}
if (unmap)
iounmap(virt_addr);
else
rcu_read_unlock();
return AE_OK;
}
#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
u64 *value, u32 width)
{
int result, size;
u32 value32;
if (!value)
return AE_BAD_PARAMETER;
switch (width) {
case 8:
size = 1;
break;
case 16:
size = 2;
break;
case 32:
size = 4;
break;
default:
return AE_ERROR;
}
result = raw_pci_read(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
reg, size, &value32);
*value = value32;
return (result ? AE_ERROR : AE_OK);
}
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
u64 value, u32 width)
{
int result, size;
switch (width) {
case 8:
size = 1;
break;
case 16:
size = 2;
break;
case 32:
size = 4;
break;
default:
return AE_ERROR;
}
result = raw_pci_write(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
reg, size, value);
return (result ? AE_ERROR : AE_OK);
}
#endif
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
dpc->function(dpc->context);
kfree(dpc);
}
#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;
int acpi_register_debugger(struct module *owner,
const struct acpi_debugger_ops *ops)
{
int ret = 0;
mutex_lock(&acpi_debugger.lock);
if (acpi_debugger.ops) {
ret = -EBUSY;
goto err_lock;
}
acpi_debugger.owner = owner;
acpi_debugger.ops = ops;
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);
void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
mutex_lock(&acpi_debugger.lock);
if (ops == acpi_debugger.ops) {
acpi_debugger.ops = NULL;
acpi_debugger.owner = NULL;
}
mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
int ret;
int (*func)(acpi_osd_exec_callback, void *);
struct module *owner;
if (!acpi_debugger_initialized)
return -ENODEV;
mutex_lock(&acpi_debugger.lock);
if (!acpi_debugger.ops) {
ret = -ENODEV;
goto err_lock;
}
if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV;
goto err_lock;
}
func = acpi_debugger.ops->create_thread;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
ret = func(function, context);
mutex_lock(&acpi_debugger.lock);
module_put(owner);
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
ssize_t acpi_debugger_write_log(const char *msg)
{
ssize_t ret;
ssize_t (*func)(const char *);
struct module *owner;
if (!acpi_debugger_initialized)
return -ENODEV;
mutex_lock(&acpi_debugger.lock);
if (!acpi_debugger.ops) {
ret = -ENODEV;
goto err_lock;
}
if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV;
goto err_lock;
}
func = acpi_debugger.ops->write_log;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
ret = func(msg);
mutex_lock(&acpi_debugger.lock);
module_put(owner);
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
ssize_t ret;
ssize_t (*func)(char *, size_t);
struct module *owner;
if (!acpi_debugger_initialized)
return -ENODEV;
mutex_lock(&acpi_debugger.lock);
if (!acpi_debugger.ops) {
ret = -ENODEV;
goto err_lock;
}
if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV;
goto err_lock;
}
func = acpi_debugger.ops->read_cmd;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
ret = func(buffer, buffer_length);
mutex_lock(&acpi_debugger.lock);
module_put(owner);
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
int acpi_debugger_wait_command_ready(void)
{
int ret;
int (*func)(bool, char *, size_t);
struct module *owner;
if (!acpi_debugger_initialized)
return -ENODEV;
mutex_lock(&acpi_debugger.lock);
if (!acpi_debugger.ops) {
ret = -ENODEV;
goto err_lock;
}
if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV;
goto err_lock;
}
func = acpi_debugger.ops->wait_command_ready;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
ret = func(acpi_gbl_method_executing,
acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
mutex_lock(&acpi_debugger.lock);
module_put(owner);
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
int acpi_debugger_notify_command_complete(void)
{
int ret;
int (*func)(void);
struct module *owner;
if (!acpi_debugger_initialized)
return -ENODEV;
mutex_lock(&acpi_debugger.lock);
if (!acpi_debugger.ops) {
ret = -ENODEV;
goto err_lock;
}
if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV;
goto err_lock;
}
func = acpi_debugger.ops->notify_command_complete;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
ret = func();
mutex_lock(&acpi_debugger.lock);
module_put(owner);
err_lock:
mutex_unlock(&acpi_debugger.lock);
return ret;
}
int __init acpi_debugger_init(void)
{
mutex_init(&acpi_debugger.lock);
acpi_debugger_initialized = true;
return 0;
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_os_execute
*
* PARAMETERS: Type - Type of the callback
* Function - Function to be executed
* Context - Function parameters
*
* RETURN: Status
*
* DESCRIPTION: Depending on type, either queues function for deferred execution or
* immediately executes function on a separate thread.
*
******************************************************************************/
acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
if (type == OSL_DEBUGGER_MAIN_THREAD) {
ret = acpi_debugger_create_thread(function, context);
if (ret) {
pr_err("Kernel thread creation failed\n");
status = AE_ERROR;
}
goto out_thread;
}
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the work_struct list in a
* way that allows us to also free its memory inside the callee.
* Because we may want to schedule several tasks with different
* parameters we can't use the approach some kernel code uses of
* having a static work_struct.
*/
dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
if (!dpc)
return AE_NO_MEMORY;
dpc->function = function;
dpc->context = context;
/*
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
if (type == OSL_NOTIFY_HANDLER) {
queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
} else if (type == OSL_GPE_HANDLER) {
queue = kacpid_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
} else {
pr_err("Unsupported os_execute type %d.\n", type);
status = AE_ERROR;
}
if (ACPI_FAILURE(status))
goto err_workqueue;
/*
* On some machines, a software-initiated SMI causes corruption unless
* the SMI runs on CPU 0. An SMI can be initiated by any AML, but
* typically it's done in GPE-related methods that are run via
* workqueues, so we can avoid the known corruption cases by always
* queueing on CPU 0.
*/
ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
pr_err("Unable to queue work\n");
status = AE_ERROR;
}
err_workqueue:
if (ACPI_FAILURE(status))
kfree(dpc);
out_thread:
return status;
}
EXPORT_SYMBOL(acpi_os_execute);
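/*
 * Illustrative sketch (not part of the original file): deferring a
 * callback onto kacpid_wq through acpi_os_execute(). Both function
 * names are hypothetical; the DPC bookkeeping and its kfree() happen
 * inside the OSL, as described above.
 */
#if 0
static void example_deferred_fn(void *context)
{
	pr_info("deferred ACPI work ran, context %p\n", context);
}

static void example_schedule_deferred(void)
{
	if (ACPI_FAILURE(acpi_os_execute(OSL_GPE_HANDLER,
					 example_deferred_fn, NULL)))
		pr_err("failed to queue deferred work\n");
}
#endif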
void acpi_os_wait_events_complete(void)
{
/*
* Make sure the GPE handler or the fixed event handler is not used
* on another CPU after removal.
*/
if (acpi_sci_irq_valid())
synchronize_hardirq(acpi_sci_irq);
flush_workqueue(kacpid_wq);
flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);
struct acpi_hp_work {
struct work_struct work;
struct acpi_device *adev;
u32 src;
};
static void acpi_hotplug_work_fn(struct work_struct *work)
{
struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
acpi_os_wait_events_complete();
acpi_device_hotplug(hpw->adev, hpw->src);
kfree(hpw);
}
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
struct acpi_hp_work *hpw;
acpi_handle_debug(adev->handle,
"Scheduling hotplug event %u for deferred handling\n",
src);
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
if (!hpw)
return AE_NO_MEMORY;
INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
hpw->adev = adev;
hpw->src = src;
/*
* We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
* the hotplug code may call driver .remove() functions, which may
* invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
* these workqueues.
*/
if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
kfree(hpw);
return AE_ERROR;
}
return AE_OK;
}
bool acpi_queue_hotplug_work(struct work_struct *work)
{
return queue_work(kacpi_hotplug_wq, work);
}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
struct semaphore *sem = NULL;
sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
if (!sem)
return AE_NO_MEMORY;
sema_init(sem, initial_units);
*handle = (acpi_handle *) sem;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
*handle, initial_units));
return AE_OK;
}
/*
* TODO: A better way to delete semaphores? Linux doesn't have a
* 'delete_semaphore()' function -- may result in an invalid
* pointer dereference for non-synchronized consumers. Should
* we at least check for blocked threads and signal/cancel them?
*/
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
struct semaphore *sem = (struct semaphore *)handle;
if (!sem)
return AE_BAD_PARAMETER;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
BUG_ON(!list_empty(&sem->wait_list));
kfree(sem);
sem = NULL;
return AE_OK;
}
/*
* TODO: Support for units > 1?
*/
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
acpi_status status = AE_OK;
struct semaphore *sem = (struct semaphore *)handle;
long jiffies;
int ret = 0;
if (!acpi_os_initialized)
return AE_OK;
if (!sem || (units < 1))
return AE_BAD_PARAMETER;
if (units > 1)
return AE_SUPPORT;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
if (timeout == ACPI_WAIT_FOREVER)
jiffies = MAX_SCHEDULE_TIMEOUT;
else
jiffies = msecs_to_jiffies(timeout);
ret = down_timeout(sem, jiffies);
if (ret)
status = AE_TIME;
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Failed to acquire semaphore[%p|%d|%d], %s",
handle, units, timeout,
acpi_format_exception(status)));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Acquired semaphore[%p|%d|%d]", handle,
units, timeout));
}
return status;
}
/*
* TODO: Support for units > 1?
*/
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
struct semaphore *sem = (struct semaphore *)handle;
if (!acpi_os_initialized)
return AE_OK;
if (!sem || (units < 1))
return AE_BAD_PARAMETER;
if (units > 1)
return AE_SUPPORT;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
units));
up(sem);
return AE_OK;
}
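/*
 * Illustrative sketch (not part of the original file): the full
 * lifecycle of an OSL semaphore as ACPICA would drive it. The helper
 * name and the 100 ms timeout are hypothetical.
 */
#if 0
static void example_semaphore_roundtrip(void)
{
	acpi_handle sem;

	if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
		return;
	if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))
		acpi_os_signal_semaphore(sem, 1);
	acpi_os_delete_semaphore(sem);
}
#endif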
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
if (acpi_in_debugger) {
u32 chars;
kdb_read(buffer, buffer_length);
/* remove the CR kdb includes */
chars = strlen(buffer) - 1;
buffer[chars] = '\0';
}
#else
int ret;
ret = acpi_debugger_read_cmd(buffer, buffer_length);
if (ret < 0)
return AE_ERROR;
if (bytes_read)
*bytes_read = ret;
#endif
return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);
acpi_status acpi_os_wait_command_ready(void)
{
int ret;
ret = acpi_debugger_wait_command_ready();
if (ret < 0)
return AE_ERROR;
return AE_OK;
}
acpi_status acpi_os_notify_command_complete(void)
{
int ret;
ret = acpi_debugger_notify_command_complete();
if (ret < 0)
return AE_ERROR;
return AE_OK;
}
acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
case ACPI_SIGNAL_FATAL:
pr_err("Fatal opcode executed\n");
break;
case ACPI_SIGNAL_BREAKPOINT:
/*
* AML Breakpoint
* ACPI spec. says to treat it as a NOP unless
* you are debugging. So if/when we integrate
* AML debugger into the kernel debugger its
* hook will go here. But until then it is
* not useful to print anything on breakpoints.
*/
break;
default:
break;
}
return AE_OK;
}
static int __init acpi_os_name_setup(char *str)
{
char *p = acpi_os_name;
int count = ACPI_MAX_OVERRIDE_LEN - 1;
if (!str || !*str)
return 0;
for (; count-- && *str; str++) {
if (isalnum(*str) || *str == ' ' || *str == ':')
*p++ = *str;
else if (*str == '\'' || *str == '"')
continue;
else
break;
}
*p = 0;
return 1;
}
__setup("acpi_os_name=", acpi_os_name_setup);
/*
* Disable the auto-serialization of named objects creation methods.
*
* This feature is enabled by default. It marks the AML control methods
* that contain the opcodes to create named objects as "Serialized".
*/
static int __init acpi_no_auto_serialize_setup(char *str)
{
acpi_gbl_auto_serialize_methods = FALSE;
pr_info("Auto-serialization disabled\n");
return 1;
}
__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check for resource conflicts between native drivers and ACPI
* OperationRegions (SystemIO and SystemMemory only).
* IO ports and memory declared in ACPI might be used by the ACPI subsystem
* in arbitrary AML code and can interfere with legacy drivers.
* acpi_enforce_resources= can be set to:
*
* - strict (default) (2)
* -> a driver that later tries to access the resources will not load
* - lax (1)
* -> a driver that later tries to access the resources will load, but
* a system message warns that something might go wrong...
* - no (0)
* -> ACPI Operation Region resources will not be registered
*/
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX 1
#define ENFORCE_RESOURCES_NO 0
static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
static int __init acpi_enforce_resources_setup(char *str)
{
if (str == NULL || *str == '\0')
return 0;
if (!strcmp("strict", str))
acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
else if (!strcmp("lax", str))
acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
else if (!strcmp("no", str))
acpi_enforce_resources = ENFORCE_RESOURCES_NO;
return 1;
}
__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/*
* Check for resource conflicts between ACPI OperationRegions and native
* drivers.
*/
int acpi_check_resource_conflict(const struct resource *res)
{
acpi_adr_space_type space_id;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
if (res->flags & IORESOURCE_IO)
space_id = ACPI_ADR_SPACE_SYSTEM_IO;
else if (res->flags & IORESOURCE_MEM)
space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
else
return 0;
if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
return 0;
pr_info("Resource conflict; ACPI support missing from driver?\n");
if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
return -EBUSY;
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
pr_notice("Resource conflict: System may be unstable or behave erratically\n");
return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name)
{
struct resource res = DEFINE_RES_IO_NAMED(start, n, name);
return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
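/*
 * Illustrative sketch (not part of the original file): how a native
 * driver might consult acpi_check_region() before claiming legacy I/O
 * ports. The function and resource names are hypothetical.
 */
#if 0
static int example_claim_ports(resource_size_t base, resource_size_t len)
{
	int ret = acpi_check_region(base, len, "example");

	if (ret)	/* -EBUSY under acpi_enforce_resources=strict */
		return ret;
	if (!request_region(base, len, "example"))
		return -EBUSY;
	return 0;
}
#endif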
/*
* Let drivers know whether the resource checks are effective
*/
int acpi_resources_are_enforced(void)
{
return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
{
ACPI_FREE(handle);
}
/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
*/
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
__acquires(lockp)
{
acpi_cpu_flags flags;
spin_lock_irqsave(lockp, flags);
return flags;
}
/*
* Release a spinlock. See above.
*/
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
__releases(lockp)
{
spin_unlock_irqrestore(lockp, flags);
}
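/*
 * Illustrative sketch (not part of the original file): a critical
 * section bracketed by the OSL lock helpers above, as ACPICA uses
 * them. The helper name is hypothetical.
 */
#if 0
static void example_locked_update(acpi_spinlock lock, u32 *word, u32 val)
{
	acpi_cpu_flags flags = acpi_os_acquire_lock(lock);

	*word = val;
	acpi_os_release_lock(lock, flags);
}
#endif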
#ifndef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_os_create_cache
*
* PARAMETERS: name - Ascii name for the cache
* size - Size of each cached object
* depth - Maximum depth of the cache (in objects) <ignored>
* cache - Where the new cache object is returned
*
* RETURN: status
*
* DESCRIPTION: Create a cache object
*
******************************************************************************/
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
*cache = kmem_cache_create(name, size, 0, 0, NULL);
if (*cache == NULL)
return AE_ERROR;
else
return AE_OK;
}
/*******************************************************************************
*
* FUNCTION: acpi_os_purge_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache.
*
******************************************************************************/
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
kmem_cache_shrink(cache);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_delete_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache and delete the
* cache object.
*
******************************************************************************/
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
kmem_cache_destroy(cache);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_release_object
*
* PARAMETERS: Cache - Handle to cache object
* Object - The object to be released
*
* RETURN: None
*
* DESCRIPTION: Release an object to the specified cache. If cache is full,
* the object is deleted.
*
******************************************************************************/
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
kmem_cache_free(cache, object);
return (AE_OK);
}
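/*
 * Illustrative sketch (not part of the original file): creating,
 * purging and deleting a cache via the wrappers above. The name and
 * sizes are hypothetical; depth is ignored by this implementation.
 */
#if 0
static void example_cache_lifecycle(void)
{
	static char name[] = "example_cache";
	acpi_cache_t *cache;

	if (ACPI_FAILURE(acpi_os_create_cache(name, 64, 16, &cache)))
		return;
	acpi_os_purge_cache(cache);	/* free any cached objects */
	acpi_os_delete_cache(cache);
}
#endif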
#endif
static int __init acpi_no_static_ssdt_setup(char *s)
{
acpi_gbl_disable_ssdt_table_install = TRUE;
pr_info("Static SSDT installation disabled\n");
return 0;
}
early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
static int __init acpi_disable_return_repair(char *s)
{
pr_notice("Predefined validation mechanism disabled\n");
acpi_gbl_disable_auto_repair = TRUE;
return 1;
}
__setup("acpica_no_return_repair", acpi_disable_return_repair);
acpi_status __init acpi_os_initialize(void)
{
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_gbl_xgpe0_block_logical_address =
(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_gbl_xgpe1_block_logical_address =
(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
/*
* Use acpi_os_map_generic_address to pre-map the reset
* register if it's in system memory.
*/
void *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
pr_debug("%s: Reset register mapping %s\n", __func__,
rv ? "successful" : "failed");
}
acpi_os_initialized = true;
return AE_OK;
}
acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
acpi_osi_init();
return AE_OK;
}
acpi_status acpi_os_terminate(void)
{
if (acpi_irq_handler) {
acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
acpi_irq_handler);
}
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_gbl_xgpe0_block_logical_address = 0UL;
acpi_gbl_xgpe1_block_logical_address = 0UL;
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
destroy_workqueue(kacpi_hotplug_wq);
return AE_OK;
}
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
u32 pm1b_control)
{
int rc = 0;
if (__acpi_os_prepare_sleep)
rc = __acpi_os_prepare_sleep(sleep_state,
pm1a_control, pm1b_control);
if (rc < 0)
return AE_ERROR;
else if (rc > 0)
return AE_CTRL_TERMINATE;
return AE_OK;
}
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl))
{
__acpi_os_prepare_sleep = func;
}
#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
u32 val_b)
{
int rc = 0;
if (__acpi_os_prepare_extended_sleep)
rc = __acpi_os_prepare_extended_sleep(sleep_state,
val_a, val_b);
if (rc < 0)
return AE_ERROR;
else if (rc > 0)
return AE_CTRL_TERMINATE;
return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
u32 val_b)
{
return AE_OK;
}
#endif
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
u32 val_a, u32 val_b))
{
__acpi_os_prepare_extended_sleep = func;
}
acpi_status acpi_os_enter_sleep(u8 sleep_state,
u32 reg_a_value, u32 reg_b_value)
{
acpi_status status;
if (acpi_gbl_reduced_hardware)
status = acpi_os_prepare_extended_sleep(sleep_state,
reg_a_value,
reg_b_value);
else
status = acpi_os_prepare_sleep(sleep_state,
reg_a_value, reg_b_value);
return status;
}
| linux-master | drivers/acpi/osl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004, 2013 Intel Corporation
* Author: Naveen B S <[email protected]>
* Author: Rafael J. Wysocki <[email protected]>
*
* All rights reserved.
*
* ACPI based HotPlug driver that supports Memory Hotplug
* This driver fields notifications from firmware for memory add
* and remove operations and alerts the VM of the affected memory
* ranges.
*/
#include <linux/acpi.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include "internal.h"
#define ACPI_MEMORY_DEVICE_CLASS "memory"
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
{"", 0},
};
#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
static int acpi_memory_device_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_memory_device_remove(struct acpi_device *device);
static struct acpi_scan_handler memory_device_handler = {
.ids = memory_device_ids,
.attach = acpi_memory_device_add,
.detach = acpi_memory_device_remove,
.hotplug = {
.enabled = true,
},
};
struct acpi_memory_info {
struct list_head list;
u64 start_addr; /* Memory Range start physical addr */
u64 length; /* Memory Range length */
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
unsigned int enabled:1;
};
struct acpi_memory_device {
struct acpi_device *device;
struct list_head res_list;
int mgid;
};
static acpi_status
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
{
struct acpi_memory_device *mem_device = context;
struct acpi_resource_address64 address64;
struct acpi_memory_info *info, *new;
acpi_status status;
status = acpi_resource_to_address64(resource, &address64);
if (ACPI_FAILURE(status) ||
(address64.resource_type != ACPI_MEMORY_RANGE))
return AE_OK;
list_for_each_entry(info, &mem_device->res_list, list) {
/* Can we combine the resource range information? */
if ((info->caching == address64.info.mem.caching) &&
(info->write_protect == address64.info.mem.write_protect) &&
(info->start_addr + info->length == address64.address.minimum)) {
info->length += address64.address.address_length;
return AE_OK;
}
}
new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
if (!new)
return AE_ERROR;
INIT_LIST_HEAD(&new->list);
new->caching = address64.info.mem.caching;
new->write_protect = address64.info.mem.write_protect;
new->start_addr = address64.address.minimum;
new->length = address64.address.address_length;
list_add_tail(&new->list, &mem_device->res_list);
return AE_OK;
}
static void
acpi_memory_free_device_resources(struct acpi_memory_device *mem_device)
{
struct acpi_memory_info *info, *n;
list_for_each_entry_safe(info, n, &mem_device->res_list, list)
kfree(info);
INIT_LIST_HEAD(&mem_device->res_list);
}
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
acpi_status status;
if (!list_empty(&mem_device->res_list))
return 0;
status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
acpi_memory_get_resource, mem_device);
if (ACPI_FAILURE(status)) {
acpi_memory_free_device_resources(mem_device);
return -EINVAL;
}
return 0;
}
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
unsigned long long current_status;
/* Get device present/absent information from the _STA */
if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
METHOD_NAME__STA, NULL,
&current_status)))
return -ENODEV;
/*
* Check for device status. Device should be
* present/enabled/functioning.
*/
if (!((current_status & ACPI_STA_DEVICE_PRESENT)
&& (current_status & ACPI_STA_DEVICE_ENABLED)
&& (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
return -ENODEV;
return 0;
}
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
return acpi_bind_one(&mem->dev, arg);
}
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
struct acpi_device *adev)
{
return walk_memory_blocks(info->start_addr, info->length, adev,
acpi_bind_memblk);
}
static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
{
acpi_unbind_one(&mem->dev);
return 0;
}
static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
{
walk_memory_blocks(info->start_addr, info->length, NULL,
acpi_unbind_memblk);
}
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
acpi_handle handle = mem_device->device->handle;
mhp_t mhp_flags = MHP_NID_IS_MGID;
int result, num_enabled = 0;
struct acpi_memory_info *info;
u64 total_length = 0;
int node, mgid;
node = acpi_get_node(handle);
list_for_each_entry(info, &mem_device->res_list, list) {
if (!info->length)
continue;
/* We want a single node for the whole memory group */
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
total_length += info->length;
}
if (!total_length) {
dev_err(&mem_device->device->dev, "device is empty\n");
return -EINVAL;
}
mgid = memory_group_register_static(node, PFN_UP(total_length));
if (mgid < 0)
return mgid;
mem_device->mgid = mgid;
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success.
* We don't have a memory-hot-add rollback function now
* (i.e. a memory-hot-remove function).
*/
list_for_each_entry(info, &mem_device->res_list, list) {
/*
* If the memory block size is zero, please ignore it.
* Don't try to do the following memory hotplug flowchart.
*/
if (!info->length)
continue;
mhp_flags |= MHP_MEMMAP_ON_MEMORY;
result = __add_memory(mgid, info->start_addr, info->length,
mhp_flags);
/*
* If the memory block has been used by the kernel, add_memory()
* returns -EEXIST. If add_memory() returns the other error, it
* means that this memory block is not used by the kernel.
*/
if (result && result != -EEXIST)
continue;
result = acpi_bind_memory_blocks(info, mem_device->device);
if (result) {
acpi_unbind_memory_blocks(info);
return -ENODEV;
}
info->enabled = 1;
/*
* Add num_enable even if add_memory() returns -EEXIST, so the
* device is bound to this driver.
*/
num_enabled++;
}
if (!num_enabled) {
dev_err(&mem_device->device->dev, "add_memory failed\n");
return -EINVAL;
}
/*
* A memory device may contain several memory blocks. If at least one
* block was hot-added to system memory, the operation is regarded as a
* success. Only if no block could be hot-added does it fail, and then
* the memory device cannot be bound to this driver.
*/
return 0;
}
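/*
 * Worked example (illustrative, not in the original file): a device
 * advertising a single 4 GiB range registers a static memory group of
 * PFN_UP(4ULL << 30) = 0x100000 pages, and __add_memory() is then
 * called once for that range with MHP_NID_IS_MGID | MHP_MEMMAP_ON_MEMORY.
 */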
static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
struct acpi_memory_info *info, *n;
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (!info->enabled)
continue;
acpi_unbind_memory_blocks(info);
__remove_memory(info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
}
static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
{
if (!mem_device)
return;
/* If we succeeded in adding *some* memory, unregistering will fail. */
if (mem_device->mgid >= 0)
memory_group_unregister(mem_device->mgid);
acpi_memory_free_device_resources(mem_device);
mem_device->device->driver_data = NULL;
kfree(mem_device);
}
static int acpi_memory_device_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
struct acpi_memory_device *mem_device;
int result;
if (!device)
return -EINVAL;
mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
if (!mem_device)
return -ENOMEM;
INIT_LIST_HEAD(&mem_device->res_list);
mem_device->device = device;
mem_device->mgid = -1;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
device->driver_data = mem_device;
/* Get the range from the _CRS */
result = acpi_memory_get_device_resources(mem_device);
if (result) {
device->driver_data = NULL;
kfree(mem_device);
return result;
}
result = acpi_memory_check_device(mem_device);
if (result) {
acpi_memory_device_free(mem_device);
return 0;
}
result = acpi_memory_enable_device(mem_device);
if (result) {
dev_err(&device->dev, "acpi_memory_enable_device() error\n");
acpi_memory_device_free(mem_device);
return result;
}
dev_dbg(&device->dev, "Memory device configured by ACPI\n");
return 1;
}
static void acpi_memory_device_remove(struct acpi_device *device)
{
struct acpi_memory_device *mem_device;
if (!device || !acpi_driver_data(device))
return;
mem_device = acpi_driver_data(device);
acpi_memory_remove_memory(mem_device);
acpi_memory_device_free(mem_device);
}
static bool __initdata acpi_no_memhotplug;
void __init acpi_memory_hotplug_init(void)
{
if (acpi_no_memhotplug) {
memory_device_handler.attach = NULL;
acpi_scan_add_handler(&memory_device_handler);
return;
}
acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}
static int __init disable_acpi_memory_hotplug(char *str)
{
acpi_no_memhotplug = true;
return 1;
}
__setup("acpi_no_memhotplug", disable_acpi_memory_hotplug);
#else
static struct acpi_scan_handler memory_device_handler = {
.ids = memory_device_ids,
};
void __init acpi_memory_hotplug_init(void)
{
acpi_scan_add_handler(&memory_device_handler);
}
#endif /* CONFIG_ACPI_HOTPLUG_MEMORY */
| linux-master | drivers/acpi/acpi_memhotplug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* container.c - ACPI Generic Container Driver
*
* Copyright (C) 2004 Anil S Keshavamurthy ([email protected])
* Copyright (C) 2004 Keiichiro Tokunaga ([email protected])
* Copyright (C) 2004 Motoyuki Ito ([email protected])
* Copyright (C) 2004 FUJITSU LIMITED
* Copyright (C) 2004, 2013 Intel Corp.
* Author: Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/container.h>
#include "internal.h"
static const struct acpi_device_id container_device_ids[] = {
{"ACPI0004", 0},
{"PNP0A05", 0},
{"PNP0A06", 0},
{"", 0},
};
#ifdef CONFIG_ACPI_CONTAINER
static int check_offline(struct acpi_device *adev, void *not_used)
{
if (acpi_scan_is_offline(adev, false))
return 0;
return -EBUSY;
}
static int acpi_container_offline(struct container_dev *cdev)
{
/* Check all of the dependent devices' physical companions. */
return acpi_dev_for_each_child(ACPI_COMPANION(&cdev->dev), check_offline, NULL);
}
static void acpi_container_release(struct device *dev)
{
kfree(to_container_dev(dev));
}
static int container_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
struct container_dev *cdev;
struct device *dev;
int ret;
if (adev->flags.is_dock_station)
return 0;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
cdev->offline = acpi_container_offline;
dev = &cdev->dev;
dev->bus = &container_subsys;
dev_set_name(dev, "%s", dev_name(&adev->dev));
ACPI_COMPANION_SET(dev, adev);
dev->release = acpi_container_release;
ret = device_register(dev);
if (ret) {
put_device(dev);
return ret;
}
adev->driver_data = dev;
return 1;
}
static void container_device_detach(struct acpi_device *adev)
{
struct device *dev = acpi_driver_data(adev);
adev->driver_data = NULL;
if (dev)
device_unregister(dev);
}
static void container_device_online(struct acpi_device *adev)
{
struct device *dev = acpi_driver_data(adev);
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
.attach = container_device_attach,
.detach = container_device_detach,
.hotplug = {
.enabled = true,
.demand_offline = true,
.notify_online = container_device_online,
},
};
void __init acpi_container_init(void)
{
acpi_scan_add_handler(&container_handler);
}
#else
static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
};
void __init acpi_container_init(void)
{
acpi_scan_add_handler_with_hotplug(&container_handler, "container");
}
#endif /* CONFIG_ACPI_CONTAINER */
| linux-master | drivers/acpi/container.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pci_link.c - ACPI PCI Interrupt Link Device Driver ($Revision: 34 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2002 Dominik Brodowski <[email protected]>
*
* TBD:
* 1. Support more than one IRQ resource entry per link device (index).
* 2. Implement start/stop mechanism and use ACPI Bus Driver facilities
* for IRQ management (e.g. start()->_SRS).
*/
#define pr_fmt(fmt) "ACPI: PCI: " fmt
#include <linux/syscore_ops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include "internal.h"
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link"
#define ACPI_PCI_LINK_MAX_POSSIBLE 16
static int acpi_pci_link_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_link_remove(struct acpi_device *device);
static const struct acpi_device_id link_device_ids[] = {
{"PNP0C0F", 0},
{"", 0},
};
static struct acpi_scan_handler pci_link_handler = {
.ids = link_device_ids,
.attach = acpi_pci_link_add,
.detach = acpi_pci_link_remove,
};
/*
* Once a link is initialized, we never change its 'active' and
* 'initialized' fields later, even if the link is disabled. Instead, we
* just re-pick the active IRQ.
*/
struct acpi_pci_link_irq {
u32 active; /* Current IRQ */
u8 triggering; /* All IRQs */
u8 polarity; /* All IRQs */
u8 resource_type;
u8 possible_count;
u32 possible[ACPI_PCI_LINK_MAX_POSSIBLE];
u8 initialized:1;
u8 reserved:7;
};
struct acpi_pci_link {
struct list_head list;
struct acpi_device *device;
struct acpi_pci_link_irq irq;
int refcnt;
};
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
-------------------------------------------------------------------------- */
/*
* set context (link) possible list from resource list
*/
static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
void *context)
{
struct acpi_pci_link *link = context;
acpi_handle handle = link->device->handle;
u32 i;
switch (resource->type) {
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
case ACPI_RESOURCE_TYPE_IRQ:
{
struct acpi_resource_irq *p = &resource->data.irq;
if (!p->interrupt_count) {
acpi_handle_debug(handle,
"Blank _PRS IRQ resource\n");
return AE_OK;
}
for (i = 0;
(i < p->interrupt_count
&& i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
if (!p->interrupts[i]) {
acpi_handle_debug(handle,
"Invalid _PRS IRQ %d\n",
p->interrupts[i]);
continue;
}
link->irq.possible[i] = p->interrupts[i];
link->irq.possible_count++;
}
link->irq.triggering = p->triggering;
link->irq.polarity = p->polarity;
link->irq.resource_type = ACPI_RESOURCE_TYPE_IRQ;
break;
}
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
{
struct acpi_resource_extended_irq *p =
&resource->data.extended_irq;
if (!p->interrupt_count) {
acpi_handle_debug(handle,
"Blank _PRS EXT IRQ resource\n");
return AE_OK;
}
for (i = 0;
(i < p->interrupt_count
&& i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
if (!p->interrupts[i]) {
acpi_handle_debug(handle,
"Invalid _PRS IRQ %d\n",
p->interrupts[i]);
continue;
}
link->irq.possible[i] = p->interrupts[i];
link->irq.possible_count++;
}
link->irq.triggering = p->triggering;
link->irq.polarity = p->polarity;
link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
break;
}
default:
acpi_handle_debug(handle, "_PRS resource type 0x%x is not IRQ\n",
resource->type);
return AE_OK;
}
return AE_CTRL_TERMINATE;
}
static int acpi_pci_link_get_possible(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
acpi_status status;
status = acpi_walk_resources(handle, METHOD_NAME__PRS,
acpi_pci_link_check_possible, link);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "_PRS not present or invalid");
return 0;
}
acpi_handle_debug(handle, "Found %d possible IRQs\n",
link->irq.possible_count);
return 0;
}
static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
void *context)
{
int *irq = context;
switch (resource->type) {
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
case ACPI_RESOURCE_TYPE_IRQ:
{
struct acpi_resource_irq *p = &resource->data.irq;
if (!p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
* particularly those w/ _STA disabled
*/
pr_debug("Blank _CRS IRQ resource\n");
return AE_OK;
}
*irq = p->interrupts[0];
break;
}
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
{
struct acpi_resource_extended_irq *p =
&resource->data.extended_irq;
if (!p->interrupt_count) {
/*
* extended IRQ descriptors must
* return at least 1 IRQ
*/
pr_debug("Blank _CRS EXT IRQ resource\n");
return AE_OK;
}
*irq = p->interrupts[0];
break;
}
default:
pr_debug("_CRS resource type 0x%x is not IRQ\n",
resource->type);
return AE_OK;
}
return AE_CTRL_TERMINATE;
}
/*
* Run _CRS and set link->irq.active
*
* return value:
* 0 - success
* !0 - failure
*/
static int acpi_pci_link_get_current(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
acpi_status status;
int result = 0;
int irq = 0;
link->irq.active = 0;
/* in practice, status disabled is meaningless, ignore it */
if (acpi_strict) {
/* Query _STA, set link->device->status */
result = acpi_bus_get_status(link->device);
if (result) {
acpi_handle_err(handle, "Unable to read status\n");
goto end;
}
if (!link->device->status.enabled) {
acpi_handle_debug(handle, "Link disabled\n");
return 0;
}
}
/*
* Query and parse _CRS to get the current IRQ assignment.
*/
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
acpi_pci_link_check_current, &irq);
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(handle, "_CRS", status);
result = -ENODEV;
goto end;
}
if (acpi_strict && !irq) {
acpi_handle_err(handle, "_CRS returned 0\n");
result = -ENODEV;
}
link->irq.active = irq;
acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
end:
return result;
}
static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
{
struct {
struct acpi_resource res;
struct acpi_resource end;
} *resource;
struct acpi_buffer buffer = { 0, NULL };
acpi_handle handle = link->device->handle;
acpi_status status;
int result;
if (!irq)
return -EINVAL;
resource = kzalloc(sizeof(*resource) + 1, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
if (!resource)
return -ENOMEM;
buffer.length = sizeof(*resource) + 1;
buffer.pointer = resource;
switch (link->irq.resource_type) {
case ACPI_RESOURCE_TYPE_IRQ:
resource->res.type = ACPI_RESOURCE_TYPE_IRQ;
resource->res.length = sizeof(struct acpi_resource);
resource->res.data.irq.triggering = link->irq.triggering;
resource->res.data.irq.polarity =
link->irq.polarity;
if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
resource->res.data.irq.shareable =
ACPI_EXCLUSIVE;
else
resource->res.data.irq.shareable = ACPI_SHARED;
resource->res.data.irq.interrupt_count = 1;
resource->res.data.irq.interrupts[0] = irq;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
resource->res.type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
resource->res.length = sizeof(struct acpi_resource);
resource->res.data.extended_irq.producer_consumer =
ACPI_CONSUMER;
resource->res.data.extended_irq.triggering =
link->irq.triggering;
resource->res.data.extended_irq.polarity =
link->irq.polarity;
if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
resource->res.data.extended_irq.shareable =
ACPI_EXCLUSIVE;
else
resource->res.data.extended_irq.shareable = ACPI_SHARED;
resource->res.data.extended_irq.interrupt_count = 1;
resource->res.data.extended_irq.interrupts[0] = irq;
/* ignore resource_source, it's optional */
break;
default:
acpi_handle_err(handle, "Invalid resource type %d\n",
link->irq.resource_type);
result = -EINVAL;
goto end;
}
resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
resource->end.length = sizeof(struct acpi_resource);
/* Attempt to set the resource */
status = acpi_set_current_resources(link->device->handle, &buffer);
/* check for total failure */
if (ACPI_FAILURE(status)) {
acpi_evaluation_failure_warn(handle, "_SRS", status);
result = -ENODEV;
goto end;
}
/* Query _STA, set device->status */
result = acpi_bus_get_status(link->device);
if (result) {
acpi_handle_err(handle, "Unable to read status\n");
goto end;
}
if (!link->device->status.enabled)
acpi_handle_warn(handle, "Disabled and referenced, BIOS bug\n");
/* Query _CRS, set link->irq.active */
result = acpi_pci_link_get_current(link);
if (result) {
goto end;
}
/*
	 * Check whether the current setting matches what we just set, and
	 * fix up link->irq.active below if it doesn't.
*/
if (link->irq.active != irq) {
/*
		 * policy: when _CRS doesn't return what we just set via
		 * _SRS, assume _SRS worked and override the _CRS value.
*/
acpi_handle_warn(handle, "BIOS reported IRQ %d, using IRQ %d\n",
link->irq.active, irq);
link->irq.active = irq;
}
acpi_handle_debug(handle, "Set IRQ %d\n", link->irq.active);
end:
kfree(resource);
return result;
}
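/*
 * Illustrative ASL (hypothetical firmware, for orientation only): a PCI
 * interrupt link device of the kind driven above typically looks like
 * this, with _PRS offering the IRQs that _SRS may select from:
 *
 *	Device (LNKA)
 *	{
 *	    Name (_HID, EisaId ("PNP0C0F"))
 *	    Name (_PRS, ResourceTemplate ()
 *	    {
 *	        IRQ (Level, ActiveLow, Shared) {3, 4, 5, 7, 10, 11}
 *	    })
 *	}
 */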
/* --------------------------------------------------------------------------
PCI Link IRQ Management
-------------------------------------------------------------------------- */
/*
* "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt
* Link Devices to move the PIRQs around to minimize sharing.
*
* "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs
* that the BIOS has already set to active. This is necessary because
* ACPI has no automatic means of knowing what ISA IRQs are used. Note that
* if the BIOS doesn't set a Link Device active, ACPI needs to program it
* even if acpi_irq_nobalance is set.
*
 * A table of penalties avoids directing PCI interrupts to well-known
 * ISA IRQs. Boot params are available to override the default table:
*
* List interrupts that are free for PCI use.
* acpi_irq_pci=n[,m]
*
* List interrupts that should not be used for PCI:
* acpi_irq_isa=n[,m]
*
* Note that PCI IRQ routers have a list of possible IRQs,
* which may not include the IRQs this table says are available.
*
 * Since this heuristic can't tell the difference between a link that no
 * device will attach to and a link that may be shared by multiple active
 * devices, it is not optimal.
*
* If interrupt performance is that important, get an IO-APIC system
* with a pin dedicated to each device. Or for that matter, an MSI
* enabled system.
*/
#define ACPI_MAX_ISA_IRQS 16
#define PIRQ_PENALTY_PCI_POSSIBLE (16*16)
#define PIRQ_PENALTY_PCI_USING (16*16*16)
#define PIRQ_PENALTY_ISA_TYPICAL (16*16*16*16)
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ3 serial */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ4 serial */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ5 sometimes SoundBlaster */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ6 */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ7 parallel, spurious */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ8 rtc, sometimes */
0, /* IRQ9 PCI, often acpi */
0, /* IRQ10 PCI */
0, /* IRQ11 PCI */
PIRQ_PENALTY_ISA_USED, /* IRQ12 mouse */
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
/* >IRQ15 */
};
static int acpi_irq_pci_sharing_penalty(int irq)
{
struct acpi_pci_link *link;
int penalty = 0;
int i;
list_for_each_entry(link, &acpi_link_list, list) {
/*
* If a link is active, penalize its IRQ heavily
* so we try to choose a different IRQ.
*/
if (link->irq.active && link->irq.active == irq)
penalty += PIRQ_PENALTY_PCI_USING;
/*
* penalize the IRQs PCI might use, but not as severely.
*/
for (i = 0; i < link->irq.possible_count; i++)
if (link->irq.possible[i] == irq)
penalty += PIRQ_PENALTY_PCI_POSSIBLE /
link->irq.possible_count;
}
return penalty;
}
static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
if (irq == sci_irq)
penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
return penalty + acpi_irq_pci_sharing_penalty(irq);
}
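/*
 * Illustrative sketch (not used by the driver): with a hypothetical _PRS
 * list such as {10, 11, 5}, penalty-based selection works like the loop
 * in acpi_pci_link_allocate() further below -- IRQs 10 and 11 (penalty 0
 * by default) win over IRQ 5 (PIRQ_PENALTY_ISA_TYPICAL). A minimal
 * standalone version of that selection:
 */
static int __maybe_unused acpi_irq_pick_example(const int *possible, int count)
{
	int i, best = possible[count - 1];

	/* walk backwards to promote IRQs 9, 10, 11 and >15 */
	for (i = count - 1; i >= 0; i--)
		if (acpi_irq_get_penalty(possible[i]) <
		    acpi_irq_get_penalty(best))
			best = possible[i];
	return best;
}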
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
int i;
/*
* Update penalties to facilitate IRQ balancing.
*/
list_for_each_entry(link, &acpi_link_list, list) {
/*
* reflect the possible and active irqs in the penalty table --
* useful for breaking ties.
*/
if (link->irq.possible_count) {
int penalty =
PIRQ_PENALTY_PCI_POSSIBLE /
link->irq.possible_count;
			for (i = 0; i < link->irq.possible_count; i++) {
				if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
					acpi_isa_irq_penalty[link->irq.possible[i]] +=
						penalty;
			}
} else if (link->irq.active &&
(link->irq.active < ACPI_MAX_ISA_IRQS)) {
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
return 0;
}
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
int irq;
int i;
if (link->irq.initialized) {
if (link->refcnt == 0)
/* This means the link is disabled but initialized */
acpi_pci_link_set(link, link->irq.active);
return 0;
}
/*
* search for active IRQ in list of possible IRQs.
*/
for (i = 0; i < link->irq.possible_count; ++i) {
if (link->irq.active == link->irq.possible[i])
break;
}
/*
* forget active IRQ that is not in possible list
*/
if (i == link->irq.possible_count) {
if (acpi_strict)
acpi_handle_warn(handle, "_CRS %d not found in _PRS\n",
link->irq.active);
link->irq.active = 0;
}
/*
* if active found, use it; else pick entry from end of possible list.
*/
if (link->irq.active)
irq = link->irq.active;
else
irq = link->irq.possible[link->irq.possible_count - 1];
if (acpi_irq_balance || !link->irq.active) {
/*
* Select the best IRQ. This is done in reverse to promote
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
if (acpi_irq_get_penalty(irq) >
acpi_irq_get_penalty(link->irq.possible[i]))
irq = link->irq.possible[i];
}
}
if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
acpi_handle_err(handle,
"No IRQ available. Try pci=noacpi or acpi=off\n");
return -ENODEV;
}
/* Attempt to enable the link device at this IRQ. */
if (acpi_pci_link_set(link, irq)) {
acpi_handle_err(handle,
"Unable to set IRQ. Try pci=noacpi or acpi=off\n");
return -ENODEV;
} else {
if (link->irq.active < ACPI_MAX_ISA_IRQS)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
acpi_handle_info(handle, "Enabled at IRQ %d\n",
link->irq.active);
}
link->irq.initialized = 1;
return 0;
}
/*
* acpi_pci_link_allocate_irq
* success: return IRQ >= 0
* failure: return -1
*/
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -1;
}
link = acpi_driver_data(device);
if (!link) {
acpi_handle_err(handle, "Invalid link context\n");
return -1;
}
/* TBD: Support multiple index (IRQ) entries per Link Device */
if (index) {
acpi_handle_err(handle, "Invalid index %d\n", index);
return -1;
}
mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
mutex_unlock(&acpi_link_lock);
return -1;
}
if (!link->irq.active) {
mutex_unlock(&acpi_link_lock);
acpi_handle_err(handle, "Link active IRQ is 0!\n");
return -1;
}
link->refcnt++;
mutex_unlock(&acpi_link_lock);
if (triggering)
*triggering = link->irq.triggering;
if (polarity)
*polarity = link->irq.polarity;
if (name)
*name = acpi_device_bid(link->device);
acpi_handle_debug(handle, "Link is referenced\n");
return link->irq.active;
}
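/*
 * Illustrative usage (hypothetical caller; the real consumer is the PCI
 * IRQ routing code): after resolving a _PRT entry to a link device
 * handle, an IRQ is obtained roughly as follows:
 *
 *	int triggering, polarity, irq;
 *	char *name;
 *
 *	irq = acpi_pci_link_allocate_irq(handle, 0, &triggering,
 *					 &polarity, &name);
 *	if (irq < 0)
 *		return irq;	(-1 means failure, per the convention above)
 */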
/*
 * We don't change the link's IRQ information here. After it is re-enabled,
 * we continue to use the same information.
*/
int acpi_pci_link_free_irq(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -1;
}
link = acpi_driver_data(device);
if (!link) {
acpi_handle_err(handle, "Invalid link context\n");
return -1;
}
mutex_lock(&acpi_link_lock);
if (!link->irq.initialized) {
mutex_unlock(&acpi_link_lock);
acpi_handle_err(handle, "Link isn't initialized\n");
return -1;
}
#ifdef FUTURE_USE
/*
	 * The link reference count would allow us to _DISable an unused
	 * link at suspend time and set it again on resume.
	 * However, 2.6.12 still has irq_router.resume,
	 * which blindly restores the link state.
	 * So we disable the reference count method
	 * to prevent a duplicate acpi_pci_link_set(),
	 * which would harm some systems.
*/
link->refcnt--;
#endif
acpi_handle_debug(handle, "Link is dereferenced\n");
if (link->refcnt == 0)
acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
mutex_unlock(&acpi_link_lock);
return link->irq.active;
}
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_pci_link_add(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
acpi_handle handle = device->handle;
struct acpi_pci_link *link;
int result;
int i;
link = kzalloc(sizeof(struct acpi_pci_link), GFP_KERNEL);
if (!link)
return -ENOMEM;
link->device = device;
strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
device->driver_data = link;
mutex_lock(&acpi_link_lock);
result = acpi_pci_link_get_possible(link);
if (result)
goto end;
/* query and set link->irq.active */
acpi_pci_link_get_current(link);
pr_info("Interrupt link %s configured for IRQ %d\n",
acpi_device_bid(device), link->irq.active);
for (i = 0; i < link->irq.possible_count; i++) {
if (link->irq.active != link->irq.possible[i])
acpi_handle_debug(handle, "Possible IRQ %d\n",
link->irq.possible[i]);
}
if (!link->device->status.enabled)
pr_info("Interrupt link %s disabled\n", acpi_device_bid(device));
list_add_tail(&link->list, &acpi_link_list);
end:
/* disable all links -- to be activated on use */
acpi_evaluate_object(handle, "_DIS", NULL, NULL);
mutex_unlock(&acpi_link_lock);
if (result)
kfree(link);
return result < 0 ? result : 1;
}
static int acpi_pci_link_resume(struct acpi_pci_link *link)
{
if (link->refcnt && link->irq.active && link->irq.initialized)
return (acpi_pci_link_set(link, link->irq.active));
return 0;
}
static void irqrouter_resume(void)
{
struct acpi_pci_link *link;
list_for_each_entry(link, &acpi_link_list, list) {
acpi_pci_link_resume(link);
}
}
static void acpi_pci_link_remove(struct acpi_device *device)
{
struct acpi_pci_link *link;
link = acpi_driver_data(device);
mutex_lock(&acpi_link_lock);
list_del(&link->list);
mutex_unlock(&acpi_link_lock);
kfree(link);
}
/*
* modify acpi_isa_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
int i;
for (i = 0; i < 16; i++) {
int retval;
int irq;
int new_penalty;
retval = get_option(&str, &irq);
if (!retval)
break; /* no number found */
		/* see if this is an ISA IRQ */
if ((irq < 0) || (irq >= ACPI_MAX_ISA_IRQS))
continue;
if (used)
new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
acpi_isa_irq_penalty[irq] = new_penalty;
if (retval != 2) /* no next number */
break;
}
return 1;
}
/*
* We'd like PNP to call this routine for the
* single ISA_USED value for each legacy device.
* But instead it calls us with each POSSIBLE setting.
* There is no ISA_POSSIBLE weight, so we simply use
* the (small) PCI_USING penalty.
*/
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
bool acpi_isa_irq_available(int irq)
{
return irq >= 0 && (irq >= ARRAY_SIZE(acpi_isa_irq_penalty) ||
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
{
sci_irq = irq;
if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
sci_penalty = PIRQ_PENALTY_PCI_USING;
else
sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
}
/*
 * Override the default table to reserve additional IRQs for use by ISA,
* e.g. acpi_irq_isa=5
* Useful for telling ACPI how not to interfere with your ISA sound card.
*/
static int __init acpi_irq_isa(char *str)
{
return acpi_irq_penalty_update(str, 1);
}
__setup("acpi_irq_isa=", acpi_irq_isa);
/*
 * Override the default table to free additional IRQs for use by PCI,
* e.g. acpi_irq_pci=7,15
* Used for acpi_irq_balance to free up IRQs to reduce PCI IRQ sharing.
*/
static int __init acpi_irq_pci(char *str)
{
return acpi_irq_penalty_update(str, 0);
}
__setup("acpi_irq_pci=", acpi_irq_pci);
static int __init acpi_irq_nobalance_set(char *str)
{
acpi_irq_balance = 0;
return 1;
}
__setup("acpi_irq_nobalance", acpi_irq_nobalance_set);
static int __init acpi_irq_balance_set(char *str)
{
acpi_irq_balance = 1;
return 1;
}
__setup("acpi_irq_balance", acpi_irq_balance_set);
static struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
return;
if (acpi_irq_balance == -1) {
/* no command line switch: enable balancing in IOAPIC mode */
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
acpi_irq_balance = 1;
else
acpi_irq_balance = 0;
}
register_syscore_ops(&irqrouter_syscore_ops);
acpi_scan_add_handler(&pci_link_handler);
}
| linux-master | drivers/acpi/pci_link.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI Time and Alarm (TAD) Device Driver
*
* Copyright (C) 2018 Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
*
* This driver is based on Section 9.18 of the ACPI 6.2 specification revision.
*
* It only supports the system wakeup capabilities of the TAD.
*
* Provided are sysfs attributes, available under the TAD platform device,
* allowing user space to manage the AC and DC wakeup timers of the TAD:
* set and read their values, set and check their expire timer wake policies,
* check and clear their status and check the capabilities of the TAD reported
* by AML. The DC timer attributes are only present if the TAD supports a
* separate DC alarm timer.
*
* The wakeup events handling and power management of the TAD is expected to
* be taken care of by the ACPI PM domain attached to its platform device.
*/
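/*
 * Example usage (hedged; the device name assumes the ACPI000E HID bound
 * to this driver): reading the capabilities and arming the AC wakeup
 * timer for 30 seconds from user space could look like
 *
 *	# cat /sys/bus/platform/devices/ACPI000E:00/caps
 *	# echo 30 > /sys/bus/platform/devices/ACPI000E:00/ac_alarm
 */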
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rafael J. Wysocki");
/* ACPI TAD capability flags (ACPI 6.2, Section 9.18.2) */
#define ACPI_TAD_AC_WAKE BIT(0)
#define ACPI_TAD_DC_WAKE BIT(1)
#define ACPI_TAD_RT BIT(2)
#define ACPI_TAD_RT_IN_MS BIT(3)
#define ACPI_TAD_S4_S5__GWS BIT(4)
#define ACPI_TAD_AC_S4_WAKE BIT(5)
#define ACPI_TAD_AC_S5_WAKE BIT(6)
#define ACPI_TAD_DC_S4_WAKE BIT(7)
#define ACPI_TAD_DC_S5_WAKE BIT(8)
/* ACPI TAD alarm timer selection */
#define ACPI_TAD_AC_TIMER (u32)0
#define ACPI_TAD_DC_TIMER (u32)1
/* Special value for disabled timer or expired timer wake policy. */
#define ACPI_TAD_WAKE_DISABLED (~(u32)0)
struct acpi_tad_driver_data {
u32 capabilities;
};
struct acpi_tad_rt {
u16 year; /* 1900 - 9999 */
u8 month; /* 1 - 12 */
u8 day; /* 1 - 31 */
u8 hour; /* 0 - 23 */
u8 minute; /* 0 - 59 */
u8 second; /* 0 - 59 */
u8 valid; /* 0 (failed) or 1 (success) for reads, 0 for writes */
u16 msec; /* 1 - 1000 */
s16 tz; /* -1440 to 1440 or 2047 (unspecified) */
u8 daylight;
u8 padding[3]; /* must be 0 */
} __packed;
static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object args[] = {
{ .type = ACPI_TYPE_BUFFER, },
};
struct acpi_object_list arg_list = {
.pointer = args,
.count = ARRAY_SIZE(args),
};
unsigned long long retval;
acpi_status status;
if (rt->year < 1900 || rt->year > 9999 ||
rt->month < 1 || rt->month > 12 ||
rt->hour > 23 || rt->minute > 59 || rt->second > 59 ||
rt->tz < -1440 || (rt->tz > 1440 && rt->tz != 2047) ||
rt->daylight > 3)
return -ERANGE;
args[0].buffer.pointer = (u8 *)rt;
args[0].buffer.length = sizeof(*rt);
pm_runtime_get_sync(dev);
status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER };
union acpi_object *out_obj;
struct acpi_tad_rt *data;
acpi_status status;
int ret = -EIO;
pm_runtime_get_sync(dev);
status = acpi_evaluate_object(handle, "_GRT", NULL, &output);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status))
goto out_free;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER)
goto out_free;
if (out_obj->buffer.length != sizeof(*rt))
goto out_free;
data = (struct acpi_tad_rt *)(out_obj->buffer.pointer);
if (!data->valid)
goto out_free;
memcpy(rt, data, sizeof(*rt));
ret = 0;
out_free:
ACPI_FREE(output.pointer);
return ret;
}
static char *acpi_tad_rt_next_field(char *s, int *val)
{
char *p;
p = strchr(s, ':');
if (!p)
return NULL;
*p = '\0';
if (kstrtoint(s, 10, val))
return NULL;
return p + 1;
}
static ssize_t time_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_tad_rt rt;
char *str, *s;
int val, ret = -ENODATA;
str = kmemdup_nul(buf, count, GFP_KERNEL);
if (!str)
return -ENOMEM;
s = acpi_tad_rt_next_field(str, &val);
if (!s)
goto out_free;
rt.year = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.month = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.day = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.hour = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.minute = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.second = val;
s = acpi_tad_rt_next_field(s, &val);
if (!s)
goto out_free;
rt.tz = val;
if (kstrtoint(s, 10, &val))
goto out_free;
rt.daylight = val;
rt.valid = 0;
rt.msec = 0;
memset(rt.padding, 0, 3);
ret = acpi_tad_set_real_time(dev, &rt);
out_free:
kfree(str);
return ret ? ret : count;
}
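/*
 * Worked example for the parser above: the input "2018:7:1:12:30:0:2047:0"
 * yields year 2018, month 7, day 1, time 12:30:00, tz 2047 (unspecified)
 * and daylight 0; rt.valid and rt.msec are forced to 0 before _SRT is
 * evaluated.
 */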
static ssize_t time_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct acpi_tad_rt rt;
int ret;
ret = acpi_tad_get_real_time(dev, &rt);
if (ret)
return ret;
return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt.tz, rt.daylight);
}
static DEVICE_ATTR_RW(time);
static struct attribute *acpi_tad_time_attrs[] = {
&dev_attr_time.attr,
NULL,
};
static const struct attribute_group acpi_tad_time_attr_group = {
.attrs = acpi_tad_time_attrs,
};
static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id,
u32 value)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object args[] = {
{ .type = ACPI_TYPE_INTEGER, },
{ .type = ACPI_TYPE_INTEGER, },
};
struct acpi_object_list arg_list = {
.pointer = args,
.count = ARRAY_SIZE(args),
};
unsigned long long retval;
acpi_status status;
args[0].integer.value = timer_id;
args[1].integer.value = value;
pm_runtime_get_sync(dev);
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
static int acpi_tad_wake_write(struct device *dev, const char *buf, char *method,
u32 timer_id, const char *specval)
{
u32 value;
if (sysfs_streq(buf, specval)) {
value = ACPI_TAD_WAKE_DISABLED;
} else {
int ret = kstrtou32(buf, 0, &value);
if (ret)
return ret;
if (value == ACPI_TAD_WAKE_DISABLED)
return -EINVAL;
}
return acpi_tad_wake_set(dev, method, timer_id, value);
}
static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method,
u32 timer_id, const char *specval)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object args[] = {
{ .type = ACPI_TYPE_INTEGER, },
};
struct acpi_object_list arg_list = {
.pointer = args,
.count = ARRAY_SIZE(args),
};
unsigned long long retval;
acpi_status status;
args[0].integer.value = timer_id;
pm_runtime_get_sync(dev);
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status))
return -EIO;
if ((u32)retval == ACPI_TAD_WAKE_DISABLED)
return sprintf(buf, "%s\n", specval);
return sprintf(buf, "%u\n", (u32)retval);
}
static const char *alarm_specval = "disabled";
static int acpi_tad_alarm_write(struct device *dev, const char *buf,
u32 timer_id)
{
return acpi_tad_wake_write(dev, buf, "_STV", timer_id, alarm_specval);
}
static ssize_t acpi_tad_alarm_read(struct device *dev, char *buf, u32 timer_id)
{
return acpi_tad_wake_read(dev, buf, "_TIV", timer_id, alarm_specval);
}
static const char *policy_specval = "never";
static int acpi_tad_policy_write(struct device *dev, const char *buf,
u32 timer_id)
{
return acpi_tad_wake_write(dev, buf, "_STP", timer_id, policy_specval);
}
static ssize_t acpi_tad_policy_read(struct device *dev, char *buf, u32 timer_id)
{
return acpi_tad_wake_read(dev, buf, "_TIP", timer_id, policy_specval);
}
static int acpi_tad_clear_status(struct device *dev, u32 timer_id)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object args[] = {
{ .type = ACPI_TYPE_INTEGER, },
};
struct acpi_object_list arg_list = {
.pointer = args,
.count = ARRAY_SIZE(args),
};
unsigned long long retval;
acpi_status status;
args[0].integer.value = timer_id;
pm_runtime_get_sync(dev);
status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
static int acpi_tad_status_write(struct device *dev, const char *buf, u32 timer_id)
{
int ret, value;
ret = kstrtoint(buf, 0, &value);
if (ret)
return ret;
if (value)
return -EINVAL;
return acpi_tad_clear_status(dev, timer_id);
}
static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object args[] = {
{ .type = ACPI_TYPE_INTEGER, },
};
struct acpi_object_list arg_list = {
.pointer = args,
.count = ARRAY_SIZE(args),
};
unsigned long long retval;
acpi_status status;
args[0].integer.value = timer_id;
pm_runtime_get_sync(dev);
status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval);
pm_runtime_put_sync(dev);
if (ACPI_FAILURE(status))
return -EIO;
return sprintf(buf, "0x%02X\n", (u32)retval);
}
static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
return sprintf(buf, "0x%02X\n", dd->capabilities);
}
static DEVICE_ATTR_RO(caps);
static ssize_t ac_alarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_alarm_write(dev, buf, ACPI_TAD_AC_TIMER);
return ret ? ret : count;
}
static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR_RW(ac_alarm);
static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_policy_write(dev, buf, ACPI_TAD_AC_TIMER);
return ret ? ret : count;
}
static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR_RW(ac_policy);
static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_status_write(dev, buf, ACPI_TAD_AC_TIMER);
return ret ? ret : count;
}
static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR_RW(ac_status);
static struct attribute *acpi_tad_attrs[] = {
&dev_attr_caps.attr,
&dev_attr_ac_alarm.attr,
&dev_attr_ac_policy.attr,
&dev_attr_ac_status.attr,
NULL,
};
static const struct attribute_group acpi_tad_attr_group = {
.attrs = acpi_tad_attrs,
};
static ssize_t dc_alarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_alarm_write(dev, buf, ACPI_TAD_DC_TIMER);
return ret ? ret : count;
}
static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR_RW(dc_alarm);
static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_policy_write(dev, buf, ACPI_TAD_DC_TIMER);
return ret ? ret : count;
}
static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR_RW(dc_policy);
static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = acpi_tad_status_write(dev, buf, ACPI_TAD_DC_TIMER);
return ret ? ret : count;
}
static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR_RW(dc_status);
static struct attribute *acpi_tad_dc_attrs[] = {
&dev_attr_dc_alarm.attr,
&dev_attr_dc_policy.attr,
&dev_attr_dc_status.attr,
NULL,
};
static const struct attribute_group acpi_tad_dc_attr_group = {
.attrs = acpi_tad_dc_attrs,
};
static int acpi_tad_disable_timer(struct device *dev, u32 timer_id)
{
return acpi_tad_wake_set(dev, "_STV", timer_id, ACPI_TAD_WAKE_DISABLED);
}
static int acpi_tad_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
device_init_wakeup(dev, false);
pm_runtime_get_sync(dev);
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group);
acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
if (dd->capabilities & ACPI_TAD_DC_WAKE) {
acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
}
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
acpi_remove_cmos_rtc_space_handler(handle);
return 0;
}
static int acpi_tad_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_tad_driver_data *dd;
acpi_status status;
unsigned long long caps;
int ret;
ret = acpi_install_cmos_rtc_space_handler(handle);
if (ret < 0) {
dev_info(dev, "Unable to install space handler\n");
return -ENODEV;
}
/*
* Initialization failure messages are mostly about firmware issues, so
* print them at the "info" level.
*/
status = acpi_evaluate_integer(handle, "_GCP", NULL, &caps);
if (ACPI_FAILURE(status)) {
dev_info(dev, "Unable to get capabilities\n");
ret = -ENODEV;
goto remove_handler;
}
if (!(caps & ACPI_TAD_AC_WAKE)) {
dev_info(dev, "Unsupported capabilities\n");
ret = -ENODEV;
goto remove_handler;
}
if (!acpi_has_method(handle, "_PRW")) {
dev_info(dev, "Missing _PRW\n");
ret = -ENODEV;
goto remove_handler;
}
dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
if (!dd) {
ret = -ENOMEM;
goto remove_handler;
}
dd->capabilities = caps;
dev_set_drvdata(dev, dd);
/*
* Assume that the ACPI PM domain has been attached to the device and
* simply enable system wakeup and runtime PM and put the device into
* runtime suspend. Everything else should be taken care of by the ACPI
* PM domain callbacks.
*/
device_init_wakeup(dev, true);
dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
/*
	 * The platform bus type layer tells the ACPI PM domain to power up
	 * the device, so set its runtime PM status to "active".
*/
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_suspend(dev);
ret = sysfs_create_group(&dev->kobj, &acpi_tad_attr_group);
if (ret)
goto fail;
if (caps & ACPI_TAD_DC_WAKE) {
ret = sysfs_create_group(&dev->kobj, &acpi_tad_dc_attr_group);
if (ret)
goto fail;
}
if (caps & ACPI_TAD_RT) {
ret = sysfs_create_group(&dev->kobj, &acpi_tad_time_attr_group);
if (ret)
goto fail;
}
return 0;
fail:
acpi_tad_remove(pdev);
	/*
	 * Don't fall through to remove_handler: acpi_tad_remove() has
	 * already removed the CMOS RTC space handler.
	 */
return ret;
remove_handler:
acpi_remove_cmos_rtc_space_handler(handle);
return ret;
}
static const struct acpi_device_id acpi_tad_ids[] = {
{"ACPI000E", 0},
{}
};
static struct platform_driver acpi_tad_driver = {
.driver = {
.name = "acpi-tad",
.acpi_match_table = acpi_tad_ids,
},
.probe = acpi_tad_probe,
.remove = acpi_tad_remove,
};
MODULE_DEVICE_TABLE(acpi, acpi_tad_ids);
module_platform_driver(acpi_tad_driver);
| linux-master | drivers/acpi/acpi_tad.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Platform profile sysfs interface */
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
static struct platform_profile_handler *cur_profile;
static DEFINE_MUTEX(profile_lock);
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
[PLATFORM_PROFILE_QUIET] = "quiet",
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int len = 0;
int err, i;
err = mutex_lock_interruptible(&profile_lock);
if (err)
return err;
if (!cur_profile) {
mutex_unlock(&profile_lock);
return -ENODEV;
}
for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
mutex_unlock(&profile_lock);
return len;
}
static ssize_t platform_profile_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
int err;
err = mutex_lock_interruptible(&profile_lock);
if (err)
return err;
if (!cur_profile) {
mutex_unlock(&profile_lock);
return -ENODEV;
}
err = cur_profile->profile_get(cur_profile, &profile);
mutex_unlock(&profile_lock);
if (err)
return err;
	/* Check that profile is a valid index */
if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
return -EIO;
return sysfs_emit(buf, "%s\n", profile_names[profile]);
}
static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int err, i;
err = mutex_lock_interruptible(&profile_lock);
if (err)
return err;
if (!cur_profile) {
mutex_unlock(&profile_lock);
return -ENODEV;
}
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
if (i < 0) {
mutex_unlock(&profile_lock);
return -EINVAL;
}
/* Check that platform supports this profile choice */
if (!test_bit(i, cur_profile->choices)) {
mutex_unlock(&profile_lock);
return -EOPNOTSUPP;
}
err = cur_profile->profile_set(cur_profile, i);
if (!err)
sysfs_notify(acpi_kobj, NULL, "platform_profile");
mutex_unlock(&profile_lock);
if (err)
return err;
return count;
}
static DEVICE_ATTR_RO(platform_profile_choices);
static DEVICE_ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
&dev_attr_platform_profile_choices.attr,
&dev_attr_platform_profile.attr,
NULL
};
static const struct attribute_group platform_profile_group = {
.attrs = platform_profile_attrs
};
void platform_profile_notify(void)
{
if (!cur_profile)
return;
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
int platform_profile_register(struct platform_profile_handler *pprof)
{
int err;
mutex_lock(&profile_lock);
/* We can only have one active profile */
if (cur_profile) {
mutex_unlock(&profile_lock);
return -EEXIST;
}
	/* Sanity check that the profile handler fields are set */
if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
!pprof->profile_set || !pprof->profile_get) {
mutex_unlock(&profile_lock);
return -EINVAL;
}
err = sysfs_create_group(acpi_kobj, &platform_profile_group);
if (err) {
mutex_unlock(&profile_lock);
return err;
}
cur_profile = pprof;
mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_register);
int platform_profile_remove(void)
{
sysfs_remove_group(acpi_kobj, &platform_profile_group);
mutex_lock(&profile_lock);
cur_profile = NULL;
mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
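/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * platform driver advertising "balanced" and "performance" would fill in
 * a handler and register it from its probe path roughly like this. The
 * demo_* names are made up for illustration.
 */
static int __maybe_unused demo_profile_get(struct platform_profile_handler *pprof,
					   enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* would query firmware */
	return 0;
}

static int __maybe_unused demo_profile_set(struct platform_profile_handler *pprof,
					   enum platform_profile_option profile)
{
	/* would program firmware with the requested profile */
	return 0;
}

static int __maybe_unused demo_profile_probe(void)
{
	static struct platform_profile_handler demo_handler = {
		.profile_get = demo_profile_get,
		.profile_set = demo_profile_set,
	};

	set_bit(PLATFORM_PROFILE_BALANCED, demo_handler.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, demo_handler.choices);
	return platform_profile_register(&demo_handler);
}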
MODULE_AUTHOR("Mark Pearson <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/platform_profile.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi_ipmi.c - ACPI IPMI opregion
*
* Copyright (C) 2010, 2013 Intel Corporation
* Author: Zhao Yakui <[email protected]>
* Lv Zheng <[email protected]>
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/ipmi.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");
#define ACPI_IPMI_OK 0
#define ACPI_IPMI_TIMEOUT 0x10
#define ACPI_IPMI_UNKNOWN 0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT (5000)
#define ACPI_IPMI_MAX_MSG_LENGTH 64
struct acpi_ipmi_device {
/* the device list attached to driver_data.ipmi_devices */
struct list_head head;
/* the IPMI request message list */
struct list_head tx_msg_list;
spinlock_t tx_msg_lock;
acpi_handle handle;
struct device *dev;
struct ipmi_user *user_interface;
int ipmi_ifnum; /* IPMI interface number */
long curr_msgid;
bool dead;
struct kref kref;
};
struct ipmi_driver_data {
struct list_head ipmi_devices;
struct ipmi_smi_watcher bmc_events;
const struct ipmi_user_hndl ipmi_hndlrs;
struct mutex ipmi_lock;
/*
* NOTE: IPMI System Interface Selection
* There is no system interface specified by the IPMI operation
* region access. We try to select one system interface with ACPI
* handle set. IPMI messages passed from the ACPI codes are sent
* to this selected global IPMI system interface.
*/
struct acpi_ipmi_device *selected_smi;
};
struct acpi_ipmi_msg {
struct list_head head;
/*
	 * Generally speaking, the addr type should be SI_ADDR_TYPE and the
	 * addr channel should be BMC. It can also be of IPMB type, but that
	 * would have to be parsed from the NetFn command buffer, which is
	 * complex enough that it is skipped here.
*/
struct ipmi_addr addr;
long tx_msgid;
/* it is used to track whether the IPMI message is finished */
struct completion tx_complete;
struct kernel_ipmi_msg tx_message;
int msg_done;
	/* tx/rx data, copied from/to the ACPI object buffer */
u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
u8 rx_len;
struct acpi_ipmi_device *device;
struct kref kref;
};
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
u8 status;
u8 length;
u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
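/*
 * Illustrative ASL (hypothetical firmware, for orientation only): an IPMI
 * operation region whose field accesses reach the space handler below.
 * The region base encodes the netfn and the field offset the command, so
 * the handler sees address = (netfn << 8) | cmd:
 *
 *	OperationRegion (SYSI, IPMI, 0x0600, 0x100)
 *	Field (SYSI, BufferAcc, Lock, Preserve)
 *	{
 *	    Offset (0x01),
 *	    GDID, 8	(netfn 0x06, cmd 0x01: Get Device ID)
 *	}
 */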
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
static struct ipmi_driver_data driver_data = {
.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
.bmc_events = {
.owner = THIS_MODULE,
.new_smi = ipmi_register_bmc,
.smi_gone = ipmi_bmc_gone,
},
.ipmi_hndlrs = {
.ipmi_recv_hndl = ipmi_msg_handler,
},
.ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
struct acpi_ipmi_device *ipmi_device;
int err;
struct ipmi_user *user;
ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
if (!ipmi_device)
return NULL;
kref_init(&ipmi_device->kref);
INIT_LIST_HEAD(&ipmi_device->head);
INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
spin_lock_init(&ipmi_device->tx_msg_lock);
ipmi_device->handle = handle;
ipmi_device->dev = get_device(dev);
ipmi_device->ipmi_ifnum = iface;
err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
ipmi_device, &user);
if (err) {
put_device(dev);
kfree(ipmi_device);
return NULL;
}
ipmi_device->user_interface = user;
return ipmi_device;
}
static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
ipmi_destroy_user(ipmi_device->user_interface);
put_device(ipmi_device->dev);
kfree(ipmi_device);
}
static void ipmi_dev_release_kref(struct kref *kref)
{
struct acpi_ipmi_device *ipmi =
container_of(kref, struct acpi_ipmi_device, kref);
ipmi_dev_release(ipmi);
}
static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
list_del(&ipmi_device->head);
if (driver_data.selected_smi == ipmi_device)
driver_data.selected_smi = NULL;
/*
	 * Always set the dead flag after deleting from the list, or the
	 * list_for_each_entry() callers would have to change.
*/
ipmi_device->dead = true;
}
static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
{
struct acpi_ipmi_device *ipmi_device = NULL;
mutex_lock(&driver_data.ipmi_lock);
if (driver_data.selected_smi) {
ipmi_device = driver_data.selected_smi;
kref_get(&ipmi_device->kref);
}
mutex_unlock(&driver_data.ipmi_lock);
return ipmi_device;
}
static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}
static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
struct acpi_ipmi_device *ipmi;
struct acpi_ipmi_msg *ipmi_msg;
ipmi = acpi_ipmi_dev_get();
if (!ipmi)
return NULL;
ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
if (!ipmi_msg) {
acpi_ipmi_dev_put(ipmi);
return NULL;
}
kref_init(&ipmi_msg->kref);
init_completion(&ipmi_msg->tx_complete);
INIT_LIST_HEAD(&ipmi_msg->head);
ipmi_msg->device = ipmi;
ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
return ipmi_msg;
}
static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
acpi_ipmi_dev_put(tx_msg->device);
kfree(tx_msg);
}
static void ipmi_msg_release_kref(struct kref *kref)
{
struct acpi_ipmi_msg *tx_msg =
container_of(kref, struct acpi_ipmi_msg, kref);
ipmi_msg_release(tx_msg);
}
static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
kref_get(&tx_msg->kref);
return tx_msg;
}
static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}
#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
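/*
 * Worked example: an opregion field at address 0x0601 decodes to
 * netfn 0x06 (Application) and cmd 0x01 (Get Device ID).
 */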
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
acpi_physical_address address,
acpi_integer *value)
{
struct kernel_ipmi_msg *msg;
struct acpi_ipmi_buffer *buffer;
struct acpi_ipmi_device *device;
unsigned long flags;
msg = &tx_msg->tx_message;
/*
* IPMI network function and command are encoded in the address
* within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
*/
msg->netfn = IPMI_OP_RGN_NETFN(address);
msg->cmd = IPMI_OP_RGN_CMD(address);
msg->data = tx_msg->data;
/*
* value is the parameter passed by the IPMI opregion space handler.
* It points to the IPMI request message buffer
*/
buffer = (struct acpi_ipmi_buffer *)value;
/* copy the tx message data */
if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
dev_WARN_ONCE(tx_msg->device->dev, true,
"Unexpected request (msg len %d).\n",
buffer->length);
return -EINVAL;
}
msg->data_len = buffer->length;
memcpy(tx_msg->data, buffer->data, msg->data_len);
/*
* now the default type is SYSTEM_INTERFACE and channel type is BMC.
* If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
* the addr type should be changed to IPMB. Then we will have to parse
* the IPMI request message buffer to get the IPMB address.
* If so, please fix me.
*/
tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
tx_msg->addr.channel = IPMI_BMC_CHANNEL;
tx_msg->addr.data[0] = 0;
/* Get the msgid */
device = tx_msg->device;
spin_lock_irqsave(&device->tx_msg_lock, flags);
device->curr_msgid++;
tx_msg->tx_msgid = device->curr_msgid;
spin_unlock_irqrestore(&device->tx_msg_lock, flags);
return 0;
}
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
acpi_integer *value)
{
struct acpi_ipmi_buffer *buffer;
/*
* value is also used as output parameter. It represents the response
* IPMI message returned by IPMI command.
*/
buffer = (struct acpi_ipmi_buffer *)value;
/*
	 * If msg_done is not ACPI_IPMI_OK, the IPMI command did not execute
	 * correctly.
*/
buffer->status = msg->msg_done;
if (msg->msg_done != ACPI_IPMI_OK)
return;
/*
* If the IPMI response message is obtained correctly, the status code
* will be ACPI_IPMI_OK
*/
buffer->length = msg->rx_len;
memcpy(buffer->data, msg->data, msg->rx_len);
}
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
struct acpi_ipmi_msg *tx_msg;
unsigned long flags;
/*
* NOTE: On-going ipmi_recv_msg
* ipmi_msg_handler() may still be invoked by ipmi_si after
* flushing. But it is safe to do a fast flushing on module_exit()
* without waiting for all ipmi_recv_msg(s) to complete from
* ipmi_msg_handler() as it is ensured by ipmi_si that all
* ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
*/
spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
while (!list_empty(&ipmi->tx_msg_list)) {
tx_msg = list_first_entry(&ipmi->tx_msg_list,
struct acpi_ipmi_msg,
head);
list_del(&tx_msg->head);
spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
		/* wake up the thread sleeping on the Tx msg */
complete(&tx_msg->tx_complete);
acpi_ipmi_msg_put(tx_msg);
spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
}
spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}
static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
struct acpi_ipmi_msg *msg)
{
struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
unsigned long flags;
spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
if (msg == iter) {
tx_msg = iter;
list_del(&iter->head);
break;
}
}
spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
if (tx_msg)
acpi_ipmi_msg_put(tx_msg);
}
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
struct acpi_ipmi_device *ipmi_device = user_msg_data;
struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
struct device *dev = ipmi_device->dev;
unsigned long flags;
if (msg->user != ipmi_device->user_interface) {
dev_warn(dev,
"Unexpected response is returned. returned user %p, expected user %p\n",
msg->user, ipmi_device->user_interface);
goto out_msg;
}
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
if (msg->msgid == iter->tx_msgid) {
tx_msg = iter;
list_del(&iter->head);
break;
}
}
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
if (!tx_msg) {
dev_warn(dev,
"Unexpected response (msg id %ld) is returned.\n",
msg->msgid);
goto out_msg;
}
/* copy the response data to Rx_data buffer */
if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
dev_WARN_ONCE(dev, true,
"Unexpected response (msg len %d).\n",
msg->msg.data_len);
goto out_comp;
}
/* response msg is an error msg */
msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
msg->msg.data_len == 1) {
if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
dev_dbg_once(dev, "Unexpected response (timeout).\n");
tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
}
goto out_comp;
}
tx_msg->rx_len = msg->msg.data_len;
memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
tx_msg->msg_done = ACPI_IPMI_OK;
out_comp:
complete(&tx_msg->tx_complete);
acpi_ipmi_msg_put(tx_msg);
out_msg:
ipmi_free_recv_msg(msg);
}
static void ipmi_register_bmc(int iface, struct device *dev)
{
struct acpi_ipmi_device *ipmi_device, *temp;
int err;
struct ipmi_smi_info smi_data;
acpi_handle handle;
err = ipmi_get_smi_info(iface, &smi_data);
if (err)
return;
if (smi_data.addr_src != SI_ACPI)
goto err_ref;
handle = smi_data.addr_info.acpi_info.acpi_handle;
if (!handle)
goto err_ref;
ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
if (!ipmi_device) {
dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
goto err_ref;
}
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
/*
* if the corresponding ACPI handle is already added
* to the device list, don't add it again.
*/
if (temp->handle == handle)
goto err_lock;
}
if (!driver_data.selected_smi)
driver_data.selected_smi = ipmi_device;
list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
mutex_unlock(&driver_data.ipmi_lock);
put_device(smi_data.dev);
return;
err_lock:
mutex_unlock(&driver_data.ipmi_lock);
ipmi_dev_release(ipmi_device);
err_ref:
put_device(smi_data.dev);
}
static void ipmi_bmc_gone(int iface)
{
struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry_safe(iter, temp,
&driver_data.ipmi_devices, head) {
		if (iter->ipmi_ifnum == iface) {
ipmi_device = iter;
__ipmi_dev_kill(iter);
break;
}
}
if (!driver_data.selected_smi)
driver_data.selected_smi = list_first_entry_or_null(
&driver_data.ipmi_devices,
struct acpi_ipmi_device, head);
mutex_unlock(&driver_data.ipmi_lock);
if (ipmi_device) {
ipmi_flush_tx_msg(ipmi_device);
acpi_ipmi_dev_put(ipmi_device);
}
}
/*
* This is the IPMI opregion space handler.
 * @function: indicates read or write. Since the IPMI message exchange is
 *	command driven, only write is meaningful.
 * @address: contains the netfn/command of the IPMI request message.
 * @bits: not used.
 * @value: in/out parameter pointing to the IPMI message buffer. Before
 *	the message is sent it holds the request; after the exchange
 *	completes it holds the response returned by the IPMI command.
* @handler_context: IPMI device context.
*/
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
u32 bits, acpi_integer *value,
void *handler_context, void *region_context)
{
struct acpi_ipmi_msg *tx_msg;
struct acpi_ipmi_device *ipmi_device;
int err;
acpi_status status;
unsigned long flags;
/*
* IPMI opregion message.
	 * An IPMI message is first written to the BMC, after which system
	 * software can read back the response, so read access to the IPMI
	 * opregion is meaningless.
*/
if ((function & ACPI_IO_MASK) == ACPI_READ)
return AE_TYPE;
tx_msg = ipmi_msg_alloc();
if (!tx_msg)
return AE_NOT_EXIST;
ipmi_device = tx_msg->device;
if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
ipmi_msg_release(tx_msg);
return AE_TYPE;
}
acpi_ipmi_msg_get(tx_msg);
mutex_lock(&driver_data.ipmi_lock);
	/* Do not add a tx_msg that cannot be flushed. */
if (ipmi_device->dead) {
mutex_unlock(&driver_data.ipmi_lock);
ipmi_msg_release(tx_msg);
return AE_NOT_EXIST;
}
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
mutex_unlock(&driver_data.ipmi_lock);
err = ipmi_request_settime(ipmi_device->user_interface,
&tx_msg->addr,
tx_msg->tx_msgid,
&tx_msg->tx_message,
NULL, 0, 0, IPMI_TIMEOUT);
if (err) {
status = AE_ERROR;
goto out_msg;
}
wait_for_completion(&tx_msg->tx_complete);
acpi_format_ipmi_response(tx_msg, value);
status = AE_OK;
out_msg:
ipmi_cancel_tx_msg(ipmi_device, tx_msg);
acpi_ipmi_msg_put(tx_msg);
return status;
}
static int __init acpi_ipmi_init(void)
{
int result;
acpi_status status;
if (acpi_disabled)
return 0;
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
pr_warn("Can't register IPMI opregion space handle\n");
return -EINVAL;
}
result = ipmi_smi_watcher_register(&driver_data.bmc_events);
if (result) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler);
pr_err("Can't register IPMI system interface watcher\n");
}
return result;
}
static void __exit acpi_ipmi_exit(void)
{
struct acpi_ipmi_device *ipmi_device;
if (acpi_disabled)
return;
ipmi_smi_watcher_unregister(&driver_data.bmc_events);
/*
	 * When an smi_watcher is unregistered, it is only deleted from the
	 * smi_watcher list; its smi_gone callback is not called. So
	 * explicitly uninstall the ACPI IPMI opregion handler here and
	 * free the devices.
*/
mutex_lock(&driver_data.ipmi_lock);
while (!list_empty(&driver_data.ipmi_devices)) {
ipmi_device = list_first_entry(&driver_data.ipmi_devices,
struct acpi_ipmi_device,
head);
__ipmi_dev_kill(ipmi_device);
mutex_unlock(&driver_data.ipmi_lock);
ipmi_flush_tx_msg(ipmi_device);
acpi_ipmi_dev_put(ipmi_device);
mutex_lock(&driver_data.ipmi_lock);
}
mutex_unlock(&driver_data.ipmi_lock);
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler);
}
module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);
| linux-master | drivers/acpi/acpi_ipmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* acpi_pad.c ACPI Processor Aggregator Driver
*
* Copyright (c) 2009, Intel Corporation.
*/
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <asm/mwait.h>
#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
static unsigned long power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
static void power_saving_mwait_init(void)
{
unsigned int eax, ebx, ecx, edx;
unsigned int highest_cstate = 0;
unsigned int highest_subcstate = 0;
int i;
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
return;
edx >>= MWAIT_SUBSTATE_SIZE;
for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
if (edx & MWAIT_SUBSTATE_MASK) {
highest_cstate = i;
highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
}
}
power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
(highest_subcstate - 1);
#if defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
tsc_detected_unstable = 1;
break;
default:
/* TSC could halt in idle */
tsc_detected_unstable = 1;
}
#endif
}
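/*
 * Worked example (hedged): with MWAIT_SUBSTATE_SIZE == 4, a CPU whose
 * CPUID.5 EDX reports its deepest supported C-state at index 3 with two
 * sub-states ends up with
 * power_saving_mwait_eax = (3 << 4) | (2 - 1) = 0x31.
 */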
static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
static void round_robin_cpu(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_var_t tmp;
int cpu;
unsigned long min_weight = -1;
unsigned long preferred_cpu;
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
mutex_unlock(&round_robin_lock);
free_cpumask_var(tmp);
return;
}
for_each_cpu(cpu, tmp) {
if (cpu_weight[cpu] < min_weight) {
min_weight = cpu_weight[cpu];
preferred_cpu = cpu;
}
}
if (tsk_in_cpu[tsk_index] != -1)
cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
tsk_in_cpu[tsk_index] = preferred_cpu;
cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
cpu_weight[preferred_cpu]++;
mutex_unlock(&round_robin_lock);
set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
free_cpumask_var(tmp);
}
static void exit_round_robin(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
tsk_in_cpu[tsk_index] = -1;
}
static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
static int power_saving_thread(void *data)
{
int do_sleep;
unsigned int tsk_index = (unsigned long)data;
u64 last_jiffies = 0;
sched_set_fifo_low(current);
while (!kthread_should_stop()) {
unsigned long expire_time;
/* round robin to cpus */
expire_time = last_jiffies + round_robin_time * HZ;
if (time_before(expire_time, jiffies)) {
last_jiffies = jiffies;
round_robin_cpu(tsk_index);
}
do_sleep = 0;
expire_time = jiffies + HZ * (100 - idle_pct) / 100;
while (!need_resched()) {
if (tsc_detected_unstable && !tsc_marked_unstable) {
/* TSC could halt in idle, so notify users */
mark_tsc_unstable("TSC halts in idle");
tsc_marked_unstable = 1;
}
local_irq_disable();
perf_lopwr_cb(true);
tick_broadcast_enable();
tick_broadcast_enter();
stop_critical_timings();
mwait_idle_with_hints(power_saving_mwait_eax, 1);
start_critical_timings();
tick_broadcast_exit();
perf_lopwr_cb(false);
local_irq_enable();
if (time_before(expire_time, jiffies)) {
do_sleep = 1;
break;
}
}
/*
		 * The current sched_rt implementation throttles RT task
		 * runtime: when an RT task uses 95% of the CPU time, it is
		 * scheduled out for 5% of the time so other tasks don't
		 * starve. But this only works when every CPU has an RT task
		 * running; if one CPU doesn't, RT tasks from other CPUs
		 * borrow CPU time from it and end up using more than 95%.
		 * To keep the starvation avoidance working, take a nap here.
*/
if (unlikely(do_sleep))
schedule_timeout_killable(HZ * idle_pct / 100);
/* If an external event has set the need_resched flag, then
* we need to deal with it, or this loop will continue to
* spin without calling __mwait().
*/
if (unlikely(need_resched()))
schedule();
}
exit_round_robin(tsk_index);
return 0;
}
static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
int rc;
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"acpi_pad/%d", ps_tsk_num);
if (IS_ERR(ps_tsks[ps_tsk_num])) {
rc = PTR_ERR(ps_tsks[ps_tsk_num]);
ps_tsks[ps_tsk_num] = NULL;
} else {
rc = 0;
ps_tsk_num++;
}
return rc;
}
static void destroy_power_saving_task(void)
{
if (ps_tsk_num > 0) {
ps_tsk_num--;
kthread_stop(ps_tsks[ps_tsk_num]);
ps_tsks[ps_tsk_num] = NULL;
}
}
static void set_power_saving_task_num(unsigned int num)
{
if (num > ps_tsk_num) {
while (ps_tsk_num < num) {
if (create_power_saving_task())
return;
}
} else if (num < ps_tsk_num) {
while (ps_tsk_num > num)
destroy_power_saving_task();
}
}
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
cpus_read_lock();
num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
set_power_saving_task_num(num_cpus);
cpus_read_unlock();
}
static uint32_t acpi_pad_idle_cpus_num(void)
{
return ps_tsk_num;
}
static ssize_t rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
mutex_lock(&isolated_cpus_lock);
round_robin_time = num;
mutex_unlock(&isolated_cpus_lock);
return count;
}
static ssize_t rrtime_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", round_robin_time);
}
static DEVICE_ATTR_RW(rrtime);
static ssize_t idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
mutex_lock(&isolated_cpus_lock);
idle_pct = num;
mutex_unlock(&isolated_cpus_lock);
return count;
}
static ssize_t idlepct_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", idle_pct);
}
static DEVICE_ATTR_RW(idlepct);
static ssize_t idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
if (kstrtoul(buf, 0, &num))
return -EINVAL;
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(num);
mutex_unlock(&isolated_cpus_lock);
return count;
}
static ssize_t idlecpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(false, buf,
to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR_RW(idlecpus);
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
int result;
result = device_create_file(&device->dev, &dev_attr_idlecpus);
if (result)
return -ENODEV;
result = device_create_file(&device->dev, &dev_attr_idlepct);
if (result) {
device_remove_file(&device->dev, &dev_attr_idlecpus);
return -ENODEV;
}
result = device_create_file(&device->dev, &dev_attr_rrtime);
if (result) {
device_remove_file(&device->dev, &dev_attr_idlecpus);
device_remove_file(&device->dev, &dev_attr_idlepct);
return -ENODEV;
}
return 0;
}
static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
device_remove_file(&device->dev, &dev_attr_idlecpus);
device_remove_file(&device->dev, &dev_attr_idlepct);
device_remove_file(&device->dev, &dev_attr_rrtime);
}
/*
 * Query firmware for the number of CPUs that should be idle.
 * Returns -1 on failure.
*/
static int acpi_pad_pur(acpi_handle handle)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package;
int num = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
return num;
if (!buffer.length || !buffer.pointer)
return num;
package = buffer.pointer;
if (package->type == ACPI_TYPE_PACKAGE &&
package->package.count == 2 &&
package->package.elements[0].integer.value == 1) /* rev 1 */
num = package->package.elements[1].integer.value;
kfree(buffer.pointer);
return num;
}
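/*
 * Per the checks above, a conforming _PUR object returns a two-element
 * package, e.g. (illustrative ASL, not taken from any firmware):
 *
 *   Name (_PUR, Package () { 1, N })   // revision 1, idle N processors
 */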
static void acpi_pad_handle_notify(acpi_handle handle)
{
int num_cpus;
uint32_t idle_cpus;
struct acpi_buffer param = {
.length = 4,
.pointer = (void *)&idle_cpus,
};
mutex_lock(&isolated_cpus_lock);
num_cpus = acpi_pad_pur(handle);
if (num_cpus < 0) {
mutex_unlock(&isolated_cpus_lock);
return;
}
acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
mutex_unlock(&isolated_cpus_lock);
}
static void acpi_pad_notify(acpi_handle handle, u32 event,
void *data)
{
struct acpi_device *device = data;
switch (event) {
case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
acpi_pad_handle_notify(handle);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
default:
pr_warn("Unsupported event [0x%x]\n", event);
break;
}
}
static int acpi_pad_add(struct acpi_device *device)
{
acpi_status status;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
if (acpi_pad_add_sysfs(device))
return -ENODEV;
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
if (ACPI_FAILURE(status)) {
acpi_pad_remove_sysfs(device);
return -ENODEV;
}
return 0;
}
static void acpi_pad_remove(struct acpi_device *device)
{
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(0);
mutex_unlock(&isolated_cpus_lock);
acpi_remove_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY, acpi_pad_notify);
acpi_pad_remove_sysfs(device);
}
static const struct acpi_device_id pad_device_ids[] = {
{"ACPI000C", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);
static struct acpi_driver acpi_pad_driver = {
.name = "processor_aggregator",
.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
.ids = pad_device_ids,
.ops = {
.add = acpi_pad_add,
.remove = acpi_pad_remove,
},
};
static int __init acpi_pad_init(void)
{
/* Xen ACPI PAD is used when running as Xen Dom0. */
if (xen_initial_domain())
return -ENODEV;
power_saving_mwait_init();
if (power_saving_mwait_eax == 0)
return -EINVAL;
return acpi_bus_register_driver(&acpi_pad_driver);
}
static void __exit acpi_pad_exit(void)
{
acpi_bus_unregister_driver(&acpi_pad_driver);
}
module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li<[email protected]>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/acpi/acpi_pad.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pci_irq.c - ACPI PCI Interrupt Routing ($Revision: 11 $)
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2002 Dominik Brodowski <[email protected]>
* (c) Copyright 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: PCI: " fmt
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
u8 pin;
acpi_handle link;
u32 index; /* GSI, or link _CRS index */
};
static inline char pin_name(int pin)
{
return 'A' + pin - 1;
}
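/* e.g. pin_name(1) == 'A' (INTA) and pin_name(4) == 'D' (INTD) */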
/* --------------------------------------------------------------------------
PCI IRQ Routing Table (PRT) Support
-------------------------------------------------------------------------- */
/* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */
static const struct dmi_system_id medion_md9580[] = {
{
.ident = "Medion MD9580-F laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"),
DMI_MATCH(DMI_PRODUCT_NAME, "A555"),
},
},
{ }
};
/* http://bugzilla.kernel.org/show_bug.cgi?id=5044 */
static const struct dmi_system_id dell_optiplex[] = {
{
.ident = "Dell Optiplex GX1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX1 600S+"),
},
},
{ }
};
/* http://bugzilla.kernel.org/show_bug.cgi?id=10138 */
static const struct dmi_system_id hp_t5710[] = {
{
.ident = "HP t5710",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "hp t5000 series"),
DMI_MATCH(DMI_BOARD_NAME, "098Ch"),
},
},
{ }
};
struct prt_quirk {
const struct dmi_system_id *system;
unsigned int segment;
unsigned int bus;
unsigned int device;
unsigned char pin;
const char *source; /* according to BIOS */
const char *actual_source;
};
#define PCI_INTX_PIN(c) (c - 'A' + 1)
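/* Inverse of pin_name() above: PCI_INTX_PIN('A') == 1, ..., PCI_INTX_PIN('D') == 4 */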
/*
* These systems have incorrect _PRT entries. The BIOS claims the PCI
* interrupt at the listed segment/bus/device/pin is connected to the first
* link device, but it is actually connected to the second.
*/
static const struct prt_quirk prt_quirks[] = {
{ medion_md9580, 0, 0, 9, PCI_INTX_PIN('A'),
"\\_SB_.PCI0.ISA_.LNKA",
"\\_SB_.PCI0.ISA_.LNKB"},
{ dell_optiplex, 0, 0, 0xd, PCI_INTX_PIN('A'),
"\\_SB_.LNKB",
"\\_SB_.LNKA"},
{ hp_t5710, 0, 0, 1, PCI_INTX_PIN('A'),
"\\_SB_.PCI0.LNK1",
"\\_SB_.PCI0.LNK3"},
};
static void do_prt_fixups(struct acpi_prt_entry *entry,
struct acpi_pci_routing_table *prt)
{
int i;
const struct prt_quirk *quirk;
for (i = 0; i < ARRAY_SIZE(prt_quirks); i++) {
quirk = &prt_quirks[i];
/* All current quirks involve link devices, not GSIs */
if (dmi_check_system(quirk->system) &&
entry->id.segment == quirk->segment &&
entry->id.bus == quirk->bus &&
entry->id.device == quirk->device &&
entry->pin == quirk->pin &&
!strcmp(prt->source, quirk->source) &&
strlen(prt->source) >= strlen(quirk->actual_source)) {
pr_warn("Firmware reports "
"%04x:%02x:%02x PCI INT %c connected to %s; "
"changing to %s\n",
entry->id.segment, entry->id.bus,
entry->id.device, pin_name(entry->pin),
prt->source, quirk->actual_source);
strcpy(prt->source, quirk->actual_source);
}
}
}
static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
int pin, struct acpi_pci_routing_table *prt,
struct acpi_prt_entry **entry_ptr)
{
int segment = pci_domain_nr(dev->bus);
int bus = dev->bus->number;
int device = pci_ari_enabled(dev->bus) ? 0 : PCI_SLOT(dev->devfn);
struct acpi_prt_entry *entry;
if (((prt->address >> 16) & 0xffff) != device ||
prt->pin + 1 != pin)
return -ENODEV;
entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/*
* Note that the _PRT uses 0=INTA, 1=INTB, etc, while PCI uses
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
entry->id.segment = segment;
entry->id.bus = bus;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
do_prt_fixups(entry, prt);
entry->index = prt->source_index;
/*
* Type 1: Dynamic
* ---------------
* The 'source' field specifies the PCI interrupt link device used to
* configure the IRQ assigned to this slot|dev|pin. The 'source_index'
* indicates which resource descriptor in the resource template (of
* the link device) this interrupt is allocated from.
*
* NOTE: Don't query the Link Device for IRQ information at this time
* because Link Device enumeration may not have occurred yet
* (e.g. exists somewhere 'below' this _PRT entry in the ACPI
* namespace).
*/
if (prt->source[0])
acpi_get_handle(handle, prt->source, &entry->link);
/*
* Type 2: Static
* --------------
* The 'source' field is NULL, and the 'source_index' field specifies
* the IRQ value, which is hardwired to specific interrupt inputs on
* the interrupt controller.
*/
pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n",
entry->id.segment, entry->id.bus, entry->id.device,
pin_name(entry->pin), prt->source, entry->index);
*entry_ptr = entry;
return 0;
}
static int acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
int pin, struct acpi_prt_entry **entry_ptr)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_pci_routing_table *entry;
acpi_handle handle = NULL;
if (dev->bus->bridge)
handle = ACPI_HANDLE(dev->bus->bridge);
if (!handle)
return -ENODEV;
/* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
status = acpi_get_irq_routing_table(handle, &buffer);
if (ACPI_FAILURE(status)) {
kfree(buffer.pointer);
return -ENODEV;
}
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
if (!acpi_pci_irq_check_entry(handle, dev, pin,
entry, entry_ptr))
break;
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
kfree(buffer.pointer);
return 0;
}
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
#ifdef CONFIG_X86_IO_APIC
extern int noioapicquirk;
extern int noioapicreroute;
static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
{
struct pci_bus *bus_it;
for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
if (!bus_it->self)
return 0;
if (bus_it->self->irq_reroute_variant)
return bus_it->self->irq_reroute_variant;
}
return 0;
}
/*
* Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ
* entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
* during interrupt handling). When this INTx generation cannot be disabled,
* we reroute these interrupts to their legacy equivalent to get rid of
* spurious interrupts.
*/
static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
struct acpi_prt_entry *entry)
{
if (noioapicquirk || noioapicreroute) {
return 0;
} else {
switch (bridge_has_boot_interrupt_variant(dev->bus)) {
case 0:
/* no rerouting necessary */
return 0;
case INTEL_IRQ_REROUTE_VARIANT:
/*
* Remap according to INTx routing table in 6700PXH
* specs, intel order number 302628-002, section
* 2.15.2. Other chipsets (80332, ...) have the same
* mapping and are handled here as well.
*/
dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy "
"IRQ %d\n", entry->index,
(entry->index % 4) + 16);
entry->index = (entry->index % 4) + 16;
return 1;
default:
dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy "
"IRQ: unknown mapping\n", entry->index);
return -1;
}
}
}
#endif /* CONFIG_X86_IO_APIC */
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
u8 bridge_pin, orig_pin = pin;
int ret;
ret = acpi_pci_irq_find_prt_entry(dev, pin, &entry);
if (!ret && entry) {
#ifdef CONFIG_X86_IO_APIC
acpi_reroute_boot_interrupt(dev, entry);
#endif /* CONFIG_X86_IO_APIC */
dev_dbg(&dev->dev, "Found [%c] _PRT entry\n", pin_name(pin));
return entry;
}
/*
* Attempt to derive an IRQ for this device from a parent bridge's
	 * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge).
*/
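	/*
	 * pci_swizzle_interrupt_pin() below applies the conventional PCI
	 * bridge swizzle, effectively
	 * ((pin - 1 + PCI_SLOT(dev->devfn)) % 4) + 1, remapping the INTx
	 * pin at each bridge crossing on the way up the tree.
	 */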
bridge = dev->bus->self;
while (bridge) {
pin = pci_swizzle_interrupt_pin(dev, pin);
if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
/* PC card has the same IRQ as its cardbridge */
bridge_pin = bridge->pin;
if (!bridge_pin) {
dev_dbg(&bridge->dev, "No interrupt pin configured\n");
return NULL;
}
pin = bridge_pin;
}
ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry);
if (!ret && entry) {
dev_dbg(&dev->dev, "Derived GSI INT %c from %s\n",
pin_name(orig_pin), pci_name(bridge));
return entry;
}
dev = bridge;
bridge = dev->bus->self;
}
dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
pin_name(orig_pin));
return NULL;
}
#if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA)
static int acpi_isa_register_gsi(struct pci_dev *dev)
{
u32 dev_gsi;
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
acpi_isa_irq_available(dev->irq) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
pin_name(dev->pin), dev->irq);
acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
}
return -EINVAL;
}
#else
static inline int acpi_isa_register_gsi(struct pci_dev *dev)
{
return -ENODEV;
}
#endif
static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
{
#ifdef CONFIG_X86
/*
* On x86 irq line 0xff means "unknown" or "no connection"
* (PCI 3.0, Section 6.2.4, footnote on page 223).
*/
if (dev->irq == 0xff) {
dev->irq = IRQ_NOTCONNECTED;
dev_warn(&dev->dev, "PCI INT %c: not connected\n",
pin_name(pin));
return false;
}
#endif
return true;
}
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
int gsi;
u8 pin;
int triggering = ACPI_LEVEL_SENSITIVE;
/*
* On ARM systems with the GIC interrupt model, or LoongArch
* systems with the LPIC interrupt model, level interrupts
* are always polarity high by specification; PCI legacy
	 * IRQ lines are inverted before reaching the interrupt
* controller and must therefore be considered active high
* as default.
*/
int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ||
acpi_irq_model == ACPI_IRQ_MODEL_LPIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
char *link = NULL;
char link_desc[16];
int rc;
pin = dev->pin;
if (!pin) {
dev_dbg(&dev->dev, "No interrupt pin configured\n");
return 0;
}
if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry) {
/*
* IDE legacy mode controller IRQs are magic. Why do compat
		 * extensions always make such a nasty mess?
*/
if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
(dev->class & 0x05) == 0)
return 0;
}
if (entry) {
if (entry->link)
gsi = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&triggering, &polarity,
&link);
else
gsi = entry->index;
} else
gsi = -1;
if (gsi < 0) {
/*
* No IRQ known to the ACPI subsystem - maybe the BIOS /
		 * driver reported one, in which case use it. Exit in any case.
*/
if (!acpi_pci_irq_valid(dev, pin)) {
kfree(entry);
return 0;
}
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
kfree(entry);
return 0;
}
rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (rc < 0) {
dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
pin_name(pin));
kfree(entry);
return rc;
}
dev->irq = rc;
dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
else
link_desc[0] = '\0';
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
(polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
kfree(entry);
return 0;
}
void acpi_pci_irq_disable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
int gsi;
u8 pin;
pin = dev->pin;
if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
/* Keep IOAPIC pin configuration when suspending */
if (dev->dev.power.is_prepared)
return;
#ifdef CONFIG_PM
if (dev->dev.power.runtime_status == RPM_SUSPENDING)
return;
#endif
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
if (entry->link)
gsi = acpi_pci_link_free_irq(entry->link);
else
gsi = entry->index;
kfree(entry);
/*
	 * TBD: It might be worth clearing dev->irq with a magic constant
* (e.g. PCI_UNDEFINED_IRQ).
*/
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
dev->irq_managed = 0;
}
}
| linux-master | drivers/acpi/pci_irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Virtual I/O topology
*
* The Virtual I/O Translation Table (VIOT) describes the topology of
* para-virtual IOMMUs and the endpoints they manage. The OS uses it to
* initialize devices in the right order, preventing endpoints from issuing DMA
* before their IOMMU is ready.
*
* When binding a driver to a device, before calling the device driver's probe()
* method, the driver infrastructure calls dma_configure(). At that point the
* VIOT driver looks for an IOMMU associated to the device in the VIOT table.
* If an IOMMU exists and has been initialized, the VIOT driver initializes the
* device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
* ops when the device driver configures DMA mappings. If an IOMMU exists and
* hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
* the device until the IOMMU is available.
*/
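/*
 * A rough sketch of that flow (hypothetical call chain for illustration;
 * the exact intermediate helpers vary):
 *
 *   really_probe(dev, drv)
 *     -> dma_configure(dev)              // ACPI DMA configuration path
 *       -> viot_iommu_configure(dev)     // defined at the end of this file
 *            returns 0 (fwspec initialized), -EPROBE_DEFER (IOMMU not
 *            ready yet), or -ENODEV (device not described by the VIOT)
 */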
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
struct viot_iommu {
/* Node offset within the table */
unsigned int offset;
struct fwnode_handle *fwnode;
struct list_head list;
};
struct viot_endpoint {
union {
/* PCI range */
struct {
u16 segment_start;
u16 segment_end;
u16 bdf_start;
u16 bdf_end;
};
/* MMIO */
u64 address;
};
u32 endpoint_id;
struct viot_iommu *viommu;
struct list_head list;
};
static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);
static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
struct acpi_viot_header *start, *end, *hdr_end;
start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
max_t(size_t, sizeof(*viot), viot->node_offset));
end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));
if (hdr < start || hdr_end > end) {
pr_err(FW_BUG "Node pointer overflows\n");
return -EOVERFLOW;
}
if (hdr->length < sizeof(*hdr)) {
pr_err(FW_BUG "Empty node\n");
return -EINVAL;
}
return 0;
}
static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
u16 segment, u16 bdf)
{
struct pci_dev *pdev;
struct fwnode_handle *fwnode;
pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
bdf & 0xff);
if (!pdev) {
pr_err("Could not find PCI IOMMU\n");
return -ENODEV;
}
fwnode = dev_fwnode(&pdev->dev);
if (!fwnode) {
/*
* PCI devices aren't necessarily described by ACPI. Create a
* fwnode so the IOMMU subsystem can identify this device.
*/
fwnode = acpi_alloc_fwnode_static();
if (!fwnode) {
pci_dev_put(pdev);
return -ENOMEM;
}
set_primary_fwnode(&pdev->dev, fwnode);
}
viommu->fwnode = dev_fwnode(&pdev->dev);
pci_dev_put(pdev);
return 0;
}
static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
u64 address)
{
struct acpi_device *adev;
struct resource res = {
.start = address,
.end = address,
.flags = IORESOURCE_MEM,
};
adev = acpi_resource_consumer(&res);
if (!adev) {
pr_err("Could not find MMIO IOMMU\n");
return -EINVAL;
}
viommu->fwnode = &adev->fwnode;
return 0;
}
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
int ret;
struct viot_iommu *viommu;
struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
viot, offset);
union {
struct acpi_viot_virtio_iommu_pci pci;
struct acpi_viot_virtio_iommu_mmio mmio;
} *node = (void *)hdr;
list_for_each_entry(viommu, &viot_iommus, list)
if (viommu->offset == offset)
return viommu;
if (viot_check_bounds(hdr))
return NULL;
viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
if (!viommu)
return NULL;
viommu->offset = offset;
switch (hdr->type) {
case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
if (hdr->length < sizeof(node->pci))
goto err_free;
ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
node->pci.bdf);
break;
case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
if (hdr->length < sizeof(node->mmio))
goto err_free;
ret = viot_get_mmio_iommu_fwnode(viommu,
node->mmio.base_address);
break;
default:
ret = -EINVAL;
}
if (ret)
goto err_free;
list_add(&viommu->list, &viot_iommus);
return viommu;
err_free:
kfree(viommu);
return NULL;
}
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
int ret = -EINVAL;
struct list_head *list;
struct viot_endpoint *ep;
union {
struct acpi_viot_mmio mmio;
struct acpi_viot_pci_range pci;
} *node = (void *)hdr;
if (viot_check_bounds(hdr))
return -EINVAL;
if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
return 0;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
switch (hdr->type) {
case ACPI_VIOT_NODE_PCI_RANGE:
if (hdr->length < sizeof(node->pci)) {
pr_err(FW_BUG "Invalid PCI node size\n");
goto err_free;
}
ep->segment_start = node->pci.segment_start;
ep->segment_end = node->pci.segment_end;
ep->bdf_start = node->pci.bdf_start;
ep->bdf_end = node->pci.bdf_end;
ep->endpoint_id = node->pci.endpoint_start;
ep->viommu = viot_get_iommu(node->pci.output_node);
list = &viot_pci_ranges;
break;
case ACPI_VIOT_NODE_MMIO:
if (hdr->length < sizeof(node->mmio)) {
pr_err(FW_BUG "Invalid MMIO node size\n");
goto err_free;
}
ep->address = node->mmio.base_address;
ep->endpoint_id = node->mmio.endpoint;
ep->viommu = viot_get_iommu(node->mmio.output_node);
list = &viot_mmio_endpoints;
break;
default:
pr_warn("Unsupported node %x\n", hdr->type);
ret = 0;
goto err_free;
}
if (!ep->viommu) {
pr_warn("No IOMMU node found\n");
/*
* A future version of the table may use the node for other
* purposes. Keep parsing.
*/
ret = 0;
goto err_free;
}
list_add(&ep->list, list);
return 0;
err_free:
kfree(ep);
return ret;
}
/**
* acpi_viot_early_init - Test the presence of VIOT and enable ACS
*
* If the VIOT does exist, ACS must be enabled. This cannot be
 * done in acpi_viot_init(), which is called after the bus scan.
*/
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
acpi_status status;
struct acpi_table_header *hdr;
status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
if (ACPI_FAILURE(status))
return;
pci_request_acs();
acpi_put_table(hdr);
#endif
}
/**
* acpi_viot_init - Parse the VIOT table
*
* Parse the VIOT table, prepare the list of endpoints to be used during DMA
* setup of devices.
*/
void __init acpi_viot_init(void)
{
int i;
acpi_status status;
struct acpi_table_header *hdr;
struct acpi_viot_header *node;
status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_err("Failed to get table, %s\n", msg);
}
return;
}
viot = (void *)hdr;
node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
for (i = 0; i < viot->node_count; i++) {
if (viot_parse_node(node))
return;
node = ACPI_ADD_PTR(struct acpi_viot_header, node,
node->length);
}
acpi_put_table(hdr);
}
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
u32 epid)
{
const struct iommu_ops *ops;
if (!viommu)
return -ENODEV;
	/* We're not translating ourselves */
if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
ops = iommu_ops_from_fwnode(viommu->fwnode);
if (!ops)
return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
-EPROBE_DEFER : -ENODEV;
return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
}
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
u32 epid;
struct viot_endpoint *ep;
struct device *aliased_dev = data;
u32 domain_nr = pci_domain_nr(pdev->bus);
list_for_each_entry(ep, &viot_pci_ranges, list) {
if (domain_nr >= ep->segment_start &&
domain_nr <= ep->segment_end &&
dev_id >= ep->bdf_start &&
dev_id <= ep->bdf_end) {
epid = ((domain_nr - ep->segment_start) << 16) +
dev_id - ep->bdf_start + ep->endpoint_id;
return viot_dev_iommu_init(aliased_dev, ep->viommu,
epid);
}
}
return -ENODEV;
}
static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
struct resource *mem;
struct viot_endpoint *ep;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -ENODEV;
list_for_each_entry(ep, &viot_mmio_endpoints, list) {
if (ep->address == mem->start)
return viot_dev_iommu_init(&pdev->dev, ep->viommu,
ep->endpoint_id);
}
return -ENODEV;
}
/**
* viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
* @dev: the endpoint
*
* Return: 0 on success, <0 on failure
*/
int viot_iommu_configure(struct device *dev)
{
if (dev_is_pci(dev))
return pci_for_each_dma_alias(to_pci_dev(dev),
viot_pci_dev_iommu_init, dev);
else if (dev_is_platform(dev))
return viot_mmio_dev_iommu_init(to_platform_device(dev));
return -ENODEV;
}
| linux-master | drivers/acpi/viot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
*
* (C) Copyright 2014, 2015 Linaro Ltd.
* Author: Ashwin Chaugule <[email protected]>
*
* CPPC describes a few methods for controlling CPU performance using
* information from a per CPU table called CPC. This table is described in
* the ACPI v5.0+ specification. The table consists of a list of
* registers which may be memory mapped or hardware registers and also may
* include some static integer values.
*
 * CPU performance is expressed on an abstract, continuous scale, as opposed
 * to a discretized P-state scale that is tied to CPU frequency only. In
 * brief, the basic
* operation involves:
*
* - OS makes a CPU performance request. (Can provide min and max bounds)
*
* - Platform (such as BMC) is free to optimize request within requested bounds
* depending on power/thermal budgets etc.
*
* - Platform conveys its decision back to OS
*
* The communication between OS and platform occurs through another medium
* called (PCC) Platform Communication Channel. This is a generic mailbox like
* mechanism which includes doorbell semantics to indicate register updates.
* See drivers/mailbox/pcc.c for details on PCC.
*
* Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
* above specifications.
*/
#define pr_fmt(fmt) "ACPI CPPC: " fmt
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
bool platform_owns_pcc; /* Ownership of PCC subspace */
unsigned int pcc_write_cnt; /* Running count of PCC write commands */
/*
* Lock to provide controlled access to the PCC channel.
*
	 * For performance-critical use cases (currently cppc_set_perf),
	 * we need to take the read_lock and check if the channel belongs
	 * to OSPM before reading from or writing to the PCC subspace.
	 * We need to take the write_lock before transferring channel
	 * ownership to the platform via a doorbell.
	 * This allows us to batch a number of CPPC requests if they happen
	 * to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
*/
struct rw_semaphore pcc_lock;
/* Wait queue for CPUs whose requests were batched */
wait_queue_head_t pcc_write_wait_q;
ktime_t last_cmd_cmpl_time;
ktime_t last_mpar_reset;
int mpar_count;
int refcount;
};
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
* The cpc_desc structure contains the ACPI register details
* as described in the per CPU _CPC tables. The details
* include the type of register (e.g. PCC, System IO, FFH etc.)
* and destination addresses which lets us READ/WRITE CPU performance
* information using the appropriate I/O methods.
*/
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
0x8 + (offs))
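/*
 * Example: with the sample Register(PCC, 32, 0, 0x120, 2) shown further
 * below, GET_PCC_VADDR(0x120, 2) yields
 * pcc_data[2]->pcc_comm_addr + 0x8 + 0x120, i.e. the register sits just
 * past the generic PCC shared-memory header.
 */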
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_SYSTEM_MEMORY)
/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_SYSTEM_IO)
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
(reg)->address == 0 && \
(reg)->bit_width == 0 && \
(reg)->bit_offset == 0 && \
(reg)->access_width == 0)
/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
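/*
 * For example, an optional entry returned by _CPC as Integer(0), or as a
 * buffer holding the NULL register described above, evaluates as
 * unsupported; any non-zero integer or non-NULL register descriptor is
 * supported.
 */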
/*
 * An arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keep it high enough to cover emulators, where
 * the processors run painfully slowly.
*/
#define NUM_RETRIES 500ULL
#define OVER_16BTS_MASK ~0xFFFFULL
#define define_one_cppc_ro(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
#define show_cppc_data(access_fn, struct_name, member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
struct struct_name st_name = {0}; \
int ret; \
\
ret = access_fn(cpc_ptr->cpu_id, &st_name); \
if (ret) \
return ret; \
\
return sysfs_emit(buf, "%llu\n", \
(u64)st_name.member_name); \
} \
define_one_cppc_ro(member_name)
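/*
 * For instance, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) below expands (roughly) to:
 *
 *   static ssize_t show_highest_perf(struct kobject *kobj,
 *                                    struct kobj_attribute *attr, char *buf)
 *   {
 *           struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *           struct cppc_perf_caps st_name = {0};
 *           int ret;
 *
 *           ret = cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name);
 *           if (ret)
 *                   return ret;
 *
 *           return sysfs_emit(buf, "%llu\n", (u64)st_name.highest_perf);
 *   }
 *   static struct kobj_attribute highest_perf =
 *           __ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */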
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cppc_perf_fb_ctrs fb_ctrs = {0};
int ret;
ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
if (ret)
return ret;
return sysfs_emit(buf, "ref:%llu del:%llu\n",
fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
static struct attribute *cppc_attrs[] = {
&feedback_ctrs.attr,
&reference_perf.attr,
&wraparound_time.attr,
&highest_perf.attr,
&lowest_perf.attr,
&lowest_nonlinear_perf.attr,
&nominal_perf.attr,
&nominal_freq.attr,
&lowest_freq.attr,
NULL
};
ATTRIBUTE_GROUPS(cppc);
static const struct kobj_type cppc_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = cppc_groups,
};
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
pcc_ss_data->pcc_comm_addr;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
/*
	 * Poll the PCC status register every 3 us (delay_us) for a maximum
	 * of deadline_us (timeout_us) until the PCC command complete bit is
	 * set (cond).
*/
ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
status & PCC_CMD_COMPLETE_MASK, 3,
pcc_ss_data->deadline_us);
if (likely(!ret)) {
pcc_ss_data->platform_owns_pcc = false;
if (chk_err_bit && (status & PCC_ERROR_MASK))
ret = -EIO;
}
if (unlikely(ret))
pr_err("PCC check channel failed for ss: %d. ret=%d\n",
pcc_ss_id, ret);
return ret;
}
/*
* This function transfers the ownership of the PCC to the platform
* So it must be called while holding write_lock(pcc_lock)
*/
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
pcc_ss_data->pcc_comm_addr;
unsigned int time_delta;
/*
* For CMD_WRITE we know for a fact the caller should have checked
* the channel before writing to PCC space
*/
if (cmd == CMD_READ) {
/*
* If there are pending cpc_writes, then we stole the channel
* before write completion, so first send a WRITE command to
* platform
*/
if (pcc_ss_data->pending_pcc_write_cmd)
send_pcc_cmd(pcc_ss_id, CMD_WRITE);
ret = check_pcc_chan(pcc_ss_id, false);
if (ret)
goto end;
} else /* CMD_WRITE */
pcc_ss_data->pending_pcc_write_cmd = FALSE;
/*
* Handle the Minimum Request Turnaround Time(MRTT)
* "The minimum amount of time that OSPM must wait after the completion
* of a command before issuing the next command, in microseconds"
*/
if (pcc_ss_data->pcc_mrtt) {
time_delta = ktime_us_delta(ktime_get(),
pcc_ss_data->last_cmd_cmpl_time);
if (pcc_ss_data->pcc_mrtt > time_delta)
udelay(pcc_ss_data->pcc_mrtt - time_delta);
}
/*
* Handle the non-zero Maximum Periodic Access Rate(MPAR)
* "The maximum number of periodic requests that the subspace channel can
* support, reported in commands per minute. 0 indicates no limitation."
*
	 * Ideally, this parameter should be zero or large enough to handle the
	 * maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we follow the spec and simply
	 * stop sending requests to the platform after hitting the MPAR limit
	 * in any 60s window.
*/
if (pcc_ss_data->pcc_mpar) {
if (pcc_ss_data->mpar_count == 0) {
time_delta = ktime_ms_delta(ktime_get(),
pcc_ss_data->last_mpar_reset);
if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
pcc_ss_id);
ret = -EIO;
goto end;
}
pcc_ss_data->last_mpar_reset = ktime_get();
pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
}
pcc_ss_data->mpar_count--;
}
/* Write to the shared comm region. */
writew_relaxed(cmd, &generic_comm_base->command);
/* Flip CMD COMPLETE bit */
writew_relaxed(0, &generic_comm_base->status);
pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
pcc_ss_id, cmd, ret);
goto end;
}
/* wait for completion and check for PCC error bit */
ret = check_pcc_chan(pcc_ss_id, true);
if (pcc_ss_data->pcc_mrtt)
pcc_ss_data->last_cmd_cmpl_time = ktime_get();
if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
else
mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
end:
if (cmd == CMD_WRITE) {
if (unlikely(ret)) {
for_each_possible_cpu(i) {
struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
if (!desc)
continue;
if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
desc->write_cmd_status = ret;
}
}
pcc_ss_data->pcc_write_cnt++;
wake_up_all(&pcc_ss_data->pcc_write_wait_q);
}
return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
if (ret < 0)
pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
*(u16 *)msg, ret);
else
pr_debug("TX completed. CMD sent:%x, ret:%d\n",
*(u16 *)msg, ret);
}
static struct mbox_client cppc_mbox_cl = {
.tx_done = cppc_chan_tx_done,
.knows_txdone = true,
};
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
int result = -EFAULT;
acpi_status status = AE_OK;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
struct acpi_buffer state = {0, NULL};
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
&buffer, ACPI_TYPE_PACKAGE);
if (status == AE_NOT_FOUND) /* _PSD is optional */
return 0;
if (ACPI_FAILURE(status))
return -ENODEV;
psd = buffer.pointer;
if (!psd || psd->package.count != 1) {
pr_debug("Invalid _PSD data\n");
goto end;
}
pdomain = &(cpc_ptr->domain_info);
state.length = sizeof(struct acpi_psd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(psd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
goto end;
}
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
goto end;
}
result = 0;
end:
kfree(buffer.pointer);
return result;
}
bool acpi_cpc_valid(void)
{
struct cpc_desc *cpc_ptr;
int cpu;
if (acpi_disabled)
return false;
for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);
bool cppc_allow_fast_switch(void)
{
struct cpc_register_resource *desired_reg;
struct cpc_desc *cpc_ptr;
int cpu;
for_each_possible_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
!CPC_IN_SYSTEM_IO(desired_reg))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
/**
* acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
* @cpu: Find all CPUs that share a domain with cpu.
* @cpu_data: Pointer to CPU specific CPPC data including PSD info.
*
* Return: 0 for success or negative value for err.
*/
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
struct cpc_desc *cpc_ptr, *match_cpc_ptr;
struct acpi_psd_package *match_pdomain;
struct acpi_psd_package *pdomain;
int count_target, i;
/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
* domain info.
*/
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return -EFAULT;
pdomain = &(cpc_ptr->domain_info);
cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
if (pdomain->num_processors <= 1)
return 0;
/* Validate the Domain info */
count_target = pdomain->num_processors;
if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
for_each_possible_cpu(i) {
if (i == cpu)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
if (!match_cpc_ptr)
goto err_fault;
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/* Here i and cpu are in the same domain */
if (match_pdomain->num_processors != count_target)
goto err_fault;
if (pdomain->coord_type != match_pdomain->coord_type)
goto err_fault;
cpumask_set_cpu(i, cpu_data->shared_cpu_map);
}
return 0;
err_fault:
/* Assume no coordination on any error parsing domain info */
cpumask_clear(cpu_data->shared_cpu_map);
cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
struct pcc_mbox_chan *pcc_chan;
u64 usecs_lat;
if (pcc_ss_idx >= 0) {
pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
if (IS_ERR(pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
pcc_ss_idx);
return -ENODEV;
}
pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
/*
* cppc_ss->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
usecs_lat = NUM_RETRIES * pcc_chan->latency;
pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
pcc_data[pcc_ss_idx]->pcc_comm_addr =
acpi_os_ioremap(pcc_chan->shmem_base_addr,
pcc_chan->shmem_size);
if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
pcc_ss_idx);
return -ENOMEM;
}
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
return 0;
}
/**
* cpc_ffh_supported() - check if FFH reading supported
*
* Check if the architecture has support for functional fixed hardware
* read/write capability.
*
* Return: true for supported, false for not supported
*/
bool __weak cpc_ffh_supported(void)
{
return false;
}
/**
* cpc_supported_by_cpu() - check if CPPC is supported by CPU
*
* Check if the architectural support for CPPC is present even
* if the _OSC hasn't prescribed it
*
* Return: true for supported, false for not supported
*/
bool __weak cpc_supported_by_cpu(void)
{
return false;
}
/**
* pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
* @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
*
* Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
* is shared between multiple CPUs. This is seen especially in CPUs
* with hardware multi-threading support.
*
* Return: 0 for success, errno for failure
*/
static int pcc_data_alloc(int pcc_ss_id)
{
if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
return -EINVAL;
if (pcc_data[pcc_ss_id]) {
pcc_data[pcc_ss_id]->refcount++;
} else {
pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
GFP_KERNEL);
if (!pcc_data[pcc_ss_id])
return -ENOMEM;
pcc_data[pcc_ss_id]->refcount++;
}
return 0;
}
/*
* An example CPC table looks like the following.
*
* Name (_CPC, Package() {
* 17, // NumEntries
* 1, // Revision
* ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
* ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
* ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
* ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
* ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
* ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
* ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
* ...
* ...
* ...
* }
* Each Register() encodes how to access that specific register.
* e.g. a sample PCC entry has the following encoding:
*
* Register (
* PCC, // AddressSpaceKeyword
* 8, // RegisterBitWidth
* 8, // RegisterBitOffset
* 0x30, // RegisterAddress
* 9, // AccessSize (subspace ID)
* )
*/
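/*
 * In the sample Register() above, the AccessSize field (9) doubles as the
 * PCC subspace ID: acpi_cppc_processor_probe() below extracts it from
 * gas_t->access_width for PLATFORM_COMM registers.
 */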
#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: 0 for success or negative value for err.
*/
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *out_obj, *cpc_obj;
struct cpc_desc *cpc_ptr;
struct cpc_reg *gas_t;
struct device *cpu_dev;
acpi_handle handle = pr->handle;
unsigned int num_ent, i, cpc_rev;
int pcc_subspace_id = -1;
acpi_status status;
int ret = -ENODATA;
if (!osc_sb_cppc2_support_acked) {
pr_debug("CPPC v2 _OSC not acked\n");
if (!cpc_supported_by_cpu())
return -ENODEV;
}
/* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status)) {
ret = -ENODEV;
goto out_buf_free;
}
out_obj = (union acpi_object *) output.pointer;
cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
if (!cpc_ptr) {
ret = -ENOMEM;
goto out_buf_free;
}
/* First entry is NumEntries. */
cpc_obj = &out_obj->package.elements[0];
if (cpc_obj->type == ACPI_TYPE_INTEGER) {
num_ent = cpc_obj->integer.value;
if (num_ent <= 1) {
pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
num_ent, pr->id);
goto out_free;
}
} else {
pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
cpc_obj->type, pr->id);
goto out_free;
}
/* Second entry should be revision. */
cpc_obj = &out_obj->package.elements[1];
if (cpc_obj->type == ACPI_TYPE_INTEGER) {
cpc_rev = cpc_obj->integer.value;
} else {
pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
cpc_obj->type, pr->id);
goto out_free;
}
if (cpc_rev < CPPC_V2_REV) {
pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
pr->id);
goto out_free;
}
/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 that only cause more entries to be returned by _CPC.
*/
if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
(cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
(cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
num_ent, pr->id);
goto out_free;
}
if (cpc_rev > CPPC_V3_REV) {
num_ent = CPPC_V3_NUM_ENT;
cpc_rev = CPPC_V3_REV;
}
cpc_ptr->num_entries = num_ent;
cpc_ptr->version = cpc_rev;
/* Iterate through remaining entries in _CPC */
for (i = 2; i < num_ent; i++) {
cpc_obj = &out_obj->package.elements[i];
if (cpc_obj->type == ACPI_TYPE_INTEGER) {
cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
gas_t = (struct cpc_reg *)
cpc_obj->buffer.pointer;
/*
* The PCC Subspace index is encoded inside
* the CPC table entries. The same PCC index
* will be used for all the PCC entries,
* so extract it only once.
*/
if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
if (pcc_subspace_id < 0) {
pcc_subspace_id = gas_t->access_width;
if (pcc_data_alloc(pcc_subspace_id))
goto out_free;
} else if (pcc_subspace_id != gas_t->access_width) {
pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
pr->id);
goto out_free;
}
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
if (!cpc_supported_by_cpu())
goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
}
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
if (gas_t->access_width < 1 || gas_t->access_width > 3) {
/*
* 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
* SystemIO doesn't implement 64-bit
* registers.
*/
pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
gas_t->access_width);
goto out_free;
}
if (gas_t->address & OVER_16BTS_MASK) {
/* SystemIO registers use 16-bit integer addresses */
pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
gas_t->address);
goto out_free;
}
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
if (!cpc_supported_by_cpu())
goto out_free;
}
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
pr_debug("Unsupported register type (%d) in _CPC\n",
gas_t->space_id);
goto out_free;
}
}
cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
} else {
pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
i, pr->id);
goto out_free;
}
}
per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
/*
* Initialize the remaining cpc_regs as unsupported.
	 * Example: in case FW exposes CPPC v2, the loop below will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
*/
for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
}
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
if (ret)
goto out_free;
	/* Register the PCC channel once per PCC subspace ID. */
if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
ret = register_pcc_channel(pcc_subspace_id);
if (ret)
goto out_free;
init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
}
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
/* Add per logical CPU nodes for reading its feedback counters. */
cpu_dev = get_cpu_device(pr->id);
if (!cpu_dev) {
ret = -EINVAL;
goto out_free;
}
/* Plug PSD data into this CPU's CPC descriptor. */
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
kobject_put(&cpc_ptr->kobj);
goto out_free;
}
arch_init_invariance_cppc();
kfree(output.pointer);
return 0;
out_free:
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
if (addr)
iounmap(addr);
}
kfree(cpc_ptr);
out_buf_free:
kfree(output.pointer);
return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
* acpi_cppc_processor_exit - Cleanup CPC structs.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: Void
*/
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
struct cpc_desc *cpc_ptr;
unsigned int i;
void __iomem *addr;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
pcc_data[pcc_ss_id]->refcount--;
if (!pcc_data[pcc_ss_id]->refcount) {
pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
kfree(pcc_data[pcc_ss_id]);
pcc_data[pcc_ss_id] = NULL;
}
}
}
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
if (!cpc_ptr)
return;
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
if (addr)
iounmap(addr);
}
kobject_put(&cpc_ptr->kobj);
kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
* cpc_read_ffh() - Read FFH register
* @cpunum: CPU number to read
* @reg: cppc register information
* @val: place holder for return value
*
* Read bit_width bits from a specified address and bit_offset
*
 * Return: 0 for success, or an error code on failure.
*/
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
return -ENOTSUPP;
}
/**
* cpc_write_ffh() - Write FFH register
* @cpunum: CPU number to write
* @reg: cppc register information
* @val: value to write
*
* Write value of bit_width bits to a specified address and bit_offset
*
 * Return: 0 for success, or an error code on failure.
*/
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
return -ENOTSUPP;
}
/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so
* we can directly write to it.
*/
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg_res->type == ACPI_TYPE_INTEGER) {
*val = reg_res->cpc_entry.int_value;
return 0;
}
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 width = 8 << (reg->access_width - 1);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
&val_u32, width);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
return -EFAULT;
}
*val = val_u32;
return 0;
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
switch (reg->bit_width) {
case 8:
*val = readb_relaxed(vaddr);
break;
case 16:
*val = readw_relaxed(vaddr);
break;
case 32:
*val = readl_relaxed(vaddr);
break;
case 64:
*val = readq_relaxed(vaddr);
break;
default:
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
return -EFAULT;
}
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 width = 8 << (reg->access_width - 1);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
(u32)val, width);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
return -EFAULT;
}
return 0;
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
switch (reg->bit_width) {
case 8:
writeb_relaxed(val, vaddr);
break;
case 16:
writew_relaxed(val, vaddr);
break;
case 32:
writel_relaxed(val, vaddr);
break;
case 64:
writeq_relaxed(val, vaddr);
break;
default:
pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
ret_val = -EFAULT;
break;
}
return ret_val;
}
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *reg;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
if (CPC_IN_PCC(reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (pcc_ss_id < 0)
return -EIO;
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
cpc_read(cpunum, reg, perf);
else
ret = -EIO;
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
cpc_read(cpunum, reg, perf);
return 0;
}
/**
* cppc_get_desired_perf - Get the desired performance register value.
* @cpunum: CPU from which to get desired performance.
* @desired_perf: Return address.
*
* Return: 0 for success, -EIO otherwise.
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
* cppc_get_nominal_perf - Get the nominal performance register value.
* @cpunum: CPU from which to get nominal performance.
* @nominal_perf: Return address.
*
* Return: 0 for success, -EIO otherwise.
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
* cppc_get_epp_perf - Get the epp register value.
* @cpunum: CPU from which to get epp preference value.
* @epp_perf: Return address.
*
* Return: 0 for success, -EIO otherwise.
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
*
* Return: 0 for success with perf_caps populated else -ERRNO.
*/
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *highest_reg, *lowest_reg,
*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
*low_freq_reg = NULL, *nom_freq_reg = NULL;
u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
	/* Are any of the regs PCC? */
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
}
pcc_ss_data = pcc_data[pcc_ss_id];
regs_in_pcc = 1;
down_write(&pcc_ss_data->pcc_lock);
/* Ring doorbell once to update PCC subspace */
if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
}
cpc_read(cpunum, highest_reg, &high);
perf_caps->highest_perf = high;
cpc_read(cpunum, lowest_reg, &low);
perf_caps->lowest_perf = low;
cpc_read(cpunum, nominal_reg, &nom);
perf_caps->nominal_perf = nom;
if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
perf_caps->guaranteed_perf = 0;
} else {
cpc_read(cpunum, guaranteed_reg, &guaranteed);
perf_caps->guaranteed_perf = guaranteed;
}
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
perf_caps->lowest_nonlinear_perf = min_nonlinear;
if (!high || !low || !nom || !min_nonlinear)
ret = -EFAULT;
/* Read optional lowest and nominal frequencies if present */
if (CPC_SUPPORTED(low_freq_reg))
cpc_read(cpunum, low_freq_reg, &low_f);
if (CPC_SUPPORTED(nom_freq_reg))
cpc_read(cpunum, nom_freq_reg, &nom_f);
perf_caps->lowest_freq = low_f;
perf_caps->nominal_freq = nom_f;
out_err:
if (regs_in_pcc)
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
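/*
 * Illustrative sketch, not part of this file: how a hypothetical caller
 * might consume cppc_get_perf_caps(). example_show_caps() is invented for
 * illustration and kept under "#if 0" so the file builds unchanged; the
 * struct fields are the ones populated above, casts keep the printk
 * format independent of their declared width in cppc_acpi.h.
 */
#if 0
static int example_show_caps(int cpu)
{
struct cppc_perf_caps caps;
int ret = cppc_get_perf_caps(cpu, &caps);
if (ret)
return ret; /* -ENODEV, -EIO or -EFAULT from the reads above */
pr_info("CPU%d: perf %llu..%llu (nominal %llu)\n", cpu,
(unsigned long long)caps.lowest_perf,
(unsigned long long)caps.highest_perf,
(unsigned long long)caps.nominal_perf);
return 0;
}
#endif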
/**
* cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
*
* CPPC has flexibility about how CPU performance counters are accessed.
* One of the choices is PCC regions, which can have a high access latency. This
* routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
*
* Return: true if any of the counters are in PCC regions, false otherwise
*/
bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
for_each_present_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
return true;
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
/*
* If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
if (CPC_IN_PCC(ref_perf_reg))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
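/*
 * Illustrative sketch (hypothetical): PCC reads take the pcc_lock and can
 * be slow, so a consumer such as a cpufreq driver can sample this once at
 * init and, when it returns true, move counter reads out of any fast path
 * and into process context (e.g. a workqueue).
 */
#if 0
static bool example_ctrs_need_process_ctx;
static void example_init_once(void)
{
example_ctrs_need_process_ctx = cppc_perf_ctrs_in_pcc();
}
#endif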
/**
* cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
*
* Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
*/
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *delivered_reg, *reference_reg,
*ref_perf_reg, *ctr_wrap_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
u64 delivered, reference, ref_perf, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
* If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
/* Are any of the regs PCC? */
if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
}
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
regs_in_pcc = 1;
/* Ring doorbell once to update PCC subspace */
if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
}
cpc_read(cpunum, delivered_reg, &delivered);
cpc_read(cpunum, reference_reg, &reference);
cpc_read(cpunum, ref_perf_reg, &ref_perf);
/*
* Per the spec, if the optional ctr_wrap_time register is unsupported,
* the performance counters are assumed to never wrap during the
* lifetime of the platform.
*/
ctr_wrap_time = (u64)(~((u64)0));
if (CPC_SUPPORTED(ctr_wrap_reg))
cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
if (!delivered || !reference || !ref_perf) {
ret = -EFAULT;
goto out_err;
}
perf_fb_ctrs->delivered = delivered;
perf_fb_ctrs->reference = reference;
perf_fb_ctrs->reference_perf = ref_perf;
perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
if (regs_in_pcc)
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
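/*
 * Illustrative sketch (hypothetical helper): the usual way consumers turn
 * two feedback-counter snapshots into an average delivered performance
 * level is perf = reference_perf * delta(delivered) / delta(reference).
 * div64_u64() is from <linux/math64.h>; for brevity this sketch ignores
 * multiplication overflow and counter wraparound, which a real driver
 * must bound using wraparound_time.
 */
#if 0
static u64 example_delivered_perf(const struct cppc_perf_fb_ctrs *t0,
const struct cppc_perf_fb_ctrs *t1)
{
u64 d_delivered = t1->delivered - t0->delivered;
u64 d_reference = t1->reference - t0->reference;
if (!d_reference)
return t1->reference_perf; /* no reference cycles elapsed */
return div64_u64(t1->reference_perf * d_delivered, d_reference);
}
#endif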
/*
* Set Energy Performance Preference Register value through
* Performance Controls Interface
*/
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_register_resource *epp_set_reg;
struct cpc_register_resource *auto_sel_reg;
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
return -ENODEV;
}
if (CPC_SUPPORTED(auto_sel_reg)) {
ret = cpc_write(cpu, auto_sel_reg, enable);
if (ret)
return ret;
}
if (CPC_SUPPORTED(epp_set_reg)) {
ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
if (ret)
return ret;
}
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
} else {
ret = -ENOTSUPP;
pr_debug("_CPC in PCC is not supported\n");
}
return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
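/*
 * Illustrative sketch (hypothetical): writing an energy-performance
 * preference. Per the ACPI spec the EPP range runs from 0 (maximum
 * performance) to 0xFF (maximum energy savings); 0x80 below is just an
 * example midpoint, not a recommended value.
 */
#if 0
static int example_set_balanced_epp(int cpu)
{
struct cppc_perf_ctrls ctrls = { .energy_perf = 0x80 };
return cppc_set_epp_perf(cpu, &ctrls, true);
}
#endif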
/**
* cppc_get_auto_sel_caps - Read autonomous selection register.
* @cpunum: CPU from which to read the register.
* @perf_caps: struct in which the autonomous selection register value is
* returned.
*/
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *auto_sel_reg;
u64 auto_sel;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
if (!CPC_SUPPORTED(auto_sel_reg))
pr_warn_once("Autonomous mode is not unsupported!\n");
if (CPC_IN_PCC(auto_sel_reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (pcc_ss_id < 0)
return -ENODEV;
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
cpc_read(cpunum, auto_sel_reg, &auto_sel);
perf_caps->auto_sel = (bool)auto_sel;
} else {
ret = -EIO;
}
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
/**
* cppc_set_auto_sel - Write autonomous selection register.
* @cpu: CPU to which to write the register.
* @enable: the desired value of the autonomous selection register.
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_register_resource *auto_sel_reg;
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = -EINVAL;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
if (CPC_IN_PCC(auto_sel_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
}
if (CPC_SUPPORTED(auto_sel_reg)) {
ret = cpc_write(cpu, auto_sel_reg, enable);
if (ret)
return ret;
}
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
} else {
ret = -ENOTSUPP;
pr_debug("_CPC in PCC is not supported\n");
}
return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
/**
* cppc_set_enable - Set to enable CPPC on the processor by writing the
* Continuous Performance Control package EnableRegister field.
* @cpu: CPU for which to enable CPPC register.
* @enable: 0 - disable, 1 - enable CPPC feature on the processor.
*
* Return: 0 for success, -ERRNO or -EIO otherwise.
*/
int cppc_set_enable(int cpu, bool enable)
{
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_register_resource *enable_reg;
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = -EINVAL;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -EINVAL;
}
enable_reg = &cpc_desc->cpc_regs[ENABLE];
if (CPC_IN_PCC(enable_reg)) {
if (pcc_ss_id < 0)
return -EIO;
ret = cpc_write(cpu, enable_reg, enable);
if (ret)
return ret;
pcc_ss_data = pcc_data[pcc_ss_id];
down_write(&pcc_ss_data->pcc_lock);
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
/**
* cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
*
* Return: 0 for success, -ERRNO otherwise.
*/
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
/*
* This is Phase-I where we want to write to CPC registers
* -> We want all CPUs to be able to execute this phase in parallel
*
* Since read_lock can be acquired by multiple CPUs simultaneously we
* achieve that goal here
*/
if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
}
pcc_ss_data = pcc_data[pcc_ss_id];
down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
if (pcc_ss_data->platform_owns_pcc) {
ret = check_pcc_chan(pcc_ss_id, false);
if (ret) {
up_read(&pcc_ss_data->pcc_lock);
return ret;
}
}
/*
* Update the pending_write to make sure a PCC CMD_READ will not
* arrive and steal the channel during the switch to write lock
*/
pcc_ss_data->pending_pcc_write_cmd = true;
cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
cpc_desc->write_cmd_status = 0;
}
cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
/*
* Only write min_perf and max_perf if they are non-zero. Some drivers
* pass zero for min and max perf, but they do not mean to set them to
* zero; they simply do not want to write those registers.
*/
if (perf_ctrls->min_perf)
cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
if (perf_ctrls->max_perf)
cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
/*
* This is Phase-II where we transfer the ownership of PCC to Platform
*
* Short summary: if we think of a group of cppc_set_perf requests that
* happen in a short overlapping interval, the last CPU to come out of
* Phase-I will enter Phase-II and ring the doorbell.
*
* We have the following requirements for Phase-II:
* 1. We want to execute Phase-II only when there are no CPUs
* currently executing in Phase-I
* 2. Once we start Phase-II we want to avoid all other CPUs from
* entering Phase-I.
* 3. We want only one CPU among all those who went through Phase-I
* to run phase-II
*
* If write_trylock fails to get the lock and doesn't transfer the
* PCC ownership to the platform, then one of the following will be TRUE
* 1. There is at least one CPU in Phase-I which will later execute
* write_trylock, so the CPUs in Phase-I will be responsible for
* executing the Phase-II.
* 2. Some other CPU has beaten this CPU to successfully execute the
* write_trylock and has already acquired the write_lock. We know for a
* fact it (other CPU acquiring the write_lock) couldn't have happened
* before this CPU's Phase-I as we held the read_lock.
* 3. Some other CPU executing pcc CMD_READ has stolen the
* down_write, in which case, send_pcc_cmd will check for pending
* CMD_WRITE commands by checking the pending_pcc_write_cmd.
* So, in all three cases, this CPU knows that its request will be
* delivered by another CPU and can safely return.
*
* After getting the down_write we still need to check for
* pending_pcc_write_cmd to take care of the following scenario
* The thread running this code could be scheduled out between
* Phase-I and Phase-II. Before it is scheduled back on, another CPU
* could have delivered the request to Platform by triggering the
* doorbell and transferred the ownership of PCC to platform. So this
* avoids triggering an unnecessary doorbell and more importantly before
* triggering the doorbell it makes sure that the PCC channel ownership
* is still with OSPM.
* pending_pcc_write_cmd can also be cleared by a different CPU, if
* there was a pcc CMD_READ waiting on down_write and it steals the lock
* before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
* case during a CMD_READ and if there are pending writes it delivers
* the write command before servicing the read command.
*/
if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
/* Update only if there are pending write commands */
if (pcc_ss_data->pending_pcc_write_cmd)
send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
} else
/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
wait_event(pcc_ss_data->pcc_write_wait_q,
cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
/* send_pcc_cmd updates the status in case of failure */
ret = cpc_desc->write_cmd_status;
}
return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
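/*
 * Illustrative sketch (hypothetical): issuing a performance request
 * clamped to the capabilities read via cppc_get_perf_caps(). min_perf
 * and max_perf are deliberately left zero: as noted in cppc_set_perf(),
 * zero means "do not write that register".
 */
#if 0
static int example_request_perf(int cpu, u32 target,
const struct cppc_perf_caps *caps)
{
struct cppc_perf_ctrls ctrls = {};
if (target < caps->lowest_perf)
target = caps->lowest_perf;
if (target > caps->highest_perf)
target = caps->highest_perf;
ctrls.desired_perf = target;
return cppc_set_perf(cpu, &ctrls);
}
#endif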
/**
* cppc_get_transition_latency - returns frequency transition latency in ns
* @cpu_num: CPU number for per_cpu().
*
* ACPI CPPC does not explicitly specify how a platform can specify the
* transition latency for performance change requests. The closest we have
* is the timing information from the PCCT tables which provides the info
* on the number and frequency of PCC commands the platform can handle.
*
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
unsigned int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values.
* Below are the definitions from the ACPI spec:
* pcc_nominal - Expected latency to process a command, in microseconds
* pcc_mpar - The maximum number of periodic requests that the subspace
* channel can support, reported in commands per minute. 0
* indicates no limitation.
* pcc_mrtt - The minimum amount of time that OSPM must wait after the
* completion of a command before issuing the next command,
* in microseconds.
*/
unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
return CPUFREQ_ETERNAL;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
else if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
if (pcc_ss_id < 0)
return CPUFREQ_ETERNAL;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
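/*
 * Illustrative arithmetic (hypothetical values): with pcc_mpar = 300
 * commands per minute, the MPAR-derived floor above is
 * 60 * (10^9 / 300) ~= 200,000,000 ns, i.e. one request every 200 ms.
 * A pcc_nominal of 500 us (500,000 ns) or a pcc_mrtt of 100 us
 * (100,000 ns) both sit below that floor, so 200 ms is what this
 * function would return in that case.
 */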
| linux-master | drivers/acpi/cppc_acpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
*
* Copyright (C) 2015, Intel Corp.
* Author: Mika Westerberg <[email protected]>
* Author: Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/nls.h>
#include "internal.h"
static ssize_t acpi_object_path(acpi_handle handle, char *buf)
{
struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
int result;
result = acpi_get_name(handle, ACPI_FULL_PATHNAME, &path);
if (result)
return result;
result = sprintf(buf, "%s\n", (char *)path.pointer);
kfree(path.pointer);
return result;
}
struct acpi_data_node_attr {
struct attribute attr;
ssize_t (*show)(struct acpi_data_node *, char *);
ssize_t (*store)(struct acpi_data_node *, const char *, size_t count);
};
#define DATA_NODE_ATTR(_name) \
static struct acpi_data_node_attr data_node_##_name = \
__ATTR(_name, 0444, data_node_show_##_name, NULL)
static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf)
{
return dn->handle ? acpi_object_path(dn->handle, buf) : 0;
}
DATA_NODE_ATTR(path);
static struct attribute *acpi_data_node_default_attrs[] = {
&data_node_path.attr,
NULL
};
ATTRIBUTE_GROUPS(acpi_data_node_default);
#define to_data_node(k) container_of(k, struct acpi_data_node, kobj)
#define to_attr(a) container_of(a, struct acpi_data_node_attr, attr)
static ssize_t acpi_data_node_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct acpi_data_node *dn = to_data_node(kobj);
struct acpi_data_node_attr *dn_attr = to_attr(attr);
return dn_attr->show ? dn_attr->show(dn, buf) : -ENXIO;
}
static const struct sysfs_ops acpi_data_node_sysfs_ops = {
.show = acpi_data_node_attr_show,
};
static void acpi_data_node_release(struct kobject *kobj)
{
struct acpi_data_node *dn = to_data_node(kobj);
complete(&dn->kobj_done);
}
static const struct kobj_type acpi_data_node_ktype = {
.sysfs_ops = &acpi_data_node_sysfs_ops,
.default_groups = acpi_data_node_default_groups,
.release = acpi_data_node_release,
};
static void acpi_expose_nondev_subnodes(struct kobject *kobj,
struct acpi_device_data *data)
{
struct list_head *list = &data->subnodes;
struct acpi_data_node *dn;
if (list_empty(list))
return;
list_for_each_entry(dn, list, sibling) {
int ret;
init_completion(&dn->kobj_done);
ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype,
kobj, "%s", dn->name);
if (!ret)
acpi_expose_nondev_subnodes(&dn->kobj, &dn->data);
else if (dn->handle)
acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
}
}
static void acpi_hide_nondev_subnodes(struct acpi_device_data *data)
{
struct list_head *list = &data->subnodes;
struct acpi_data_node *dn;
if (list_empty(list))
return;
list_for_each_entry_reverse(dn, list, sibling) {
acpi_hide_nondev_subnodes(&dn->data);
kobject_put(&dn->kobj);
}
}
/**
* create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
* Return: 0: no _HID and no _CID
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
int len;
int count;
struct acpi_hardware_id *id;
/* Avoid unnecessarily loading modules for non present devices. */
if (!acpi_device_is_present(acpi_dev))
return 0;
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
* device's list.
*/
count = 0;
list_for_each_entry(id, &acpi_dev->pnp.ids, list)
if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
count++;
if (!count)
return 0;
len = snprintf(modalias, size, "acpi:");
if (len <= 0)
return len;
size -= len;
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
continue;
count = snprintf(&modalias[len], size, "%s:", id->id);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
/**
* create_of_modalias - Creates DT compatible string for modalias and uevent
* @acpi_dev: ACPI device object.
* @modalias: Buffer to print into.
* @size: Size of the buffer.
*
* Expose DT compatible modalias as of:NnameTCcompatible. This function should
* only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
* ACPI/PNP IDs.
*/
static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
acpi_status status;
int len, count;
int i, nval;
char *c;
status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
if (ACPI_FAILURE(status))
return -ENODEV;
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
ACPI_FREE(buf.pointer);
if (len <= 0)
return len;
of_compatible = acpi_dev->data.of_compatible;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
nval = of_compatible->package.count;
obj = of_compatible->package.elements;
} else { /* Must be ACPI_TYPE_STRING. */
nval = 1;
obj = of_compatible;
}
for (i = 0; i < nval; i++, obj++) {
count = snprintf(&modalias[len], size, "C%s",
obj->string.pointer);
if (count < 0)
return -EINVAL;
if (count >= size)
return -ENOMEM;
len += count;
size -= count;
}
modalias[len] = '\0';
return len;
}
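/*
 * Illustrative example (hypothetical values): for an ACPI node named
 * "RTC0" whose _DSD "compatible" property is "maxim,ds1337", the code
 * above lower-cases the node name and emits "of:Nrtc0TCmaxim,ds1337"
 * (the type field after 'T' is left empty).
 */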
int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env)
{
int len;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
if (adev->data.of_compatible)
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
else
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
env->buflen += len;
return 0;
}
/**
* acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
* @dev: Struct device to get ACPI device node.
* @env: Environment variables of the kobject uevent.
*
* Create the uevent modalias field for ACPI-enumerated devices.
*
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
}
EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size)
{
int len, count;
if (!adev)
return -ENODEV;
if (list_empty(&adev->pnp.ids))
return 0;
len = create_pnp_modalias(adev, buf, size - 1);
if (len < 0) {
return len;
} else if (len > 0) {
buf[len++] = '\n';
size -= len;
}
if (!adev->data.of_compatible)
return len;
count = create_of_modalias(adev, buf + len, size - 1);
if (count < 0) {
return count;
} else if (count > 0) {
len += count;
buf[len++] = '\n';
}
return len;
}
/**
* acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
* @dev: Struct device to get ACPI device node.
* @buf: The buffer to save pnp_modalias and of_modalias.
* @size: Size of buffer.
*
* Create the modalias sysfs attribute for ACPI-enumerated devices.
*
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
int acpi_device_modalias(struct device *dev, char *buf, int size)
{
return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
}
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
int state;
int ret;
ret = acpi_device_get_power(adev, &state);
if (ret)
return ret;
return sprintf(buf, "%s\n", acpi_power_state_string(state));
}
static DEVICE_ATTR_RO(real_power_state);
static ssize_t power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *adev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
}
static DEVICE_ATTR_RO(power_state);
static ssize_t
eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
acpi_status status;
if (!count || buf[0] != '1')
return -EINVAL;
if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
&& !d->driver)
return -ENODEV;
status = acpi_get_type(acpi_device->handle, ¬_used);
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
acpi_dev_get(acpi_device);
status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
acpi_dev_put(acpi_device);
acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR_WO(eject);
static ssize_t
hid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
static DEVICE_ATTR_RO(hid);
static ssize_t uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
static DEVICE_ATTR_RO(uid);
static ssize_t adr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
if (acpi_dev->pnp.bus_address > U32_MAX)
return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address);
else
return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
static DEVICE_ATTR_RO(adr);
static ssize_t path_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return acpi_object_path(acpi_dev->handle, buf);
}
static DEVICE_ATTR_RO(path);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
int result;
if (acpi_dev->pnp.str_obj == NULL)
return 0;
/*
* The _STR object contains a Unicode identifier for a device.
* We need to convert to utf-8 so it can be displayed.
*/
result = utf16s_to_utf8s(
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE - 1);
buf[result++] = '\n';
return result;
}
static DEVICE_ATTR_RO(description);
static ssize_t
sun_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
if (ACPI_FAILURE(status))
return -EIO;
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR_RO(sun);
static ssize_t
hrv_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long hrv;
status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv);
if (ACPI_FAILURE(status))
return -EIO;
return sprintf(buf, "%llu\n", hrv);
}
static DEVICE_ATTR_RO(hrv);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sta;
status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -EIO;
return sprintf(buf, "%llu\n", sta);
}
static DEVICE_ATTR_RO(status);
/**
* acpi_device_setup_files - Create sysfs attributes of an ACPI device.
* @dev: ACPI device object.
*/
int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int result = 0;
/*
* Devices obtained from the FADT don't have a "path" attribute
*/
if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
if (result)
goto end;
}
if (!list_empty(&dev->pnp.ids)) {
result = device_create_file(&dev->dev, &dev_attr_hid);
if (result)
goto end;
result = device_create_file(&dev->dev, &dev_attr_modalias);
if (result)
goto end;
}
/*
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;
result = device_create_file(&dev->dev, &dev_attr_description);
if (result)
goto end;
}
if (dev->pnp.type.bus_address)
result = device_create_file(&dev->dev, &dev_attr_adr);
if (dev->pnp.unique_id)
result = device_create_file(&dev->dev, &dev_attr_uid);
if (acpi_has_method(dev->handle, "_SUN")) {
result = device_create_file(&dev->dev, &dev_attr_sun);
if (result)
goto end;
}
if (acpi_has_method(dev->handle, "_HRV")) {
result = device_create_file(&dev->dev, &dev_attr_hrv);
if (result)
goto end;
}
if (acpi_has_method(dev->handle, "_STA")) {
result = device_create_file(&dev->dev, &dev_attr_status);
if (result)
goto end;
}
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
return result;
}
if (dev->flags.power_manageable) {
result = device_create_file(&dev->dev, &dev_attr_power_state);
if (result)
return result;
if (dev->power.flags.power_resources)
result = device_create_file(&dev->dev,
&dev_attr_real_power_state);
}
acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data);
end:
return result;
}
/**
* acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
* @dev: ACPI device object.
*/
void acpi_device_remove_files(struct acpi_device *dev)
{
acpi_hide_nondev_subnodes(&dev->data);
if (dev->flags.power_manageable) {
device_remove_file(&dev->dev, &dev_attr_power_state);
if (dev->power.flags.power_resources)
device_remove_file(&dev->dev,
&dev_attr_real_power_state);
}
/*
* If device has _STR, remove 'description' file
*/
if (acpi_has_method(dev->handle, "_STR")) {
kfree(dev->pnp.str_obj);
device_remove_file(&dev->dev, &dev_attr_description);
}
/*
* If device has _EJ0, remove 'eject' file.
*/
if (acpi_has_method(dev->handle, "_EJ0"))
device_remove_file(&dev->dev, &dev_attr_eject);
if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
if (acpi_has_method(dev->handle, "_HRV"))
device_remove_file(&dev->dev, &dev_attr_hrv);
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
if (dev->pnp.type.bus_address)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
if (acpi_has_method(dev->handle, "_STA"))
device_remove_file(&dev->dev, &dev_attr_status);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
| linux-master | drivers/acpi/device_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pci_slot.c - ACPI PCI Slot Driver
*
* The code here borrows heavily from the acpiphp module.
* Thanks to Matthew Wilcox <[email protected]> for much guidance.
* Thanks to Kenji Kaneshige <[email protected]> for code
* review and fixes.
*
* Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P.
* Alex Chiang <[email protected]>
*
* Copyright (C) 2013 Huawei Tech. Co., Ltd.
* Jiang Liu <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/pci-acpi.h>
static int check_sta_before_sun;
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
struct pci_slot *pci_slot; /* corresponding pci_slot */
struct list_head list; /* node in the list of slots */
};
static LIST_HEAD(slot_list);
static DEFINE_MUTEX(slot_list_lock);
static int
check_slot(acpi_handle handle, unsigned long long *sun)
{
int device = -1;
unsigned long long adr, sta;
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
pr_debug("Checking slot on path: %s\n", (char *)buffer.pointer);
if (check_sta_before_sun) {
/* If SxFy doesn't have _STA, we just assume it's there */
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT))
goto out;
}
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
pr_debug("_ADR returned %d on %s\n",
status, (char *)buffer.pointer);
goto out;
}
/* No _SUN == not a slot == bail */
status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
if (ACPI_FAILURE(status)) {
pr_debug("_SUN returned %d on %s\n",
status, (char *)buffer.pointer);
goto out;
}
device = (adr >> 16) & 0xffff;
out:
kfree(buffer.pointer);
return device;
}
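/*
 * Illustrative example (hypothetical value): an SxFy object with
 * _ADR = 0x00030002 describes device 3, function 2, so the computation
 * above, (adr >> 16) & 0xffff, reports 3 as the slot's device number.
 */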
/*
* Check whether handle has an associated slot and create PCI slot if it has.
*/
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int device;
unsigned long long sun;
char name[SLOT_NAME_SIZE];
struct acpi_pci_slot *slot;
struct pci_slot *pci_slot;
struct pci_bus *pci_bus = context;
device = check_slot(handle, &sun);
if (device < 0)
return AE_OK;
/*
* There may be multiple PCI functions associated with the same slot.
* Check whether PCI slot has already been created for this PCI device.
*/
list_for_each_entry(slot, &slot_list, list) {
pci_slot = slot->pci_slot;
if (pci_slot->bus == pci_bus && pci_slot->number == device)
return AE_OK;
}
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return AE_OK;
snprintf(name, sizeof(name), "%llu", sun);
pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
kfree(slot);
return AE_OK;
}
slot->pci_slot = pci_slot;
list_add(&slot->list, &slot_list);
get_device(&pci_bus->dev);
pr_debug("%p, pci_bus: %x, device: %d, name: %s\n",
pci_slot, pci_bus->number, device, name);
return AE_OK;
}
void acpi_pci_slot_enumerate(struct pci_bus *bus)
{
acpi_handle handle = ACPI_HANDLE(bus->bridge);
if (handle) {
mutex_lock(&slot_list_lock);
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
register_slot, NULL, bus, NULL);
mutex_unlock(&slot_list_lock);
}
}
void acpi_pci_slot_remove(struct pci_bus *bus)
{
struct acpi_pci_slot *slot, *tmp;
mutex_lock(&slot_list_lock);
list_for_each_entry_safe(slot, tmp, &slot_list, list) {
if (slot->pci_slot->bus == bus) {
list_del(&slot->list);
pci_destroy_slot(slot->pci_slot);
put_device(&bus->dev);
kfree(slot);
}
}
mutex_unlock(&slot_list_lock);
}
static int do_sta_before_sun(const struct dmi_system_id *d)
{
pr_info("%s detected: will evaluate _STA before calling _SUN\n",
d->ident);
check_sta_before_sun = 1;
return 0;
}
static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
/*
* Fujitsu Primequest machines will return 1023 to indicate an
* error if the _SUN method is evaluated on SxFy objects that
* are not present (as indicated by _STA), so for those machines,
* we want to check _STA before evaluating _SUN.
*/
{
.callback = do_sta_before_sun,
.ident = "Fujitsu PRIMEQUEST",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"),
DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"),
},
},
{}
};
void __init acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
}
| linux-master | drivers/acpi/pci_slot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for platform bus type.
*
* Copyright (C) 2012, Intel Corporation
* Authors: Mika Westerberg <[email protected]>
* Mathias Nyman <[email protected]>
* Rafael J. Wysocki <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include "internal.h"
/* Exclude devices that have no _CRS resources provided */
#define ACPI_ALLOW_WO_RESOURCES BIT(0)
static const struct acpi_device_id forbidden_id_list[] = {
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
{"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */
{"PNP0200", 0}, /* AT DMA Controller */
{ACPI_SMBUS_MS_HID, ACPI_ALLOW_WO_RESOURCES}, /* ACPI SMBUS virtual device */
{ }
};
static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
{
struct device *dev;
dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
return dev ? to_platform_device(dev) : NULL;
}
static int acpi_platform_device_remove_notify(struct notifier_block *nb,
unsigned long value, void *arg)
{
struct acpi_device *adev = arg;
struct platform_device *pdev;
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
/* Nothing to do here */
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
break;
pdev = acpi_platform_device_find_by_companion(adev);
if (!pdev)
break;
platform_device_unregister(pdev);
put_device(&pdev->dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block acpi_platform_notifier = {
.notifier_call = acpi_platform_device_remove_notify,
};
static void acpi_platform_fill_resource(struct acpi_device *adev,
const struct resource *src, struct resource *dest)
{
struct device *parent;
*dest = *src;
/*
* If the device has parent we need to take its resources into
* account as well because this device might consume part of those.
*/
parent = acpi_get_first_physical_node(acpi_dev_parent(adev));
if (parent && dev_is_pci(parent))
dest->parent = pci_find_resource(to_pci_dev(parent), dest);
}
static unsigned int acpi_platform_resource_count(struct acpi_resource *ares, void *data)
{
bool *has_resources = data;
*has_resources = true;
return AE_CTRL_TERMINATE;
}
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
* @properties: Optional collection of built-in properties.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
* resources and return a pointer to it. Otherwise, return %NULL.
*
* Name of the platform device will be the same as @adev's.
*/
struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
const struct property_entry *properties)
{
struct acpi_device *parent = acpi_dev_parent(adev);
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
const struct acpi_device_id *match;
struct resource_entry *rentry;
struct list_head resource_list;
struct resource *resources = NULL;
int count;
/* If the ACPI node already has a physical device attached, skip it. */
if (adev->physical_node_count)
return NULL;
match = acpi_match_acpi_device(forbidden_id_list, adev);
if (match) {
if (match->driver_data & ACPI_ALLOW_WO_RESOURCES) {
bool has_resources = false;
acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
acpi_platform_resource_count, &has_resources);
if (has_resources)
return ERR_PTR(-EINVAL);
} else {
return ERR_PTR(-EINVAL);
}
}
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count < 0)
return NULL;
if (count > 0) {
resources = kcalloc(count, sizeof(*resources), GFP_KERNEL);
if (!resources) {
acpi_dev_free_resource_list(&resource_list);
return ERR_PTR(-ENOMEM);
}
count = 0;
list_for_each_entry(rentry, &resource_list, node)
acpi_platform_fill_resource(adev, rentry->res,
&resources[count++]);
acpi_dev_free_resource_list(&resource_list);
}
memset(&pdevinfo, 0, sizeof(pdevinfo));
/*
* If the ACPI node has a parent and that parent has a physical device
* attached to it, that physical device should be the parent of the
* platform device we are about to create.
*/
pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL;
pdevinfo.name = dev_name(&adev->dev);
pdevinfo.id = PLATFORM_DEVID_NONE;
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
pdevinfo.properties = properties;
if (acpi_dma_supported(adev))
pdevinfo.dma_mask = DMA_BIT_MASK(32);
else
pdevinfo.dma_mask = 0;
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev))
dev_err(&adev->dev, "platform device creation failed: %ld\n",
PTR_ERR(pdev));
else {
set_dev_node(&pdev->dev, acpi_get_node(adev->handle));
dev_dbg(&adev->dev, "created platform device %s\n",
dev_name(&pdev->dev));
}
kfree(resources);
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_create_platform_device);
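/*
 * Illustrative sketch (hypothetical caller): creating a platform device
 * for an ACPI node from, say, a scan handler's attach callback. Both
 * example_attach() and the "example-cells" property are invented for
 * illustration; PROPERTY_ENTRY_U32() is from <linux/property.h>.
 */
#if 0
static const struct property_entry example_props[] = {
PROPERTY_ENTRY_U32("example-cells", 1),
{ }
};
static int example_attach(struct acpi_device *adev)
{
struct platform_device *pdev;
pdev = acpi_create_platform_device(adev, example_props);
if (IS_ERR_OR_NULL(pdev))
return pdev ? PTR_ERR(pdev) : -ENODEV;
return 1; /* device consumed by this handler */
}
#endif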
void __init acpi_platform_init(void)
{
acpi_reconfig_notifier_register(&acpi_platform_notifier);
}
| linux-master | drivers/acpi/acpi_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Link physical devices with ACPI devices support
*
* Copyright (c) 2005 David Shaohua Li <[email protected]>
* Copyright (c) 2005 Intel Corp.
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi_iort.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/platform_device.h>
#include "internal.h"
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
if (type && type->match && type->find_companion) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
pr_info("bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(register_acpi_bus_type);
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return 0;
if (type) {
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
pr_info("bus type %s unregistered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->match(dev)) {
ret = tmp;
break;
}
}
up_read(&bus_type_sem);
return ret;
}
#define FIND_CHILD_MIN_SCORE 1
#define FIND_CHILD_MID_SCORE 2
#define FIND_CHILD_MAX_SCORE 3
static int match_any(struct acpi_device *adev, void *not_used)
{
return 1;
}
static bool acpi_dev_has_children(struct acpi_device *adev)
{
return acpi_dev_for_each_child(adev, match_any, NULL) > 0;
}
static int find_child_checks(struct acpi_device *adev, bool check_children)
{
unsigned long long sta;
acpi_status status;
if (check_children && !acpi_dev_has_children(adev))
return -ENODEV;
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
if (status == AE_NOT_FOUND) {
/*
* Special case: backlight device objects without _STA are
* preferred to other objects with the same _ADR value, because
* it is more likely that they are actually useful.
*/
if (adev->pnp.type.backlight)
return FIND_CHILD_MID_SCORE;
return FIND_CHILD_MIN_SCORE;
}
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
/*
* If the device has a _HID returning a valid ACPI/PNP device ID, it is
* better to make it look less attractive here, so that the other device
* with the same _ADR value (that may not have a valid device ID) can be
* matched going forward. [This means a second spec violation in a row,
* so whatever we do here is best effort anyway.]
*/
if (adev->pnp.type.platform_id)
return FIND_CHILD_MIN_SCORE;
return FIND_CHILD_MAX_SCORE;
}
struct find_child_walk_data {
struct acpi_device *adev;
u64 address;
int score;
bool check_sta;
bool check_children;
};
static int check_one_child(struct acpi_device *adev, void *data)
{
struct find_child_walk_data *wd = data;
int score;
if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address)
return 0;
if (!wd->adev) {
/*
* This is the first matching object, so save it. If it is not
* necessary to look for any other matching objects, stop the
* search.
*/
wd->adev = adev;
return !(wd->check_sta || wd->check_children);
}
/*
* There is more than one matching device object with the same _ADR
* value. That really is unexpected, so we are kind of beyond the scope
* of the spec here. We have to choose which one to return, though.
*
* First, get the score for the previously found object and terminate
* the walk if it is maximum.
*/
if (!wd->score) {
score = find_child_checks(wd->adev, wd->check_children);
if (score == FIND_CHILD_MAX_SCORE)
return 1;
wd->score = score;
}
/*
* Second, if the object that has just been found has a better score,
* replace the previously found one with it and terminate the walk if
* the new score is maximum.
*/
score = find_child_checks(adev, wd->check_children);
if (score > wd->score) {
wd->adev = adev;
if (score == FIND_CHILD_MAX_SCORE)
return 1;
wd->score = score;
}
/* Continue, because there may be better matches. */
return 0;
}
static struct acpi_device *acpi_find_child(struct acpi_device *parent,
u64 address, bool check_children,
bool check_sta)
{
struct find_child_walk_data wd = {
.address = address,
.check_children = check_children,
.check_sta = check_sta,
.adev = NULL,
.score = 0,
};
if (parent)
acpi_dev_for_each_child(parent, check_one_child, &wd);
return wd.adev;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children)
{
return acpi_find_child(parent, address, check_children, true);
}
EXPORT_SYMBOL_GPL(acpi_find_child_device);
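/*
 * Illustrative sketch (hypothetical): looking up the ACPI companion of a
 * PCI function under a bridge. For PCI, _ADR encodes the address as
 * (device << 16) | function, which is what the walk above compares
 * against acpi_device_adr(). PCI_SLOT()/PCI_FUNC() are from
 * <linux/pci.h>.
 */
#if 0
static struct acpi_device *example_pci_companion(struct acpi_device *bridge,
unsigned int devfn)
{
u64 addr = ((u64)PCI_SLOT(devfn) << 16) | PCI_FUNC(devfn);
return acpi_find_child_device(bridge, addr, false);
}
#endif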
struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
acpi_bus_address adr)
{
return acpi_find_child(adev, adr, false, false);
}
EXPORT_SYMBOL_GPL(acpi_find_child_by_adr);
static void acpi_physnode_link_name(char *buf, unsigned int node_id)
{
if (node_id > 0)
snprintf(buf, PHYSICAL_NODE_NAME_SIZE,
PHYSICAL_NODE_STRING "%u", node_id);
else
strcpy(buf, PHYSICAL_NODE_STRING);
}
int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
{
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
if (has_acpi_companion(dev)) {
if (acpi_dev) {
dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
acpi_dev = ACPI_COMPANION(dev);
}
}
if (!acpi_dev)
return -EINVAL;
acpi_dev_get(acpi_dev);
get_device(dev);
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
/*
* Keep the list sorted by node_id so that the IDs of removed nodes can
* be recycled easily.
*/
physnode_list = &acpi_dev->physical_node_list;
node_id = 0;
list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
/* Sanity check. */
if (pn->dev == dev) {
mutex_unlock(&acpi_dev->physical_node_lock);
dev_warn(dev, "Already associated with ACPI node\n");
kfree(physical_node);
if (ACPI_COMPANION(dev) != acpi_dev)
goto err;
put_device(dev);
acpi_dev_put(acpi_dev);
return 0;
}
if (pn->node_id == node_id) {
physnode_list = &pn->node;
node_id++;
}
}
physical_node->node_id = node_id;
physical_node->dev = dev;
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
if (!has_acpi_companion(dev))
ACPI_COMPANION_SET(dev, acpi_dev);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
physical_node_name);
if (retval)
dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n",
physical_node_name, retval);
retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
"firmware_node");
if (retval)
dev_err(dev, "Failed to create link firmware_node (%d)\n",
retval);
mutex_unlock(&acpi_dev->physical_node_lock);
if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
return 0;
err:
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
acpi_dev_put(acpi_dev);
return retval;
}
EXPORT_SYMBOL_GPL(acpi_bind_one);
int acpi_unbind_one(struct device *dev)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
struct acpi_device_physical_node *entry;
if (!acpi_dev)
return 0;
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
if (entry->dev == dev) {
char physnode_name[PHYSICAL_NODE_NAME_SIZE];
list_del(&entry->node);
acpi_dev->physical_node_count--;
acpi_physnode_link_name(physnode_name, entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
ACPI_COMPANION_SET(dev, NULL);
/* Drop references taken by acpi_bind_one(). */
put_device(dev);
acpi_dev_put(acpi_dev);
kfree(entry);
break;
}
mutex_unlock(&acpi_dev->physical_node_lock);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
void acpi_device_notify(struct device *dev)
{
struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret) {
struct acpi_bus_type *type = acpi_get_bus_type(dev);
if (!type)
goto err;
adev = type->find_companion(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion not found\n");
goto err;
}
ret = acpi_bind_one(dev, adev);
if (ret)
goto err;
if (type->setup) {
type->setup(dev);
goto done;
}
} else {
adev = ACPI_COMPANION(dev);
if (dev_is_pci(dev)) {
pci_acpi_setup(dev, adev);
goto done;
} else if (dev_is_platform(dev)) {
acpi_configure_pmsi_domain(dev);
}
}
if (adev->handler && adev->handler->bind)
adev->handler->bind(dev);
done:
acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
dev_name(dev));
return;
err:
dev_dbg(dev, "No ACPI support\n");
}
void acpi_device_notify_remove(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return;
if (dev_is_pci(dev))
pci_acpi_cleanup(dev, adev);
else if (adev->handler && adev->handler->unbind)
adev->handler->unbind(dev);
acpi_unbind_one(dev);
}
| linux-master | drivers/acpi/glue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Intel Corporation
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
*
* Venkatesh Pallipadi <[email protected]>
* - Added _PDC for platforms with Intel CPUs
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include "internal.h"
static void acpi_set_pdc_bits(u32 *buf)
{
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = 0;
/* Twiddle arch-specific bits needed for _PDC */
arch_acpi_set_proc_cap_bits(&buf[2]);
}
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list)
goto out;
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
kfree(obj_list);
goto out;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
kfree(obj);
kfree(obj_list);
goto out;
}
acpi_set_pdc_bits(buf);
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
return obj_list;
out:
pr_err("Memory allocation error\n");
return NULL;
}
/*
* _PDC is required for a BIOS-OS handshake for most of the newer
* ACPI processor features.
*/
static acpi_status
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
acpi_status status = AE_OK;
status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
if (ACPI_FAILURE(status))
acpi_handle_debug(handle,
"Could not evaluate _PDC, using legacy perf control\n");
return status;
}
void acpi_processor_set_pdc(acpi_handle handle)
{
struct acpi_object_list *obj_list;
if (!arch_has_acpi_pdc())
return;
obj_list = acpi_processor_alloc_pdc();
if (!obj_list)
return;
acpi_processor_eval_pdc(handle, obj_list);
kfree(obj_list->pointer->buffer.pointer);
kfree(obj_list->pointer);
kfree(obj_list);
}
static acpi_status __init
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
if (!processor_physically_present(handle))
return AE_OK;
acpi_processor_set_pdc(handle);
return AE_OK;
}
void __init acpi_early_processor_set_pdc(void)
{
acpi_proc_quirk_mwait_check();
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
}
| linux-master | drivers/acpi/processor_pdc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* acpi_processor.c - ACPI processor enumeration support
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
* Copyright (C) 2013, Intel Corporation
* Rafael J. Wysocki <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <acpi/processor.h>
#include <asm/cpu.h>
#include <xen/xen.h>
#include "internal.h"
DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);
/* Errata Handling */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
u8 value1 = 0;
u8 value2 = 0;
if (!dev)
return -EINVAL;
/*
* Note that 'dev' references the PIIX4 ACPI Controller.
*/
switch (dev->revision) {
case 0:
dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
break;
case 1:
dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
break;
case 2:
dev_dbg(&dev->dev, "Found PIIX4E\n");
break;
case 3:
dev_dbg(&dev->dev, "Found PIIX4M\n");
break;
default:
dev_dbg(&dev->dev, "Found unknown PIIX4\n");
break;
}
switch (dev->revision) {
case 0: /* PIIX4 A-step */
case 1: /* PIIX4 B-step */
/*
* See specification changes #13 ("Manual Throttle Duty Cycle")
* and #14 ("Enabling and Disabling Manual Throttle"), plus
* erratum #5 ("STPCLK# Deassertion Time") from the January
* 2002 PIIX4 specification update. Applies to only older
* PIIX4 models.
*/
errata.piix4.throttle = 1;
fallthrough;
case 2: /* PIIX4E */
case 3: /* PIIX4M */
/*
* See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
* Livelock") from the January 2002 PIIX4 specification update.
* Applies to all PIIX4 models.
*/
/*
* BM-IDE
* ------
* Find the PIIX4 IDE Controller and get the Bus Master IDE
* Status register address. We'll use this later to read
* each IDE controller's DMA status to make sure we catch all
* DMA activity.
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (dev) {
errata.piix4.bmisx = pci_resource_start(dev, 4);
pci_dev_put(dev);
}
/*
* Type-F DMA
* ----------
* Find the PIIX4 ISA Controller and read the Motherboard
* DMA controller's status to see if Type-F (Fast) DMA mode
* is enabled (bit 7) on either channel. Note that we'll
* disable C3 support if this is enabled, as some legacy
* devices won't operate well if fast DMA is disabled.
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (dev) {
pci_read_config_byte(dev, 0x76, &value1);
pci_read_config_byte(dev, 0x77, &value2);
if ((value1 & 0x80) || (value2 & 0x80))
errata.piix4.fdma = 1;
pci_dev_put(dev);
}
break;
}
if (errata.piix4.bmisx)
dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
if (errata.piix4.fdma)
dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");
return 0;
}
static int acpi_processor_errata(void)
{
int result = 0;
struct pci_dev *dev = NULL;
/*
* PIIX4
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
PCI_ANY_ID, NULL);
if (dev) {
result = acpi_processor_errata_piix4(dev);
pci_dev_put(dev);
}
return result;
}
/* Create a platform device to represent a CPU frequency control mechanism. */
static void cpufreq_add_device(const char *name)
{
struct platform_device *pdev;
pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
pr_info("%s device creation failed: %ld\n", name, PTR_ERR(pdev));
}
#ifdef CONFIG_X86
/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */
static void __init acpi_pcc_cpufreq_init(void)
{
acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return;
if (acpi_has_method(handle, "PCCH"))
cpufreq_add_device("pcc-cpufreq");
}
#else
static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */
/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
return -ENODEV;
}
int __weak acpi_unmap_cpu(int cpu)
{
return -ENODEV;
}
int __weak arch_register_cpu(int cpu)
{
return -ENODEV;
}
void __weak arch_unregister_cpu(int cpu) {}
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
unsigned long long sta;
acpi_status status;
int ret;
if (invalid_phys_cpuid(pr->phys_id))
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
return -ENODEV;
cpu_maps_update_begin();
cpus_write_lock();
ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
acpi_unmap_cpu(pr->id);
goto out;
}
/*
* CPU got hot-added, but cpu_data is not initialized yet. Set a flag
* to delay cpu_idle/throttling initialization and do it when the CPU
* gets online for the first time.
*/
pr_info("CPU%d has been hot-added\n", pr->id);
pr->flags.need_hotplug_init = 1;
out:
cpus_write_unlock();
cpu_maps_update_done();
return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
static int acpi_processor_get_info(struct acpi_device *device)
{
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
acpi_processor_errata();
/*
* Check to see if we have bus mastering arbitration control. This
* is required for proper C3 usage (to maintain cache coherency).
*/
if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
pr->flags.bm_control = 1;
dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
} else
dev_dbg(&device->dev, "No bus mastering arbitration control\n");
if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
/* Declared with "Processor" statement; match ProcessorID */
status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev,
"Failed to evaluate processor object (0x%x)\n",
status);
return -ENODEV;
}
pr->acpi_id = object.processor.proc_id;
} else {
/*
* Declared with "Device" statement; match _UID.
*/
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev,
"Failed to evaluate processor _UID (0x%x)\n",
status);
return -ENODEV;
}
device_declaration = 1;
pr->acpi_id = value;
}
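/*
 * Illustrative ASL shapes matched by the two branches above (hypothetical
 * examples, not from this file):
 *
 *   Processor (\_SB.CPU0, 0x01, 0x00000410, 0x06) {}    // proc_id match
 *   Device (\_SB.C001) {                                // _UID match
 *       Name (_HID, "ACPI0007")
 *       Name (_UID, 1)
 *   }
 */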
if (acpi_duplicate_processor_id(pr->acpi_id)) {
if (pr->acpi_id == 0xff)
dev_info_once(&device->dev,
"Entry not well-defined, consider updating BIOS\n");
else
dev_err(&device->dev,
"Failed to get unique processor _UID (0x%x)\n",
pr->acpi_id);
return -ENODEV;
}
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
pr->acpi_id);
if (invalid_phys_cpuid(pr->phys_id))
dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");
pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized) {
cpu0_initialized = 1;
/*
* Handle UP system running SMP kernel, with no CPU
* entry in MADT
*/
if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
(num_online_cpus() == 1))
pr->id = 0;
/*
* Check availability of Processor Performance Control by
* looking at the presence of the _PCT object under the first
* processor definition.
*/
if (acpi_has_method(pr->handle, "_PCT"))
cpufreq_add_device("acpi-cpufreq");
}
/*
* Extra Processor objects may be enumerated on MP systems with
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*
* NOTE: Even if the processor has a cpuid, it may not be present
* because cpuid <-> apicid mapping is persistent now.
*/
if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
int ret = acpi_processor_hotadd_init(pr);
if (ret)
return ret;
}
/*
* On some boxes several processors use the same processor bus id.
* But they are located in different scope. For example:
* \_SB.SCK0.CPU0
* \_SB.SCK1.CPU0
* Rename the processor device bus id. And the new bus id will be
* generated as the following format:
* CPU+CPU ID.
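* For example, pr->id == 10 yields the bus id "CPUA", since %X
* prints the ID in hex.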
*/
sprintf(acpi_device_bid(device), "CPU%X", pr->id);
dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);
if (!object.processor.pblk_address)
dev_dbg(&device->dev, "No PBLK (NULL address)\n");
else if (object.processor.pblk_length != 6)
dev_err(&device->dev, "Invalid PBLK length [%d]\n",
object.processor.pblk_length);
else {
pr->throttling.address = object.processor.pblk_address;
pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
}
/*
* If ACPI describes a slot number for this CPU, we can use it to
* ensure we get the right value in the "physical id" field
* of /proc/cpuinfo
*/
status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
if (ACPI_SUCCESS(status))
arch_fix_phys_package_id(pr->id, value);
return 0;
}
/*
* Do not put anything in here which needs the core to be online.
* For example MSR access or setting up things which check for cpuinfo_x86
* (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
* Such things have to be put in and set up by the processor driver's .probe().
*/
static DEFINE_PER_CPU(void *, processor_device_array);
static int acpi_processor_add(struct acpi_device *device,
const struct acpi_device_id *id)
{
struct acpi_processor *pr;
struct device *dev;
int result = 0;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
return -ENOMEM;
if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
result = -ENOMEM;
goto err_free_pr;
}
pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
result = acpi_processor_get_info(device);
if (result) /* Processor is not physically present or unavailable */
return 0;
BUG_ON(pr->id >= nr_cpu_ids);
/*
* Buggy BIOS check.
* ACPI id of processors can be reported wrongly by the BIOS.
* Don't trust it blindly
*/
if (per_cpu(processor_device_array, pr->id) != NULL &&
per_cpu(processor_device_array, pr->id) != device) {
dev_warn(&device->dev,
"BIOS reported wrong ACPI id %d for the processor\n",
pr->id);
/* Give up, but do not abort the namespace scan. */
goto err;
}
/*
* processor_device_array is not cleared on errors to allow buggy BIOS
* checks.
*/
per_cpu(processor_device_array, pr->id) = device;
per_cpu(processors, pr->id) = pr;
dev = get_cpu_device(pr->id);
if (!dev) {
result = -ENODEV;
goto err;
}
result = acpi_bind_one(dev, device);
if (result)
goto err;
pr->dev = dev;
/* Trigger the processor driver's .probe() if present. */
if (device_attach(dev) >= 0)
return 1;
dev_err(dev, "Processor driver could not be attached\n");
acpi_unbind_one(dev);
err:
free_cpumask_var(pr->throttling.shared_cpu_map);
device->driver_data = NULL;
per_cpu(processors, pr->id) = NULL;
err_free_pr:
kfree(pr);
return result;
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
static void acpi_processor_remove(struct acpi_device *device)
{
struct acpi_processor *pr;
if (!device || !acpi_driver_data(device))
return;
pr = acpi_driver_data(device);
if (pr->id >= nr_cpu_ids)
goto out;
/*
* The only reason why we ever get here is CPU hot-removal. The CPU is
* already offline and the ACPI device removal locking prevents it from
* being put back online at this point.
*
* Unbind the driver from the processor device and detach it from the
* ACPI companion object.
*/
device_release_driver(pr->dev);
acpi_unbind_one(pr->dev);
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
cpu_maps_update_begin();
cpus_write_lock();
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
acpi_unmap_cpu(pr->id);
cpus_write_unlock();
cpu_maps_update_done();
try_offline_node(cpu_to_node(pr->id));
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
bool __init processor_physically_present(acpi_handle handle)
{
int cpuid, type;
u32 acpi_id;
acpi_status status;
acpi_object_type acpi_type;
unsigned long long tmp;
union acpi_object object = {};
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
return false;
switch (acpi_type) {
case ACPI_TYPE_PROCESSOR:
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status))
return false;
acpi_id = object.processor.proc_id;
break;
case ACPI_TYPE_DEVICE:
status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
NULL, &tmp);
if (ACPI_FAILURE(status))
return false;
acpi_id = tmp;
break;
default:
return false;
}
if (xen_initial_domain())
/*
* When running as a Xen dom0 the number of processors Linux
* sees can be different from the real number of processors on
* the system, and we still need to execute _PDC or _OSC for
* all of them.
*/
return xen_processor_present(acpi_id);
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
return !invalid_logical_cpuid(cpuid);
}
/* vendor specific UUID indicating an Intel platform */
static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
u32 capbuf[2] = {};
struct acpi_osc_context osc_context = {
.uuid_str = sb_uuid_str,
.rev = 1,
.cap.length = 8,
.cap.pointer = capbuf,
};
acpi_status status;
if (!processor_physically_present(handle))
return AE_OK;
arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]);
status = acpi_run_osc(handle, &osc_context);
if (ACPI_FAILURE(status))
return status;
kfree(osc_context.ret.pointer);
return AE_OK;
}
static bool __init acpi_early_processor_osc(void)
{
acpi_status status;
acpi_proc_quirk_mwait_check();
status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, acpi_processor_osc, NULL,
NULL, NULL);
if (ACPI_FAILURE(status))
return false;
status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc,
NULL, NULL);
if (ACPI_FAILURE(status))
return false;
return true;
}
void __init acpi_early_processor_control_setup(void)
{
if (acpi_early_processor_osc()) {
pr_info("_OSC evaluated successfully for all CPUs\n");
} else {
pr_info("_OSC evaluation for CPUs failed, trying _PDC\n");
acpi_early_processor_set_pdc();
}
}
#endif
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
*/
static const struct acpi_device_id processor_device_ids[] = {
{ ACPI_PROCESSOR_OBJECT_HID, },
{ ACPI_PROCESSOR_DEVICE_HID, },
{ }
};
static struct acpi_scan_handler processor_handler = {
.ids = processor_device_ids,
.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
.detach = acpi_processor_remove,
#endif
.hotplug = {
.enabled = true,
},
};
static int acpi_processor_container_attach(struct acpi_device *dev,
const struct acpi_device_id *id)
{
return 1;
}
static const struct acpi_device_id processor_container_ids[] = {
{ ACPI_PROCESSOR_CONTAINER_HID, },
{ }
};
static struct acpi_scan_handler processor_container_handler = {
.ids = processor_container_ids,
.attach = acpi_processor_container_attach,
};
/* The number of the unique processor IDs */
static int nr_unique_ids __initdata;
/* The number of the duplicate processor IDs */
static int nr_duplicate_ids;
/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
[0 ... NR_CPUS - 1] = -1,
};
/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
[0 ... NR_CPUS - 1] = -1,
};
static void __init processor_validated_ids_update(int proc_id)
{
int i;
if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
return;
/*
* Firstly, compare the proc_id with duplicate IDs, if the proc_id is
* already in the IDs, do nothing.
*/
for (i = 0; i < nr_duplicate_ids; i++) {
if (duplicate_processor_ids[i] == proc_id)
return;
}
/*
* Secondly, compare the proc_id with unique IDs, if the proc_id is in
* the IDs, put it in the duplicate IDs.
*/
for (i = 0; i < nr_unique_ids; i++) {
if (unique_processor_ids[i] == proc_id) {
duplicate_processor_ids[nr_duplicate_ids] = proc_id;
nr_duplicate_ids++;
return;
}
}
/*
* Lastly, the proc_id is a unique ID, put it in the unique IDs.
*/
unique_processor_ids[nr_unique_ids] = proc_id;
nr_unique_ids++;
}
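/*
 * Worked example (illustrative): feeding the IDs 0, 1, 1, 1, 2 in order
 * leaves unique_processor_ids = {0, 1, 2} and duplicate_processor_ids =
 * {1}; the third 1 returns early from the duplicate-ID scan above.
 */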
static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
u32 lvl,
void *context,
void **rv)
{
acpi_status status;
acpi_object_type acpi_type;
unsigned long long uid;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
return status;
switch (acpi_type) {
case ACPI_TYPE_PROCESSOR:
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status))
goto err;
uid = object.processor.proc_id;
break;
case ACPI_TYPE_DEVICE:
status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
if (ACPI_FAILURE(status))
goto err;
break;
default:
goto err;
}
processor_validated_ids_update(uid);
return AE_OK;
err:
/* Exit on error, but don't abort the namespace walk */
acpi_handle_info(handle, "Invalid processor object\n");
return AE_OK;
}
static void __init acpi_processor_check_duplicates(void)
{
/* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
acpi_processor_ids_walk,
NULL, NULL, NULL);
acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
NULL, NULL);
}
bool acpi_duplicate_processor_id(int proc_id)
{
int i;
/*
* compare the proc_id with duplicate IDs, if the proc_id is already
* in the duplicate IDs, return true, otherwise, return false.
*/
for (i = 0; i < nr_duplicate_ids; i++) {
if (duplicate_processor_ids[i] == proc_id)
return true;
}
return false;
}
void __init acpi_processor_init(void)
{
acpi_processor_check_duplicates();
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
acpi_pcc_cpufreq_init();
}
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
* acpi_processor_claim_cst_control - Request _CST control from the platform.
*/
bool acpi_processor_claim_cst_control(void)
{
static bool cst_control_claimed;
acpi_status status;
if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
return true;
status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
acpi_gbl_FADT.cst_control, 8);
if (ACPI_FAILURE(status)) {
pr_warn("ACPI: Failed to claim processor _CST control\n");
return false;
}
cst_control_claimed = true;
return true;
}
EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
* @handle: ACPI handle of the processor object containing the _CST.
* @cpu: The numeric ID of the target CPU.
* @info: Object to write the C-states information into.
*
* Extract the C-state information for the given CPU from the output of the _CST
* control method under the corresponding ACPI processor object (or processor
* device object) and populate @info with it.
*
* If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
* acpi_processor_ffh_cstate_probe() to verify them and update the
* cpu_cstate_entry data for @cpu.
*/
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
struct acpi_processor_power *info)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *cst;
acpi_status status;
u64 count;
int last_index = 0;
int i, ret = 0;
status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_debug(handle, "No _CST\n");
return -ENODEV;
}
cst = buffer.pointer;
/* There must be at least 2 elements. */
if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
acpi_handle_warn(handle, "Invalid _CST output\n");
ret = -EFAULT;
goto end;
}
count = cst->package.elements[0].integer.value;
/* Validate the number of C-states. */
if (count < 1 || count != cst->package.count - 1) {
acpi_handle_warn(handle, "Inconsistent _CST data\n");
ret = -EFAULT;
goto end;
}
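/*
 * Illustrative _CST layout parsed by the loop below (hypothetical ASL,
 * not taken from any firmware):
 *
 *   Name (_CST, Package () {
 *       2,                    // Number of C-state sub-packages
 *       Package () {          // C1: FFixedHW register, type, latency, power
 *           ResourceTemplate () { Register (FFixedHW, 0x01, 0x02, 0x00) },
 *           1, 1, 1000
 *       },
 *       Package () {          // C2: SystemIO register at port 0x415
 *           ResourceTemplate () { Register (SystemIO, 0x08, 0x00, 0x415) },
 *           2, 50, 500
 *       }
 *   })
 */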
for (i = 1; i <= count; i++) {
union acpi_object *element;
union acpi_object *obj;
struct acpi_power_register *reg;
struct acpi_processor_cx cx;
/*
* If there is not enough space for all C-states, skip the
* excess ones and log a warning.
*/
if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
acpi_handle_warn(handle,
"No room for more idle states (limit: %d)\n",
ACPI_PROCESSOR_MAX_POWER - 1);
break;
}
memset(&cx, 0, sizeof(cx));
element = &cst->package.elements[i];
if (element->type != ACPI_TYPE_PACKAGE) {
acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
i, element->type);
continue;
}
if (element->package.count != 4) {
acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
i, element->package.count);
continue;
}
obj = &element->package.elements[0];
if (obj->type != ACPI_TYPE_BUFFER) {
acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
i, obj->type);
continue;
}
reg = (struct acpi_power_register *)obj->buffer.pointer;
obj = &element->package.elements[1];
if (obj->type != ACPI_TYPE_INTEGER) {
acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
i, obj->type);
continue;
}
cx.type = obj->integer.value;
/*
* There are known cases in which the _CST output does not
* contain C1, so if the type of the first state found is not
* C1, leave an empty slot for C1 to be filled in later.
*/
if (i == 1 && cx.type != ACPI_STATE_C1)
last_index = 1;
cx.address = reg->address;
cx.index = last_index + 1;
if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
/*
* In the majority of cases _CST describes C1 as
* a FIXED_HARDWARE C-state, but if the command
* line forbids using MWAIT, use CSTATE_HALT for
* C1 regardless.
*/
if (cx.type == ACPI_STATE_C1 &&
boot_option_idle_override == IDLE_NOMWAIT) {
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
cx.entry_method = ACPI_CSTATE_FFH;
}
} else if (cx.type == ACPI_STATE_C1) {
/*
* In the special case of C1, FIXED_HARDWARE can
* be handled by executing the HLT instruction.
*/
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
i);
continue;
}
} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
cx.entry_method = ACPI_CSTATE_SYSTEMIO;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
cx.address);
} else {
acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
i, reg->space_id);
continue;
}
if (cx.type == ACPI_STATE_C1)
cx.valid = 1;
obj = &element->package.elements[2];
if (obj->type != ACPI_TYPE_INTEGER) {
acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
i, obj->type);
continue;
}
cx.latency = obj->integer.value;
obj = &element->package.elements[3];
if (obj->type != ACPI_TYPE_INTEGER) {
acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
i, obj->type);
continue;
}
memcpy(&info->states[++last_index], &cx, sizeof(cx));
}
acpi_handle_info(handle, "Found %d idle states\n", last_index);
info->count = last_index;
end:
kfree(buffer.pointer);
return ret;
}
EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
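/*
 * Minimal caller sketch (hypothetical, for illustration only):
 *
 *	struct acpi_processor_power info = {};
 *
 *	if (!acpi_processor_evaluate_cst(pr->handle, pr->id, &info))
 *		pr_debug("CPU%d: %d idle states\n", pr->id, info.count);
 */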
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
| linux-master | drivers/acpi/acpi_processor.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utids")
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_HID
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the string HID is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _HID control method that returns the hardware
* ID of the device. The HID is either a 32-bit encoded EISAID
* Integer or a String. A string is always returned. An EISAID
* is converted to a string.
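* For example, the EISAID integer 0x030AD041 decodes to "PNP0A03".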
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
struct acpi_pnp_device_id *hid;
u32 length;
acpi_status status;
ACPI_FUNCTION_TRACE(ut_execute_HID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the size of the String to be returned, includes null terminator */
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
length = ACPI_EISAID_STRING_SIZE;
} else {
length = obj_desc->string.length + 1;
}
/* Allocate a buffer for the HID */
hid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size)length);
if (!hid) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Area for the string starts after PNP_DEVICE_ID struct */
hid->string =
ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id));
/* Convert EISAID to a string or simply copy existing string */
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
} else {
strcpy(hid->string, obj_desc->string.pointer);
}
hid->length = length;
*return_id = hid;
cleanup:
/* On exit, we must delete the return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_UID
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the string UID is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _UID control method that returns the unique
* ID of the device. The UID is either a 64-bit Integer (NOT an
* EISAID) or a string. Always returns a string. A 64-bit integer
* is converted to a decimal string.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
struct acpi_pnp_device_id *uid;
u32 length;
acpi_status status;
ACPI_FUNCTION_TRACE(ut_execute_UID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the size of the String to be returned, includes null terminator */
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
length = ACPI_MAX64_DECIMAL_DIGITS + 1;
} else {
length = obj_desc->string.length + 1;
}
/* Allocate a buffer for the UID */
uid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size)length);
if (!uid) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Area for the string starts after PNP_DEVICE_ID struct */
uid->string =
ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id));
/* Convert an Integer to string, or just copy an existing string */
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
} else {
strcpy(uid->string, obj_desc->string.pointer);
}
uid->length = length;
*return_id = uid;
cleanup:
/* On exit, we must delete the return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_CID
*
* PARAMETERS: device_node - Node for the device
* return_cid_list - Where the CID list is returned
*
* RETURN: Status, list of CID strings
*
* DESCRIPTION: Executes the _CID control method that returns one or more
* compatible hardware IDs for the device.
*
* NOTE: Internal function, no parameter validation
*
* A _CID method can return either a single compatible ID or a package of
* compatible IDs. Each compatible ID can be one of the following:
* 1) Integer (32 bit compressed EISA ID) or
* 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
*
* The Integer CIDs are converted to string format by this function.
*
******************************************************************************/
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id_list **return_cid_list)
{
union acpi_operand_object **cid_objects;
union acpi_operand_object *obj_desc;
struct acpi_pnp_device_id_list *cid_list;
char *next_id_string;
u32 string_area_size;
u32 length;
u32 cid_list_size;
acpi_status status;
u32 count;
u32 i;
ACPI_FUNCTION_TRACE(ut_execute_CID);
/* Evaluate the _CID method for this device */
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
| ACPI_BTYPE_PACKAGE, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Get the count and size of the returned _CIDs. _CID can return either
* a Package of Integers/Strings or a single Integer or String.
* Note: This section also validates that all CID elements are of the
* correct type (Integer or String).
*/
if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
count = obj_desc->package.count;
cid_objects = obj_desc->package.elements;
} else { /* Single Integer or String CID */
count = 1;
cid_objects = &obj_desc;
}
string_area_size = 0;
for (i = 0; i < count; i++) {
/* String lengths include null terminator */
switch (cid_objects[i]->common.type) {
case ACPI_TYPE_INTEGER:
string_area_size += ACPI_EISAID_STRING_SIZE;
break;
case ACPI_TYPE_STRING:
string_area_size += cid_objects[i]->string.length + 1;
break;
default:
status = AE_TYPE;
goto cleanup;
}
}
/*
* Now that we know the length of the CIDs, allocate return buffer:
* 1) Size of the base structure +
* 2) Size of the CID PNP_DEVICE_ID array +
* 3) Size of the actual CID strings
*/
cid_list_size = sizeof(struct acpi_pnp_device_id_list) +
(count * sizeof(struct acpi_pnp_device_id)) + string_area_size;
cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
if (!cid_list) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Area for CID strings starts after the CID PNP_DEVICE_ID array */
next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
((acpi_size)count * sizeof(struct acpi_pnp_device_id));
/* Copy/convert the CIDs to the return buffer */
for (i = 0; i < count; i++) {
if (cid_objects[i]->common.type == ACPI_TYPE_INTEGER) {
/* Convert the Integer (EISAID) CID to a string */
acpi_ex_eisa_id_to_string(next_id_string,
cid_objects[i]->integer.
value);
length = ACPI_EISAID_STRING_SIZE;
} else { /* ACPI_TYPE_STRING */
/* Copy the String CID from the returned object */
strcpy(next_id_string, cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
cid_list->ids[i].string = next_id_string;
cid_list->ids[i].length = length;
next_id_string += length;
}
/* Finish the CID list */
cid_list->count = count;
cid_list->list_size = cid_list_size;
*return_cid_list = cid_list;
cleanup:
/* On exit, we must delete the _CID return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_CLS
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the _CLS is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _CLS control method that returns PCI-defined
* class code of the device. The _CLS value is always a package
* containing PCI class information as a list of integers.
* The returned string has format "BBSSPP", where:
* BB = Base-class code
* SS = Sub-class code
* PP = Programming Interface code
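* For example, a PCI-to-PCI bridge (base 0x06, sub 0x04,
* prog-if 0x00) is returned as the string "060400".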
*
******************************************************************************/
acpi_status
acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object **cls_objects;
u32 count;
struct acpi_pnp_device_id *cls;
u32 length;
acpi_status status;
u8 class_code[3] = { 0, 0, 0 };
ACPI_FUNCTION_TRACE(ut_execute_CLS);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CLS,
ACPI_BTYPE_PACKAGE, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the size of the String to be returned, includes null terminator */
length = ACPI_PCICLS_STRING_SIZE;
cls_objects = obj_desc->package.elements;
count = obj_desc->package.count;
if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
if (count > 0
&& cls_objects[0]->common.type == ACPI_TYPE_INTEGER) {
class_code[0] = (u8)cls_objects[0]->integer.value;
}
if (count > 1
&& cls_objects[1]->common.type == ACPI_TYPE_INTEGER) {
class_code[1] = (u8)cls_objects[1]->integer.value;
}
if (count > 2
&& cls_objects[2]->common.type == ACPI_TYPE_INTEGER) {
class_code[2] = (u8)cls_objects[2]->integer.value;
}
}
/* Allocate a buffer for the CLS */
cls =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size)length);
if (!cls) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Area for the string starts after PNP_DEVICE_ID struct */
cls->string =
ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));
/* Convert the class code bytes to the "BBSSPP" string form */
acpi_ex_pci_cls_to_string(cls->string, class_code);
cls->length = length;
*return_id = cls;
cleanup:
/* On exit, we must delete the return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
| linux-master | drivers/acpi/acpica/utids.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exregion")
/*******************************************************************************
*
* FUNCTION: acpi_ex_system_memory_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the System Memory address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_system_memory_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
void *logical_addr_ptr = NULL;
struct acpi_mem_space_context *mem_info = region_context;
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
/* Validate and translate the bit width */
switch (bit_width) {
case 8:
length = 1;
break;
case 16:
length = 2;
break;
case 32:
length = 4;
break;
case 64:
length = 8;
break;
default:
ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
/*
* Hardware does not support non-aligned data transfers, we must verify
* the request.
*/
(void)acpi_ut_short_divide((u64) address, length, NULL, &remainder);
if (remainder != 0) {
return_ACPI_STATUS(AE_AML_ALIGNMENT);
}
#endif
/*
* Does the request fit into the cached memory mapping?
* Is 1) Address below the current mapping? OR
* 2) Address beyond the current mapping?
*/
if (!mm || (address < mm->physical_address) ||
((u64) address + length > (u64) mm->physical_address + mm->length)) {
/*
* The request cannot be resolved by the current memory mapping.
*
* Look for an existing saved mapping covering the address range
* at hand. If found, save it as the current one and carry out
* the access.
*/
for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {
if (mm == mem_info->cur_mm)
continue;
if (address < mm->physical_address)
continue;
if ((u64) address + length >
(u64) mm->physical_address + mm->length)
continue;
mem_info->cur_mm = mm;
goto access;
}
/* Create a new mappings list entry */
mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));
if (!mm) {
ACPI_ERROR((AE_INFO,
"Unable to save memory mapping at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_UINT64(address), length));
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
* October 2009: Attempt to map from the requested address to the
* end of the region. However, we will never map more than one
* page, nor will we cross a page boundary.
*/
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
/*
* If mapping the entire remaining portion of the region will cross
* a page boundary, just map up to the page boundary, do not cross.
* On some systems, crossing a page boundary while mapping regions
* can cause warnings if the pages have different attributes
* due to resource management.
*
* This has the added benefit of constraining a single mapping to
* one page, which is similar to the original code that used a 4k
* maximum window.
*/
page_boundary_map_length = (acpi_size)
(ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
if (page_boundary_map_length == 0) {
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}
if (map_length > page_boundary_map_length) {
map_length = page_boundary_map_length;
}
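/*
 * Worked example (illustrative): with a 4 KB page size, address
 * 0x10000FF8 in a region ending at 0x10002000 gives map_length
 * 0x1008, but the next page boundary is 0x10001000, so only the
 * 8 bytes up to that boundary are mapped.
 */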
/* Create a new mapping starting at the address given */
logical_addr_ptr = acpi_os_map_memory(address, map_length);
if (!logical_addr_ptr) {
ACPI_ERROR((AE_INFO,
"Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_UINT64(address),
(u32)map_length));
ACPI_FREE(mm);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Save the physical address and mapping size */
mm->logical_address = logical_addr_ptr;
mm->physical_address = address;
mm->length = map_length;
/*
* Add the new entry to the mappings list and save it as the
* current mapping.
*/
mm->next_mm = mem_info->first_mm;
mem_info->first_mm = mm;
mem_info->cur_mm = mm;
}
access:
/*
* Generate a logical pointer corresponding to the address we want to
* access
*/
logical_addr_ptr = mm->logical_address +
((u64) address - (u64) mm->physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
*
* Note: For machines that do not support non-aligned transfers, the target
* address was checked for alignment above. We do not attempt to break the
* transfer up into smaller (byte-size) chunks because the AML specifically
* asked for a transfer width that the hardware may require.
*/
switch (function) {
case ACPI_READ:
*value = 0;
switch (bit_width) {
case 8:
*value = (u64)ACPI_GET8(logical_addr_ptr);
break;
case 16:
*value = (u64)ACPI_GET16(logical_addr_ptr);
break;
case 32:
*value = (u64)ACPI_GET32(logical_addr_ptr);
break;
case 64:
*value = (u64)ACPI_GET64(logical_addr_ptr);
break;
default:
/* bit_width was already validated */
break;
}
break;
case ACPI_WRITE:
switch (bit_width) {
case 8:
ACPI_SET8(logical_addr_ptr, *value);
break;
case 16:
ACPI_SET16(logical_addr_ptr, *value);
break;
case 32:
ACPI_SET32(logical_addr_ptr, *value);
break;
case 64:
ACPI_SET64(logical_addr_ptr, *value);
break;
default:
/* bit_width was already validated */
break;
}
break;
default:
status = AE_BAD_PARAMETER;
break;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_system_io_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the System IO address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_system_io_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
u32 value32;
ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
switch (function) {
case ACPI_READ:
status = acpi_hw_read_port((acpi_io_address)address,
&value32, bit_width);
*value = value32;
break;
case ACPI_WRITE:
status = acpi_hw_write_port((acpi_io_address)address,
(u32)*value, bit_width);
break;
default:
status = AE_BAD_PARAMETER;
break;
}
return_ACPI_STATUS(status);
}
#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
*
* FUNCTION: acpi_ex_pci_config_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the PCI Config address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_pci_config_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
struct acpi_pci_id *pci_id;
u16 pci_register;
ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
/*
* The arguments to acpi_os(Read|Write)pci_configuration are:
*
* pci_segment is the PCI bus segment range 0-31
* pci_bus is the PCI bus number range 0-255
* pci_device is the PCI device number range 0-31
* pci_function is the PCI device function number
* pci_register is the Config space register range 0-255 bytes
*
* value - input value for write, output address for read
*
*/
pci_id = (struct acpi_pci_id *)region_context;
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Pci-Config %u (%u) Seg(%04x) Bus(%04x) "
"Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));
switch (function) {
case ACPI_READ:
*value = 0;
status =
acpi_os_read_pci_configuration(pci_id, pci_register, value,
bit_width);
break;
case ACPI_WRITE:
status =
acpi_os_write_pci_configuration(pci_id, pci_register,
*value, bit_width);
break;
default:
status = AE_BAD_PARAMETER;
break;
}
return_ACPI_STATUS(status);
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ex_cmos_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the CMOS address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_cmos_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
return_ACPI_STATUS(status);
}
#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
*
* FUNCTION: acpi_ex_pci_bar_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the PCI bar_target address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_pci_bar_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
return_ACPI_STATUS(status);
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ex_data_table_space_handler
*
* PARAMETERS: function - Read or Write operation
* address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
* value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
*
* RETURN: Status
*
* DESCRIPTION: Handler for the Data Table address space (Op Region)
*
******************************************************************************/
acpi_status
acpi_ex_data_table_space_handler(u32 function,
acpi_physical_address address,
u32 bit_width,
u64 *value,
void *handler_context, void *region_context)
{
struct acpi_data_table_mapping *mapping;
char *pointer;
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
mapping = (struct acpi_data_table_mapping *) region_context;
pointer = ACPI_CAST_PTR(char, mapping->pointer) +
(address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
/*
* Perform the memory read or write. The bit_width was already
* validated.
*/
switch (function) {
case ACPI_READ:
memcpy(ACPI_CAST_PTR(char, value), pointer,
ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
memcpy(pointer, ACPI_CAST_PTR(char, value),
ACPI_DIV_8(bit_width));
break;
default:
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
}
| linux-master | drivers/acpi/acpica/exregion.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: dsobject - Dispatcher object management routines
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_object
*
* PARAMETERS: walk_state - Current walk state
* op - Parser object to be translated
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Translate a parser Op object to the equivalent namespace object
* Simple objects are any objects other than a package object!
*
******************************************************************************/
acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in
* this op. Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
/* Check if we are resolving a named reference within a package */
if ((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VARIABLE_PACKAGE_OP)) {
/*
* We won't resolve package elements here, we will do this
* after all ACPI tables are loaded into the namespace. This
* behavior supports both forward references to named objects
* and external references to objects in other tables.
*/
goto create_new_object;
} else {
status = acpi_ns_lookup(walk_state->scope_info,
op->common.value.string,
ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE,
NULL,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(walk_state->
scope_info,
op->common.value.
string, status);
return_ACPI_STATUS(status);
}
}
}
}
create_new_object:
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))->
object_type);
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
status =
acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
&obj_desc);
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*
* Handling for unresolved package reference elements.
* These are elements that are namepaths.
*/
if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
(op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
obj_desc->reference.resolved = TRUE;
if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
!obj_desc->reference.node) {
/*
* Name was unresolved above.
* Get the prefix node for later lookup
*/
obj_desc->reference.node =
walk_state->scope_info->scope.node;
obj_desc->reference.aml = op->common.aml;
obj_desc->reference.resolved = FALSE;
}
}
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_buffer_obj
*
* PARAMETERS: walk_state - Current walk state
* op - Parser object to be translated
* buffer_length - Length of the buffer
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Translate a parser Op package object to the equivalent
* namespace object
*
******************************************************************************/
acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_parse_object *arg;
union acpi_operand_object *obj_desc;
union acpi_parse_object *byte_list;
u32 byte_list_length = 0;
ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node), otherwise it must
* be created.
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
}
/*
* Second arg is the buffer data (optional) byte_list can be either
* individual bytes or a string initializer. In either case, a
* byte_list appears in the AML.
*/
arg = op->common.value.arg; /* skip first arg */
byte_list = arg->named.next;
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
"Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
return (AE_TYPE);
}
byte_list_length = (u32) byte_list->common.value.integer;
}
/*
* The buffer length (number of bytes) will be the larger of:
* 1) The specified buffer length and
* 2) The length of the initializer byte list
*/
obj_desc->buffer.length = buffer_length;
if (byte_list_length > buffer_length) {
obj_desc->buffer.length = byte_list_length;
}
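/*
 * For example (illustrative ASL): Name (BUF0, Buffer (8) { 0x01, 0x02 })
 * arrives here with buffer_length == 8 and byte_list_length == 2; the
 * allocation below yields 8 zeroed bytes with the first two initialized.
 */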
/* Allocate the buffer */
if (obj_desc->buffer.length == 0) {
obj_desc->buffer.pointer = NULL;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Buffer defined with zero length in AML, creating\n"));
} else {
obj_desc->buffer.pointer =
ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
if (!obj_desc->buffer.pointer) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Initialize buffer from the byte_list (if present) */
if (byte_list) {
memcpy(obj_desc->buffer.pointer, byte_list->named.data,
byte_list_length);
}
}
obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
* node - NS Node to be initialized
* op - Parser object to be translated
*
* RETURN: Status
*
* DESCRIPTION: Create the object to be associated with a namespace node
*
******************************************************************************/
acpi_status
acpi_ds_create_node(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node,
union acpi_parse_object *op)
{
acpi_status status;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
/*
* Because of the execution pass through the non-control-method
* parts of the table, we can arrive here twice. Only init
* the named object node the first time through
*/
if (acpi_ns_get_attached_object(node)) {
return_ACPI_STATUS(AE_OK);
}
if (!op->common.value.arg) {
/* No arguments, there is nothing to do */
return_ACPI_STATUS(AE_OK);
}
/* Build an internal object for the argument(s) */
status =
acpi_ds_build_internal_object(walk_state, op->common.value.arg,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Re-type the object according to its argument */
node->type = obj_desc->common.type;
/* Attach obj to node */
status = acpi_ns_attach_object(node, obj_desc, node->type);
/* Remove local reference to the object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_init_object_from_op
*
* PARAMETERS: walk_state - Current walk state
* op - Parser op used to init the internal object
* opcode - AML opcode associated with the object
* ret_obj_desc - Namespace object to be initialized
*
* RETURN: Status
*
* DESCRIPTION: Initialize a namespace object from a parser Op and its
* associated arguments. The namespace object is a more compact
* representation of the Op and its arguments.
*
******************************************************************************/
acpi_status
acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u16 opcode,
union acpi_operand_object **ret_obj_desc)
{
const struct acpi_opcode_info *op_info;
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ds_init_object_from_op);
obj_desc = *ret_obj_desc;
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
/* Unknown opcode */
return_ACPI_STATUS(AE_TYPE);
}
/* Perform per-object initialization */
switch (obj_desc->common.type) {
case ACPI_TYPE_BUFFER:
/*
* Defer evaluation of Buffer term_arg operand
*/
obj_desc->buffer.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
obj_desc->buffer.aml_start = op->named.data;
obj_desc->buffer.aml_length = op->named.length;
break;
case ACPI_TYPE_PACKAGE:
/*
* Defer evaluation of Package term_arg operand and all
* package elements. (01/2017): We defer the element
* resolution to allow forward references from the package
* in order to provide compatibility with other ACPI
* implementations.
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
if (!op->named.data) {
return_ACPI_STATUS(AE_OK);
}
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
case ACPI_TYPE_INTEGER:
switch (op_info->type) {
case AML_TYPE_CONSTANT:
/*
* Resolve AML Constants here - AND ONLY HERE!
* All constants are integers.
* We mark the integer with a flag that indicates that it started
* life as a constant -- so that stores to constants will perform
* as expected (noop). zero_op is used as a placeholder for optional
* target operands.
*/
obj_desc->common.flags = AOPOBJ_AML_CONSTANT;
switch (opcode) {
case AML_ZERO_OP:
obj_desc->integer.value = 0;
break;
case AML_ONE_OP:
obj_desc->integer.value = 1;
break;
case AML_ONES_OP:
obj_desc->integer.value = ACPI_UINT64_MAX;
/* Truncate value if we are executing from a 32-bit ACPI table */
(void)acpi_ex_truncate_for32bit_table(obj_desc);
break;
case AML_REVISION_OP:
obj_desc->integer.value = ACPI_CA_VERSION;
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
}
break;
case AML_TYPE_LITERAL:
obj_desc->integer.value = op->common.value.integer;
if (acpi_ex_truncate_for32bit_table(obj_desc)) {
/* Warn if we found a 64-bit constant in a 32-bit table */
ACPI_WARNING((AE_INFO,
"Truncated 64-bit constant found in 32-bit table: %8.8X%8.8X => %8.8X",
ACPI_FORMAT_UINT64(op->common.
value.integer),
(u32)obj_desc->integer.value));
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
}
break;
case ACPI_TYPE_STRING:
obj_desc->string.pointer = op->common.value.string;
obj_desc->string.length = (u32)strlen(op->common.value.string);
/*
* The string is contained in the ACPI table, don't ever try
* to delete it
*/
obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
break;
case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_LOCAL_REFERENCE:
switch (op_info->type) {
case AML_TYPE_LOCAL_VARIABLE:
/* Local ID (0-7) is (AML opcode - base AML_FIRST_LOCAL_OP) */
obj_desc->reference.value =
((u32)opcode) - AML_FIRST_LOCAL_OP;
obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
status =
acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
obj_desc->reference.
value, walk_state,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&obj_desc->reference.
object));
break;
case AML_TYPE_METHOD_ARGUMENT:
/* Arg ID (0-6) is (AML opcode - base AML_FIRST_ARG_OP) */
obj_desc->reference.value =
((u32)opcode) - AML_FIRST_ARG_OP;
obj_desc->reference.class = ACPI_REFCLASS_ARG;
status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
obj_desc->
reference.value,
walk_state,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&obj_desc->
reference.
object));
break;
default: /* Object name or Debug object */
switch (op->common.aml_opcode) {
case AML_INT_NAMEPATH_OP:
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
if (op->common.node) {
obj_desc->reference.object =
op->common.node->object;
}
break;
case AML_DEBUG_OP:
obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
break;
default:
ACPI_ERROR((AE_INFO,
"Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
break;
}
return_ACPI_STATUS(status);
}
| linux-master | drivers/acpi/acpica/dsobject.c |