// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/idr.h>
#include "ocxl_internal.h"
static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
{
return (get_device(&fn->dev) == NULL) ? NULL : fn;
}
static void ocxl_fn_put(struct ocxl_fn *fn)
{
put_device(&fn->dev);
}
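/*
 * AFUs are reference-counted through a kref. alloc_afu() also takes a
 * reference on the parent function, which is dropped in free_afu()
 * once the last ocxl_afu_put() runs.
 */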
static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
{
struct ocxl_afu *afu;
afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
if (!afu)
return NULL;
kref_init(&afu->kref);
mutex_init(&afu->contexts_lock);
mutex_init(&afu->afu_control_lock);
idr_init(&afu->contexts_idr);
afu->fn = fn;
ocxl_fn_get(fn);
return afu;
}
static void free_afu(struct kref *kref)
{
struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);
idr_destroy(&afu->contexts_idr);
ocxl_fn_put(afu->fn);
kfree(afu);
}
void ocxl_afu_get(struct ocxl_afu *afu)
{
kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);
void ocxl_afu_put(struct ocxl_afu *afu)
{
kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);
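/*
 * Assign a slice of the function's actag range to the AFU. If the
 * function was granted fewer actags than it supports, each AFU's
 * count is scaled down by the same ratio. With illustrative numbers:
 * an AFU supporting 32 actags on a function granted 64 of the 128 it
 * supports ends up with 32 * 64 / 128 = 16 actags.
 */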
static int assign_afu_actag(struct ocxl_afu *afu)
{
struct ocxl_fn *fn = afu->fn;
int actag_count, actag_offset;
struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
/*
* if there were not enough actags for the function, each afu
* reduces its count as well
*/
actag_count = afu->config.actag_supported *
fn->actag_enabled / fn->actag_supported;
actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
if (actag_offset < 0) {
dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
actag_count, actag_offset);
return actag_offset;
}
afu->actag_base = fn->actag_base + actag_offset;
afu->actag_enabled = actag_count;
ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
afu->actag_base, afu->actag_enabled);
dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
afu->actag_base, afu->actag_enabled);
return 0;
}
static void reclaim_afu_actag(struct ocxl_afu *afu)
{
struct ocxl_fn *fn = afu->fn;
int start_offset, size;
start_offset = afu->actag_base - fn->actag_base;
size = afu->actag_enabled;
ocxl_actag_afu_free(afu->fn, start_offset, size);
}
static int assign_afu_pasid(struct ocxl_afu *afu)
{
struct ocxl_fn *fn = afu->fn;
int pasid_count, pasid_offset;
struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
/*
* We only support the case where the function configuration
* requested enough PASIDs to cover all AFUs.
*/
pasid_count = 1 << afu->config.pasid_supported_log;
pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
if (pasid_offset < 0) {
dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
pasid_count, pasid_offset);
return pasid_offset;
}
afu->pasid_base = fn->pasid_base + pasid_offset;
afu->pasid_count = 0;
afu->pasid_max = pasid_count;
ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
afu->pasid_base,
afu->config.pasid_supported_log);
dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
afu->pasid_base, pasid_count);
return 0;
}
static void reclaim_afu_pasid(struct ocxl_afu *afu)
{
struct ocxl_fn *fn = afu->fn;
int start_offset, size;
start_offset = afu->pasid_base - fn->pasid_base;
size = 1 << afu->config.pasid_supported_log;
ocxl_pasid_afu_free(afu->fn, start_offset, size);
}
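/*
 * The global and per-process MMIO areas of the AFUs may share a BAR,
 * so the function keeps a usage count per BAR. Only BARs 0, 2 and 4
 * are valid and the count is indexed by bar >> 1; the PCI region is
 * requested on first use and released on last use.
 */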
static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
{
struct pci_dev *dev = to_pci_dev(fn->dev.parent);
int rc, idx;
if (bar != 0 && bar != 2 && bar != 4)
return -EINVAL;
idx = bar >> 1;
if (fn->bar_used[idx]++ == 0) {
rc = pci_request_region(dev, bar, "ocxl");
if (rc)
return rc;
}
return 0;
}
static void release_fn_bar(struct ocxl_fn *fn, int bar)
{
struct pci_dev *dev = to_pci_dev(fn->dev.parent);
int idx;
if (bar != 0 && bar != 2 && bar != 4)
return;
idx = bar >> 1;
if (--fn->bar_used[idx] == 0)
pci_release_region(dev, bar);
WARN_ON(fn->bar_used[idx] < 0);
}
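/*
 * Reserve the BARs used by the AFU and ioremap its global MMIO area.
 * The per-process MMIO area is not mapped here: only its start
 * address is recorded, and pages are inserted on demand at fault time
 * (see map_pp_mmio() in context.c). An empty page is kept between the
 * per-process MMIO area and the AFU interrupt mappings.
 */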
static int map_mmio_areas(struct ocxl_afu *afu)
{
int rc;
struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);
rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
if (rc)
return rc;
rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
if (rc) {
release_fn_bar(afu->fn, afu->config.global_mmio_bar);
return rc;
}
afu->global_mmio_start =
pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
afu->config.global_mmio_offset;
afu->pp_mmio_start =
pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
afu->config.pp_mmio_offset;
afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
afu->config.global_mmio_size);
if (!afu->global_mmio_ptr) {
release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
release_fn_bar(afu->fn, afu->config.global_mmio_bar);
dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
return -ENOMEM;
}
/*
* Leave an empty page between the per-process mmio area and
* the AFU interrupt mappings
*/
afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
return 0;
}
static void unmap_mmio_areas(struct ocxl_afu *afu)
{
if (afu->global_mmio_ptr) {
iounmap(afu->global_mmio_ptr);
afu->global_mmio_ptr = NULL;
}
afu->global_mmio_start = 0;
afu->pp_mmio_start = 0;
release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}
static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
int rc;
rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
if (rc)
return rc;
rc = assign_afu_actag(afu);
if (rc)
return rc;
rc = assign_afu_pasid(afu);
if (rc)
goto err_free_actag;
rc = map_mmio_areas(afu);
if (rc)
goto err_free_pasid;
return 0;
err_free_pasid:
reclaim_afu_pasid(afu);
err_free_actag:
reclaim_afu_actag(afu);
return rc;
}
static void deconfigure_afu(struct ocxl_afu *afu)
{
unmap_mmio_areas(afu);
reclaim_afu_pasid(afu);
reclaim_afu_actag(afu);
}
static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);
return 0;
}
static void deactivate_afu(struct ocxl_afu *afu)
{
struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);
ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}
static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
int rc;
struct ocxl_afu *afu;
afu = alloc_afu(fn);
if (!afu)
return -ENOMEM;
rc = configure_afu(afu, afu_idx, dev);
if (rc) {
ocxl_afu_put(afu);
return rc;
}
rc = activate_afu(dev, afu);
if (rc) {
deconfigure_afu(afu);
ocxl_afu_put(afu);
return rc;
}
list_add_tail(&afu->list, &fn->afu_list);
return 0;
}
static void remove_afu(struct ocxl_afu *afu)
{
list_del(&afu->list);
ocxl_context_detach_all(afu);
deactivate_afu(afu);
deconfigure_afu(afu);
ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}
static struct ocxl_fn *alloc_function(void)
{
struct ocxl_fn *fn;
fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
if (!fn)
return NULL;
INIT_LIST_HEAD(&fn->afu_list);
INIT_LIST_HEAD(&fn->pasid_list);
INIT_LIST_HEAD(&fn->actag_list);
return fn;
}
static void free_function(struct ocxl_fn *fn)
{
WARN_ON(!list_empty(&fn->afu_list));
WARN_ON(!list_empty(&fn->pasid_list));
kfree(fn);
}
static void free_function_dev(struct device *dev)
{
struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);
free_function(fn);
}
static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
fn->dev.parent = &dev->dev;
fn->dev.release = free_function_dev;
return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
}
static int assign_function_actag(struct ocxl_fn *fn)
{
struct pci_dev *dev = to_pci_dev(fn->dev.parent);
u16 base, enabled, supported;
int rc;
rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
if (rc)
return rc;
fn->actag_base = base;
fn->actag_enabled = enabled;
fn->actag_supported = supported;
ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
fn->actag_base, fn->actag_enabled);
dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
fn->actag_base, fn->actag_enabled);
return 0;
}
static int set_function_pasid(struct ocxl_fn *fn)
{
struct pci_dev *dev = to_pci_dev(fn->dev.parent);
int rc, desired_count, max_count;
/* A function may not require any PASID */
if (fn->config.max_pasid_log < 0)
return 0;
rc = ocxl_config_get_pasid_info(dev, &max_count);
if (rc)
return rc;
desired_count = 1 << fn->config.max_pasid_log;
if (desired_count > max_count) {
dev_err(&fn->dev,
"Function requires more PASIDs than is available (%d vs. %d)\n",
desired_count, max_count);
return -ENOSPC;
}
fn->pasid_base = 0;
return 0;
}
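/*
 * Configure the PCI function for opencapi use: enable the device,
 * read its DVSECs, name the function device, assign actags and
 * PASIDs, set up the link and negotiate the Transaction Layer
 * templates and rates with the device.
 */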
static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
int rc;
rc = pci_enable_device(dev);
if (rc) {
dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
return rc;
}
/*
* Once it has been confirmed to work on our hardware, we
* should reset the function, to force the adapter to restart
* from scratch.
* A function reset would also reset all its AFUs.
*
* Some hints for implementation:
*
 * - there's no status bit to know when the reset is done. We
* should try reading the config space to know when it's
* done.
* - probably something like:
* Reset
* wait 100ms
* issue config read
* allow device up to 1 sec to return success on config
* read before declaring it broken
*
* Some shared logic on the card (CFG, TLX) won't be reset, so
* there's no guarantee that it will be enough.
*/
rc = ocxl_config_read_function(dev, &fn->config);
if (rc)
return rc;
rc = set_function_device(fn, dev);
if (rc)
return rc;
rc = assign_function_actag(fn);
if (rc)
return rc;
rc = set_function_pasid(fn);
if (rc)
return rc;
rc = ocxl_link_setup(dev, 0, &fn->link);
if (rc)
return rc;
rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
if (rc) {
ocxl_link_release(dev, fn->link);
return rc;
}
return 0;
}
static void deconfigure_function(struct ocxl_fn *fn)
{
struct pci_dev *dev = to_pci_dev(fn->dev.parent);
ocxl_link_release(dev, fn->link);
pci_disable_device(dev);
}
static struct ocxl_fn *init_function(struct pci_dev *dev)
{
struct ocxl_fn *fn;
int rc;
fn = alloc_function();
if (!fn)
return ERR_PTR(-ENOMEM);
rc = configure_function(fn, dev);
if (rc) {
free_function(fn);
return ERR_PTR(rc);
}
rc = device_register(&fn->dev);
if (rc) {
deconfigure_function(fn);
put_device(&fn->dev);
return ERR_PTR(rc);
}
return fn;
}
// Device detection & initialisation
struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
{
int rc, afu_count = 0;
u8 afu;
struct ocxl_fn *fn;
if (!radix_enabled()) {
dev_err(&dev->dev, "Unsupported memory model (hash)\n");
return ERR_PTR(-ENODEV);
}
fn = init_function(dev);
if (IS_ERR(fn)) {
dev_err(&dev->dev, "function init failed: %li\n",
PTR_ERR(fn));
return fn;
}
for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
if (rc > 0) {
rc = init_afu(dev, fn, afu);
if (rc) {
dev_err(&dev->dev,
"Can't initialize AFU index %d\n", afu);
continue;
}
afu_count++;
}
}
dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
return fn;
}
EXPORT_SYMBOL_GPL(ocxl_function_open);
struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);
struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
struct ocxl_afu *afu;
list_for_each_entry(afu, &fn->afu_list, list) {
if (afu->config.idx == afu_idx)
return afu;
}
return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);
const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);
void ocxl_function_close(struct ocxl_fn *fn)
{
struct ocxl_afu *afu, *tmp;
list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
remove_afu(afu);
}
deconfigure_function(fn);
device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);
// AFU Metadata
struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);
void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);
void *ocxl_afu_get_private(struct ocxl_afu *afu)
{
if (afu)
return afu->private;
return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_afu_get_private);
/* end of drivers/misc/ocxl/core.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/mmu.h>
#include "ocxl_internal.h"
static int __init init_ocxl(void)
{
int rc = 0;
if (!tlbie_capable)
return -EINVAL;
rc = ocxl_file_init();
if (rc)
return rc;
rc = pci_register_driver(&ocxl_pci_driver);
if (rc) {
ocxl_file_exit();
return rc;
}
return 0;
}
static void exit_ocxl(void)
{
pci_unregister_driver(&ocxl_pci_driver);
ocxl_file_exit();
}
module_init(init_ocxl);
module_exit(exit_ocxl);
MODULE_DESCRIPTION("Open Coherent Accelerator");
MODULE_LICENSE("GPL");
/* end of drivers/misc/ocxl/main.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include "ocxl_internal.h"
#include "trace.h"
struct afu_irq {
int id;
int hw_irq;
unsigned int virq;
char *name;
irqreturn_t (*handler)(void *private);
void (*free_private)(void *private);
void *private;
};
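/*
 * AFU interrupts are exposed to userspace as pages in the context's
 * mmap space, starting at afu->irq_base_offset, one page per IRQ:
 * offset = irq_base_offset + irq_id * PAGE_SIZE. With illustrative
 * numbers, for 64k pages and an irq_base_offset of 0x30000, IRQ id 2
 * maps to offset 0x30000 + 2 * 0x10000 = 0x50000.
 */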
int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}
u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id)
{
return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT);
}
int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
irqreturn_t (*handler)(void *private),
void (*free_private)(void *private),
void *private)
{
struct afu_irq *irq;
int rc;
mutex_lock(&ctx->irq_lock);
irq = idr_find(&ctx->irq_idr, irq_id);
if (!irq) {
rc = -EINVAL;
goto unlock;
}
irq->handler = handler;
irq->private = private;
irq->free_private = free_private;
rc = 0;
// Fall through to unlock
unlock:
mutex_unlock(&ctx->irq_lock);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
static irqreturn_t afu_irq_handler(int virq, void *data)
{
struct afu_irq *irq = (struct afu_irq *) data;
trace_ocxl_afu_irq_receive(virq);
if (irq->handler)
return irq->handler(irq->private);
return IRQ_HANDLED; // Just drop it on the ground
}
static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
{
int rc;
irq->virq = irq_create_mapping(NULL, irq->hw_irq);
if (!irq->virq) {
pr_err("irq_create_mapping failed\n");
return -ENOMEM;
}
pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);
irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
if (!irq->name) {
irq_dispose_mapping(irq->virq);
return -ENOMEM;
}
rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
if (rc) {
kfree(irq->name);
irq->name = NULL;
irq_dispose_mapping(irq->virq);
pr_err("request_irq failed: %d\n", rc);
return rc;
}
return 0;
}
static void release_afu_irq(struct afu_irq *irq)
{
free_irq(irq->virq, irq);
irq_dispose_mapping(irq->virq);
kfree(irq->name);
}
int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id)
{
struct afu_irq *irq;
int rc;
irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
if (!irq)
return -ENOMEM;
/*
* We limit the number of afu irqs per context and per link to
* avoid a single process or user depleting the pool of IPIs
*/
mutex_lock(&ctx->irq_lock);
irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
GFP_KERNEL);
if (irq->id < 0) {
rc = -ENOSPC;
goto err_unlock;
}
rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq);
if (rc)
goto err_idr;
rc = setup_afu_irq(ctx, irq);
if (rc)
goto err_alloc;
trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq);
mutex_unlock(&ctx->irq_lock);
*irq_id = irq->id;
return 0;
err_alloc:
ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
err_idr:
idr_remove(&ctx->irq_idr, irq->id);
err_unlock:
mutex_unlock(&ctx->irq_lock);
kfree(irq);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc);
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
if (ctx->mapping)
unmap_mapping_range(ctx->mapping,
ocxl_irq_id_to_offset(ctx, irq->id),
1 << PAGE_SHIFT, 1);
release_afu_irq(irq);
if (irq->free_private)
irq->free_private(irq->private);
ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
kfree(irq);
}
int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id)
{
struct afu_irq *irq;
mutex_lock(&ctx->irq_lock);
irq = idr_find(&ctx->irq_idr, irq_id);
if (!irq) {
mutex_unlock(&ctx->irq_lock);
return -EINVAL;
}
idr_remove(&ctx->irq_idr, irq->id);
afu_irq_free(irq, ctx);
mutex_unlock(&ctx->irq_lock);
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_free);
void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
struct afu_irq *irq;
int id;
mutex_lock(&ctx->irq_lock);
idr_for_each_entry(&ctx->irq_idr, irq, id)
afu_irq_free(irq, ctx);
mutex_unlock(&ctx->irq_lock);
}
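/*
 * Return the physical address of the trigger page for an AFU
 * interrupt, as provided by the XIVE controller. This is the page
 * that map_afu_irq() (context.c) inserts into the process mapping so
 * the interrupt can be raised with an MMIO write. Returns 0 if the
 * IRQ id is unknown or has no trigger page.
 */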
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id)
{
struct xive_irq_data *xd;
struct afu_irq *irq;
u64 addr = 0;
mutex_lock(&ctx->irq_lock);
irq = idr_find(&ctx->irq_idr, irq_id);
if (irq) {
xd = irq_get_handler_data(irq->virq);
addr = xd ? xd->trig_page : 0;
}
mutex_unlock(&ctx->irq_lock);
return addr;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr);
/* end of drivers/misc/ocxl/afu_irq.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/pci.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl-config.h>
#include "ocxl_internal.h"
#define EXTRACT_BIT(val, bit) (!!(val & BIT(bit)))
#define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s)
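/*
 * Bit-field helpers: EXTRACT_BIT(val, bit) returns bit 'bit' of val
 * as 0 or 1; EXTRACT_BITS(val, s, e) returns bits s..e (inclusive)
 * shifted down to bit 0. For example, EXTRACT_BITS(0xABCD, 4, 7) is
 * 0xC.
 */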
#define OCXL_DVSEC_AFU_IDX_MASK GENMASK(5, 0)
#define OCXL_DVSEC_ACTAG_MASK GENMASK(11, 0)
#define OCXL_DVSEC_PASID_MASK GENMASK(19, 0)
#define OCXL_DVSEC_PASID_LOG_MASK GENMASK(4, 0)
#define OCXL_DVSEC_TEMPL_VERSION 0x0
#define OCXL_DVSEC_TEMPL_NAME 0x4
#define OCXL_DVSEC_TEMPL_AFU_VERSION 0x1C
#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL 0x20
#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ 0x28
#define OCXL_DVSEC_TEMPL_MMIO_PP 0x30
#define OCXL_DVSEC_TEMPL_MMIO_PP_SZ 0x38
#define OCXL_DVSEC_TEMPL_ALL_MEM_SZ 0x3C
#define OCXL_DVSEC_TEMPL_LPC_MEM_START 0x40
#define OCXL_DVSEC_TEMPL_WWID 0x48
#define OCXL_DVSEC_TEMPL_LPC_MEM_SZ 0x58
#define OCXL_MAX_AFU_PER_FUNCTION 64
#define OCXL_TEMPL_LEN_1_0 0x58
#define OCXL_TEMPL_LEN_1_1 0x60
#define OCXL_TEMPL_NAME_LEN 24
#define OCXL_CFG_TIMEOUT 3
static int find_dvsec(struct pci_dev *dev, int dvsec_id)
{
return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);
}
static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
{
int vsec = 0;
u16 vendor, id;
u8 idx;
while ((vsec = pci_find_next_ext_capability(dev, vsec,
OCXL_EXT_CAP_ID_DVSEC))) {
pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
&vendor);
pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
if (vendor == PCI_VENDOR_ID_IBM &&
id == OCXL_DVSEC_AFU_CTRL_ID) {
pci_read_config_byte(dev,
vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX,
&idx);
if (idx == afu_idx)
return vsec;
}
}
return 0;
}
/**
* get_function_0() - Find a related PCI device (function 0)
* @dev: PCI device to match
*
 * Returns a pointer to the related device, or NULL if not found
*/
static struct pci_dev *get_function_0(struct pci_dev *dev)
{
unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
dev->bus->number, devfn);
}
static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
u16 val;
int pos;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PASID);
if (!pos) {
/*
 * The PASID capability is not mandatory, but without it
 * the function shouldn't define any AFU
*/
dev_dbg(&dev->dev, "Function doesn't require any PASID\n");
fn->max_pasid_log = -1;
goto out;
}
pci_read_config_word(dev, pos + PCI_PASID_CAP, &val);
fn->max_pasid_log = EXTRACT_BITS(val, 8, 12);
out:
dev_dbg(&dev->dev, "PASID capability:\n");
dev_dbg(&dev->dev, " Max PASID log = %d\n", fn->max_pasid_log);
}
static int read_dvsec_tl(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
int pos;
pos = find_dvsec(dev, OCXL_DVSEC_TL_ID);
if (!pos && PCI_FUNC(dev->devfn) == 0) {
dev_err(&dev->dev, "Can't find TL DVSEC\n");
return -ENODEV;
}
if (pos && PCI_FUNC(dev->devfn) != 0) {
dev_err(&dev->dev, "TL DVSEC is only allowed on function 0\n");
return -ENODEV;
}
fn->dvsec_tl_pos = pos;
return 0;
}
static int read_dvsec_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
int pos, afu_present;
u32 val;
pos = find_dvsec(dev, OCXL_DVSEC_FUNC_ID);
if (!pos) {
dev_err(&dev->dev, "Can't find function DVSEC\n");
return -ENODEV;
}
fn->dvsec_function_pos = pos;
pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val);
afu_present = EXTRACT_BIT(val, 31);
if (!afu_present) {
fn->max_afu_index = -1;
dev_dbg(&dev->dev, "Function doesn't define any AFU\n");
goto out;
}
fn->max_afu_index = EXTRACT_BITS(val, 24, 29);
out:
dev_dbg(&dev->dev, "Function DVSEC:\n");
dev_dbg(&dev->dev, " Max AFU index = %d\n", fn->max_afu_index);
return 0;
}
static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
int pos;
if (fn->max_afu_index < 0) {
fn->dvsec_afu_info_pos = -1;
return 0;
}
pos = find_dvsec(dev, OCXL_DVSEC_AFU_INFO_ID);
if (!pos) {
dev_err(&dev->dev, "Can't find AFU information DVSEC\n");
return -ENODEV;
}
fn->dvsec_afu_info_pos = pos;
return 0;
}
static int read_dvsec_vendor(struct pci_dev *dev)
{
int pos;
u32 cfg, tlx, dlx, reset_reload;
/*
* vendor specific DVSEC, for IBM images only. Some older
* images may not have it
*
* It's only used on function 0 to specify the version of some
* logic blocks and to give access to special registers to
* enable host-based flashing.
*/
if (PCI_FUNC(dev->devfn) != 0)
return 0;
pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
if (!pos)
return 0;
pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg);
pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx);
pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx);
pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
&reset_reload);
dev_dbg(&dev->dev, "Vendor specific DVSEC:\n");
dev_dbg(&dev->dev, " CFG version = 0x%x\n", cfg);
dev_dbg(&dev->dev, " TLX version = 0x%x\n", tlx);
dev_dbg(&dev->dev, " DLX version = 0x%x\n", dlx);
dev_dbg(&dev->dev, " ResetReload = 0x%x\n", reset_reload);
return 0;
}
/**
* get_dvsec_vendor0() - Find a related PCI device (function 0)
* @dev: PCI device to match
* @dev0: The PCI device (function 0) found
* @out_pos: The position of PCI device (function 0)
*
* Returns 0 on success, negative on failure.
*
* NOTE: If it's successful, the reference of dev0 is increased,
* so after using it, the callers must call pci_dev_put() to give
* up the reference.
*/
static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
int *out_pos)
{
int pos;
if (PCI_FUNC(dev->devfn) != 0) {
dev = get_function_0(dev);
if (!dev)
return -1;
} else {
dev = pci_dev_get(dev);
}
pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
if (!pos) {
pci_dev_put(dev);
return -1;
}
*dev0 = dev;
*out_pos = pos;
return 0;
}
int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val)
{
struct pci_dev *dev0;
u32 reset_reload;
int pos;
if (get_dvsec_vendor0(dev, &dev0, &pos))
return -1;
pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
&reset_reload);
pci_dev_put(dev0);
*val = !!(reset_reload & BIT(0));
return 0;
}
int ocxl_config_set_reset_reload(struct pci_dev *dev, int val)
{
struct pci_dev *dev0;
u32 reset_reload;
int pos;
if (get_dvsec_vendor0(dev, &dev0, &pos))
return -1;
pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
&reset_reload);
if (val)
reset_reload |= BIT(0);
else
reset_reload &= ~BIT(0);
pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
reset_reload);
pci_dev_put(dev0);
return 0;
}
static int validate_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
if (fn->max_pasid_log == -1 && fn->max_afu_index >= 0) {
dev_err(&dev->dev,
"AFUs are defined but no PASIDs are requested\n");
return -EINVAL;
}
if (fn->max_afu_index > OCXL_MAX_AFU_PER_FUNCTION) {
dev_err(&dev->dev,
"Max AFU index out of architectural limit (%d vs %d)\n",
fn->max_afu_index, OCXL_MAX_AFU_PER_FUNCTION);
return -EINVAL;
}
return 0;
}
int ocxl_config_read_function(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
int rc;
read_pasid(dev, fn);
rc = read_dvsec_tl(dev, fn);
if (rc) {
dev_err(&dev->dev,
"Invalid Transaction Layer DVSEC configuration: %d\n",
rc);
return -ENODEV;
}
rc = read_dvsec_function(dev, fn);
if (rc) {
dev_err(&dev->dev,
"Invalid Function DVSEC configuration: %d\n", rc);
return -ENODEV;
}
rc = read_dvsec_afu_info(dev, fn);
if (rc) {
dev_err(&dev->dev, "Invalid AFU configuration: %d\n", rc);
return -ENODEV;
}
rc = read_dvsec_vendor(dev);
if (rc) {
dev_err(&dev->dev,
"Invalid vendor specific DVSEC configuration: %d\n",
rc);
return -ENODEV;
}
rc = validate_function(dev, fn);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_read_function);
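/*
 * The AFU information DVSEC is read indirectly: write the requested
 * offset to the offset register, poll until the hardware sets the
 * 'data valid' bit (bit 31 of that same register), then read the data
 * register. The polling loop is bounded by OCXL_CFG_TIMEOUT seconds.
 */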
static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn,
int offset, u32 *data)
{
u32 val;
unsigned long timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT);
int pos = fn->dvsec_afu_info_pos;
/* Protect 'data valid' bit */
if (EXTRACT_BIT(offset, 31)) {
dev_err(&dev->dev, "Invalid offset in AFU info DVSEC\n");
return -EINVAL;
}
pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, offset);
pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val);
while (!EXTRACT_BIT(val, 31)) {
if (time_after_eq(jiffies, timeout)) {
dev_err(&dev->dev,
"Timeout while reading AFU info DVSEC (offset=%d)\n",
offset);
return -EBUSY;
}
cpu_relax();
pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val);
}
pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_DATA, data);
return 0;
}
/**
* read_template_version() - Read the template version from the AFU
* @dev: the device for the AFU
* @fn: the AFU offsets
* @len: outputs the template length
 * @version: outputs the version, encoded as (major << 8) | minor
*
* Returns 0 on success, negative on failure
*/
static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn,
u16 *len, u16 *version)
{
u32 val32;
u8 major, minor;
int rc;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val32);
if (rc)
return rc;
*len = EXTRACT_BITS(val32, 16, 31);
major = EXTRACT_BITS(val32, 8, 15);
minor = EXTRACT_BITS(val32, 0, 7);
*version = (major << 8) + minor;
return 0;
}
int ocxl_config_check_afu_index(struct pci_dev *dev,
struct ocxl_fn_config *fn, int afu_idx)
{
int rc;
u16 templ_version;
u16 len, expected_len;
pci_write_config_byte(dev,
fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
afu_idx);
rc = read_template_version(dev, fn, &len, &templ_version);
if (rc)
return rc;
/* AFU index map can have holes, in which case we read all 0's */
if (!templ_version && !len)
return 0;
dev_dbg(&dev->dev, "AFU descriptor template version %d.%d\n",
templ_version >> 8, templ_version & 0xFF);
switch (templ_version) {
case 0x0005: // v0.5 was used prior to the spec approval
case 0x0100:
expected_len = OCXL_TEMPL_LEN_1_0;
break;
case 0x0101:
expected_len = OCXL_TEMPL_LEN_1_1;
break;
default:
dev_warn(&dev->dev, "Unknown AFU template version %#x\n",
templ_version);
expected_len = len;
}
if (len != expected_len)
dev_warn(&dev->dev,
"Unexpected template length %#x in AFU information, expected %#x for version %#x\n",
len, expected_len, templ_version);
return 1;
}
static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu)
{
int i, rc;
u32 val, *ptr;
BUILD_BUG_ON(OCXL_AFU_NAME_SZ < OCXL_TEMPL_NAME_LEN);
for (i = 0; i < OCXL_TEMPL_NAME_LEN; i += 4) {
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_NAME + i, &val);
if (rc)
return rc;
ptr = (u32 *) &afu->name[i];
*ptr = le32_to_cpu((__force __le32) val);
}
afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
return 0;
}
static int read_afu_mmio(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu)
{
int rc;
u32 val;
/*
* Global MMIO
*/
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL, &val);
if (rc)
return rc;
afu->global_mmio_bar = EXTRACT_BITS(val, 0, 2);
afu->global_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL + 4, &val);
if (rc)
return rc;
afu->global_mmio_offset += (u64) val << 32;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ, &val);
if (rc)
return rc;
afu->global_mmio_size = val;
/*
* Per-process MMIO
*/
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP, &val);
if (rc)
return rc;
afu->pp_mmio_bar = EXTRACT_BITS(val, 0, 2);
afu->pp_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP + 4, &val);
if (rc)
return rc;
afu->pp_mmio_offset += (u64) val << 32;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP_SZ, &val);
if (rc)
return rc;
afu->pp_mmio_stride = val;
return 0;
}
static int read_afu_control(struct pci_dev *dev, struct ocxl_afu_config *afu)
{
int pos;
u8 val8;
u16 val16;
pos = find_dvsec_afu_ctrl(dev, afu->idx);
if (!pos) {
dev_err(&dev->dev, "Can't find AFU control DVSEC for AFU %d\n",
afu->idx);
return -ENODEV;
}
afu->dvsec_afu_control_pos = pos;
pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_SUP, &val8);
afu->pasid_supported_log = EXTRACT_BITS(val8, 0, 4);
pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP, &val16);
afu->actag_supported = EXTRACT_BITS(val16, 0, 11);
return 0;
}
static bool char_allowed(int c)
{
/*
 * Permitted characters: alphanumeric, hyphen, underscore, comma
*/
if ((c >= 0x30 && c <= 0x39) /* digits */ ||
(c >= 0x41 && c <= 0x5A) /* upper case */ ||
(c >= 0x61 && c <= 0x7A) /* lower case */ ||
c == 0 /* NULL */ ||
c == 0x2D /* - */ ||
c == 0x5F /* _ */ ||
c == 0x2C /* , */)
return true;
return false;
}
static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu)
{
int i;
if (!afu->name[0]) {
dev_err(&dev->dev, "Empty AFU name\n");
return -EINVAL;
}
for (i = 0; i < OCXL_TEMPL_NAME_LEN; i++) {
if (!char_allowed(afu->name[i])) {
dev_err(&dev->dev,
"Invalid character in AFU name\n");
return -EINVAL;
}
}
if (afu->global_mmio_bar != 0 &&
afu->global_mmio_bar != 2 &&
afu->global_mmio_bar != 4) {
dev_err(&dev->dev, "Invalid global MMIO bar number\n");
return -EINVAL;
}
if (afu->pp_mmio_bar != 0 &&
afu->pp_mmio_bar != 2 &&
afu->pp_mmio_bar != 4) {
dev_err(&dev->dev, "Invalid per-process MMIO bar number\n");
return -EINVAL;
}
return 0;
}
/**
* read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory
* @dev: the device for the AFU
* @fn: the AFU offsets
* @afu: the AFU struct to populate the LPC metadata into
*
* Returns 0 on success, negative on failure
*/
static int read_afu_lpc_memory_info(struct pci_dev *dev,
struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu)
{
int rc;
u32 val32;
u16 templ_version;
u16 templ_len;
u64 total_mem_size = 0;
u64 lpc_mem_size = 0;
afu->lpc_mem_offset = 0;
afu->lpc_mem_size = 0;
afu->special_purpose_mem_offset = 0;
afu->special_purpose_mem_size = 0;
/*
* For AFUs following template v1.0, the LPC memory covers the
* total memory. Its size is a power of 2.
*
* For AFUs with template >= v1.01, the total memory size is
* still a power of 2, but it is split in 2 parts:
* - the LPC memory, whose size can now be anything
* - the remainder memory is a special purpose memory, whose
* definition is AFU-dependent. It is not accessible through
* the usual commands for LPC memory
*/
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_ALL_MEM_SZ, &val32);
if (rc)
return rc;
val32 = EXTRACT_BITS(val32, 0, 7);
if (!val32)
return 0; /* No LPC memory */
/*
* The configuration space spec allows for a memory size of up
* to 2^255 bytes.
*
* Current generation hardware uses 56-bit physical addresses,
 * but we won't be able to get anywhere close to that, as we won't
* have a hole big enough in the memory map. Let it pass in
* the driver for now. We'll get an error from the firmware
* when trying to configure something too big.
*/
total_mem_size = 1ull << val32;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START, &val32);
if (rc)
return rc;
afu->lpc_mem_offset = val32;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START + 4, &val32);
if (rc)
return rc;
afu->lpc_mem_offset |= (u64) val32 << 32;
rc = read_template_version(dev, fn, &templ_len, &templ_version);
if (rc)
return rc;
if (templ_version >= 0x0101) {
rc = read_afu_info(dev, fn,
OCXL_DVSEC_TEMPL_LPC_MEM_SZ, &val32);
if (rc)
return rc;
lpc_mem_size = val32;
rc = read_afu_info(dev, fn,
OCXL_DVSEC_TEMPL_LPC_MEM_SZ + 4, &val32);
if (rc)
return rc;
lpc_mem_size |= (u64) val32 << 32;
} else {
lpc_mem_size = total_mem_size;
}
afu->lpc_mem_size = lpc_mem_size;
if (lpc_mem_size < total_mem_size) {
afu->special_purpose_mem_offset =
afu->lpc_mem_offset + lpc_mem_size;
afu->special_purpose_mem_size =
total_mem_size - lpc_mem_size;
}
return 0;
}
int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu, u8 afu_idx)
{
int rc;
u32 val32;
/*
* First, we need to write the AFU idx for the AFU we want to
* access.
*/
WARN_ON((afu_idx & OCXL_DVSEC_AFU_IDX_MASK) != afu_idx);
afu->idx = afu_idx;
pci_write_config_byte(dev,
fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
afu->idx);
rc = read_afu_name(dev, fn, afu);
if (rc)
return rc;
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_AFU_VERSION, &val32);
if (rc)
return rc;
afu->version_major = EXTRACT_BITS(val32, 24, 31);
afu->version_minor = EXTRACT_BITS(val32, 16, 23);
afu->afuc_type = EXTRACT_BITS(val32, 14, 15);
afu->afum_type = EXTRACT_BITS(val32, 12, 13);
afu->profile = EXTRACT_BITS(val32, 0, 7);
rc = read_afu_mmio(dev, fn, afu);
if (rc)
return rc;
rc = read_afu_lpc_memory_info(dev, fn, afu);
if (rc)
return rc;
rc = read_afu_control(dev, afu);
if (rc)
return rc;
dev_dbg(&dev->dev, "AFU configuration:\n");
dev_dbg(&dev->dev, " name = %s\n", afu->name);
dev_dbg(&dev->dev, " version = %d.%d\n", afu->version_major,
afu->version_minor);
dev_dbg(&dev->dev, " global mmio bar = %hhu\n", afu->global_mmio_bar);
dev_dbg(&dev->dev, " global mmio offset = %#llx\n",
afu->global_mmio_offset);
dev_dbg(&dev->dev, " global mmio size = %#x\n", afu->global_mmio_size);
dev_dbg(&dev->dev, " pp mmio bar = %hhu\n", afu->pp_mmio_bar);
dev_dbg(&dev->dev, " pp mmio offset = %#llx\n", afu->pp_mmio_offset);
dev_dbg(&dev->dev, " pp mmio stride = %#x\n", afu->pp_mmio_stride);
dev_dbg(&dev->dev, " lpc_mem offset = %#llx\n", afu->lpc_mem_offset);
dev_dbg(&dev->dev, " lpc_mem size = %#llx\n", afu->lpc_mem_size);
dev_dbg(&dev->dev, " special purpose mem offset = %#llx\n",
afu->special_purpose_mem_offset);
dev_dbg(&dev->dev, " special purpose mem size = %#llx\n",
afu->special_purpose_mem_size);
dev_dbg(&dev->dev, " pasid supported (log) = %u\n",
afu->pasid_supported_log);
dev_dbg(&dev->dev, " actag supported = %u\n",
afu->actag_supported);
rc = validate_afu(dev, afu);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_read_afu);
int ocxl_config_get_actag_info(struct pci_dev *dev, u16 *base, u16 *enabled,
u16 *supported)
{
int rc;
/*
* This is really a simple wrapper for the kernel API, to
* avoid an external driver using ocxl as a library to call
* platform-dependent code
*/
rc = pnv_ocxl_get_actag(dev, base, enabled, supported);
if (rc) {
dev_err(&dev->dev, "Can't get actag for device: %d\n", rc);
return rc;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_config_get_actag_info);
void ocxl_config_set_afu_actag(struct pci_dev *dev, int pos, int actag_base,
int actag_count)
{
u16 val;
val = actag_count & OCXL_DVSEC_ACTAG_MASK;
pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_EN, val);
val = actag_base & OCXL_DVSEC_ACTAG_MASK;
pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_BASE, val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_actag);
int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count)
{
return pnv_ocxl_get_pasid_count(dev, count);
}
void ocxl_config_set_afu_pasid(struct pci_dev *dev, int pos, int pasid_base,
u32 pasid_count_log)
{
u8 val8;
u32 val32;
val8 = pasid_count_log & OCXL_DVSEC_PASID_LOG_MASK;
pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_EN, val8);
pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE,
&val32);
val32 &= ~OCXL_DVSEC_PASID_MASK;
val32 |= pasid_base & OCXL_DVSEC_PASID_MASK;
pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE,
val32);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_pasid);
void ocxl_config_set_afu_state(struct pci_dev *dev, int pos, int enable)
{
u8 val;
pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, &val);
if (enable)
val |= 1;
else
val &= 0xFE;
pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_afu_state);
int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec)
{
u32 val;
__be32 *be32ptr;
u8 timers;
int i, rc;
long recv_cap;
char *recv_rate;
/*
* Skip on function != 0, as the TL can only be defined on 0
*/
if (PCI_FUNC(dev->devfn) != 0)
return 0;
recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL);
if (!recv_rate)
return -ENOMEM;
/*
* The spec defines 64 templates for messages in the
* Transaction Layer (TL).
*
* The host and device each support a subset, so we need to
* configure the transmitters on each side to send only
* templates the receiver understands, at a rate the receiver
* can process. Per the spec, template 0 must be supported by
* everybody. That's the template which has been used by the
* host and device so far.
*
* The sending rate limit must be set before the template is
* enabled.
*/
/*
* Device -> host
*/
rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate,
PNV_OCXL_TL_RATE_BUF_SIZE);
if (rc)
goto out;
for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
be32ptr = (__be32 *) &recv_rate[i];
pci_write_config_dword(dev,
tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i,
be32_to_cpu(*be32ptr));
}
val = recv_cap >> 32;
pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val);
val = recv_cap & GENMASK(31, 0);
pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val);
/*
* Host -> device
*/
for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
pci_read_config_dword(dev,
tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i,
&val);
be32ptr = (__be32 *) &recv_rate[i];
*be32ptr = cpu_to_be32(val);
}
pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val);
recv_cap = (long) val << 32;
pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val);
recv_cap |= val;
rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate),
PNV_OCXL_TL_RATE_BUF_SIZE);
if (rc)
goto out;
/*
* Opencapi commands needing to be retried are classified per
* the TL in 2 groups: short and long commands.
*
 * The short back off timer is not used for now. It will be
* for opencapi 4.0.
*
* The long back off timer is typically used when an AFU hits
* a page fault but the NPU is already processing one. So the
* AFU needs to wait before it can resubmit. Having a value
* too low doesn't break anything, but can generate extra
* traffic on the link.
* We set it to 1.6 us for now. It's shorter than, but in the
* same order of magnitude as the time spent to process a page
* fault.
*/
timers = 0x2 << 4; /* long timer = 1.6 us */
pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS,
timers);
rc = 0;
out:
kfree(recv_rate);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_config_set_TL);
int ocxl_config_terminate_pasid(struct pci_dev *dev, int afu_control, int pasid)
{
u32 val;
unsigned long timeout;
pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
&val);
if (EXTRACT_BIT(val, 20)) {
dev_err(&dev->dev,
"Can't terminate PASID %#x, previous termination didn't complete\n",
pasid);
return -EBUSY;
}
val &= ~OCXL_DVSEC_PASID_MASK;
val |= pasid & OCXL_DVSEC_PASID_MASK;
val |= BIT(20);
pci_write_config_dword(dev,
afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
val);
timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT);
pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
&val);
while (EXTRACT_BIT(val, 20)) {
if (time_after_eq(jiffies, timeout)) {
dev_err(&dev->dev,
"Timeout while waiting for AFU to terminate PASID %#x\n",
pasid);
return -EBUSY;
}
cpu_relax();
pci_read_config_dword(dev,
afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID,
&val);
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_config_terminate_pasid);
void ocxl_config_set_actag(struct pci_dev *dev, int func_dvsec, u32 tag_first,
u32 tag_count)
{
u32 val;
val = (tag_first & OCXL_DVSEC_ACTAG_MASK) << 16;
val |= tag_count & OCXL_DVSEC_ACTAG_MASK;
pci_write_config_dword(dev, func_dvsec + OCXL_DVSEC_FUNC_OFF_ACTAG,
val);
}
EXPORT_SYMBOL_GPL(ocxl_config_set_actag);
/* end of drivers/misc/ocxl/config.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"
int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u32 *val)
{
if (offset > afu->config.global_mmio_size - 4)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
*val = readl_be((char *)afu->global_mmio_ptr + offset);
break;
default:
*val = readl((char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_read32);
int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u64 *val)
{
if (offset > afu->config.global_mmio_size - 8)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
*val = readq_be((char *)afu->global_mmio_ptr + offset);
break;
default:
*val = readq((char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_read64);
int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u32 val)
{
if (offset > afu->config.global_mmio_size - 4)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
writel_be(val, (char *)afu->global_mmio_ptr + offset);
break;
default:
writel(val, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_write32);
int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u64 val)
{
if (offset > afu->config.global_mmio_size - 8)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
writeq_be(val, (char *)afu->global_mmio_ptr + offset);
break;
default:
writeq(val, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_write64);
int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u32 mask)
{
u32 tmp;
if (offset > afu->config.global_mmio_size - 4)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
tmp = readl_be((char *)afu->global_mmio_ptr + offset);
tmp |= mask;
writel_be(tmp, (char *)afu->global_mmio_ptr + offset);
break;
default:
tmp = readl((char *)afu->global_mmio_ptr + offset);
tmp |= mask;
writel(tmp, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_set32);
int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u64 mask)
{
u64 tmp;
if (offset > afu->config.global_mmio_size - 8)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
tmp = readq_be((char *)afu->global_mmio_ptr + offset);
tmp |= mask;
writeq_be(tmp, (char *)afu->global_mmio_ptr + offset);
break;
default:
tmp = readq((char *)afu->global_mmio_ptr + offset);
tmp |= mask;
writeq(tmp, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_set64);
int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u32 mask)
{
u32 tmp;
if (offset > afu->config.global_mmio_size - 4)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
tmp = readl_be((char *)afu->global_mmio_ptr + offset);
tmp &= ~mask;
writel_be(tmp, (char *)afu->global_mmio_ptr + offset);
break;
default:
tmp = readl((char *)afu->global_mmio_ptr + offset);
tmp &= ~mask;
writel(tmp, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear32);
int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
enum ocxl_endian endian, u64 mask)
{
u64 tmp;
if (offset > afu->config.global_mmio_size - 8)
return -EINVAL;
#ifdef __BIG_ENDIAN__
if (endian == OCXL_HOST_ENDIAN)
endian = OCXL_BIG_ENDIAN;
#endif
switch (endian) {
case OCXL_BIG_ENDIAN:
tmp = readq_be((char *)afu->global_mmio_ptr + offset);
tmp &= ~mask;
writeq_be(tmp, (char *)afu->global_mmio_ptr + offset);
break;
default:
tmp = readq((char *)afu->global_mmio_ptr + offset);
tmp &= ~mask;
writeq(tmp, (char *)afu->global_mmio_ptr + offset);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear64);
/* end of drivers/misc/ocxl/mmio.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
struct address_space *mapping)
{
int pasid;
struct ocxl_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->afu = afu;
mutex_lock(&afu->contexts_lock);
pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
afu->pasid_base + afu->pasid_max, GFP_KERNEL);
if (pasid < 0) {
mutex_unlock(&afu->contexts_lock);
kfree(ctx);
return pasid;
}
afu->pasid_count++;
mutex_unlock(&afu->contexts_lock);
ctx->pasid = pasid;
ctx->status = OPENED;
mutex_init(&ctx->status_mutex);
ctx->mapping = mapping;
mutex_init(&ctx->mapping_lock);
init_waitqueue_head(&ctx->events_wq);
mutex_init(&ctx->xsl_error_lock);
mutex_init(&ctx->irq_lock);
idr_init(&ctx->irq_idr);
ctx->tidr = 0;
/*
* Keep a reference on the AFU to make sure it's valid for the
* duration of the life of the context
*/
ocxl_afu_get(afu);
*context = ctx;
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
/*
* Callback for when a translation fault triggers an error
* data: a pointer to the context which triggered the fault
* addr: the address that triggered the error
* dsisr: the value of the PPC64 dsisr register
*/
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
struct ocxl_context *ctx = (struct ocxl_context *) data;
mutex_lock(&ctx->xsl_error_lock);
ctx->xsl_error.addr = addr;
ctx->xsl_error.dsisr = dsisr;
ctx->xsl_error.count++;
mutex_unlock(&ctx->xsl_error_lock);
wake_up_all(&ctx->events_wq);
}
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
int rc;
unsigned long pidr = 0;
struct pci_dev *dev;
// Locks both status & tidr
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
rc = -EIO;
goto out;
}
if (mm)
pidr = mm->context.id;
dev = to_pci_dev(ctx->afu->fn->dev.parent);
rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
if (rc)
goto out;
ctx->status = ATTACHED;
out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 trigger_addr;
int irq_id = ocxl_irq_offset_to_id(ctx, offset);
trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
if (!trigger_addr)
return VM_FAULT_SIGBUS;
return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}
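/*
 * The per-process MMIO area of a context is located by its PASID
 * offset within the AFU: pp_mmio_start + (pasid - pasid_base) *
 * pp_mmio_stride. Pages are inserted into the user mapping on fault.
 */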
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 pp_mmio_addr;
int pasid_off;
vm_fault_t ret;
if (offset >= ctx->afu->config.pp_mmio_stride)
return VM_FAULT_SIGBUS;
mutex_lock(&ctx->status_mutex);
if (ctx->status != ATTACHED) {
mutex_unlock(&ctx->status_mutex);
pr_debug("%s: Context not attached, failing mmio mmap\n",
__func__);
return VM_FAULT_SIGBUS;
}
pasid_off = ctx->pasid - ctx->afu->pasid_base;
pp_mmio_addr = ctx->afu->pp_mmio_start +
pasid_off * ctx->afu->config.pp_mmio_stride +
offset;
ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
return ret;
}
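/*
 * Layout of a context's mmap space: the per-process MMIO registers
 * come first (offsets below afu->irq_base_offset), followed by one
 * trigger page per allocated AFU interrupt.
 */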
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_context *ctx = vma->vm_file->private_data;
u64 offset;
vm_fault_t ret;
offset = vmf->pgoff << PAGE_SHIFT;
pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
ctx->pasid, vmf->address, offset);
if (offset < ctx->afu->irq_base_offset)
ret = map_pp_mmio(vma, vmf->address, offset, ctx);
else
ret = map_afu_irq(vma, vmf->address, offset, ctx);
return ret;
}
static const struct vm_operations_struct ocxl_vmops = {
.fault = ocxl_mmap_fault,
};
static int check_mmap_afu_irq(struct ocxl_context *ctx,
struct vm_area_struct *vma)
{
int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);
/* only one page */
if (vma_pages(vma) != 1)
return -EINVAL;
/* check offset validity */
if (!ocxl_afu_irq_get_addr(ctx, irq_id))
return -EINVAL;
/*
* trigger page should only be accessible in write mode.
*
* It's a bit theoretical, as a page mmaped with only
* PROT_WRITE is currently readable, but it doesn't hurt.
*/
if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
!(vma->vm_flags & VM_WRITE))
return -EINVAL;
vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
return 0;
}
static int check_mmap_mmio(struct ocxl_context *ctx,
struct vm_area_struct *vma)
{
if ((vma_pages(vma) + vma->vm_pgoff) >
(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
return -EINVAL;
return 0;
}
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
int rc;
if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
rc = check_mmap_mmio(ctx, vma);
else
rc = check_mmap_afu_irq(ctx, vma);
if (rc)
return rc;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxl_vmops;
return 0;
}
int ocxl_context_detach(struct ocxl_context *ctx)
{
struct pci_dev *dev;
int afu_control_pos;
enum ocxl_context_status status;
int rc;
mutex_lock(&ctx->status_mutex);
status = ctx->status;
ctx->status = CLOSED;
mutex_unlock(&ctx->status_mutex);
if (status != ATTACHED)
return 0;
dev = to_pci_dev(ctx->afu->fn->dev.parent);
afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;
mutex_lock(&ctx->afu->afu_control_lock);
rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
mutex_unlock(&ctx->afu->afu_control_lock);
trace_ocxl_terminate_pasid(ctx->pasid, rc);
if (rc) {
/*
* If we timeout waiting for the AFU to terminate the
* pasid, then it's dangerous to clean up the Process
* Element entry in the SPA, as it may be referenced
* in the future by the AFU. In which case, we would
* checkstop because of an invalid PE access (FIR
* register 2, bit 42). So leave the PE
* defined. Caller shouldn't free the context so that
* PASID remains allocated.
*
* A link reset will be required to cleanup the AFU
* and the SPA.
*/
if (rc == -EBUSY)
return rc;
}
rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
if (rc) {
dev_warn(&dev->dev,
"Couldn't remove PE entry cleanly: %d\n", rc);
}
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
struct ocxl_context *ctx;
int tmp;
mutex_lock(&afu->contexts_lock);
idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
ocxl_context_detach(ctx);
/*
* We are force detaching - remove any active mmio
* mappings so userspace cannot interfere with the
* card if it comes back. Easiest way to exercise
* this is to unbind and rebind the driver via sysfs
* while it is in use.
*/
mutex_lock(&ctx->mapping_lock);
if (ctx->mapping)
unmap_mapping_range(ctx->mapping, 0, 0, 1);
mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
void ocxl_context_free(struct ocxl_context *ctx)
{
mutex_lock(&ctx->afu->contexts_lock);
ctx->afu->pasid_count--;
idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
mutex_unlock(&ctx->afu->contexts_lock);
ocxl_afu_irq_free_all(ctx);
idr_destroy(&ctx->irq_idr);
/* reference to the AFU taken in ocxl_context_alloc() */
ocxl_afu_put(ctx->afu);
kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);
/* end of drivers/misc/ocxl/context.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/eventfd.h>
#include <linux/uaccess.h>
#include <uapi/misc/ocxl.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include "ocxl_internal.h"
#define OCXL_NUM_MINORS 256 /* Total to reserve */
static dev_t ocxl_dev;
static struct class *ocxl_class;
static DEFINE_MUTEX(minors_idr_lock);
static struct idr minors_idr;
static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
{
struct ocxl_file_info *info;
mutex_lock(&minors_idr_lock);
info = idr_find(&minors_idr, MINOR(devno));
if (info)
get_device(&info->dev);
mutex_unlock(&minors_idr_lock);
return info;
}
static int allocate_minor(struct ocxl_file_info *info)
{
int minor;
mutex_lock(&minors_idr_lock);
minor = idr_alloc(&minors_idr, info, 0, OCXL_NUM_MINORS, GFP_KERNEL);
mutex_unlock(&minors_idr_lock);
return minor;
}
static void free_minor(struct ocxl_file_info *info)
{
mutex_lock(&minors_idr_lock);
idr_remove(&minors_idr, MINOR(info->dev.devt));
mutex_unlock(&minors_idr_lock);
}
static int afu_open(struct inode *inode, struct file *file)
{
struct ocxl_file_info *info;
struct ocxl_context *ctx;
int rc;
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
info = find_and_get_file_info(inode->i_rdev);
if (!info)
return -ENODEV;
rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
if (rc) {
put_device(&info->dev);
return rc;
}
put_device(&info->dev);
file->private_data = ctx;
return 0;
}
static long afu_ioctl_attach(struct ocxl_context *ctx,
struct ocxl_ioctl_attach __user *uarg)
{
struct ocxl_ioctl_attach arg;
u64 amr = 0;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
/* Make sure reserved fields are not set for forward compatibility */
if (arg.reserved1 || arg.reserved2 || arg.reserved3)
return -EINVAL;
amr = arg.amr & mfspr(SPRN_UAMOR);
return ocxl_context_attach(ctx, amr, current->mm);
}
static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
struct ocxl_ioctl_metadata __user *uarg)
{
struct ocxl_ioctl_metadata arg;
memset(&arg, 0, sizeof(arg));
arg.version = 0;
arg.afu_version_major = ctx->afu->config.version_major;
arg.afu_version_minor = ctx->afu->config.version_minor;
arg.pasid = ctx->pasid;
arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
arg.global_mmio_size = ctx->afu->config.global_mmio_size;
if (copy_to_user(uarg, &arg, sizeof(arg)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_PPC64
static long afu_ioctl_enable_p9_wait(struct ocxl_context *ctx,
struct ocxl_ioctl_p9_wait __user *uarg)
{
struct ocxl_ioctl_p9_wait arg;
memset(&arg, 0, sizeof(arg));
if (cpu_has_feature(CPU_FTR_P9_TIDR)) {
enum ocxl_context_status status;
// Locks both status & tidr
mutex_lock(&ctx->status_mutex);
if (!ctx->tidr) {
if (set_thread_tidr(current)) {
mutex_unlock(&ctx->status_mutex);
return -ENOENT;
}
ctx->tidr = current->thread.tidr;
}
status = ctx->status;
mutex_unlock(&ctx->status_mutex);
if (status == ATTACHED) {
int rc = ocxl_link_update_pe(ctx->afu->fn->link,
ctx->pasid, ctx->tidr);
if (rc)
return rc;
}
arg.thread_id = ctx->tidr;
} else
return -ENOENT;
if (copy_to_user(uarg, &arg, sizeof(arg)))
return -EFAULT;
return 0;
}
#endif
static long afu_ioctl_get_features(struct ocxl_context *ctx,
struct ocxl_ioctl_features __user *uarg)
{
struct ocxl_ioctl_features arg;
memset(&arg, 0, sizeof(arg));
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_P9_TIDR))
arg.flags[0] |= OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT;
#endif
if (copy_to_user(uarg, &arg, sizeof(arg)))
return -EFAULT;
return 0;
}
#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \
x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \
x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \
x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \
x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
x == OCXL_IOCTL_ENABLE_P9_WAIT ? "ENABLE_P9_WAIT" : \
x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" : \
"UNKNOWN")
static irqreturn_t irq_handler(void *private)
{
struct eventfd_ctx *ev_ctx = private;
eventfd_signal(ev_ctx, 1);
return IRQ_HANDLED;
}
static void irq_free(void *private)
{
struct eventfd_ctx *ev_ctx = private;
eventfd_ctx_put(ev_ctx);
}
static long afu_ioctl(struct file *file, unsigned int cmd,
unsigned long args)
{
struct ocxl_context *ctx = file->private_data;
struct ocxl_ioctl_irq_fd irq_fd;
struct eventfd_ctx *ev_ctx;
int irq_id;
u64 irq_offset;
long rc;
bool closed;
pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid,
CMD_STR(cmd));
mutex_lock(&ctx->status_mutex);
closed = (ctx->status == CLOSED);
mutex_unlock(&ctx->status_mutex);
if (closed)
return -EIO;
switch (cmd) {
case OCXL_IOCTL_ATTACH:
rc = afu_ioctl_attach(ctx,
(struct ocxl_ioctl_attach __user *) args);
break;
case OCXL_IOCTL_IRQ_ALLOC:
rc = ocxl_afu_irq_alloc(ctx, &irq_id);
if (!rc) {
irq_offset = ocxl_irq_id_to_offset(ctx, irq_id);
rc = copy_to_user((u64 __user *) args, &irq_offset,
sizeof(irq_offset));
if (rc) {
ocxl_afu_irq_free(ctx, irq_id);
return -EFAULT;
}
}
break;
case OCXL_IOCTL_IRQ_FREE:
rc = copy_from_user(&irq_offset, (u64 __user *) args,
sizeof(irq_offset));
if (rc)
return -EFAULT;
irq_id = ocxl_irq_offset_to_id(ctx, irq_offset);
rc = ocxl_afu_irq_free(ctx, irq_id);
break;
case OCXL_IOCTL_IRQ_SET_FD:
rc = copy_from_user(&irq_fd, (u64 __user *) args,
sizeof(irq_fd));
if (rc)
return -EFAULT;
if (irq_fd.reserved)
return -EINVAL;
irq_id = ocxl_irq_offset_to_id(ctx, irq_fd.irq_offset);
ev_ctx = eventfd_ctx_fdget(irq_fd.eventfd);
if (IS_ERR(ev_ctx))
return PTR_ERR(ev_ctx);
rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
if (rc)
eventfd_ctx_put(ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
rc = afu_ioctl_get_metadata(ctx,
(struct ocxl_ioctl_metadata __user *) args);
break;
#ifdef CONFIG_PPC64
case OCXL_IOCTL_ENABLE_P9_WAIT:
rc = afu_ioctl_enable_p9_wait(ctx,
(struct ocxl_ioctl_p9_wait __user *) args);
break;
#endif
case OCXL_IOCTL_GET_FEATURES:
rc = afu_ioctl_get_features(ctx,
(struct ocxl_ioctl_features __user *) args);
break;
default:
rc = -EINVAL;
}
return rc;
}
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long args)
{
return afu_ioctl(file, cmd, args);
}
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
struct ocxl_context *ctx = file->private_data;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
return ocxl_context_mmap(ctx, vma);
}
static bool has_xsl_error(struct ocxl_context *ctx)
{
bool ret;
mutex_lock(&ctx->xsl_error_lock);
ret = !!ctx->xsl_error.addr;
mutex_unlock(&ctx->xsl_error_lock);
return ret;
}
/*
 * Are there any events pending on the AFU?
* ctx: The AFU context
* Returns: true if there are events pending
*/
static bool afu_events_pending(struct ocxl_context *ctx)
{
if (has_xsl_error(ctx))
return true;
return false;
}
static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait)
{
struct ocxl_context *ctx = file->private_data;
unsigned int mask = 0;
bool closed;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
poll_wait(file, &ctx->events_wq, wait);
mutex_lock(&ctx->status_mutex);
closed = (ctx->status == CLOSED);
mutex_unlock(&ctx->status_mutex);
if (afu_events_pending(ctx))
mask = EPOLLIN | EPOLLRDNORM;
else if (closed)
mask = EPOLLERR;
return mask;
}
/*
* Populate the supplied buffer with a single XSL error
* ctx: The AFU context to report the error from
* header: the event header to populate
* buf: The buffer to write the body into (should be at least
* AFU_EVENT_BODY_XSL_ERROR_SIZE)
* Return: the amount of buffer that was populated
*/
static ssize_t append_xsl_error(struct ocxl_context *ctx,
struct ocxl_kernel_event_header *header,
char __user *buf)
{
struct ocxl_kernel_event_xsl_fault_error body;
memset(&body, 0, sizeof(body));
mutex_lock(&ctx->xsl_error_lock);
if (!ctx->xsl_error.addr) {
mutex_unlock(&ctx->xsl_error_lock);
return 0;
}
body.addr = ctx->xsl_error.addr;
body.dsisr = ctx->xsl_error.dsisr;
body.count = ctx->xsl_error.count;
ctx->xsl_error.addr = 0;
ctx->xsl_error.dsisr = 0;
ctx->xsl_error.count = 0;
mutex_unlock(&ctx->xsl_error_lock);
header->type = OCXL_AFU_EVENT_XSL_FAULT_ERROR;
if (copy_to_user(buf, &body, sizeof(body)))
return -EFAULT;
return sizeof(body);
}
#define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error)
/*
* Reports events on the AFU
* Format:
* Header (struct ocxl_kernel_event_header)
* Body (struct ocxl_kernel_event_*)
* Header...
*/
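/*
 * Illustrative sketch of a userspace reader (not part of the driver):
 *
 *	char buf[sizeof(struct ocxl_kernel_event_header) +
 *		 sizeof(struct ocxl_kernel_event_xsl_fault_error)];
 *	ssize_t n = read(afu_fd, buf, sizeof(buf));
 *	if (n >= (ssize_t)sizeof(struct ocxl_kernel_event_header)) {
 *		struct ocxl_kernel_event_header *hdr = (void *)buf;
 *		// hdr->type selects the body following the header,
 *		// e.g. OCXL_AFU_EVENT_XSL_FAULT_ERROR
 *	}
 */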
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct ocxl_context *ctx = file->private_data;
struct ocxl_kernel_event_header header;
ssize_t rc;
ssize_t used = 0;
DEFINE_WAIT(event_wait);
memset(&header, 0, sizeof(header));
/* Require offset to be 0 */
if (*off != 0)
return -EINVAL;
if (count < (sizeof(struct ocxl_kernel_event_header) +
AFU_EVENT_BODY_MAX_SIZE))
return -EINVAL;
for (;;) {
prepare_to_wait(&ctx->events_wq, &event_wait,
TASK_INTERRUPTIBLE);
if (afu_events_pending(ctx))
break;
if (ctx->status == CLOSED)
break;
if (file->f_flags & O_NONBLOCK) {
finish_wait(&ctx->events_wq, &event_wait);
return -EAGAIN;
}
if (signal_pending(current)) {
finish_wait(&ctx->events_wq, &event_wait);
return -ERESTARTSYS;
}
schedule();
}
finish_wait(&ctx->events_wq, &event_wait);
if (has_xsl_error(ctx)) {
used = append_xsl_error(ctx, &header, buf + sizeof(header));
if (used < 0)
return used;
}
if (!afu_events_pending(ctx))
header.flags |= OCXL_KERNEL_EVENT_FLAG_LAST;
if (copy_to_user(buf, &header, sizeof(header)))
return -EFAULT;
used += sizeof(header);
rc = used;
return rc;
}
static int afu_release(struct inode *inode, struct file *file)
{
struct ocxl_context *ctx = file->private_data;
int rc;
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
rc = ocxl_context_detach(ctx);
mutex_lock(&ctx->mapping_lock);
ctx->mapping = NULL;
mutex_unlock(&ctx->mapping_lock);
wake_up_all(&ctx->events_wq);
if (rc != -EBUSY)
ocxl_context_free(ctx);
return 0;
}
static const struct file_operations ocxl_afu_fops = {
.owner = THIS_MODULE,
.open = afu_open,
.unlocked_ioctl = afu_ioctl,
.compat_ioctl = afu_compat_ioctl,
.mmap = afu_mmap,
.poll = afu_poll,
.read = afu_read,
.release = afu_release,
};
// Free the info struct
static void info_release(struct device *dev)
{
struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
ocxl_afu_put(info->afu);
kfree(info);
}
static int ocxl_file_make_visible(struct ocxl_file_info *info)
{
int rc;
cdev_init(&info->cdev, &ocxl_afu_fops);
rc = cdev_add(&info->cdev, info->dev.devt, 1);
if (rc) {
dev_err(&info->dev, "Unable to add afu char device: %d\n", rc);
return rc;
}
return 0;
}
static void ocxl_file_make_invisible(struct ocxl_file_info *info)
{
cdev_del(&info->cdev);
}
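/*
 * Register the character device exposing an AFU: reserve a minor, set up
 * the struct device (named <afu name>.<pci dev>.<afu index>), register it,
 * add the sysfs attributes and finally make the cdev visible. Failures are
 * unwound in reverse; once the device has been registered, the final
 * device_unregister()/put_device() lands in info_release(), which drops
 * the AFU reference and frees the info structure.
 */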
int ocxl_file_register_afu(struct ocxl_afu *afu)
{
int minor;
int rc;
struct ocxl_file_info *info;
struct ocxl_fn *fn = afu->fn;
struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
return -ENOMEM;
minor = allocate_minor(info);
if (minor < 0) {
kfree(info);
return minor;
}
info->dev.parent = &fn->dev;
info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
info->dev.class = ocxl_class;
info->dev.release = info_release;
info->afu = afu;
ocxl_afu_get(afu);
rc = dev_set_name(&info->dev, "%s.%s.%hhu",
afu->config.name, dev_name(&pci_dev->dev), afu->config.idx);
if (rc)
goto err_put;
rc = device_register(&info->dev);
if (rc) {
free_minor(info);
put_device(&info->dev);
return rc;
}
rc = ocxl_sysfs_register_afu(info);
if (rc)
goto err_unregister;
rc = ocxl_file_make_visible(info);
if (rc)
goto err_unregister;
ocxl_afu_set_private(afu, info);
return 0;
err_unregister:
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
free_minor(info);
device_unregister(&info->dev);
return rc;
err_put:
ocxl_afu_put(afu);
free_minor(info);
kfree(info);
return rc;
}
void ocxl_file_unregister_afu(struct ocxl_afu *afu)
{
struct ocxl_file_info *info = ocxl_afu_get_private(afu);
if (!info)
return;
ocxl_file_make_invisible(info);
ocxl_sysfs_unregister_afu(info);
free_minor(info);
device_unregister(&info->dev);
}
static char *ocxl_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev));
}
int ocxl_file_init(void)
{
int rc;
idr_init(&minors_idr);
rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl");
if (rc) {
pr_err("Unable to allocate ocxl major number: %d\n", rc);
return rc;
}
ocxl_class = class_create("ocxl");
if (IS_ERR(ocxl_class)) {
pr_err("Unable to create ocxl class\n");
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
return PTR_ERR(ocxl_class);
}
ocxl_class->devnode = ocxl_devnode;
return 0;
}
void ocxl_file_exit(void)
{
class_destroy(ocxl_class);
unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
idr_destroy(&minors_idr);
}
| linux-master | drivers/misc/ocxl/file.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include "ocxl_internal.h"
struct id_range {
struct list_head list;
u32 start;
u32 end;
};
#ifdef DEBUG
static void dump_list(struct list_head *head, char *type_str)
{
struct id_range *cur;
pr_debug("%s ranges allocated:\n", type_str);
list_for_each_entry(cur, head, list) {
pr_debug("Range %d->%d\n", cur->start, cur->end);
}
}
#endif
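/*
 * First-fit allocator for a contiguous range of 'size' IDs: walk the
 * ordered list of existing ranges and slot the new range into the first
 * gap big enough to hold it (or after the last range). Returns the first
 * ID of the new range, or -ENOSPC if it would go past max_id.
 */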
static int range_alloc(struct list_head *head, u32 size, int max_id,
char *type_str)
{
struct list_head *pos;
struct id_range *cur, *new;
int rc, last_end;
new = kmalloc(sizeof(struct id_range), GFP_KERNEL);
if (!new)
return -ENOMEM;
pos = head;
last_end = -1;
list_for_each_entry(cur, head, list) {
if ((cur->start - last_end) > size)
break;
last_end = cur->end;
pos = &cur->list;
}
new->start = last_end + 1;
new->end = new->start + size - 1;
if (new->end > max_id) {
kfree(new);
rc = -ENOSPC;
} else {
list_add(&new->list, pos);
rc = new->start;
}
#ifdef DEBUG
dump_list(head, type_str);
#endif
return rc;
}
static void range_free(struct list_head *head, u32 start, u32 size,
char *type_str)
{
bool found = false;
struct id_range *cur, *tmp;
list_for_each_entry_safe(cur, tmp, head, list) {
if (cur->start == start && cur->end == (start + size - 1)) {
found = true;
list_del(&cur->list);
kfree(cur);
break;
}
}
WARN_ON(!found);
#ifdef DEBUG
dump_list(head, type_str);
#endif
}
int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size)
{
int max_pasid;
if (fn->config.max_pasid_log < 0)
return -ENOSPC;
max_pasid = 1 << fn->config.max_pasid_log;
return range_alloc(&fn->pasid_list, size, max_pasid, "afu pasid");
}
void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size)
{
return range_free(&fn->pasid_list, start, size, "afu pasid");
}
int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size)
{
int max_actag;
max_actag = fn->actag_enabled;
return range_alloc(&fn->actag_list, size, max_actag, "afu actag");
}
void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size)
{
return range_free(&fn->actag_list, start, size, "afu actag");
}
| linux-master | drivers/misc/ocxl/pasid.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015 IBM Corp.
*/
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
| linux-master | drivers/misc/cxl/trace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <linux/irqdomain.h>
#include "cxl.h"
/*
* Since we want to track memory mappings to be able to force-unmap
* when the AFU is no longer reachable, we need an inode. For devices
* opened through the cxl user API, this is not a problem, but a
* userland process can also get a cxl fd through the cxl_get_fd()
* API, which is used by the cxlflash driver.
*
* Therefore we implement our own simple pseudo-filesystem and inode
* allocator. We don't use the anonymous inode, as we need the
* meta-data associated with it (address_space) and it is shared by
* other drivers/processes, so it could lead to cxl unmapping VMAs
* from random processes.
*/
#define CXL_PSEUDO_FS_MAGIC 0x1697697f
static int cxl_fs_cnt;
static struct vfsmount *cxl_vfs_mount;
static int cxl_fs_init_fs_context(struct fs_context *fc)
{
return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type cxl_fs_type = {
.name = "cxl",
.owner = THIS_MODULE,
.init_fs_context = cxl_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
void cxl_release_mapping(struct cxl_context *ctx)
{
if (ctx->kernelapi && ctx->mapping)
simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
}
static struct file *cxl_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
struct file *file;
struct inode *inode;
int rc;
/* strongly inspired by anon_inode_getfile() */
if (fops->owner && !try_module_get(fops->owner))
return ERR_PTR(-ENOENT);
rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
if (rc < 0) {
pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
file = ERR_PTR(rc);
goto err_module;
}
inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
if (IS_ERR(inode)) {
file = ERR_CAST(inode);
goto err_fs;
}
file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file))
goto err_inode;
file->private_data = priv;
return file;
err_inode:
iput(inode);
err_fs:
simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
err_module:
module_put(fops->owner);
return file;
}
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
afu = cxl_pci_to_afu(dev);
if (IS_ERR(afu))
return ERR_CAST(afu);
ctx = cxl_context_alloc();
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->kernelapi = true;
/* Make it a slave context. We can promote it later? */
rc = cxl_context_init(ctx, afu, false);
if (rc)
goto err_ctx;
return ctx;
err_ctx:
kfree(ctx);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);
int cxl_release_context(struct cxl_context *ctx)
{
if (ctx->status >= STARTED)
return -EBUSY;
cxl_context_free(ctx);
return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);
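/*
 * Translate an AFU interrupt number into its hardware IRQ number by
 * walking the per-context IRQ ranges: 'num' indexes the concatenation of
 * all ranges, and the result is the matching range's offset plus the
 * remainder. Returns 0 if 'num' falls outside the allocated interrupts.
 */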
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
__u16 range;
int r;
for (r = 0; r < CXL_IRQ_RANGES; r++) {
range = ctx->irqs.range[r];
if (num < range) {
return ctx->irqs.offset[r] + num;
}
num -= range;
}
return 0;
}
int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
if (!ctx)
return -EINVAL;
ctx->priv = priv;
return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);
void *cxl_get_priv(struct cxl_context *ctx)
{
if (!ctx)
return ERR_PTR(-EINVAL);
return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);
int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
int res;
irq_hw_number_t hwirq;
if (num == 0)
num = ctx->afu->pp_irqs;
res = afu_allocate_irqs(ctx, num);
if (res)
return res;
if (!cpu_has_feature(CPU_FTR_HVMODE)) {
/* In a guest, the PSL interrupt is not multiplexed. It was
* allocated above, and we need to set its handler
*/
hwirq = cxl_find_afu_irq(ctx, 0);
if (hwirq)
cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
}
if (ctx->status == STARTED) {
if (cxl_ops->update_ivtes)
cxl_ops->update_ivtes(ctx);
		else
			WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
}
return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
void cxl_free_afu_irqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
unsigned int virq;
if (!cpu_has_feature(CPU_FTR_HVMODE)) {
hwirq = cxl_find_afu_irq(ctx, 0);
if (hwirq) {
virq = irq_find_mapping(NULL, hwirq);
if (virq)
cxl_unmap_irq(virq, ctx);
}
}
afu_irq_name_free(ctx);
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
irq_handler_t handler, void *cookie, char *name)
{
irq_hw_number_t hwirq;
/*
* Find interrupt we are to register.
*/
hwirq = cxl_find_afu_irq(ctx, num);
if (!hwirq)
return -ENOENT;
return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
irq_hw_number_t hwirq;
unsigned int virq;
hwirq = cxl_find_afu_irq(ctx, num);
if (!hwirq)
return;
virq = irq_find_mapping(NULL, hwirq);
if (virq)
cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
/*
* Start a context
* Code here similar to afu_ioctl_start_work().
*/
int cxl_start_context(struct cxl_context *ctx, u64 wed,
struct task_struct *task)
{
int rc = 0;
bool kernel = true;
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
mutex_lock(&ctx->status_mutex);
if (ctx->status == STARTED)
goto out; /* already started */
/*
* Increment the mapped context count for adapter. This also checks
* if adapter_context_lock is taken.
*/
rc = cxl_adapter_context_get(ctx->afu->adapter);
if (rc)
goto out;
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
kernel = false;
/* acquire a reference to the task's mm */
ctx->mm = get_task_mm(current);
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
if (ctx->mm) {
/* decrement the use count from above */
mmput(ctx->mm);
/* make TLBIs for this context global */
mm_context_add_copro(ctx->mm);
}
}
/*
* Increment driver use count. Enables global TLBIs for hash
* and callbacks to handle the segment table
*/
cxl_ctx_get();
/* See the comment in afu_ioctl_start_work() */
smp_mb();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
if (task) {
cxl_context_mm_count_put(ctx);
if (ctx->mm)
mm_context_remove_copro(ctx->mm);
}
goto out;
}
ctx->status = STARTED;
out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
int cxl_process_element(struct cxl_context *ctx)
{
return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
void cxl_set_master(struct cxl_context *ctx)
{
ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);
/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);
#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
int *fd)
{
struct file *file;
int rc, flags, fdtmp;
char *name = NULL;
/* only allow one per context */
if (ctx->mapping)
return ERR_PTR(-EEXIST);
flags = O_RDWR | O_CLOEXEC;
/* This code is similar to anon_inode_getfd() */
rc = get_unused_fd_flags(flags);
if (rc < 0)
return ERR_PTR(rc);
fdtmp = rc;
/*
	 * Patch the file ops. Needs to be careful that this is reentrant safe.
*/
if (fops) {
PATCH_FOPS(open);
PATCH_FOPS(poll);
PATCH_FOPS(read);
PATCH_FOPS(release);
PATCH_FOPS(unlocked_ioctl);
PATCH_FOPS(compat_ioctl);
PATCH_FOPS(mmap);
} else /* use default ops */
fops = (struct file_operations *)&afu_fops;
name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
file = cxl_getfile(name, fops, ctx, flags);
kfree(name);
if (IS_ERR(file))
goto err_fd;
cxl_context_set_mapping(ctx, file->f_mapping);
*fd = fdtmp;
return file;
err_fd:
put_unused_fd(fdtmp);
return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
struct cxl_context *cxl_fops_get_context(struct file *file)
{
return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
void cxl_set_driver_ops(struct cxl_context *ctx,
struct cxl_afu_driver_ops *ops)
{
WARN_ON(!ops->fetch_event || !ops->event_delivered);
atomic_set(&ctx->afu_driver_events, 0);
ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
void cxl_context_events_pending(struct cxl_context *ctx,
unsigned int new_events)
{
atomic_add(new_events, &ctx->afu_driver_events);
wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
int cxl_start_work(struct cxl_context *ctx,
struct cxl_ioctl_start_work *work)
{
int rc;
/* code taken from afu_ioctl_start_work */
if (!(work->flags & CXL_START_WORK_NUM_IRQS))
work->num_interrupts = ctx->afu->pp_irqs;
else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
(work->num_interrupts > ctx->afu->irqs_max)) {
return -EINVAL;
}
rc = afu_register_irqs(ctx, work->num_interrupts);
if (rc)
return rc;
rc = cxl_start_context(ctx, work->work_element_descriptor, current);
if (rc < 0) {
afu_release_irqs(ctx, ctx);
return rc;
}
return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
if (ctx->status != STARTED)
return NULL;
pr_devel("%s: psn_phys%llx size:%llx\n",
__func__, ctx->psn_phys, ctx->psn_size);
return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);
void cxl_psa_unmap(void __iomem *addr)
{
iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
int cxl_afu_reset(struct cxl_context *ctx)
{
struct cxl_afu *afu = ctx->afu;
int rc;
rc = cxl_ops->afu_reset(afu);
if (rc)
return rc;
return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);
void cxl_perst_reloads_same_image(struct cxl_afu *afu,
bool perst_reloads_same_image)
{
afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
struct cxl_afu *afu = cxl_pci_to_afu(dev);
if (IS_ERR(afu))
return -ENODEV;
return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
| linux-master | drivers/misc/cxl/api.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/rtas.h>
#include "cxl.h"
#include "hcalls.h"
#define DOWNLOAD_IMAGE 1
#define VALIDATE_IMAGE 2
struct ai_header {
u16 version;
u8 reserved0[6];
u16 vendor;
u16 device;
u16 subsystem_vendor;
u16 subsystem;
u64 image_offset;
u64 image_length;
u8 reserved1[96];
};
static struct semaphore sem;
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;
struct update_props_workarea {
__be32 phandle;
__be32 state;
__be64 reserved;
__be32 nprops;
} __packed;
struct update_nodes_workarea {
__be32 state;
__be64 unit_address;
__be32 reserved;
} __packed;
#define DEVICE_SCOPE 3
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
#define OPCODE_DELETE 0x01000000
#define OPCODE_UPDATE 0x02000000
#define OPCODE_ADD 0x03000000
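/*
 * RTAS calls operate on the global rtas_data_buf, so copy the caller's
 * work area into it, issue the call with the requested scope and copy
 * the (possibly updated) buffer back, all under rtas_data_buf_lock.
 */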
static int rcall(int token, char *buf, s32 scope)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
return rc;
}
static int update_property(struct device_node *dn, const char *name,
u32 vd, char *value)
{
struct property *new_prop;
u32 *val;
int rc;
new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
if (!new_prop)
return -ENOMEM;
new_prop->name = kstrdup(name, GFP_KERNEL);
if (!new_prop->name) {
kfree(new_prop);
return -ENOMEM;
}
new_prop->length = vd;
new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
if (!new_prop->value) {
kfree(new_prop->name);
kfree(new_prop);
return -ENOMEM;
}
memcpy(new_prop->value, value, vd);
val = (u32 *)new_prop->value;
rc = cxl_update_properties(dn, new_prop);
pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
dn, name, vd, be32_to_cpu(*val));
if (rc) {
kfree(new_prop->name);
kfree(new_prop->value);
kfree(new_prop);
}
return rc;
}
static int update_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
int i, rc, ret;
char *prop_data;
char *buf;
int token;
u32 nprops;
u32 vd;
token = rtas_token("ibm,update-properties");
if (token == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(buf);
return -ENOENT;
}
upwa = (struct update_props_workarea *)&buf[0];
upwa->phandle = phandle;
do {
rc = rcall(token, buf, scope);
if (rc < 0)
break;
prop_data = buf + sizeof(*upwa);
nprops = be32_to_cpu(upwa->nprops);
if (*prop_data == 0) {
prop_data++;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
nprops--;
}
for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
if ((vd != 0x00000000) && (vd != 0x80000000)) {
ret = update_property(dn, prop_name, vd,
prop_data);
if (ret)
pr_err("cxl: Could not update property %s - %i\n",
prop_name, ret);
prop_data += vd;
}
}
} while (rc == 1);
of_node_put(dn);
kfree(buf);
return rc;
}
static int update_devicetree(struct cxl *adapter, s32 scope)
{
struct update_nodes_workarea *unwa;
u32 action, node_count;
int token, rc, i;
__be32 *data, phandle;
char *buf;
token = rtas_token("ibm,update-nodes");
if (token == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
unwa = (struct update_nodes_workarea *)&buf[0];
unwa->unit_address = cpu_to_be64(adapter->guest->handle);
do {
rc = rcall(token, buf, scope);
if (rc && rc != 1)
break;
data = (__be32 *)buf + 4;
while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
action = be32_to_cpu(*data) & NODE_ACTION_MASK;
node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
action, node_count);
data++;
for (i = 0; i < node_count; i++) {
phandle = *data++;
switch (action) {
case OPCODE_DELETE:
/* nothing to do */
break;
case OPCODE_UPDATE:
update_node(phandle, scope);
break;
case OPCODE_ADD:
/* nothing to do, just move pointer */
data++;
break;
}
}
}
} while (rc == 1);
kfree(buf);
return 0;
}
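/*
 * Build the scatter list describing the adapter image and pass it to the
 * firmware call 'fct' (download or validate). The user-supplied data is
 * copied into the preallocated page buffers in CXL_AI_BUFFER_SIZE chunks,
 * with the generated header prepended to chunk 0 when requested. The
 * continue_token returned by the hcall is kept across successive ioctl
 * calls so that a large image can be transferred in several chunks; it is
 * cleared once the download/validation completes.
 */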
static int handle_image(struct cxl *adapter, int operation,
long (*fct)(u64, u64, u64, u64 *),
struct cxl_adapter_image *ai)
{
size_t mod, s_copy, len_chunk = 0;
struct ai_header *header = NULL;
unsigned int entries = 0, i;
void *dest, *from;
int rc = 0, need_header;
/* base adapter image header */
need_header = (ai->flags & CXL_AI_NEED_HEADER);
if (need_header) {
header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
if (!header)
return -ENOMEM;
header->version = cpu_to_be16(1);
header->vendor = cpu_to_be16(adapter->guest->vendor);
header->device = cpu_to_be16(adapter->guest->device);
header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
header->subsystem = cpu_to_be16(adapter->guest->subsystem);
header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
header->image_length = cpu_to_be64(ai->len_image);
}
/* number of entries in the list */
len_chunk = ai->len_data;
if (need_header)
len_chunk += CXL_AI_HEADER_SIZE;
entries = len_chunk / CXL_AI_BUFFER_SIZE;
mod = len_chunk % CXL_AI_BUFFER_SIZE;
if (mod)
entries++;
if (entries > CXL_AI_MAX_ENTRIES) {
rc = -EINVAL;
goto err;
}
	/* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
	 * chunk 0 ----------------------------------------------------
	 *          | header |             data              |
	 *          ----------------------------------------------------
	 * chunk 1 ----------------------------------------------------
	 *          |                    data                 |
	 *          ----------------------------------------------------
	 * ....
	 * chunk n ----------------------------------------------------
	 *          |                    data                 |
	 *          ----------------------------------------------------
	 */
from = (void *) ai->data;
for (i = 0; i < entries; i++) {
dest = buffer[i];
s_copy = CXL_AI_BUFFER_SIZE;
if ((need_header) && (i == 0)) {
/* add adapter image header */
memcpy(buffer[i], header, sizeof(struct ai_header));
s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
dest += CXL_AI_HEADER_SIZE; /* image offset */
}
if ((i == (entries - 1)) && mod)
s_copy = mod;
		/* copy data */
		if (copy_from_user(dest, from, s_copy)) {
			rc = -EFAULT;
			goto err;
		}
/* fill in the list */
le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
if ((i == (entries - 1)) && mod)
le[i].len = cpu_to_be64(mod);
from += s_copy;
}
pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
__func__, operation, need_header, entries, continue_token);
/*
* download/validate the adapter image to the coherent
* platform facility
*/
rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
&continue_token);
if (rc == 0) /* success of download/validation operation */
continue_token = 0;
err:
kfree(header);
return rc;
}
static int transfer_image(struct cxl *adapter, int operation,
struct cxl_adapter_image *ai)
{
int rc = 0;
int afu;
switch (operation) {
case DOWNLOAD_IMAGE:
rc = handle_image(adapter, operation,
&cxl_h_download_adapter_image, ai);
if (rc < 0) {
pr_devel("resetting adapter\n");
cxl_h_reset_adapter(adapter->guest->handle);
}
return rc;
case VALIDATE_IMAGE:
rc = handle_image(adapter, operation,
&cxl_h_validate_adapter_image, ai);
if (rc < 0) {
pr_devel("resetting adapter\n");
cxl_h_reset_adapter(adapter->guest->handle);
return rc;
}
if (rc == 0) {
pr_devel("remove current afu\n");
for (afu = 0; afu < adapter->slices; afu++)
cxl_guest_remove_afu(adapter->afu[afu]);
pr_devel("resetting adapter\n");
cxl_h_reset_adapter(adapter->guest->handle);
/* The entire image has now been
* downloaded and the validation has
* been successfully performed.
* After that, the partition should call
* ibm,update-nodes and
* ibm,update-properties to receive the
* current configuration
*/
rc = update_devicetree(adapter, DEVICE_SCOPE);
transfer = 1;
}
return rc;
}
return -EINVAL;
}
static long ioctl_transfer_image(struct cxl *adapter, int operation,
struct cxl_adapter_image __user *uai)
{
struct cxl_adapter_image ai;
pr_devel("%s\n", __func__);
if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
return -EFAULT;
/*
* Make sure reserved fields and bits are set to 0
*/
if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
(ai.flags & ~CXL_AI_ALL))
return -EINVAL;
return transfer_image(adapter, operation, &ai);
}
static int device_open(struct inode *inode, struct file *file)
{
int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
struct cxl *adapter;
int rc = 0, i;
pr_devel("in %s\n", __func__);
BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);
/* Allows one process to open the device by using a semaphore */
if (down_interruptible(&sem) != 0)
return -EPERM;
if (!(adapter = get_cxl_adapter(adapter_num))) {
rc = -ENODEV;
goto err_unlock;
}
file->private_data = adapter;
continue_token = 0;
transfer = 0;
for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
buffer[i] = NULL;
	/* aligned buffer containing list entries which describe up to
* 1 megabyte of data (256 entries of 4096 bytes each)
* Logical real address of buffer 0 - Buffer 0 length in bytes
* Logical real address of buffer 1 - Buffer 1 length in bytes
* Logical real address of buffer 2 - Buffer 2 length in bytes
* ....
* ....
* Logical real address of buffer N - Buffer N length in bytes
*/
le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
if (!le) {
rc = -ENOMEM;
goto err;
}
for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!buffer[i]) {
rc = -ENOMEM;
goto err1;
}
}
return 0;
err1:
for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
if (buffer[i])
free_page((unsigned long) buffer[i]);
}
if (le)
free_page((unsigned long) le);
err:
put_device(&adapter->dev);
err_unlock:
up(&sem);
return rc;
}
static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct cxl *adapter = file->private_data;
pr_devel("in %s\n", __func__);
if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
return ioctl_transfer_image(adapter,
DOWNLOAD_IMAGE,
(struct cxl_adapter_image __user *)arg);
else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
return ioctl_transfer_image(adapter,
VALIDATE_IMAGE,
(struct cxl_adapter_image __user *)arg);
else
return -EINVAL;
}
static int device_close(struct inode *inode, struct file *file)
{
struct cxl *adapter = file->private_data;
int i;
pr_devel("in %s\n", __func__);
for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
if (buffer[i])
free_page((unsigned long) buffer[i]);
}
if (le)
free_page((unsigned long) le);
up(&sem);
put_device(&adapter->dev);
continue_token = 0;
/* reload the module */
if (transfer)
cxl_guest_reload_module(adapter);
else {
pr_devel("resetting adapter\n");
cxl_h_reset_adapter(adapter->guest->handle);
}
transfer = 0;
return 0;
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = device_open,
.unlocked_ioctl = device_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = device_close,
};
void cxl_guest_remove_chardev(struct cxl *adapter)
{
cdev_del(&adapter->guest->cdev);
}
int cxl_guest_add_chardev(struct cxl *adapter)
{
dev_t devt;
int rc;
devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
cdev_init(&adapter->guest->cdev, &fops);
if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
dev_err(&adapter->dev,
"Unable to add chardev on adapter (card%i): %i\n",
adapter->adapter_num, rc);
goto err;
}
adapter->dev.devt = devt;
sema_init(&sem, 1);
err:
return rc;
}
| linux-master | drivers/misc/cxl/flash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>
#include "cxl.h"
#define to_afu_chardev_m(d) dev_get_drvdata(d)
/********* Adapter attributes **********************************************/
static ssize_t caia_version_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
adapter->caia_minor);
}
static ssize_t psl_revision_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}
static ssize_t base_image_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}
static ssize_t image_loaded_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
if (adapter->user_image_loaded)
return scnprintf(buf, PAGE_SIZE, "user\n");
return scnprintf(buf, PAGE_SIZE, "factory\n");
}
static ssize_t psl_timebase_synced_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
u64 psl_tb, delta;
/* Recompute the status only in native mode */
if (cpu_has_feature(CPU_FTR_HVMODE)) {
psl_tb = adapter->native->sl_ops->timebase_read(adapter);
delta = abs(mftb() - psl_tb);
/* CORE TB and PSL TB difference <= 16usecs ? */
		adapter->psl_timebase_synced = tb_to_ns(delta) < 16000;
pr_devel("PSL timebase %s - delta: 0x%016llx\n",
(tb_to_ns(delta) < 16000) ? "synchronized" :
"not synchronized", tb_to_ns(delta));
}
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}
static ssize_t tunneled_ops_supported_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}
static ssize_t reset_adapter_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl *adapter = to_cxl_adapter(device);
int rc;
int val;
rc = sscanf(buf, "%i", &val);
if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
/*
* See if we can lock the context mapping that's only allowed
* when there are no contexts attached to the adapter. Once
* taken this will also prevent any context from getting activated.
*/
if (val == 1) {
rc = cxl_adapter_context_lock(adapter);
if (rc)
goto out;
rc = cxl_ops->adapter_reset(adapter);
/* In case reset failed release context lock */
if (rc)
cxl_adapter_context_unlock(adapter);
} else if (val == -1) {
/* Perform a forced adapter reset */
rc = cxl_ops->adapter_reset(adapter);
}
out:
return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
if (!adapter->perst_loads_image)
return scnprintf(buf, PAGE_SIZE, "none\n");
if (adapter->perst_select_user)
return scnprintf(buf, PAGE_SIZE, "user\n");
return scnprintf(buf, PAGE_SIZE, "factory\n");
}
static ssize_t load_image_on_perst_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl *adapter = to_cxl_adapter(device);
int rc;
if (!strncmp(buf, "none", 4))
adapter->perst_loads_image = false;
else if (!strncmp(buf, "user", 4)) {
adapter->perst_select_user = true;
adapter->perst_loads_image = true;
} else if (!strncmp(buf, "factory", 7)) {
adapter->perst_select_user = false;
adapter->perst_loads_image = true;
} else
return -EINVAL;
if ((rc = cxl_update_image_control(adapter)))
return rc;
return count;
}
static ssize_t perst_reloads_same_image_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl *adapter = to_cxl_adapter(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}
static ssize_t perst_reloads_same_image_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl *adapter = to_cxl_adapter(device);
int rc;
int val;
rc = sscanf(buf, "%i", &val);
if ((rc != 1) || !(val == 1 || val == 0))
return -EINVAL;
adapter->perst_same_image = (val == 1);
return count;
}
static struct device_attribute adapter_attrs[] = {
__ATTR_RO(caia_version),
__ATTR_RO(psl_revision),
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
__ATTR_RO(psl_timebase_synced),
__ATTR_RO(tunneled_ops_supported),
__ATTR_RW(load_image_on_perst),
__ATTR_RW(perst_reloads_same_image),
__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};
/********* AFU master specific attributes **********************************/
static ssize_t mmio_size_show_master(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_afu_chardev_m(device);
return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}
static ssize_t pp_mmio_off_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_afu_chardev_m(device);
return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}
static ssize_t pp_mmio_len_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_afu_chardev_m(device);
return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}
static struct device_attribute afu_master_attrs[] = {
__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
__ATTR_RO(pp_mmio_off),
__ATTR_RO(pp_mmio_len),
};
/********* AFU attributes **************************************************/
static ssize_t mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
if (afu->pp_size)
return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}
static ssize_t reset_store_afu(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(device);
int rc;
/* Not safe to reset if it is currently in use */
mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr)) {
rc = -EBUSY;
goto err;
}
if ((rc = cxl_ops->afu_reset(afu)))
goto err;
rc = count;
err:
mutex_unlock(&afu->contexts_lock);
return rc;
}
static ssize_t irqs_min_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}
static ssize_t irqs_max_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}
static ssize_t irqs_max_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(device);
ssize_t ret;
int irqs_max;
ret = sscanf(buf, "%i", &irqs_max);
if (ret != 1)
return -EINVAL;
if (irqs_max < afu->pp_irqs)
return -EINVAL;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (irqs_max > afu->adapter->user_irqs)
return -EINVAL;
} else {
/* pHyp sets a per-AFU limit */
if (irqs_max > afu->guest->max_ints)
return -EINVAL;
}
afu->irqs_max = irqs_max;
return count;
}
static ssize_t modes_supported_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
char *p = buf, *end = buf + PAGE_SIZE;
if (afu->modes_supported & CXL_MODE_DEDICATED)
p += scnprintf(p, end - p, "dedicated_process\n");
if (afu->modes_supported & CXL_MODE_DIRECTED)
p += scnprintf(p, end - p, "afu_directed\n");
return (p - buf);
}
static ssize_t prefault_mode_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
switch (afu->prefault_mode) {
case CXL_PREFAULT_WED:
return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
case CXL_PREFAULT_ALL:
return scnprintf(buf, PAGE_SIZE, "all\n");
default:
return scnprintf(buf, PAGE_SIZE, "none\n");
}
}
static ssize_t prefault_mode_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(device);
enum prefault_modes mode = -1;
if (!strncmp(buf, "none", 4))
mode = CXL_PREFAULT_NONE;
else {
if (!radix_enabled()) {
/* only allowed when not in radix mode */
if (!strncmp(buf, "work_element_descriptor", 23))
mode = CXL_PREFAULT_WED;
if (!strncmp(buf, "all", 3))
mode = CXL_PREFAULT_ALL;
} else {
dev_err(device, "Cannot prefault with radix enabled\n");
}
}
if (mode == -1)
return -EINVAL;
afu->prefault_mode = mode;
return count;
}
static ssize_t mode_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct cxl_afu *afu = to_cxl_afu(device);
if (afu->current_mode == CXL_MODE_DEDICATED)
return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
if (afu->current_mode == CXL_MODE_DIRECTED)
return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
return scnprintf(buf, PAGE_SIZE, "none\n");
}
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
const char *buf, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(device);
int old_mode, mode = -1;
int rc = -EBUSY;
/* can't change this if we have a user */
mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr))
goto err;
if (!strncmp(buf, "dedicated_process", 17))
mode = CXL_MODE_DEDICATED;
if (!strncmp(buf, "afu_directed", 12))
mode = CXL_MODE_DIRECTED;
if (!strncmp(buf, "none", 4))
mode = 0;
if (mode == -1) {
rc = -EINVAL;
goto err;
}
/*
* afu_deactivate_mode needs to be done outside the lock, prevent
* other contexts coming in before we are ready:
*/
old_mode = afu->current_mode;
afu->current_mode = 0;
afu->num_procs = 0;
mutex_unlock(&afu->contexts_lock);
if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
return rc;
if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
return rc;
return count;
err:
mutex_unlock(&afu->contexts_lock);
return rc;
}
static ssize_t api_version_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}
static ssize_t api_version_compatible_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}
static struct device_attribute afu_attrs[] = {
__ATTR_RO(mmio_size),
__ATTR_RO(irqs_min),
__ATTR_RW(irqs_max),
__ATTR_RO(modes_supported),
__ATTR_RW(mode),
__ATTR_RW(prefault_mode),
__ATTR_RO(api_version),
__ATTR_RO(api_version_compatible),
__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
struct device_attribute *dev_attr;
int i, rc;
for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
dev_attr = &adapter_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_ADAPTER_ATTRS)) {
if ((rc = device_create_file(&adapter->dev, dev_attr)))
goto err;
}
}
return 0;
err:
for (i--; i >= 0; i--) {
dev_attr = &adapter_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_ADAPTER_ATTRS))
device_remove_file(&adapter->dev, dev_attr);
}
return rc;
}
void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
struct device_attribute *dev_attr;
int i;
for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
dev_attr = &adapter_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_ADAPTER_ATTRS))
device_remove_file(&adapter->dev, dev_attr);
}
}
struct afu_config_record {
struct kobject kobj;
struct bin_attribute config_attr;
struct list_head list;
int cr;
u16 device;
u16 vendor;
u32 class;
};
#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
static ssize_t vendor_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct afu_config_record *cr = to_cr(kobj);
return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}
static ssize_t device_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct afu_config_record *cr = to_cr(kobj);
return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}
static ssize_t class_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct afu_config_record *cr = to_cr(kobj);
return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}
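/*
 * Raw read of an AFU configuration record through sysfs: the record can
 * only be accessed with aligned 64-bit reads, so fetch each containing
 * doubleword and copy out the bytes covering the requested range. Words
 * that fail to read are reported as all-ones.
 */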
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
u64 i, j, val, rc;
for (i = 0; i < count;) {
rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
if (rc)
val = ~0ULL;
for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
buf[i] = (val >> (j * 8)) & 0xff;
}
return count;
}
static struct kobj_attribute vendor_attribute =
__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
__ATTR_RO(device);
static struct kobj_attribute class_attribute =
__ATTR_RO(class);
static struct attribute *afu_cr_attrs[] = {
&vendor_attribute.attr,
&device_attribute.attr,
&class_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(afu_cr);
static void release_afu_config_record(struct kobject *kobj)
{
struct afu_config_record *cr = to_cr(kobj);
kfree(cr);
}
static struct kobj_type afu_config_record_type = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_afu_config_record,
.default_groups = afu_cr_groups,
};
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
struct afu_config_record *cr;
int rc;
cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
if (!cr)
return ERR_PTR(-ENOMEM);
cr->cr = cr_idx;
rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
if (rc)
goto err;
rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
if (rc)
goto err;
rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
if (rc)
goto err;
cr->class >>= 8;
/*
* Export raw AFU PCIe like config record. For now this is read only by
* root - we can expand that later to be readable by non-root and maybe
* even writable provided we have a good use-case. Once we support
* exposing AFUs through a virtual PHB they will get that for free from
* Linux' PCI infrastructure, but until then it's not clear that we
* need it for anything since the main use case is just identifying
* AFUs, which can be done via the vendor, device and class attributes.
*/
sysfs_bin_attr_init(&cr->config_attr);
cr->config_attr.attr.name = "config";
cr->config_attr.attr.mode = S_IRUSR;
cr->config_attr.size = afu->crs_len;
cr->config_attr.read = afu_read_config;
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
if (rc)
goto err1;
rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
if (rc)
goto err1;
rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
if (rc)
goto err2;
return cr;
err2:
sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
kobject_put(&cr->kobj);
return ERR_PTR(rc);
err:
kfree(cr);
return ERR_PTR(rc);
}
void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
struct device_attribute *dev_attr;
struct afu_config_record *cr, *tmp;
int i;
/* remove the err buffer bin attribute */
if (afu->eb_len)
device_remove_bin_file(&afu->dev, &afu->attr_eb);
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
dev_attr = &afu_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_ATTRS))
device_remove_file(&afu->dev, &afu_attrs[i]);
}
list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
kobject_put(&cr->kobj);
}
}
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
struct device_attribute *dev_attr;
struct afu_config_record *cr;
int i, rc;
INIT_LIST_HEAD(&afu->crs);
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
dev_attr = &afu_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_ATTRS)) {
if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
goto err;
}
}
	/* conditionally create and add the binary file for the error info buffer */
if (afu->eb_len) {
sysfs_attr_init(&afu->attr_eb.attr);
afu->attr_eb.attr.name = "afu_err_buff";
afu->attr_eb.attr.mode = S_IRUGO;
afu->attr_eb.size = afu->eb_len;
afu->attr_eb.read = afu_eb_read;
rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
if (rc) {
dev_err(&afu->dev,
"Unable to create eb attr for the afu. Err(%d)\n",
rc);
goto err;
}
}
for (i = 0; i < afu->crs_num; i++) {
cr = cxl_sysfs_afu_new_cr(afu, i);
if (IS_ERR(cr)) {
rc = PTR_ERR(cr);
goto err1;
}
list_add(&cr->list, &afu->crs);
}
return 0;
err1:
cxl_sysfs_afu_remove(afu);
return rc;
err:
	/* reset the eb_len as we haven't created the bin attr */
afu->eb_len = 0;
for (i--; i >= 0; i--) {
dev_attr = &afu_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_ATTRS))
device_remove_file(&afu->dev, &afu_attrs[i]);
}
return rc;
}
int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
struct device_attribute *dev_attr;
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
dev_attr = &afu_master_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_MASTER_ATTRS)) {
if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
goto err;
}
}
return 0;
err:
for (i--; i >= 0; i--) {
dev_attr = &afu_master_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_MASTER_ATTRS))
device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
}
return rc;
}
void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
struct device_attribute *dev_attr;
int i;
for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
dev_attr = &afu_master_attrs[i];
if (cxl_ops->support_attributes(dev_attr->attr.name,
CXL_AFU_MASTER_ATTRS))
device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
}
}
| linux-master | drivers/misc/cxl/sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>
#include "cxl.h"
#include "trace.h"
static int afu_irq_range_start(void)
{
if (cpu_has_feature(CPU_FTR_HVMODE))
return 1;
return 0;
}
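/*
 * Faults can't be resolved from interrupt context (doing so may sleep and
 * needs the task's mm), so stash the DSISR/DAR in the context and defer
 * the work to the fault handling work queue.
 */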
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
ctx->dsisr = dsisr;
ctx->dar = dar;
schedule_work(&ctx->fault_work);
return IRQ_HANDLED;
}
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
u64 dsisr, dar;
dsisr = irq_info->dsisr;
dar = irq_info->dar;
trace_cxl_psl9_irq(ctx, irq, dsisr, dar);
pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
if (dsisr & CXL_PSL9_DSISR_An_TF) {
pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
return schedule_cxl_fault(ctx, dsisr, dar);
}
if (dsisr & CXL_PSL9_DSISR_An_PE)
return cxl_ops->handle_psl_slice_error(ctx, dsisr,
irq_info->errstat);
if (dsisr & CXL_PSL9_DSISR_An_AE) {
pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
if (ctx->pending_afu_err) {
/*
* This shouldn't happen - the PSL treats these errors
* as fatal and will have reset the AFU, so there's not
* much point buffering multiple AFU errors.
* OTOH if we DO ever see a storm of these come in it's
* probably best that we log them somewhere:
*/
dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
ctx->pe, irq_info->afu_err);
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
ctx->pending_afu_err = 1;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
}
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
return IRQ_HANDLED;
}
if (dsisr & CXL_PSL9_DSISR_An_OC)
pr_devel("CXL interrupt: OS Context Warning\n");
WARN(1, "Unhandled CXL PSL IRQ\n");
return IRQ_HANDLED;
}
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
u64 dsisr, dar;
dsisr = irq_info->dsisr;
dar = irq_info->dar;
trace_cxl_psl_irq(ctx, irq, dsisr, dar);
pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
if (dsisr & CXL_PSL_DSISR_An_DS) {
/*
* We don't inherently need to sleep to handle this, but we do
* need to get a ref to the task's mm, which we can't do from
* irq context without the potential for a deadlock since it
* takes the task_lock. An alternate option would be to keep a
* reference to the task's mm the entire time it has cxl open,
* but to do that we need to solve the issue where we hold a
* ref to the mm, but the mm can hold a ref to the fd after an
* mmap preventing anything from being cleaned up.
*/
pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
return schedule_cxl_fault(ctx, dsisr, dar);
}
if (dsisr & CXL_PSL_DSISR_An_M)
pr_devel("CXL interrupt: PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_P)
pr_devel("CXL interrupt: Storage protection violation\n");
if (dsisr & CXL_PSL_DSISR_An_A)
pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
if (dsisr & CXL_PSL_DSISR_An_S)
pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
if (dsisr & CXL_PSL_DSISR_An_K)
pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
if (dsisr & CXL_PSL_DSISR_An_DM) {
/*
* In some cases we might be able to handle the fault
* immediately if hash_page would succeed, but we still need
* the task's mm, which as above we can't get without a lock
*/
pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
return schedule_cxl_fault(ctx, dsisr, dar);
}
if (dsisr & CXL_PSL_DSISR_An_ST)
WARN(1, "CXL interrupt: Segment Table PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_UR)
pr_devel("CXL interrupt: AURP PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_PE)
return cxl_ops->handle_psl_slice_error(ctx, dsisr,
irq_info->errstat);
if (dsisr & CXL_PSL_DSISR_An_AE) {
pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
if (ctx->pending_afu_err) {
/*
* This shouldn't happen - the PSL treats these errors
* as fatal and will have reset the AFU, so there's not
* much point buffering multiple AFU errors.
* OTOH if we DO ever see a storm of these come in it's
* probably best that we log them somewhere:
*/
dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
"undelivered to pe %i: 0x%016llx\n",
ctx->pe, irq_info->afu_err);
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
ctx->pending_afu_err = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
}
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
return IRQ_HANDLED;
}
if (dsisr & CXL_PSL_DSISR_An_OC)
pr_devel("CXL interrupt: OS Context Warning\n");
WARN(1, "Unhandled CXL PSL IRQ\n");
return IRQ_HANDLED;
}
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
struct cxl_context *ctx = data;
irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
int irq_off, afu_irq = 0;
__u16 range;
int r;
/*
* Look for the interrupt number.
* On bare-metal, we know range 0 only contains the PSL
* interrupt so we could start counting at range 1 and initialize
* afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be accounted for. Therefore we initialize afu_irq to 0 so the
	 * PSL interrupt is taken into account.
*
* For code-readability, it just seems easier to go over all
* the ranges on bare-metal and guest. The end result is the same.
*/
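	/*
	 * Worked example (illustrative): with ranges {1, 4, 0, 0} on
	 * bare-metal, a hwirq landing at offset 2 of range 1 yields
	 * afu_irq = 1 (all of range 0) + 2 = 3, i.e. the third AFU
	 * interrupt for this context; bit 2 then gets set in irq_bitmap
	 * below.
	 */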
for (r = 0; r < CXL_IRQ_RANGES; r++) {
irq_off = hwirq - ctx->irqs.offset[r];
range = ctx->irqs.range[r];
if (irq_off >= 0 && irq_off < range) {
afu_irq += irq_off;
break;
}
afu_irq += range;
}
if (unlikely(r >= CXL_IRQ_RANGES)) {
WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
ctx->pe, irq, hwirq);
return IRQ_HANDLED;
}
trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
afu_irq, ctx->pe, irq, hwirq);
if (unlikely(!ctx->irq_bitmap)) {
WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
return IRQ_HANDLED;
}
spin_lock(&ctx->lock);
set_bit(afu_irq - 1, ctx->irq_bitmap);
ctx->pending_irq = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
return IRQ_HANDLED;
}
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
irq_handler_t handler, void *cookie, const char *name)
{
unsigned int virq;
int result;
/* IRQ Domain? */
virq = irq_create_mapping(NULL, hwirq);
if (!virq) {
dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
return 0;
}
if (cxl_ops->setup_irq)
cxl_ops->setup_irq(adapter, hwirq, virq);
pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
result = request_irq(virq, handler, 0, name, cookie);
if (result) {
dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
return 0;
}
return virq;
}
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
}
int cxl_register_one_irq(struct cxl *adapter,
irq_handler_t handler,
void *cookie,
irq_hw_number_t *dest_hwirq,
unsigned int *dest_virq,
const char *name)
{
int hwirq, virq;
if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
return hwirq;
if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
goto err;
*dest_hwirq = hwirq;
*dest_virq = virq;
return 0;
err:
cxl_ops->release_one_irq(adapter, hwirq);
return -ENOMEM;
}
void afu_irq_name_free(struct cxl_context *ctx)
{
struct cxl_irq_name *irq_name, *tmp;
list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
kfree(irq_name->name);
list_del(&irq_name->list);
kfree(irq_name);
}
}
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
int rc, r, i, j = 1;
struct cxl_irq_name *irq_name;
int alloc_count;
/*
* In native mode, range 0 is reserved for the multiplexed
* PSL interrupt. It has been allocated when the AFU was initialized.
*
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
* and is the first interrupt from range 0. It still needs to be
* allocated, so bump the count by one.
*/
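	/*
	 * Illustrative numbers: for count = 4 AFU interrupts, bare-metal
	 * allocates 4 hardware IRQs here (the multiplexed PSL interrupt
	 * was already set up with the AFU), while a guest allocates 5
	 * (the per-context PSL interrupt plus the 4 AFU interrupts).
	 */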
if (cpu_has_feature(CPU_FTR_HVMODE))
alloc_count = count;
else
alloc_count = count + 1;
if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
alloc_count)))
return rc;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
/* Multiplexed PSL Interrupt */
ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
ctx->irqs.range[0] = 1;
}
ctx->irq_count = count;
ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!ctx->irq_bitmap)
goto out;
/*
* Allocate names first. If any fail, bail out before allocating
* actual hardware IRQs.
*/
for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
irq_name = kmalloc(sizeof(struct cxl_irq_name),
GFP_KERNEL);
if (!irq_name)
goto out;
irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
dev_name(&ctx->afu->dev),
ctx->pe, j);
if (!irq_name->name) {
kfree(irq_name);
goto out;
}
			/* Add to tail so the next loop gets the correct order */
list_add_tail(&irq_name->list, &ctx->irq_names);
j++;
}
}
return 0;
out:
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
bitmap_free(ctx->irq_bitmap);
afu_irq_name_free(ctx);
return -ENOMEM;
}
static void afu_register_hwirqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
struct cxl_irq_name *irq_name;
int r, i;
irqreturn_t (*handler)(int irq, void *data);
/* We've allocated all memory now, so let's do the irq allocations */
irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
if (r == 0 && i == 0)
/*
* The very first interrupt of range 0 is
* always the PSL interrupt, but we only
* need to connect a handler for guests,
* because there's one PSL interrupt per
* context.
* On bare-metal, the PSL interrupt is
* multiplexed and was setup when the AFU
* was configured.
*/
handler = cxl_ops->psl_interrupt;
else
handler = cxl_irq_afu;
cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
irq_name->name);
irq_name = list_next_entry(irq_name, list);
}
}
}
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
int rc;
rc = afu_allocate_irqs(ctx, count);
if (rc)
return rc;
afu_register_hwirqs(ctx);
return 0;
}
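/*
 * Typical lifecycle (sketch, based on the helpers in this file): a context
 * calls afu_register_irqs(ctx, n) once its interrupt count is known, and
 * the matching teardown is afu_release_irqs(ctx, cookie) below, which
 * unmaps every virq, frees the interrupt names and returns the ranges to
 * the adapter.
 */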
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
irq_hw_number_t hwirq;
unsigned int virq;
int r, i;
for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
virq = irq_find_mapping(NULL, hwirq);
if (virq)
cxl_unmap_irq(virq, cookie);
}
}
afu_irq_name_free(ctx);
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
ctx->irq_count = 0;
}
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
dev_crit(&afu->dev,
"PSL Slice error received. Check AFU for root cause.\n");
dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
if (serr & CXL_PSL_SERR_An_afuto)
dev_crit(&afu->dev, "AFU MMIO Timeout\n");
if (serr & CXL_PSL_SERR_An_afudis)
dev_crit(&afu->dev,
"MMIO targeted Accelerator that was not enabled\n");
if (serr & CXL_PSL_SERR_An_afuov)
dev_crit(&afu->dev, "AFU CTAG Overflow\n");
if (serr & CXL_PSL_SERR_An_badsrc)
dev_crit(&afu->dev, "Bad Interrupt Source\n");
if (serr & CXL_PSL_SERR_An_badctx)
dev_crit(&afu->dev, "Bad Context Handle\n");
if (serr & CXL_PSL_SERR_An_llcmdis)
dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
if (serr & CXL_PSL_SERR_An_llcmdto)
dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
if (serr & CXL_PSL_SERR_An_afupar)
dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
if (serr & CXL_PSL_SERR_An_afudup)
dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
if (serr & CXL_PSL_SERR_An_AE)
dev_crit(&afu->dev,
"AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}
| linux-master | drivers/misc/cxl/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015 IBM Corp.
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include "hcalls.h"
#include "trace.h"
#define CXL_HCALL_TIMEOUT 60000
#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000
#define H_ATTACH_CA_PROCESS 0x344
#define H_CONTROL_CA_FUNCTION 0x348
#define H_DETACH_CA_PROCESS 0x34C
#define H_COLLECT_CA_INT_INFO 0x350
#define H_CONTROL_CA_FAULTS 0x354
#define H_DOWNLOAD_CA_FUNCTION 0x35C
#define H_DOWNLOAD_CA_FACILITY 0x364
#define H_CONTROL_CA_FACILITY 0x368
#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */
#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */
#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */
#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */
#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */
#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */
#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */
#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */
#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */
#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */
#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */
#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */
#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1
#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2
#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */
#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */
#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */
#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */
#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \
{ \
unsigned int delay, total_delay = 0; \
u64 token = 0; \
\
memset(retbuf, 0, sizeof(retbuf)); \
while (1) { \
rc = call(fn, retbuf, __VA_ARGS__, token); \
token = retbuf[0]; \
if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \
break; \
\
if (rc == H_BUSY) \
delay = 10; \
else \
delay = get_longbusy_msecs(rc); \
\
total_delay += delay; \
if (total_delay > CXL_HCALL_TIMEOUT) { \
WARN(1, "Warning: Giving up waiting for CXL hcall " \
"%#x after %u msec\n", fn, total_delay); \
rc = H_BUSY; \
break; \
} \
msleep(delay); \
} \
}
#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__)
#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__)
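/*
 * Usage sketch (see the callers below, e.g. cxl_h_detach_process()): the
 * wrappers retry the hcall while the hypervisor returns H_BUSY or a
 * long-busy code, sleeping for the hinted delay, and give up with H_BUSY
 * after CXL_HCALL_TIMEOUT msec:
 *
 *	CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS,
 *			      unit_address, process_token);
 */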
#define _PRINT_MSG(rc, format, ...) \
{ \
if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \
pr_err(format, __VA_ARGS__); \
else \
pr_devel(format, __VA_ARGS__); \
} \
static char *afu_op_names[] = {
"UNKNOWN_OP", /* 0 undefined */
"RESET", /* 1 */
"SUSPEND_PROCESS", /* 2 */
"RESUME_PROCESS", /* 3 */
"READ_ERR_STATE", /* 4 */
"GET_AFU_ERR", /* 5 */
"GET_CONFIG", /* 6 */
"GET_DOWNLOAD_STATE", /* 7 */
"TERMINATE_PROCESS", /* 8 */
"COLLECT_VPD", /* 9 */
"UNKNOWN_OP", /* 10 undefined */
"GET_FUNCTION_ERR_INT", /* 11 */
"ACK_FUNCTION_ERR_INT", /* 12 */
"GET_ERROR_LOG", /* 13 */
};
static char *control_adapter_op_names[] = {
"UNKNOWN_OP", /* 0 undefined */
"RESET", /* 1 */
"COLLECT_VPD", /* 2 */
};
static char *download_op_names[] = {
"UNKNOWN_OP", /* 0 undefined */
"DOWNLOAD", /* 1 */
"VALIDATE", /* 2 */
};
static char *op_str(unsigned int op, char *name_array[], int array_len)
{
if (op >= array_len)
return "UNKNOWN_OP";
return name_array[op];
}
#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array))
#define OP_STR_AFU(op) OP_STR(op, afu_op_names)
#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names)
#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names)
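/*
 * For example, OP_STR_AFU(H_CONTROL_CA_FUNCTION_RESET) resolves to "RESET",
 * while any out-of-range or undefined opcode falls back to "UNKNOWN_OP".
 * These strings only feed the trace/debug messages below.
 */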
long cxl_h_attach_process(u64 unit_address,
struct cxl_process_element_hcall *element,
u64 *process_token, u64 *mmio_addr, u64 *mmio_size)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element));
_PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n",
unit_address, virt_to_phys(element), rc);
trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc);
pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n",
retbuf[0], retbuf[1], retbuf[2]);
cxl_dump_debug_buffer(element, sizeof(*element));
switch (rc) {
case H_SUCCESS: /* The process info is attached to the coherent platform function */
*process_token = retbuf[0];
if (mmio_addr)
*mmio_addr = retbuf[1];
if (mmio_size)
*mmio_size = retbuf[2];
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
case H_FUNCTION: /* The function is not supported. */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */
case H_HARDWARE: /* A hardware event prevented the attach operation */
case H_STATE: /* The coherent platform function is not in a valid state */
case H_BUSY:
return -EBUSY;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
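/*
 * Note: the process_token returned on success is the handle that the other
 * per-process hcalls in this file (detach, suspend, resume, terminate and
 * H_COLLECT_CA_INT_INFO) expect as their "process-token" parameter.
 */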
/*
* cxl_h_detach_process - Detach a process element from a coherent
* platform function.
*/
long cxl_h_detach_process(u64 unit_address, u64 process_token)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token);
_PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc);
trace_cxl_hcall_detach(unit_address, process_token, rc);
switch (rc) {
case H_SUCCESS: /* The process was detached from the coherent platform function */
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
case H_RESOURCE: /* The function has page table mappings for MMIO */
case H_HARDWARE: /* A hardware event prevented the detach operation */
case H_STATE: /* The coherent platform function is not in a valid state */
case H_BUSY:
return -EBUSY;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
/*
* cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
* the partition to manipulate or query
* certain coherent platform function behaviors.
*/
static long cxl_h_control_function(u64 unit_address, u64 op,
u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
{
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
long rc;
CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
_PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
switch (rc) {
case H_SUCCESS: /* The operation is completed for the coherent platform function */
if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
*out = retbuf[0];
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
case H_FUNCTION: /* The function is not supported. */
case H_NOT_FOUND: /* The operation supplied was not valid */
case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
	case H_SG_LIST: /* A block list entry was invalid */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
case H_RESOURCE: /* The function has page table mappings for MMIO */
case H_HARDWARE: /* A hardware event prevented the attach operation */
case H_STATE: /* The coherent platform function is not in a valid state */
case H_BUSY:
return -EBUSY;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
/*
* cxl_h_reset_afu - Perform a reset to the coherent platform function.
*/
long cxl_h_reset_afu(u64 unit_address)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_RESET,
0, 0, 0, 0,
NULL);
}
/*
* cxl_h_suspend_process - Suspend a process from being executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
*/
long cxl_h_suspend_process(u64 unit_address, u64 process_token)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
process_token, 0, 0, 0,
NULL);
}
/*
* cxl_h_resume_process - Resume a process to be executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
*/
long cxl_h_resume_process(u64 unit_address, u64 process_token)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
process_token, 0, 0, 0,
NULL);
}
/*
* cxl_h_read_error_state - Checks the error state of the coherent
* platform function.
* R4 contains the error state
*/
long cxl_h_read_error_state(u64 unit_address, u64 *state)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_READ_ERR_STATE,
0, 0, 0, 0,
state);
}
/*
* cxl_h_get_afu_err - collect the AFU error buffer
* Parameter1 = byte offset into error buffer to retrieve, valid values
* are between 0 and (ibm,error-buffer-size - 1)
* Parameter2 = 4K aligned real address of error buffer, to be filled in
* Parameter3 = length of error buffer, valid values are 4K or less
*/
long cxl_h_get_afu_err(u64 unit_address, u64 offset,
u64 buf_address, u64 len)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_GET_AFU_ERR,
offset, buf_address, len, 0,
NULL);
}
/*
* cxl_h_get_config - collect configuration record for the
* coherent platform function
* Parameter1 = # of configuration record to retrieve, valid values are
* between 0 and (ibm,#config-records - 1)
* Parameter2 = byte offset into configuration record to retrieve,
* valid values are between 0 and (ibm,config-record-size - 1)
* Parameter3 = 4K aligned real address of configuration record buffer,
* to be filled in
* Parameter4 = length of configuration buffer, valid values are 4K or less
*/
long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
u64 buf_address, u64 len)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_GET_CONFIG,
cr_num, offset, buf_address, len,
NULL);
}
/*
* cxl_h_terminate_process - Terminate the process before completion
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
*/
long cxl_h_terminate_process(u64 unit_address, u64 process_token)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS,
process_token, 0, 0, 0,
NULL);
}
/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = # of VPD record to retrieve, valid values are between 0
* and (ibm,#config-records - 1).
* Parameter2 = 4K naturally aligned real buffer containing block
* list entries
* Parameter3 = number of block list entries in the block list, valid
* values are between 0 and 256
*/
long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
u64 num, u64 *out)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_COLLECT_VPD,
record, list_address, num, 0,
out);
}
/*
* cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
*/
long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT,
0, 0, 0, 0, reg);
}
/*
* cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
* based on an interrupt
* Parameter1 = value to write to the function-wide error interrupt register
*/
long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT,
value, 0, 0, 0,
NULL);
}
/*
* cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
* an error log
*/
long cxl_h_get_error_log(u64 unit_address, u64 value)
{
return cxl_h_control_function(unit_address,
H_CONTROL_CA_FUNCTION_GET_ERROR_LOG,
0, 0, 0, 0,
NULL);
}
/*
* cxl_h_collect_int_info - Collect interrupt info about a coherent
* platform function after an interrupt occurred.
*/
long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
struct cxl_irq_info *info)
{
long rc;
BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info,
unit_address, process_token);
_PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n",
unit_address, process_token, rc);
trace_cxl_hcall_collect_int_info(unit_address, process_token, rc);
switch (rc) {
case H_SUCCESS: /* The interrupt info is returned in return registers. */
pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n",
info->dsisr, info->dar, info->dsr, info->reserved,
info->afu_err, info->errstat);
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */
case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/
case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */
return -EBUSY;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
/*
* cxl_h_control_faults - Control the operation of a coherent platform
* function after a fault occurs.
*
* Parameters
* control-mask: value to control the faults
* looks like PSL_TFC_An shifted >> 32
* reset-mask: mask to control reset of function faults
* Set reset_mask = 1 to reset PSL errors
*/
long cxl_h_control_faults(u64 unit_address, u64 process_token,
u64 control_mask, u64 reset_mask)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
memset(retbuf, 0, sizeof(retbuf));
rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address,
H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token,
control_mask, reset_mask);
_PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n",
unit_address, process_token, control_mask, reset_mask,
rc, retbuf[0]);
trace_cxl_hcall_control_faults(unit_address, process_token,
control_mask, reset_mask, retbuf[0], rc);
switch (rc) {
case H_SUCCESS: /* Faults were successfully controlled for the function. */
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
return -EINVAL;
case H_HARDWARE: /* A hardware event prevented the control of faults. */
case H_STATE: /* The function was in an invalid state. */
case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */
return -EBUSY;
case H_FUNCTION: /* The function is not supported */
case H_NOT_FOUND: /* The operation supplied was not valid */
return -EINVAL;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
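/*
 * Illustrative (assumed) caller pattern, not taken from this file: a guest
 * fault path that wants to acknowledge a fault the way bare-metal writes
 * PSL_TFC_An could do something like
 *
 *	cxl_h_control_faults(unit_address, process_token, tfc >> 32, 0);
 *
 * following the "PSL_TFC_An shifted >> 32" convention documented above.
 */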
/*
* cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
* allows the partition to manipulate or query
* certain coherent platform facility behaviors.
*/
static long cxl_h_control_facility(u64 unit_address, u64 op,
u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
{
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
long rc;
CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
_PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
switch (rc) {
case H_SUCCESS: /* The operation is completed for the coherent platform facility */
if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
*out = retbuf[0];
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
case H_FUNCTION: /* The function is not supported. */
case H_NOT_FOUND: /* The operation supplied was not valid */
case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
	case H_SG_LIST: /* A block list entry was invalid */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
case H_RESOURCE: /* The function has page table mappings for MMIO */
case H_HARDWARE: /* A hardware event prevented the attach operation */
case H_STATE: /* The coherent platform facility is not in a valid state */
case H_BUSY:
return -EBUSY;
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
/*
* cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
*/
long cxl_h_reset_adapter(u64 unit_address)
{
return cxl_h_control_facility(unit_address,
H_CONTROL_CA_FACILITY_RESET,
0, 0, 0, 0,
NULL);
}
/*
 * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
* Parameter1 = 4K naturally aligned real buffer containing block
* list entries
* Parameter2 = number of block list entries in the block list, valid
* values are between 0 and 256
*/
long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
u64 num, u64 *out)
{
return cxl_h_control_facility(unit_address,
H_CONTROL_CA_FACILITY_COLLECT_VPD,
list_address, num, 0, 0,
out);
}
/*
* cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
 *                    hypervisor call provides platform support for
* downloading a base adapter image to the coherent
* platform facility, and for validating the entire
* image after the download.
* Parameters
* op: operation to perform to the coherent platform function
* Download: operation = 1, the base image in the coherent platform
* facility is first erased, and then
* programmed using the image supplied
* in the scatter/gather list.
* Validate: operation = 2, the base image in the coherent platform
* facility is compared with the image
* supplied in the scatter/gather list.
* list_address: 4K naturally aligned real buffer containing
* scatter/gather list entries.
* num: number of block list entries in the scatter/gather list.
*/
static long cxl_h_download_facility(u64 unit_address, u64 op,
u64 list_address, u64 num,
u64 *out)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
unsigned int delay, total_delay = 0;
u64 token = 0;
long rc;
if (*out != 0)
token = *out;
memset(retbuf, 0, sizeof(retbuf));
while (1) {
rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
unit_address, op, list_address, num,
token);
token = retbuf[0];
if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
break;
if (rc != H_BUSY) {
delay = get_longbusy_msecs(rc);
total_delay += delay;
if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
WARN(1, "Warning: Giving up waiting for CXL hcall "
"%#x after %u msec\n",
H_DOWNLOAD_CA_FACILITY, total_delay);
rc = H_BUSY;
break;
}
msleep(delay);
}
}
_PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
switch (rc) {
case H_SUCCESS: /* The operation is completed for the coherent platform facility */
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied */
case H_FUNCTION: /* The function is not supported. */
	case H_SG_LIST: /* A block list entry was invalid */
case H_BAD_DATA: /* Image verification failed */
return -EINVAL;
case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
case H_RESOURCE: /* The function has page table mappings for MMIO */
case H_HARDWARE: /* A hardware event prevented the attach operation */
case H_STATE: /* The coherent platform facility is not in a valid state */
case H_BUSY:
return -EBUSY;
case H_CONTINUE:
*out = retbuf[0];
return 1; /* More data is needed for the complete image */
default:
WARN(1, "Unexpected return code: %lx", rc);
return -EINVAL;
}
}
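/*
 * A return value of 1 (H_CONTINUE) means the hypervisor wants more of the
 * image: *out then carries a continuation token that must be passed back
 * on the next call. Caller sketch (illustrative only; in practice the
 * scatter/gather list advances between calls):
 *
 *	u64 token = 0;
 *	do {
 *		rc = cxl_h_download_adapter_image(unit_address,
 *						  list_address, num, &token);
 *	} while (rc == 1);
 */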
/*
* cxl_h_download_adapter_image - Download the base image to the coherent
* platform facility.
*/
long cxl_h_download_adapter_image(u64 unit_address,
u64 list_address, u64 num,
u64 *out)
{
return cxl_h_download_facility(unit_address,
H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
list_address, num, out);
}
/*
* cxl_h_validate_adapter_image - Validate the base image in the coherent
* platform facility.
*/
long cxl_h_validate_adapter_image(u64 unit_address,
u64 list_address, u64 num,
u64 *out)
{
return cxl_h_download_facility(unit_address,
H_DOWNLOAD_CA_FACILITY_VALIDATE,
list_address, num, out);
}
| linux-master | drivers/misc/cxl/hcalls.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "cxl.h"
static struct dentry *cxl_debugfs;
/* Helpers to export CXL mmaped IO registers via debugfs */
static int debugfs_io_u64_get(void *data, u64 *val)
{
*val = in_be64((u64 __iomem *)data);
return 0;
}
static int debugfs_io_u64_set(void *data, u64 val)
{
out_be64((u64 __iomem *)data, val);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
"0x%016llx\n");
static void debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
debugfs_create_file_unsafe(name, mode, parent, (void __force *)value,
&fops_io_x64);
}
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
debugfs_create_io_x64("fir_mask", 0400, dir,
_cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK));
debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
debugfs_create_io_x64("debug", 0600, dir,
_cxl_p1_addr(adapter, CXL_PSL9_DEBUG));
debugfs_create_io_x64("xsl-debug", 0600, dir,
_cxl_p1_addr(adapter, CXL_XSL9_DBG));
}
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
}
void cxl_debugfs_adapter_add(struct cxl *adapter)
{
struct dentry *dir;
char buf[32];
if (!cxl_debugfs)
return;
snprintf(buf, 32, "card%i", adapter->adapter_num);
dir = debugfs_create_dir(buf, cxl_debugfs);
adapter->debugfs = dir;
debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
if (adapter->native->sl_ops->debugfs_add_adapter_regs)
adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir);
}
void cxl_debugfs_adapter_remove(struct cxl *adapter)
{
debugfs_remove_recursive(adapter->debugfs);
}
void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
{
debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
}
void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
{
debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
}
void cxl_debugfs_afu_add(struct cxl_afu *afu)
{
struct dentry *dir;
char buf[32];
if (!afu->adapter->debugfs)
return;
snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice);
dir = debugfs_create_dir(buf, afu->adapter->debugfs);
afu->debugfs = dir;
debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
if (afu->adapter->native->sl_ops->debugfs_add_afu_regs)
afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir);
}
void cxl_debugfs_afu_remove(struct cxl_afu *afu)
{
debugfs_remove_recursive(afu->debugfs);
}
void __init cxl_debugfs_init(void)
{
if (!cpu_has_feature(CPU_FTR_HVMODE))
return;
cxl_debugfs = debugfs_create_dir("cxl", NULL);
}
void cxl_debugfs_exit(void)
{
debugfs_remove_recursive(cxl_debugfs);
}
| linux-master | drivers/misc/cxl/debugfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
#include <asm/reg.h>
#include "cxl.h"
#include <misc/cxl.h>
#define CXL_PCI_VSEC_ID 0x1280
#define CXL_VSEC_MIN_SIZE 0x80
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
{ \
pci_read_config_word(dev, vsec + 0x6, dest); \
*dest >>= 4; \
}
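/*
 * The 16-bit word at vsec + 0x6 holds the 4-bit VSEC revision in its low
 * nibble and the 12-bit VSEC length above it (see the decoding in
 * dump_cxl_config_space() below), so shifting right by 4 leaves just the
 * length.
 */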
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0x8, dest)
#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT 0x80
#define CXL_STATUS_MSI_X_FULL 0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW 0x08
#define CXL_STATUS_FLASH_RO 0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01
#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
pci_read_config_word(dev, vsec + 0xc, dest)
#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
pci_read_config_word(dev, vsec + 0x10, dest)
#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */
#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
pci_read_config_dword(dev, vsec + 0x2c, dest)
/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
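/*
 * Reminder: these use PowerPC (MSB 0) bit numbering, where bit 0 is the
 * most significant bit of the 64-bit doubleword. For instance,
 * EXTRACT_PPC_BITS(val, 0, 15) returns the top 16 bits of val, which is
 * how AFUD_NUM_INTS_PER_PROC() below pulls its field out of the first
 * descriptor word.
 */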
#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
/*
* Mostly using these wrappers to avoid confusion:
* priv 1 is BAR2, while priv 2 is BAR0
*/
static inline resource_size_t p1_base(struct pci_dev *dev)
{
return pci_resource_start(dev, 2);
}
static inline resource_size_t p1_size(struct pci_dev *dev)
{
return pci_resource_len(dev, 2);
}
static inline resource_size_t p2_base(struct pci_dev *dev)
{
return pci_resource_start(dev, 0);
}
static inline resource_size_t p2_size(struct pci_dev *dev)
{
return pci_resource_len(dev, 0);
}
static int find_cxl_vsec(struct pci_dev *dev)
{
return pci_find_vsec_capability(dev, PCI_VENDOR_ID_IBM, CXL_PCI_VSEC_ID);
}
static void dump_cxl_config_space(struct pci_dev *dev)
{
int vsec;
u32 val;
dev_info(&dev->dev, "dump_cxl_config_space\n");
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
dev_info(&dev->dev, "BAR0: %#.8x\n", val);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
dev_info(&dev->dev, "BAR1: %#.8x\n", val);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
dev_info(&dev->dev, "BAR2: %#.8x\n", val);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
dev_info(&dev->dev, "BAR3: %#.8x\n", val);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
dev_info(&dev->dev, "BAR4: %#.8x\n", val);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
dev_info(&dev->dev, "BAR5: %#.8x\n", val);
dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
p1_base(dev), p1_size(dev));
dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
p2_base(dev), p2_size(dev));
dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
pci_resource_start(dev, 4), pci_resource_len(dev, 4));
if (!(vsec = find_cxl_vsec(dev)))
return;
#define show_reg(name, what) \
dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
pci_read_config_dword(dev, vsec + 0x0, &val);
show_reg("Cap ID", (val >> 0) & 0xffff);
show_reg("Cap Ver", (val >> 16) & 0xf);
show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
pci_read_config_dword(dev, vsec + 0x4, &val);
show_reg("VSEC ID", (val >> 0) & 0xffff);
show_reg("VSEC Rev", (val >> 16) & 0xf);
show_reg("VSEC Length", (val >> 20) & 0xfff);
pci_read_config_dword(dev, vsec + 0x8, &val);
show_reg("Num AFUs", (val >> 0) & 0xff);
show_reg("Status", (val >> 8) & 0xff);
show_reg("Mode Control", (val >> 16) & 0xff);
show_reg("Reserved", (val >> 24) & 0xff);
pci_read_config_dword(dev, vsec + 0xc, &val);
show_reg("PSL Rev", (val >> 0) & 0xffff);
show_reg("CAIA Ver", (val >> 16) & 0xffff);
pci_read_config_dword(dev, vsec + 0x10, &val);
show_reg("Base Image Rev", (val >> 0) & 0xffff);
show_reg("Reserved", (val >> 16) & 0x0fff);
show_reg("Image Control", (val >> 28) & 0x3);
show_reg("Reserved", (val >> 30) & 0x1);
show_reg("Image Loaded", (val >> 31) & 0x1);
pci_read_config_dword(dev, vsec + 0x14, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x18, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x1c, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x20, &val);
show_reg("AFU Descriptor Offset", val);
pci_read_config_dword(dev, vsec + 0x24, &val);
show_reg("AFU Descriptor Size", val);
pci_read_config_dword(dev, vsec + 0x28, &val);
show_reg("Problem State Offset", val);
pci_read_config_dword(dev, vsec + 0x2c, &val);
show_reg("Problem State Size", val);
pci_read_config_dword(dev, vsec + 0x30, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x34, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x38, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x3c, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x40, &val);
show_reg("PSL Programming Port", val);
pci_read_config_dword(dev, vsec + 0x44, &val);
show_reg("PSL Programming Control", val);
pci_read_config_dword(dev, vsec + 0x48, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x4c, &val);
show_reg("Reserved", val);
pci_read_config_dword(dev, vsec + 0x50, &val);
show_reg("Flash Address Register", val);
pci_read_config_dword(dev, vsec + 0x54, &val);
show_reg("Flash Size Register", val);
pci_read_config_dword(dev, vsec + 0x58, &val);
show_reg("Flash Status/Control Register", val);
pci_read_config_dword(dev, vsec + 0x58, &val);
show_reg("Flash Data Port", val);
#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
int i;
#define show_reg(name, what) \
dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
val = AFUD_READ_INFO(afu);
show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
show_reg("num_of_processes", AFUD_NUM_PROCS(val));
show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
show_reg("req_prog_mode", val & 0xffffULL);
afu_cr_num = AFUD_NUM_CRS(val);
val = AFUD_READ(afu, 0x8);
show_reg("Reserved", val);
val = AFUD_READ(afu, 0x10);
show_reg("Reserved", val);
val = AFUD_READ(afu, 0x18);
show_reg("Reserved", val);
val = AFUD_READ_CR(afu);
show_reg("Reserved", (val >> (63-7)) & 0xff);
show_reg("AFU_CR_len", AFUD_CR_LEN(val));
afu_cr_len = AFUD_CR_LEN(val) * 256;
val = AFUD_READ_CR_OFF(afu);
afu_cr_off = val;
show_reg("AFU_CR_offset", val);
val = AFUD_READ_PPPSA(afu);
show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
val = AFUD_READ_PPPSA_OFF(afu);
show_reg("PerProcessPSA_offset", val);
val = AFUD_READ_EB(afu);
show_reg("Reserved", (val >> (63-7)) & 0xff);
show_reg("AFU_EB_len", AFUD_EB_LEN(val));
val = AFUD_READ_EB_OFF(afu);
show_reg("AFU_EB_offset", val);
for (i = 0; i < afu_cr_num; i++) {
val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
show_reg("CR Vendor", val & 0xffff);
show_reg("CR Device", (val >> 16) & 0xffff);
}
#undef show_reg
}
#define P8_CAPP_UNIT0_ID 0xBA
#define P8_CAPP_UNIT1_ID 0xBE
#define P9_CAPP_UNIT0_ID 0xC0
#define P9_CAPP_UNIT1_ID 0xE0
static int get_phb_index(struct device_node *np, u32 *phb_index)
{
if (of_property_read_u32(np, "ibm,phb-index", phb_index))
return -ENODEV;
return 0;
}
static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
{
/*
* POWER 8:
* - For chips other than POWER8NVL, we only have CAPP 0,
* irrespective of which PHB is used.
* - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
* CAPP 1 is attached to PHB1.
*/
if (cxl_is_power8()) {
if (!pvr_version_is(PVR_POWER8NVL))
return P8_CAPP_UNIT0_ID;
if (phb_index == 0)
return P8_CAPP_UNIT0_ID;
if (phb_index == 1)
return P8_CAPP_UNIT1_ID;
}
/*
* POWER 9:
* PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000)
* PEC1 (PHB1 - PHB2). No capi mode
* PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000)
*/
if (cxl_is_power9()) {
if (phb_index == 0)
return P9_CAPP_UNIT0_ID;
if (phb_index == 3)
return P9_CAPP_UNIT1_ID;
}
return 0;
}
int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
u32 *phb_index, u64 *capp_unit_id)
{
int rc;
struct device_node *np;
const __be32 *prop;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
*chipid = be32_to_cpup(prop);
rc = get_phb_index(np, phb_index);
if (rc) {
pr_err("cxl: invalid phb index\n");
of_node_put(np);
return rc;
}
*capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
*chipid, *phb_index);
return -ENODEV;
}
return 0;
}
static DEFINE_MUTEX(indications_mutex);
static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
u64 *nbwind)
{
static u64 nbw, asn, capi = 0;
struct device_node *np;
const __be32 *prop;
mutex_lock(&indications_mutex);
if (!capi) {
if (!(np = pnv_pci_get_phb_node(dev))) {
mutex_unlock(&indications_mutex);
return -ENODEV;
}
prop = of_get_property(np, "ibm,phb-indications", NULL);
if (!prop) {
nbw = 0x0300UL; /* legacy values */
asn = 0x0400UL;
capi = 0x0200UL;
} else {
nbw = (u64)be32_to_cpu(prop[2]);
asn = (u64)be32_to_cpu(prop[1]);
capi = (u64)be32_to_cpu(prop[0]);
}
of_node_put(np);
}
*capiind = capi;
*asnind = asn;
*nbwind = nbw;
mutex_unlock(&indications_mutex);
return 0;
}
int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
{
u64 xsl_dsnctl;
u64 capiind, asnind, nbwind;
/*
* CAPI Identifier bits [0:7]
* bit 61:60 MSI bits --> 0
* bit 59 TVT selector --> 0
*/
if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
return -ENODEV;
/*
* Tell XSL where to route data to.
* The field chipid should match the PHB CAPI_CMPM register
*/
xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */
xsl_dsnctl |= (capp_unit_id << (63-15));
/* nMMU_ID Defaults to: b’000001001’*/
xsl_dsnctl |= ((u64)0x09 << (63-28));
/*
* Used to identify CAPI packets which should be sorted into
* the Non-Blocking queues by the PHB. This field should match
* the PHB PBL_NBW_CMPM register
* nbwind=0x03, bits [57:58], must include capi indicator.
* Not supported on P9 DD1.
*/
xsl_dsnctl |= (nbwind << (63-55));
/*
* Upper 16b address bits of ASB_Notify messages sent to the
* system. Need to match the PHB’s ASN Compare/Mask Register.
* Not supported on P9 DD1.
*/
xsl_dsnctl |= asnind;
*reg = xsl_dsnctl;
return 0;
}
static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
struct pci_dev *dev)
{
u64 xsl_dsnctl, psl_fircntl;
u64 chipid;
u32 phb_index;
u64 capp_unit_id;
u64 psl_debug;
int rc;
rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
if (rc)
return rc;
cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
/* Set fir_cntl to recommended value for production env */
psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
psl_fircntl |= 0x1ULL; /* ce_thresh */
cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
/* Setup the PSL to transmit packets on the PCIe before the
* CAPP is enabled. Make sure that CAPP virtual machines are disabled
*/
cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
/*
* A response to an ASB_Notify request is returned by the
* system as an MMIO write to the address defined in
* the PSL_TNR_ADDR register.
* keep the Reset Value: 0x00020000E0000000
*/
/* Enable XSL rty limit */
cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
/* Change XSL_INV dummy read threshold */
cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
if (phb_index == 3) {
/* disable machines 31-47 and 20-27 for DMA */
cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
}
/* Snoop machines */
cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
/* Enable NORST and DD2 features */
cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
/*
	 * Check if the PSL has a data-cache. We need to flush the adapter's
	 * data-cache when it is about to be removed.
*/
psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
if (psl_debug & CXL_PSL_DEBUG_CDC) {
dev_dbg(&dev->dev, "No data-cache present\n");
adapter->native->no_data_cache = true;
}
return 0;
}
static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
{
u64 psl_dsnctl, psl_fircntl;
u64 chipid;
u32 phb_index;
u64 capp_unit_id;
int rc;
rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
/* Tell PSL where to route data to */
psl_dsnctl |= (chipid << (63-5));
psl_dsnctl |= (capp_unit_id << (63-13));
cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
/* snoop write mask */
cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
/* set fir_cntl to recommended value for production env */
psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
psl_fircntl |= 0x1ULL; /* ce_thresh */
cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
/* for debugging with trace arrays */
cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
return 0;
}
/* PSL */
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
/* For the PSL this is a multiple for 0 < n <= 7: */
#define PSL_2048_250MHZ_CYCLES 1
static void write_timebase_ctrl_psl8(struct cxl *adapter)
{
cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
static u64 timebase_read_psl9(struct cxl *adapter)
{
return cxl_p1_read(adapter, CXL_PSL9_Timebase);
}
static u64 timebase_read_psl8(struct cxl *adapter)
{
return cxl_p1_read(adapter, CXL_PSL_Timebase);
}
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
struct device_node *np;
adapter->psl_timebase_synced = false;
if (!(np = pnv_pci_get_phb_node(dev)))
return;
/* Do not fail when CAPP timebase sync is not supported by OPAL */
of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
of_node_put(np);
dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
return;
}
of_node_put(np);
/*
* Setup PSL Timebase Control and Status register
* with the recommended Timebase Sync Count value
*/
if (adapter->native->sl_ops->write_timebase_ctrl)
adapter->native->sl_ops->write_timebase_ctrl(adapter);
/* Enable PSL Timebase */
cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
return;
}
static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
{
return 0;
}
static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
{
/* read/write masks for this slice */
cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
/* APC read/write masks for this slice */
cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
/* for debugging with trace arrays */
cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
return 0;
}
int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
unsigned int virq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
int cxl_update_image_control(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
int rc;
int vsec;
u8 image_state;
if (!(vsec = find_cxl_vsec(dev))) {
dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
return -ENODEV;
}
if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
dev_err(&dev->dev, "failed to read image state: %i\n", rc);
return rc;
}
if (adapter->perst_loads_image)
image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
else
image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
if (adapter->perst_select_user)
image_state |= CXL_VSEC_PERST_SELECT_USER;
else
image_state &= ~CXL_VSEC_PERST_SELECT_USER;
if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
dev_err(&dev->dev, "failed to update image control: %i\n", rc);
return rc;
}
return 0;
}
int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirqs(dev, 1);
}
void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}
int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
struct cxl *adapter, unsigned int num)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}
void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
/* Safety check in case we get backported to < 3.17 without M64 */
if ((p1_base(dev) < 0x100000000ULL) ||
(p2_base(dev) < 0x100000000ULL)) {
dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
return -ENODEV;
}
/*
* BAR 4/5 has a special meaning for CXL and must be programmed with a
* special value corresponding to the CXL protocol address range.
* For POWER 8/9 that means bits 48:49 must be set to 10
*/
pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
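	/*
	 * Writing 0x00020000 to the upper half of the 64-bit BAR 4/5 pair
	 * corresponds to address bit 49 being set (0x0002000000000000),
	 * i.e. the special CXL protocol address range described above.
	 */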
return 0;
}
/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
int vsec;
u8 val;
int rc;
dev_info(&dev->dev, "switch card to CXL\n");
if (!(vsec = find_cxl_vsec(dev))) {
dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
return -ENODEV;
}
if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
dev_err(&dev->dev, "failed to read current mode control: %i", rc);
return rc;
}
val &= ~CXL_VSEC_PROTOCOL_MASK;
val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
return rc;
}
/*
* The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
* we must wait 100ms after this mode switch before touching
* PCIe config space.
*/
msleep(100);
return 0;
}
static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
u64 p1n_base, p2n_base, afu_desc;
const u64 p1n_size = 0x100;
const u64 p2n_size = 0x1000;
p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
p2n_base = p2_base(dev) + (afu->slice * p2n_size);
afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
goto err;
if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
goto err1;
if (afu_desc) {
if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
goto err2;
}
return 0;
err2:
iounmap(afu->p2n_mmio);
err1:
iounmap(afu->native->p1n_mmio);
err:
dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
return -ENOMEM;
}
static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
if (afu->p2n_mmio) {
iounmap(afu->p2n_mmio);
afu->p2n_mmio = NULL;
}
if (afu->native->p1n_mmio) {
iounmap(afu->native->p1n_mmio);
afu->native->p1n_mmio = NULL;
}
if (afu->native->afu_desc_mmio) {
iounmap(afu->native->afu_desc_mmio);
afu->native->afu_desc_mmio = NULL;
}
}
void cxl_pci_release_afu(struct device *dev)
{
struct cxl_afu *afu = to_cxl_afu(dev);
pr_devel("%s\n", __func__);
idr_destroy(&afu->contexts_idr);
cxl_release_spa(afu);
kfree(afu->native);
kfree(afu);
}
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
u64 val;
val = AFUD_READ_INFO(afu);
afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
afu->crs_num = AFUD_NUM_CRS(val);
if (AFUD_AFU_DIRECTED(val))
afu->modes_supported |= CXL_MODE_DIRECTED;
if (AFUD_DEDICATED_PROCESS(val))
afu->modes_supported |= CXL_MODE_DEDICATED;
if (AFUD_TIME_SLICED(val))
afu->modes_supported |= CXL_MODE_TIME_SLICED;
val = AFUD_READ_PPPSA(afu);
afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
afu->psa = AFUD_PPPSA_PSA(val);
if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
val = AFUD_READ_CR(afu);
afu->crs_len = AFUD_CR_LEN(val) * 256;
afu->crs_offset = AFUD_READ_CR_OFF(afu);
/* eb_len is in multiple of 4K */
afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
afu->eb_offset = AFUD_READ_EB_OFF(afu);
/* eb_off is 4K aligned so lower 12 bits are always zero */
if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
dev_warn(&afu->dev,
"Invalid AFU error buffer offset %Lx\n",
afu->eb_offset);
dev_info(&afu->dev,
"Ignoring AFU error buffer in the descriptor\n");
/* indicate that no afu buffer exists */
afu->eb_len = 0;
}
return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
int i, rc;
u32 val;
if (afu->psa && afu->adapter->ps_size <
(afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
return -ENODEV;
}
if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);
for (i = 0; i < afu->crs_num; i++) {
rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
if (rc || val == 0) {
dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
return -EINVAL;
}
}
if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
/*
* We could also check this for the dedicated process model
* since the architecture indicates it should be set to 1, but
* in that case we ignore the value and I'd rather not risk
* breaking any existing dedicated process AFUs that left it as
* 0 (not that I'm aware of any). It is clearly an error for an
* AFU directed AFU to set this to 0, and would have previously
* triggered a bug resulting in the maximum not being enforced
* at all since idr_alloc treats 0 as no maximum.
*/
dev_err(&afu->dev, "AFU does not support any processes\n");
return -EINVAL;
}
return 0;
}
static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
{
u64 reg;
/*
* Clear out any regs that contain either an IVTE or address or may be
* waiting on an acknowledgment to try to be a bit safer as we bring
* it online
*/
reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
if (cxl_ops->afu_reset(afu))
return -EIO;
if (cxl_afu_disable(afu))
return -EIO;
if (cxl_psl_purge(afu))
return -EIO;
}
cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
if (reg) {
dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
if (reg & CXL_PSL9_DSISR_An_TF)
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
}
if (afu->adapter->native->sl_ops->register_serr_irq) {
reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
if (reg) {
if (reg & ~0x000000007fffffff)
dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
}
}
reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
if (reg) {
dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
}
return 0;
}
static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
{
u64 reg;
/*
* Clear out any regs that contain either an IVTE or address or may be
* waiting on an acknowledgement to try to be a bit safer as we bring
* it online
*/
reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
if (cxl_ops->afu_reset(afu))
return -EIO;
if (cxl_afu_disable(afu))
return -EIO;
if (cxl_psl_purge(afu))
return -EIO;
}
cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
if (reg) {
dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
if (reg & CXL_PSL_DSISR_TRANS)
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
}
if (afu->adapter->native->sl_ops->register_serr_irq) {
reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
if (reg) {
if (reg & ~0xffff)
dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
}
}
reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
if (reg) {
dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
}
return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
/*
* afu_eb_read:
* Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 byte aligned access. So in case the requested offset/count aren't 8 byte
 * aligned, the function uses a bounce buffer which can be at most PAGE_SIZE.
*/
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count)
{
loff_t aligned_start, aligned_end;
size_t aligned_length;
void *tbuf;
const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
return 0;
/* calculate aligned read window */
count = min((size_t)(afu->eb_len - off), count);
aligned_start = round_down(off, 8);
aligned_end = round_up(off + count, 8);
aligned_length = aligned_end - aligned_start;
/* max we can copy in one read is PAGE_SIZE */
if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
aligned_length = ERR_BUFF_MAX_COPY_SIZE;
count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
}
/* use bounce buffer for copy */
tbuf = (void *)__get_free_page(GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
/* perform aligned read from the mmio region */
memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
memcpy(buf, tbuf + (off & 0x7), count);
free_page((unsigned long)tbuf);
return count;
}
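/*
 * Map and sanitise the slice registers, reset the AFU, read and validate its
 * descriptor, then register the slice error and PSL interrupts.
 */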
static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
int rc;
if ((rc = pci_map_slice_regs(afu, adapter, dev)))
return rc;
if (adapter->native->sl_ops->sanitise_afu_regs) {
rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
if (rc)
goto err1;
}
/* We need to reset the AFU before we can read the AFU descriptor */
if ((rc = cxl_ops->afu_reset(afu)))
goto err1;
if (cxl_verbose)
dump_afu_descriptor(afu);
if ((rc = cxl_read_afu_descriptor(afu)))
goto err1;
if ((rc = cxl_afu_descriptor_looks_ok(afu)))
goto err1;
if (adapter->native->sl_ops->afu_regs_init)
if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
goto err1;
if (adapter->native->sl_ops->register_serr_irq)
if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
goto err1;
if ((rc = cxl_native_register_psl_irq(afu)))
goto err2;
atomic_set(&afu->configured_state, 0);
return 0;
err2:
if (adapter->native->sl_ops->release_serr_irq)
adapter->native->sl_ops->release_serr_irq(afu);
err1:
pci_unmap_slice_regs(afu);
return rc;
}
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
/*
* It's okay to deconfigure when AFU is already locked, otherwise wait
* until there are no readers
*/
if (atomic_read(&afu->configured_state) != -1) {
while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
schedule();
}
cxl_native_release_psl_irq(afu);
if (afu->adapter->native->sl_ops->release_serr_irq)
afu->adapter->native->sl_ops->release_serr_irq(afu);
pci_unmap_slice_regs(afu);
}
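/*
 * Allocate and configure one AFU slice, then register it with the driver
 * core, sysfs, debugfs and the virtual PHB.
 */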
static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
struct cxl_afu *afu;
int rc = -ENOMEM;
afu = cxl_alloc_afu(adapter, slice);
if (!afu)
return -ENOMEM;
afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
if (!afu->native)
goto err_free_afu;
mutex_init(&afu->native->spa_mutex);
rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
if (rc)
goto err_free_native;
rc = pci_configure_afu(afu, adapter, dev);
if (rc)
goto err_free_native;
/* Don't care if this fails */
cxl_debugfs_afu_add(afu);
/*
* After we call this function we must not free the afu directly, even
* if it returns an error!
*/
if ((rc = cxl_register_afu(afu)))
goto err_put_dev;
if ((rc = cxl_sysfs_afu_add(afu)))
goto err_del_dev;
adapter->afu[afu->slice] = afu;
if ((rc = cxl_pci_vphb_add(afu)))
dev_info(&afu->dev, "Can't register vPHB\n");
return 0;
err_del_dev:
device_del(&afu->dev);
err_put_dev:
pci_deconfigure_afu(afu);
cxl_debugfs_afu_remove(afu);
put_device(&afu->dev);
return rc;
err_free_native:
kfree(afu->native);
err_free_afu:
kfree(afu);
return rc;
}
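/*
 * Tear down one AFU slice: remove it from the adapter list, detach all
 * contexts, deconfigure the slice and unregister its device.
 */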
static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
pr_devel("%s\n", __func__);
if (!afu)
return;
cxl_pci_vphb_remove(afu);
cxl_sysfs_afu_remove(afu);
cxl_debugfs_afu_remove(afu);
spin_lock(&afu->adapter->afu_list_lock);
afu->adapter->afu[afu->slice] = NULL;
spin_unlock(&afu->adapter->afu_list_lock);
cxl_context_detach_all(afu);
cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
pci_deconfigure_afu(afu);
device_unregister(&afu->dev);
}
int cxl_pci_reset(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
int rc;
if (adapter->perst_same_image) {
dev_warn(&dev->dev,
"cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
return -EINVAL;
}
dev_info(&dev->dev, "CXL reset\n");
/*
* The adapter is about to be reset, so ignore errors.
*/
cxl_data_cache_flush(adapter);
/* pcie_warm_reset requests a fundamental pci reset which includes a
* PERST assert/deassert. PERST triggers a loading of the image
* if "user" or "factory" is selected in sysfs */
if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
return rc;
}
return rc;
}
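/* Request BAR0/BAR2 and map the adapter-wide p1 and p2 MMIO regions */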
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
if (pci_request_region(dev, 2, "priv 2 regs"))
goto err1;
if (pci_request_region(dev, 0, "priv 1 regs"))
goto err2;
pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
goto err3;
if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
goto err4;
return 0;
err4:
iounmap(adapter->native->p1_mmio);
adapter->native->p1_mmio = NULL;
err3:
pci_release_region(dev, 0);
err2:
pci_release_region(dev, 2);
err1:
return -ENOMEM;
}
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
if (adapter->native->p1_mmio) {
iounmap(adapter->native->p1_mmio);
adapter->native->p1_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 2);
}
if (adapter->native->p2_mmio) {
iounmap(adapter->native->p2_mmio);
adapter->native->p2_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 0);
}
}
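/*
 * Find the CXL VSEC in PCI config space and populate the adapter structure:
 * image state, number of AFUs, AFU descriptor and problem state offsets and
 * sizes (converted to bytes), and the number of user IRQs.
 */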
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
int vsec;
u32 afu_desc_off, afu_desc_size;
u32 ps_off, ps_size;
u16 vseclen;
u8 image_state;
if (!(vsec = find_cxl_vsec(dev))) {
dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
return -ENODEV;
}
CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
if (vseclen < CXL_VSEC_MIN_SIZE) {
dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
return -EINVAL;
}
CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);
/* Convert everything to bytes, because there is NO WAY I'd look at the
* code a month later and forget what units these are in ;-) */
adapter->native->ps_off = ps_off * 64 * 1024;
adapter->ps_size = ps_size * 64 * 1024;
adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
return 0;
}
/*
* Workaround a PCIe Host Bridge defect on some cards, that can cause
* malformed Transaction Layer Packet (TLP) errors to be erroneously
* reported. Mask this error in the Uncorrectable Error Mask Register.
*
* The upper nibble of the PSL revision is used to distinguish between
* different cards. The affected ones have it set to 0.
*/
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
int aer;
u32 data;
if (adapter->psl_rev & 0xf000)
return;
if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
return;
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
if (data & PCI_ERR_UNC_MALF_TLP)
if (data & PCI_ERR_UNC_INTN)
return;
data |= PCI_ERR_UNC_MALF_TLP;
data |= PCI_ERR_UNC_INTN;
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
static bool cxl_compatible_caia_version(struct cxl *adapter)
{
if (cxl_is_power8() && (adapter->caia_major == 1))
return true;
if (cxl_is_power9() && (adapter->caia_major == 2))
return true;
return false;
}
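/*
 * Validate the VSEC contents: no unsupported features, a compatible CAIA
 * version, at least one AFU, AFU descriptors present, and a problem state
 * area that fits within BAR2.
 */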
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
return -EBUSY;
if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
return -EINVAL;
}
if (!cxl_compatible_caia_version(adapter)) {
dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
adapter->caia_major);
return -ENODEV;
}
if (!adapter->slices) {
/* Once we support dynamic reprogramming we can use the card if
* it supports loadable AFUs */
dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
return -EINVAL;
}
if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
return -EINVAL;
}
if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
dev_err(&dev->dev, "ABORTING: Problem state size larger than "
"available in BAR2: 0x%llx > 0x%llx\n",
adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
return -EINVAL;
}
return 0;
}
ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
}
static void cxl_release_adapter(struct device *dev)
{
struct cxl *adapter = to_cxl_adapter(dev);
pr_devel("cxl_release_adapter\n");
cxl_remove_adapter_nr(adapter);
kfree(adapter->native);
kfree(adapter);
}
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
static int sanitise_adapter_regs(struct cxl *adapter)
{
int rc = 0;
/* Clear PSL tberror bit by writing 1 to it */
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
if (adapter->native->sl_ops->invalidate_all) {
/* do not invalidate ERAT entries when not reloading on PERST */
if (cxl_is_power9() && (adapter->perst_loads_image))
return 0;
rc = adapter->native->sl_ops->invalidate_all(adapter);
}
return rc;
}
/* This should contain *only* operations that can safely be done in
* both creation and recovery.
*/
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
int rc;
adapter->dev.parent = &dev->dev;
adapter->dev.release = cxl_release_adapter;
pci_set_drvdata(dev, adapter);
rc = pci_enable_device(dev);
if (rc) {
dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
return rc;
}
if ((rc = cxl_read_vsec(adapter, dev)))
return rc;
if ((rc = cxl_vsec_looks_ok(adapter, dev)))
return rc;
cxl_fixup_malformed_tlp(adapter, dev);
if ((rc = setup_cxl_bars(dev)))
return rc;
if ((rc = switch_card_to_cxl(dev)))
return rc;
if ((rc = cxl_update_image_control(adapter)))
return rc;
if ((rc = cxl_map_adapter_regs(adapter, dev)))
return rc;
if ((rc = sanitise_adapter_regs(adapter)))
goto err;
if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
goto err;
/* Required for devices using CAPP DMA mode, harmless for others */
pci_set_master(dev);
adapter->tunneled_ops_supported = false;
if (cxl_is_power9()) {
if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
dev_info(&dev->dev, "Tunneled operations unsupported\n");
else
adapter->tunneled_ops_supported = true;
}
if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
goto err;
/* If recovery happened, the last step is to turn on snooping.
* In the non-recovery case this has no effect */
if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
goto err;
	/* Ignore error, adapter init is not dependent on timebase sync */
cxl_setup_psl_timebase(adapter, dev);
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
return 0;
err:
cxl_unmap_adapter_regs(adapter);
return rc;
}
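/*
 * Mirror of cxl_configure_adapter(): disable the tunnel BAR on POWER9,
 * release the PSL error IRQ, unmap the adapter registers and disable the
 * PCI device.
 */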
static void cxl_deconfigure_adapter(struct cxl *adapter)
{
struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
if (cxl_is_power9())
pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
cxl_native_release_psl_err_irq(adapter);
cxl_unmap_adapter_regs(adapter);
pci_disable_device(pdev);
}
static void cxl_stop_trace_psl9(struct cxl *adapter)
{
int traceid;
u64 trace_state, trace_mask;
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	/* read each trace array state and issue an mmio to stop it if needed */
for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
trace_mask = (0x3ULL << (62 - traceid * 2));
trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
traceid, trace_state);
/* issue mmio if the trace array isn't in FIN state */
if (trace_state != CXL_PSL9_TRACESTATE_FIN)
cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
0x8400000000000000ULL | traceid);
}
}
static void cxl_stop_trace_psl8(struct cxl *adapter)
{
int slice;
/* Stop the trace */
cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
/* Stop the slice traces */
spin_lock(&adapter->afu_list_lock);
for (slice = 0; slice < adapter->slices; slice++) {
if (adapter->afu[slice])
cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
0x8000000000000000LL);
}
spin_unlock(&adapter->afu_list_lock);
}
static const struct cxl_service_layer_ops psl9_ops = {
.adapter_regs_init = init_implementation_adapter_regs_psl9,
.invalidate_all = cxl_invalidate_all_psl9,
.afu_regs_init = init_implementation_afu_regs_psl9,
.sanitise_afu_regs = sanitise_afu_regs_psl9,
.register_serr_irq = cxl_native_register_serr_irq,
.release_serr_irq = cxl_native_release_serr_irq,
.handle_interrupt = cxl_irq_psl9,
.fail_irq = cxl_fail_irq_psl,
.activate_dedicated_process = cxl_activate_dedicated_process_psl9,
.attach_afu_directed = cxl_attach_afu_directed_psl9,
.attach_dedicated_process = cxl_attach_dedicated_process_psl9,
.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
.debugfs_stop_trace = cxl_stop_trace_psl9,
.timebase_read = timebase_read_psl9,
.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
.needs_reset_before_disable = true,
};
static const struct cxl_service_layer_ops psl8_ops = {
.adapter_regs_init = init_implementation_adapter_regs_psl8,
.invalidate_all = cxl_invalidate_all_psl8,
.afu_regs_init = init_implementation_afu_regs_psl8,
.sanitise_afu_regs = sanitise_afu_regs_psl8,
.register_serr_irq = cxl_native_register_serr_irq,
.release_serr_irq = cxl_native_release_serr_irq,
.handle_interrupt = cxl_irq_psl8,
.fail_irq = cxl_fail_irq_psl,
.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
.attach_afu_directed = cxl_attach_afu_directed_psl8,
.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
.debugfs_stop_trace = cxl_stop_trace_psl8,
.write_timebase_ctrl = write_timebase_ctrl_psl8,
.timebase_read = timebase_read_psl8,
.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
.needs_reset_before_disable = true,
};
static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
if (cxl_is_power8()) {
dev_info(&dev->dev, "Device uses a PSL8\n");
adapter->native->sl_ops = &psl8_ops;
} else {
dev_info(&dev->dev, "Device uses a PSL9\n");
adapter->native->sl_ops = &psl9_ops;
}
}
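/*
 * Allocate and configure a cxl adapter, then register it with the driver
 * core, sysfs and debugfs. Once cxl_register_adapter() has been called, the
 * adapter may only be released through put_device().
 */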
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
struct cxl *adapter;
int rc;
adapter = cxl_alloc_adapter();
if (!adapter)
return ERR_PTR(-ENOMEM);
adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
if (!adapter->native) {
rc = -ENOMEM;
goto err_release;
}
set_sl_ops(adapter, dev);
/* Set defaults for parameters which need to persist over
* configure/reconfigure
*/
adapter->perst_loads_image = true;
adapter->perst_same_image = false;
rc = cxl_configure_adapter(adapter, dev);
if (rc) {
pci_disable_device(dev);
goto err_release;
}
/* Don't care if this one fails: */
cxl_debugfs_adapter_add(adapter);
/*
* After we call this function we must not free the adapter directly,
* even if it returns an error!
*/
if ((rc = cxl_register_adapter(adapter)))
goto err_put_dev;
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_del_dev;
/* Release the context lock as adapter is configured */
cxl_adapter_context_unlock(adapter);
return adapter;
err_del_dev:
device_del(&adapter->dev);
err_put_dev:
/* This should mirror cxl_remove_adapter, except without the
* sysfs parts
*/
cxl_debugfs_adapter_remove(adapter);
cxl_deconfigure_adapter(adapter);
put_device(&adapter->dev);
return ERR_PTR(rc);
err_release:
cxl_release_adapter(&adapter->dev);
return ERR_PTR(rc);
}
static void cxl_pci_remove_adapter(struct cxl *adapter)
{
pr_devel("cxl_remove_adapter\n");
cxl_sysfs_adapter_remove(adapter);
cxl_debugfs_adapter_remove(adapter);
/*
	 * Flush the adapter data cache as it's about to be removed.
*/
cxl_data_cache_flush(adapter);
cxl_deconfigure_adapter(adapter);
device_unregister(&adapter->dev);
}
#define CXL_MAX_PCIEX_PARENT 2
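/*
 * Walk up the device tree counting "pciex" parents; returns true if the
 * device sits behind more than CXL_MAX_PCIEX_PARENT PCIe levels and is
 * therefore in a slot the driver does not support.
 */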
int cxl_slot_is_switched(struct pci_dev *dev)
{
struct device_node *np;
int depth = 0;
if (!(np = pci_device_to_OF_node(dev))) {
pr_err("cxl: np = NULL\n");
return -ENODEV;
}
of_node_get(np);
while (np) {
np = of_get_next_parent(np);
if (!of_node_is_type(np, "pciex"))
break;
depth++;
}
of_node_put(np);
return (depth > CXL_MAX_PCIEX_PARENT);
}
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct cxl *adapter;
int slice;
int rc;
if (cxl_pci_is_vphb_device(dev)) {
dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
return -ENODEV;
}
if (cxl_slot_is_switched(dev)) {
dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
return -ENODEV;
}
if (cxl_is_power9() && !radix_enabled()) {
dev_info(&dev->dev, "Only Radix mode supported\n");
return -ENODEV;
}
if (cxl_verbose)
dump_cxl_config_space(dev);
adapter = cxl_pci_init_adapter(dev);
if (IS_ERR(adapter)) {
dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
return PTR_ERR(adapter);
}
for (slice = 0; slice < adapter->slices; slice++) {
if ((rc = pci_init_afu(adapter, slice, dev))) {
dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
continue;
}
rc = cxl_afu_select_best_mode(adapter->afu[slice]);
if (rc)
dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
}
return 0;
}
static void cxl_remove(struct pci_dev *dev)
{
struct cxl *adapter = pci_get_drvdata(dev);
struct cxl_afu *afu;
int i;
/*
* Lock to prevent someone grabbing a ref through the adapter list as
* we are removing it
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
cxl_pci_remove_afu(afu);
}
cxl_pci_remove_adapter(adapter);
}
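/*
 * Forward an EEH error_detected event to every driver bound on the AFU's
 * virtual PHB and merge the results: DISCONNECT trumps all, NONE trumps
 * NEED_RESET.
 */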
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
pci_channel_state_t state)
{
struct pci_dev *afu_dev;
struct pci_driver *afu_drv;
const struct pci_error_handlers *err_handler;
pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
/* There should only be one entry, but go through the list
* anyway
*/
if (afu == NULL || afu->phb == NULL)
return result;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
afu_drv = to_pci_driver(afu_dev->dev.driver);
if (!afu_drv)
continue;
afu_dev->error_state = state;
err_handler = afu_drv->err_handler;
if (err_handler)
afu_result = err_handler->error_detected(afu_dev,
state);
/* Disconnect trumps all, NONE trumps NEED_RESET */
if (afu_result == PCI_ERS_RESULT_DISCONNECT)
result = PCI_ERS_RESULT_DISCONNECT;
else if ((afu_result == PCI_ERS_RESULT_NONE) &&
(result == PCI_ERS_RESULT_NEED_RESET))
result = PCI_ERS_RESULT_NONE;
}
return result;
}
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct cxl *adapter = pci_get_drvdata(pdev);
struct cxl_afu *afu;
pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
int i;
/* At this point, we could still have an interrupt pending.
* Let's try to get them out of the way before they do
* anything we don't like.
*/
schedule();
/* If we're permanently dead, give up. */
if (state == pci_channel_io_perm_failure) {
spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
/*
* Tell the AFU drivers; but we don't care what they
* say, we're going away.
*/
cxl_vphb_error_detected(afu, state);
}
spin_unlock(&adapter->afu_list_lock);
return PCI_ERS_RESULT_DISCONNECT;
}
/* Are we reflashing?
*
* If we reflash, we could come back as something entirely
* different, including a non-CAPI card. As such, by default
* we don't participate in the process. We'll be unbound and
* the slot re-probed. (TODO: check EEH doesn't blindly rebind
* us!)
*
	 * However, this isn't the entire story: for reliability
* reasons, we usually want to reflash the FPGA on PERST in
* order to get back to a more reliable known-good state.
*
* This causes us a bit of a problem: if we reflash we can't
* trust that we'll come back the same - we could have a new
* image and been PERSTed in order to load that
* image. However, most of the time we actually *will* come
* back the same - for example a regular EEH event.
*
* Therefore, we allow the user to assert that the image is
* indeed the same and that we should continue on into EEH
* anyway.
*/
if (adapter->perst_loads_image && !adapter->perst_same_image) {
/* TODO take the PHB out of CXL mode */
dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
return PCI_ERS_RESULT_NONE;
}
/*
* At this point, we want to try to recover. We'll always
* need a complete slot reset: we don't trust any other reset.
*
* Now, we go through each AFU:
* - We send the driver, if bound, an error_detected callback.
* We expect it to clean up, but it can also tell us to give
* up and permanently detach the card. To simplify things, if
* any bound AFU driver doesn't support EEH, we give up on EEH.
*
* - We detach all contexts associated with the AFU. This
* does not free them, but puts them into a CLOSED state
	 *   which causes any of the associated files to return useful
* errors to userland. It also unmaps, but does not free,
* any IRQs.
*
* - We clean up our side: releasing and unmapping resources we hold
* so we can wire them up again when the hardware comes back up.
*
* Driver authors should note:
*
* - Any contexts you create in your kernel driver (except
* those associated with anonymous file descriptors) are
* your responsibility to free and recreate. Likewise with
* any attached resources.
*
* - We will take responsibility for re-initialising the
* device context (the one set up for you in
* cxl_pci_enable_device_hook and accessed through
* cxl_get_context). If you've attached IRQs or other
	 *   resources to it, they remain yours to free.
*
* You can call the same functions to release resources as you
* normally would: we make sure that these functions continue
* to work when the hardware is down.
*
* Two examples:
*
* 1) If you normally free all your resources at the end of
* each request, or if you use anonymous FDs, your
* error_detected callback can simply set a flag to tell
* your driver not to start any new calls. You can then
* clear the flag in the resume callback.
*
* 2) If you normally allocate your resources on startup:
* * Set a flag in error_detected as above.
* * Let CXL detach your contexts.
* * In slot_reset, free the old resources and allocate new ones.
* * In resume, clear the flag to allow things to start.
*/
/* Make sure no one else changes the afu list */
spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
if (afu == NULL)
continue;
afu_result = cxl_vphb_error_detected(afu, state);
cxl_context_detach_all(afu);
cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
pci_deconfigure_afu(afu);
/* Disconnect trumps all, NONE trumps NEED_RESET */
if (afu_result == PCI_ERS_RESULT_DISCONNECT)
result = PCI_ERS_RESULT_DISCONNECT;
else if ((afu_result == PCI_ERS_RESULT_NONE) &&
(result == PCI_ERS_RESULT_NEED_RESET))
result = PCI_ERS_RESULT_NONE;
}
spin_unlock(&adapter->afu_list_lock);
/* should take the context lock here */
if (cxl_adapter_context_lock(adapter) != 0)
dev_warn(&adapter->dev,
"Couldn't take context lock with %d active-contexts\n",
atomic_read(&adapter->contexts_num));
cxl_deconfigure_adapter(adapter);
return result;
}
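/*
 * EEH slot reset: reconfigure the adapter and every AFU, recreate the
 * default device contexts and let bound AFU drivers take part in the
 * recovery through their slot_reset handlers.
 */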
static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
struct cxl *adapter = pci_get_drvdata(pdev);
struct cxl_afu *afu;
struct cxl_context *ctx;
struct pci_dev *afu_dev;
struct pci_driver *afu_drv;
const struct pci_error_handlers *err_handler;
pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
int i;
if (cxl_configure_adapter(adapter, pdev))
goto err;
/*
* Unlock context activation for the adapter. Ideally this should be
* done in cxl_pci_resume but cxlflash module tries to activate the
* master context as part of slot_reset callback.
*/
cxl_adapter_context_unlock(adapter);
spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
if (afu == NULL)
continue;
if (pci_configure_afu(afu, adapter, pdev))
goto err_unlock;
if (cxl_afu_select_best_mode(afu))
goto err_unlock;
if (afu->phb == NULL)
continue;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
/* Reset the device context.
* TODO: make this less disruptive
*/
ctx = cxl_get_context(afu_dev);
if (ctx && cxl_release_context(ctx))
goto err_unlock;
ctx = cxl_dev_context_init(afu_dev);
if (IS_ERR(ctx))
goto err_unlock;
afu_dev->dev.archdata.cxl_ctx = ctx;
if (cxl_ops->afu_check_and_enable(afu))
goto err_unlock;
afu_dev->error_state = pci_channel_io_normal;
/* If there's a driver attached, allow it to
* chime in on recovery. Drivers should check
* if everything has come back OK, but
* shouldn't start new work until we call
* their resume function.
*/
afu_drv = to_pci_driver(afu_dev->dev.driver);
if (!afu_drv)
continue;
err_handler = afu_drv->err_handler;
if (err_handler && err_handler->slot_reset)
afu_result = err_handler->slot_reset(afu_dev);
if (afu_result == PCI_ERS_RESULT_DISCONNECT)
result = PCI_ERS_RESULT_DISCONNECT;
}
}
spin_unlock(&adapter->afu_list_lock);
return result;
err_unlock:
spin_unlock(&adapter->afu_list_lock);
err:
/* All the bits that happen in both error_detected and cxl_remove
* should be idempotent, so we don't need to worry about leaving a mix
* of unconfigured and reconfigured resources.
*/
dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
static void cxl_pci_resume(struct pci_dev *pdev)
{
struct cxl *adapter = pci_get_drvdata(pdev);
struct cxl_afu *afu;
struct pci_dev *afu_dev;
struct pci_driver *afu_drv;
const struct pci_error_handlers *err_handler;
int i;
	/* Everything is back up now, so drivers should restart work.
* This is not the place to be checking if everything came back up
* properly, because there's no return value: do that in slot_reset.
*/
spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
if (afu == NULL || afu->phb == NULL)
continue;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
afu_drv = to_pci_driver(afu_dev->dev.driver);
if (!afu_drv)
continue;
err_handler = afu_drv->err_handler;
if (err_handler && err_handler->resume)
err_handler->resume(afu_dev);
}
}
spin_unlock(&adapter->afu_list_lock);
}
static const struct pci_error_handlers cxl_err_handler = {
.error_detected = cxl_pci_error_detected,
.slot_reset = cxl_pci_slot_reset,
.resume = cxl_pci_resume,
};
struct pci_driver cxl_pci_driver = {
.name = "cxl-pci",
.id_table = cxl_pci_tbl,
.probe = cxl_probe,
.remove = cxl_remove,
.shutdown = cxl_remove,
.err_handler = &cxl_err_handler,
};
| linux-master | drivers/misc/cxl/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015 IBM Corp.
*/
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include "cxl.h"
#include "hcalls.h"
#include "trace.h"
#define CXL_ERROR_DETECTED_EVENT 1
#define CXL_SLOT_RESET_EVENT 2
#define CXL_RESUME_EVENT 3
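/*
 * Dispatch an error_detected, slot_reset or resume event to the drivers
 * bound on the AFU's virtual PHB.
 */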
static void pci_error_handlers(struct cxl_afu *afu,
int bus_error_event,
pci_channel_state_t state)
{
struct pci_dev *afu_dev;
struct pci_driver *afu_drv;
const struct pci_error_handlers *err_handler;
if (afu->phb == NULL)
return;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
afu_drv = to_pci_driver(afu_dev->dev.driver);
if (!afu_drv)
continue;
err_handler = afu_drv->err_handler;
switch (bus_error_event) {
case CXL_ERROR_DETECTED_EVENT:
afu_dev->error_state = state;
if (err_handler &&
err_handler->error_detected)
err_handler->error_detected(afu_dev, state);
break;
case CXL_SLOT_RESET_EVENT:
afu_dev->error_state = state;
if (err_handler &&
err_handler->slot_reset)
err_handler->slot_reset(afu_dev);
break;
case CXL_RESUME_EVENT:
if (err_handler &&
err_handler->resume)
err_handler->resume(afu_dev);
break;
}
}
}
static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
u64 errstat)
{
pr_devel("in %s\n", __func__);
dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
return cxl_ops->ack_irq(ctx, 0, errstat);
}
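/*
 * Collect adapter or AFU VPD through the hypervisor using a scatter/gather
 * list of page-sized buffers; returns the number of bytes copied into buf.
 */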
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
void *buf, size_t len)
{
unsigned int entries, mod;
unsigned long **vpd_buf = NULL;
struct sg_list *le;
int rc = 0, i, tocopy;
u64 out = 0;
if (buf == NULL)
return -EINVAL;
/* number of entries in the list */
entries = len / SG_BUFFER_SIZE;
mod = len % SG_BUFFER_SIZE;
if (mod)
entries++;
if (entries > SG_MAX_ENTRIES) {
entries = SG_MAX_ENTRIES;
len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
mod = 0;
}
vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
if (!vpd_buf)
return -ENOMEM;
le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
if (!le) {
rc = -ENOMEM;
goto err1;
}
for (i = 0; i < entries; i++) {
vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!vpd_buf[i]) {
rc = -ENOMEM;
goto err2;
}
le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
if ((i == (entries - 1)) && mod)
le[i].len = cpu_to_be64(mod);
}
if (adapter)
rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
virt_to_phys(le), entries, &out);
else
rc = cxl_h_collect_vpd(afu->guest->handle, 0,
virt_to_phys(le), entries, &out);
pr_devel("length of available (entries: %i), vpd: %#llx\n",
entries, out);
if (!rc) {
/*
* hcall returns in 'out' the size of available VPDs.
* It fills the buffer with as much data as possible.
*/
if (out < len)
len = out;
rc = len;
if (out) {
for (i = 0; i < entries; i++) {
if (len < SG_BUFFER_SIZE)
tocopy = len;
else
tocopy = SG_BUFFER_SIZE;
memcpy(buf, vpd_buf[i], tocopy);
buf += tocopy;
len -= tocopy;
}
}
}
err2:
for (i = 0; i < entries; i++) {
if (vpd_buf[i])
free_page((unsigned long) vpd_buf[i]);
}
free_page((unsigned long) le);
err1:
kfree(vpd_buf);
return rc;
}
static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}
static irqreturn_t guest_psl_irq(int irq, void *data)
{
struct cxl_context *ctx = data;
struct cxl_irq_info irq_info;
int rc;
pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
rc = guest_get_irq_info(ctx, &irq_info);
if (rc) {
WARN(1, "Unable to get IRQ info: %i\n", rc);
return IRQ_HANDLED;
}
rc = cxl_irq_psl8(irq, ctx, &irq_info);
return rc;
}
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
u64 state;
int rc = 0;
if (!afu)
return -EIO;
rc = cxl_h_read_error_state(afu->guest->handle, &state);
if (!rc) {
WARN_ON(state != H_STATE_NORMAL &&
state != H_STATE_DISABLE &&
state != H_STATE_TEMP_UNAVAILABLE &&
state != H_STATE_PERM_UNAVAILABLE);
*state_out = state & 0xffffffff;
}
return rc;
}
static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
int rc;
u64 serr, afu_error, dsisr;
rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
if (rc) {
dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
return IRQ_HANDLED;
}
afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
cxl_afu_decode_psl_serr(afu, serr);
dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
if (rc)
dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
rc);
return IRQ_HANDLED;
}
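/*
 * Allocate a contiguous range of 'len' hardware IRQs from the IRQ ranges
 * made available to the guest.
 */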
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
int i, n;
struct irq_avail *cur;
for (i = 0; i < adapter->guest->irq_nranges; i++) {
cur = &adapter->guest->irq_avail[i];
n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
0, len, 0);
if (n < cur->range) {
bitmap_set(cur->bitmap, n, len);
*irq = cur->offset + n;
pr_devel("guest: allocate IRQs %#x->%#x\n",
*irq, *irq + len - 1);
return 0;
}
}
return -ENOSPC;
}
static int irq_free_range(struct cxl *adapter, int irq, int len)
{
int i, n;
struct irq_avail *cur;
if (len == 0)
return -ENOENT;
for (i = 0; i < adapter->guest->irq_nranges; i++) {
cur = &adapter->guest->irq_avail[i];
if (irq >= cur->offset &&
(irq + len) <= (cur->offset + cur->range)) {
n = irq - cur->offset;
bitmap_clear(cur->bitmap, n, len);
pr_devel("guest: release IRQs %#x->%#x\n",
irq, irq + len - 1);
return 0;
}
}
return -ENOENT;
}
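/*
 * Reset the adapter: notify bound AFU drivers of the error, detach all
 * contexts, issue the reset hcall, then run the slot_reset and resume
 * handlers if the reset succeeded.
 */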
static int guest_reset(struct cxl *adapter)
{
struct cxl_afu *afu = NULL;
int i, rc;
pr_devel("Adapter reset request\n");
spin_lock(&adapter->afu_list_lock);
for (i = 0; i < adapter->slices; i++) {
if ((afu = adapter->afu[i])) {
pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
pci_channel_io_frozen);
cxl_context_detach_all(afu);
}
}
rc = cxl_h_reset_adapter(adapter->guest->handle);
for (i = 0; i < adapter->slices; i++) {
if (!rc && (afu = adapter->afu[i])) {
pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
pci_channel_io_normal);
pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
}
}
spin_unlock(&adapter->afu_list_lock);
return rc;
}
static int guest_alloc_one_irq(struct cxl *adapter)
{
int irq;
spin_lock(&adapter->guest->irq_alloc_lock);
if (irq_alloc_range(adapter, 1, &irq))
irq = -ENOSPC;
spin_unlock(&adapter->guest->irq_alloc_lock);
return irq;
}
static void guest_release_one_irq(struct cxl *adapter, int irq)
{
spin_lock(&adapter->guest->irq_alloc_lock);
irq_free_range(adapter, irq, 1);
spin_unlock(&adapter->guest->irq_alloc_lock);
}
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
struct cxl *adapter, unsigned int num)
{
int i, try, irq;
memset(irqs, 0, sizeof(struct cxl_irq_ranges));
spin_lock(&adapter->guest->irq_alloc_lock);
for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
try = num;
while (try) {
if (irq_alloc_range(adapter, try, &irq) == 0)
break;
try /= 2;
}
if (!try)
goto error;
irqs->offset[i] = irq;
irqs->range[i] = try;
num -= try;
}
if (num)
goto error;
spin_unlock(&adapter->guest->irq_alloc_lock);
return 0;
error:
for (i = 0; i < CXL_IRQ_RANGES; i++)
irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
spin_unlock(&adapter->guest->irq_alloc_lock);
return -ENOSPC;
}
static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
struct cxl *adapter)
{
int i;
spin_lock(&adapter->guest->irq_alloc_lock);
for (i = 0; i < CXL_IRQ_RANGES; i++)
irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
spin_unlock(&adapter->guest->irq_alloc_lock);
}
static int guest_register_serr_irq(struct cxl_afu *afu)
{
afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
dev_name(&afu->dev));
if (!afu->err_irq_name)
return -ENOMEM;
if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
guest_slice_irq_err, afu, afu->err_irq_name))) {
kfree(afu->err_irq_name);
afu->err_irq_name = NULL;
return -ENOMEM;
}
return 0;
}
static void guest_release_serr_irq(struct cxl_afu *afu)
{
cxl_unmap_irq(afu->serr_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
kfree(afu->err_irq_name);
}
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
tfc >> 32, (psl_reset_mask != 0));
}
static void disable_afu_irqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
unsigned int virq;
int r, i;
pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
for (r = 0; r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
virq = irq_find_mapping(NULL, hwirq);
disable_irq(virq);
}
}
}
static void enable_afu_irqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
unsigned int virq;
int r, i;
pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
for (r = 0; r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
virq = irq_find_mapping(NULL, hwirq);
enable_irq(virq);
}
}
}
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
u64 offset, u64 *val)
{
unsigned long cr;
char c;
int rc = 0;
if (afu->crs_len < sz)
return -ENOENT;
if (unlikely(offset >= afu->crs_len))
return -ERANGE;
cr = get_zeroed_page(GFP_KERNEL);
if (!cr)
return -ENOMEM;
rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
virt_to_phys((void *)cr), sz);
if (rc)
goto err;
switch (sz) {
case 1:
c = *((char *) cr);
*val = c;
break;
case 2:
*val = in_le16((u16 *)cr);
break;
case 4:
*val = in_le32((unsigned *)cr);
break;
case 8:
*val = in_le64((u64 *)cr);
break;
default:
WARN_ON(1);
}
err:
free_page(cr);
return rc;
}
static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
u32 *out)
{
int rc;
u64 val;
rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
if (!rc)
*out = (u32) val;
return rc;
}
static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
u16 *out)
{
int rc;
u64 val;
rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
if (!rc)
*out = (u16) val;
return rc;
}
static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
u8 *out)
{
int rc;
u64 val;
rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
if (!rc)
*out = (u8) val;
return rc;
}
static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
u64 *out)
{
return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}
static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
/* config record is not writable from guest */
return -EPERM;
}
static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
/* config record is not writable from guest */
return -EPERM;
}
static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
/* config record is not writable from guest */
return -EPERM;
}
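/*
 * Build a process element in a zeroed page and attach it through the
 * hypervisor; on success record the problem state MMIO area for the
 * context and re-enable its AFU interrupts.
 */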
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_process_element_hcall *elem;
struct cxl *adapter = ctx->afu->adapter;
const struct cred *cred;
u32 pid, idx;
int rc, r, i;
u64 mmio_addr, mmio_size;
__be64 flags = 0;
/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
if (!(elem = (struct cxl_process_element_hcall *)
get_zeroed_page(GFP_KERNEL)))
return -ENOMEM;
elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
if (ctx->kernel) {
pid = 0;
flags |= CXL_PE_TRANSLATION_ENABLED;
flags |= CXL_PE_PRIVILEGED_PROCESS;
if (mfmsr() & MSR_SF)
flags |= CXL_PE_64_BIT;
} else {
pid = current->pid;
flags |= CXL_PE_PROBLEM_STATE;
flags |= CXL_PE_TRANSLATION_ENABLED;
if (!test_tsk_thread_flag(current, TIF_32BIT))
flags |= CXL_PE_64_BIT;
cred = get_current_cred();
if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
flags |= CXL_PE_PRIVILEGED_PROCESS;
put_cred(cred);
}
elem->flags = cpu_to_be64(flags);
elem->common.tid = cpu_to_be32(0); /* Unused */
elem->common.pid = cpu_to_be32(pid);
elem->common.csrp = cpu_to_be64(0); /* disable */
elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */
cxl_prefault(ctx, wed);
elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
/*
* Ensure we have at least one interrupt allocated to take faults for
* kernel contexts that may not have allocated any AFU IRQs at all:
*/
if (ctx->irqs.range[0] == 0) {
rc = afu_register_irqs(ctx, 0);
if (rc)
goto out_free;
}
for (r = 0; r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
if (r == 0 && i == 0) {
elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
} else {
idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
}
}
}
elem->common.amr = cpu_to_be64(amr);
elem->common.wed = cpu_to_be64(wed);
disable_afu_irqs(ctx);
rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
&ctx->process_token, &mmio_addr, &mmio_size);
if (rc == H_SUCCESS) {
if (ctx->master || !ctx->afu->pp_psa) {
ctx->psn_phys = ctx->afu->psn_phys;
ctx->psn_size = ctx->afu->adapter->ps_size;
} else {
ctx->psn_phys = mmio_addr;
ctx->psn_size = mmio_size;
}
if (ctx->afu->pp_psa && mmio_size &&
ctx->afu->pp_size == 0) {
/*
* There's no property in the device tree to read the
* pp_size. We only find out at the 1st attach.
* Compared to bare-metal, it is too late and we
* should really lock here. However, on powerVM,
* pp_size is really only used to display in /sys.
* Being discussed with pHyp for their next release.
*/
ctx->afu->pp_size = mmio_size;
}
/* from PAPR: process element is bytes 4-7 of process token */
ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
ctx->pe, ctx->external_pe, ctx->psn_size);
ctx->pe_inserted = true;
enable_afu_irqs(ctx);
}
out_free:
free_page((u64)elem);
return rc;
}
static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
pr_devel("in %s\n", __func__);
ctx->kernel = kernel;
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return attach_afu_directed(ctx, wed, amr);
/* dedicated mode not supported on FW840 */
return -EINVAL;
}
static int detach_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
return 0;
if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
return -1;
return 0;
}
static int guest_detach_process(struct cxl_context *ctx)
{
pr_devel("in %s\n", __func__);
trace_cxl_detach(ctx);
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return detach_afu_directed(ctx);
return -EINVAL;
}
static void guest_release_afu(struct device *dev)
{
struct cxl_afu *afu = to_cxl_afu(dev);
pr_devel("%s\n", __func__);
idr_destroy(&afu->contexts_idr);
kfree(afu->guest);
kfree(afu);
}
ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
return guest_collect_vpd(NULL, afu, buf, len);
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count)
{
void *tbuf = NULL;
int rc = 0;
tbuf = (void *) get_zeroed_page(GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
rc = cxl_h_get_afu_err(afu->guest->handle,
off & 0x7,
virt_to_phys(tbuf),
count);
if (rc)
goto err;
if (count > ERR_BUFF_MAX_COPY_SIZE)
count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
memcpy(buf, tbuf, count);
err:
free_page((u64)tbuf);
return rc;
}
static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
return 0;
}
static bool guest_support_attributes(const char *attr_name,
enum cxl_attrs type)
{
switch (type) {
case CXL_ADAPTER_ATTRS:
if ((strcmp(attr_name, "base_image") == 0) ||
(strcmp(attr_name, "load_image_on_perst") == 0) ||
(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
(strcmp(attr_name, "image_loaded") == 0))
return false;
break;
case CXL_AFU_MASTER_ATTRS:
if ((strcmp(attr_name, "pp_mmio_off") == 0))
return false;
break;
case CXL_AFU_ATTRS:
break;
default:
break;
}
return true;
}
static int activate_afu_directed(struct cxl_afu *afu)
{
int rc;
dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
afu->current_mode = CXL_MODE_DIRECTED;
afu->num_procs = afu->max_procs_virtualised;
if ((rc = cxl_chardev_m_afu_add(afu)))
return rc;
if ((rc = cxl_sysfs_afu_m_add(afu)))
goto err;
if ((rc = cxl_chardev_s_afu_add(afu)))
goto err1;
return 0;
err1:
cxl_sysfs_afu_m_remove(afu);
err:
cxl_chardev_afu_remove(afu);
return rc;
}
static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
if (!mode)
return 0;
if (!(mode & afu->modes_supported))
return -EINVAL;
if (mode == CXL_MODE_DIRECTED)
return activate_afu_directed(afu);
if (mode == CXL_MODE_DEDICATED)
dev_err(&afu->dev, "Dedicated mode not supported\n");
return -EINVAL;
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
afu->current_mode = 0;
afu->num_procs = 0;
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
cxl_ops->afu_reset(afu);
return 0;
}
static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
if (!mode)
return 0;
if (!(mode & afu->modes_supported))
return -EINVAL;
if (mode == CXL_MODE_DIRECTED)
return deactivate_afu_directed(afu);
return 0;
}
static int guest_afu_reset(struct cxl_afu *afu)
{
pr_devel("AFU(%d) reset request\n", afu->slice);
return cxl_h_reset_afu(afu->guest->handle);
}
static int guest_map_slice_regs(struct cxl_afu *afu)
{
if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
afu->slice);
return -ENOMEM;
}
return 0;
}
static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
if (afu->p2n_mmio)
iounmap(afu->p2n_mmio);
}
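/*
 * Poll the AFU error state and, when it changes, drive the matching
 * EEH-style events towards the drivers bound on the virtual PHB.
 */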
static int afu_update_state(struct cxl_afu *afu)
{
int rc, cur_state;
rc = afu_read_error_state(afu, &cur_state);
if (rc)
return rc;
if (afu->guest->previous_state == cur_state)
return 0;
pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);
switch (cur_state) {
case H_STATE_NORMAL:
afu->guest->previous_state = cur_state;
break;
case H_STATE_DISABLE:
pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
pci_channel_io_frozen);
cxl_context_detach_all(afu);
if ((rc = cxl_ops->afu_reset(afu)))
pr_devel("reset hcall failed %d\n", rc);
rc = afu_read_error_state(afu, &cur_state);
if (!rc && cur_state == H_STATE_NORMAL) {
pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
pci_channel_io_normal);
pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
}
afu->guest->previous_state = 0;
break;
case H_STATE_TEMP_UNAVAILABLE:
afu->guest->previous_state = cur_state;
break;
case H_STATE_PERM_UNAVAILABLE:
dev_err(&afu->dev, "AFU is in permanent error state\n");
pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
pci_channel_io_perm_failure);
afu->guest->previous_state = cur_state;
break;
default:
pr_err("Unexpected AFU(%d) error state: %#x\n",
afu->slice, cur_state);
return -EINVAL;
}
return rc;
}
static void afu_handle_errstate(struct work_struct *work)
{
struct cxl_afu_guest *afu_guest =
container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);
if (!afu_update_state(afu_guest->parent) &&
afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
return;
if (afu_guest->handle_err)
schedule_delayed_work(&afu_guest->work_err,
msecs_to_jiffies(3000));
}
static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
int state;
if (afu && (!afu_read_error_state(afu, &state))) {
if (state == H_STATE_NORMAL)
return true;
}
return false;
}
static int afu_properties_look_ok(struct cxl_afu *afu)
{
if (afu->pp_irqs < 0) {
dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
return -EINVAL;
}
if (afu->max_procs_virtualised < 1) {
dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
return -EINVAL;
}
return 0;
}
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
struct cxl_afu *afu;
bool free = true;
int rc;
pr_devel("in %s - AFU(%d)\n", __func__, slice);
if (!(afu = cxl_alloc_afu(adapter, slice)))
return -ENOMEM;
if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
kfree(afu);
return -ENOMEM;
}
if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
adapter->adapter_num,
slice)))
goto err1;
adapter->slices++;
if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
goto err1;
if ((rc = cxl_ops->afu_reset(afu)))
goto err1;
if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
goto err1;
if ((rc = afu_properties_look_ok(afu)))
goto err1;
if ((rc = guest_map_slice_regs(afu)))
goto err1;
if ((rc = guest_register_serr_irq(afu)))
goto err2;
/*
* After we call this function we must not free the afu directly, even
* if it returns an error!
*/
if ((rc = cxl_register_afu(afu)))
goto err_put_dev;
if ((rc = cxl_sysfs_afu_add(afu)))
goto err_del_dev;
/*
* pHyp doesn't expose the programming models supported by the
* AFU. pHyp currently only supports directed mode. If it adds
* dedicated mode later, this version of cxl has no way to
* detect it. So we'll initialize the driver, but the first
* attach will fail.
* Being discussed with pHyp to do better (likely new property)
*/
if (afu->max_procs_virtualised == 1)
afu->modes_supported = CXL_MODE_DEDICATED;
else
afu->modes_supported = CXL_MODE_DIRECTED;
if ((rc = cxl_afu_select_best_mode(afu)))
goto err_remove_sysfs;
adapter->afu[afu->slice] = afu;
afu->enabled = true;
/*
	 * Schedule periodic work to check the state of the AFU, using the
	 * "afu" pointer stored in the guest structure.
*/
afu->guest->parent = afu;
afu->guest->handle_err = true;
INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));
if ((rc = cxl_pci_vphb_add(afu)))
dev_info(&afu->dev, "Can't register vPHB\n");
return 0;
err_remove_sysfs:
cxl_sysfs_afu_remove(afu);
err_del_dev:
device_del(&afu->dev);
err_put_dev:
put_device(&afu->dev);
free = false;
guest_release_serr_irq(afu);
err2:
guest_unmap_slice_regs(afu);
err1:
if (free) {
kfree(afu->guest);
kfree(afu);
}
return rc;
}
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
if (!afu)
return;
/* flush and stop pending job */
afu->guest->handle_err = false;
flush_delayed_work(&afu->guest->work_err);
cxl_pci_vphb_remove(afu);
cxl_sysfs_afu_remove(afu);
spin_lock(&afu->adapter->afu_list_lock);
afu->adapter->afu[afu->slice] = NULL;
spin_unlock(&afu->adapter->afu_list_lock);
cxl_context_detach_all(afu);
cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
guest_release_serr_irq(afu);
guest_unmap_slice_regs(afu);
device_unregister(&afu->dev);
}
static void free_adapter(struct cxl *adapter)
{
struct irq_avail *cur;
int i;
if (adapter->guest) {
if (adapter->guest->irq_avail) {
for (i = 0; i < adapter->guest->irq_nranges; i++) {
cur = &adapter->guest->irq_avail[i];
bitmap_free(cur->bitmap);
}
kfree(adapter->guest->irq_avail);
}
kfree(adapter->guest->status);
kfree(adapter->guest);
}
cxl_remove_adapter_nr(adapter);
kfree(adapter);
}
static int properties_look_ok(struct cxl *adapter)
{
/* The absence of this property means that the operational
* status is unknown or okay
*/
if (strlen(adapter->guest->status) &&
strcmp(adapter->guest->status, "okay")) {
pr_err("ABORTING:Bad operational status of the device\n");
return -EINVAL;
}
return 0;
}
ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
return guest_collect_vpd(adapter, NULL, buf, len);
}
void cxl_guest_remove_adapter(struct cxl *adapter)
{
pr_devel("in %s\n", __func__);
cxl_sysfs_adapter_remove(adapter);
cxl_guest_remove_chardev(adapter);
device_unregister(&adapter->dev);
}
static void release_adapter(struct device *dev)
{
free_adapter(to_cxl_adapter(dev));
}
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
struct cxl *adapter;
bool free = true;
int rc;
if (!(adapter = cxl_alloc_adapter()))
return ERR_PTR(-ENOMEM);
if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
free_adapter(adapter);
return ERR_PTR(-ENOMEM);
}
adapter->slices = 0;
adapter->guest->pdev = pdev;
adapter->dev.parent = &pdev->dev;
adapter->dev.release = release_adapter;
dev_set_drvdata(&pdev->dev, adapter);
/*
* Hypervisor controls PSL timebase initialization (p1 register).
* On FW840, PSL is initialized.
*/
adapter->psl_timebase_synced = true;
if ((rc = cxl_of_read_adapter_handle(adapter, np)))
goto err1;
if ((rc = cxl_of_read_adapter_properties(adapter, np)))
goto err1;
if ((rc = properties_look_ok(adapter)))
goto err1;
if ((rc = cxl_guest_add_chardev(adapter)))
goto err1;
/*
* After we call this function we must not free the adapter directly,
* even if it returns an error!
*/
if ((rc = cxl_register_adapter(adapter)))
goto err_put_dev;
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_del_dev;
/* release the context lock as the adapter is configured */
cxl_adapter_context_unlock(adapter);
return adapter;
err_del_dev:
device_del(&adapter->dev);
err_put_dev:
put_device(&adapter->dev);
free = false;
cxl_guest_remove_chardev(adapter);
err1:
if (free)
free_adapter(adapter);
return ERR_PTR(rc);
}
void cxl_guest_reload_module(struct cxl *adapter)
{
struct platform_device *pdev;
pdev = adapter->guest->pdev;
cxl_guest_remove_adapter(adapter);
cxl_of_probe(pdev);
}
const struct cxl_backend_ops cxl_guest_ops = {
.module = THIS_MODULE,
.adapter_reset = guest_reset,
.alloc_one_irq = guest_alloc_one_irq,
.release_one_irq = guest_release_one_irq,
.alloc_irq_ranges = guest_alloc_irq_ranges,
.release_irq_ranges = guest_release_irq_ranges,
.setup_irq = NULL,
.handle_psl_slice_error = guest_handle_psl_slice_error,
.psl_interrupt = guest_psl_irq,
.ack_irq = guest_ack_irq,
.attach_process = guest_attach_process,
.detach_process = guest_detach_process,
.update_ivtes = NULL,
.support_attributes = guest_support_attributes,
.link_ok = guest_link_ok,
.release_afu = guest_release_afu,
.afu_read_err_buffer = guest_afu_read_err_buffer,
.afu_check_and_enable = guest_afu_check_and_enable,
.afu_activate_mode = guest_afu_activate_mode,
.afu_deactivate_mode = guest_afu_deactivate_mode,
.afu_reset = guest_afu_reset,
.afu_cr_read8 = guest_afu_cr_read8,
.afu_cr_read16 = guest_afu_cr_read16,
.afu_cr_read32 = guest_afu_cr_read32,
.afu_cr_read64 = guest_afu_cr_read64,
.afu_cr_write8 = guest_afu_cr_write8,
.afu_cr_write16 = guest_afu_cr_write16,
.afu_cr_write32 = guest_afu_cr_write32,
.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};
| linux-master | drivers/misc/cxl/guest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <misc/cxl-base.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "cxl.h"
/* protected by rcu */
static struct cxl_calls *cxl_calls;
atomic_t cxl_use_count = ATOMIC_INIT(0);
EXPORT_SYMBOL(cxl_use_count);
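/*
 * cxl_calls is published by the cxl module via register_cxl_calls().
 * When cxl is built as a module, users take a reference on that module
 * under the RCU read lock before invoking the callbacks; when built in,
 * the pointer is used directly and cxl_calls_put() is a no-op.
 */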
#ifdef CONFIG_CXL_MODULE
static inline struct cxl_calls *cxl_calls_get(void)
{
struct cxl_calls *calls = NULL;
rcu_read_lock();
calls = rcu_dereference(cxl_calls);
if (calls && !try_module_get(calls->owner))
calls = NULL;
rcu_read_unlock();
return calls;
}
static inline void cxl_calls_put(struct cxl_calls *calls)
{
BUG_ON(calls != cxl_calls);
/* we don't need to rcu this, as we hold a reference to the module */
module_put(cxl_calls->owner);
}
#else /* !defined CONFIG_CXL_MODULE */
static inline struct cxl_calls *cxl_calls_get(void)
{
return cxl_calls;
}
static inline void cxl_calls_put(struct cxl_calls *calls) { }
#endif /* CONFIG_CXL_MODULE */
/* AFU refcount management */
struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
{
return (get_device(&afu->dev) == NULL) ? NULL : afu;
}
EXPORT_SYMBOL_GPL(cxl_afu_get);
void cxl_afu_put(struct cxl_afu *afu)
{
put_device(&afu->dev);
}
EXPORT_SYMBOL_GPL(cxl_afu_put);
void cxl_slbia(struct mm_struct *mm)
{
struct cxl_calls *calls;
calls = cxl_calls_get();
if (!calls)
return;
if (cxl_ctx_in_use())
calls->cxl_slbia(mm);
cxl_calls_put(calls);
}
int register_cxl_calls(struct cxl_calls *calls)
{
if (cxl_calls)
return -EBUSY;
rcu_assign_pointer(cxl_calls, calls);
return 0;
}
EXPORT_SYMBOL_GPL(register_cxl_calls);
void unregister_cxl_calls(struct cxl_calls *calls)
{
BUG_ON(cxl_calls->owner != calls->owner);
RCU_INIT_POINTER(cxl_calls, NULL);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_cxl_calls);
int cxl_update_properties(struct device_node *dn,
struct property *new_prop)
{
return of_update_property(dn, new_prop);
}
EXPORT_SYMBOL_GPL(cxl_update_properties);
static int __init cxl_base_init(void)
{
struct device_node *np;
struct platform_device *dev;
int count = 0;
/*
* Scan for compatible devices in guest only
*/
if (cpu_has_feature(CPU_FTR_HVMODE))
return 0;
for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
dev = of_platform_device_create(np, NULL, NULL);
if (dev)
count++;
}
pr_devel("Found %d cxl device(s)\n", count);
return 0;
}
device_initcall(cxl_base_init);
| linux-master | drivers/misc/cxl/base.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2017 IBM Corp.
*/
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/opal-api.h>
#include <asm/pnv-pci.h>
#include <misc/cxllib.h>
#include "cxl.h"
#define CXL_INVALID_DRA ~0ull
#define CXL_DUMMY_READ_SIZE 128
#define CXL_DUMMY_READ_ALIGN 8
#define CXL_CAPI_WINDOW_START 0x2000000000000ull
#define CXL_CAPI_WINDOW_LOG_SIZE 48
#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1
bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
{
int rc;
u32 phb_index;
u64 chip_id, capp_unit_id;
/* No flags currently supported */
if (flags)
return false;
if (!cpu_has_feature(CPU_FTR_HVMODE))
return false;
if (!cxl_is_power9())
return false;
if (cxl_slot_is_switched(dev))
return false;
/* on p9, some pci slots are not connected to a CAPP unit */
rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
if (rc)
return false;
return true;
}
EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
static DEFINE_MUTEX(dra_mutex);
static u64 dummy_read_addr = CXL_INVALID_DRA;
static int allocate_dummy_read_buf(void)
{
u64 buf, vaddr;
size_t buf_size;
/*
	 * The dummy read buffer is 128 bytes long, aligned on a
	 * 256-byte boundary, and we need its physical address.
*/
buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN);
buf = (u64) kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
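	/*
	 * Over-allocate by one 256-byte alignment unit, then round the
	 * start address up to the next 256-byte boundary so that the
	 * 128-byte buffer always fits inside the allocation.
	 */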
vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) &
(~0ull << CXL_DUMMY_READ_ALIGN);
WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size),
"Dummy read buffer alignment issue");
dummy_read_addr = virt_to_phys((void *) vaddr);
return 0;
}
int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
{
int rc;
u32 phb_index;
u64 chip_id, capp_unit_id;
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
mutex_lock(&dra_mutex);
if (dummy_read_addr == CXL_INVALID_DRA) {
rc = allocate_dummy_read_buf();
if (rc) {
mutex_unlock(&dra_mutex);
return rc;
}
}
mutex_unlock(&dra_mutex);
rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
if (rc)
return rc;
rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
if (rc)
return rc;
cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
cfg->bar_addr = CXL_CAPI_WINDOW_START;
cfg->dra = dummy_read_addr;
return 0;
}
EXPORT_SYMBOL_GPL(cxllib_get_xsl_config);
int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
unsigned long flags)
{
int rc = 0;
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
switch (mode) {
case CXL_MODE_PCI:
/*
		 * We currently don't support going back to PCI mode.
		 * However, we'll turn the invalidations off, so that
		 * the firmware doesn't have to ack them and can do
		 * things like reset, etc. with no worries.
		 * So we always return EPERM (can't go back to PCI) or
		 * EBUSY if we couldn't even turn off snooping.
*/
rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF);
if (rc)
rc = -EBUSY;
else
rc = -EPERM;
break;
case CXL_MODE_CXL:
/* DMA only supported on TVT1 for the time being */
if (flags != CXL_MODE_DMA_TVT1)
return -EINVAL;
rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1);
if (rc)
return rc;
rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON);
break;
default:
rc = -EINVAL;
}
return rc;
}
EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode);
/*
* When switching the PHB to capi mode, the TVT#1 entry for
* the Partitionable Endpoint is set in bypass mode, like
* in PCI mode.
* Configure the device dma to use TVT#1, which is done
* by calling dma_set_mask() with a mask large enough.
*/
int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags)
{
int rc;
if (flags)
return -EINVAL;
rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
return rc;
}
EXPORT_SYMBOL_GPL(cxllib_set_device_dma);
int cxllib_get_PE_attributes(struct task_struct *task,
unsigned long translation_mode,
struct cxllib_pe_attributes *attr)
{
if (translation_mode != CXL_TRANSLATED_MODE &&
translation_mode != CXL_REAL_MODE)
return -EINVAL;
attr->sr = cxl_calculate_sr(false,
task == NULL,
translation_mode == CXL_REAL_MODE,
true);
attr->lpid = mfspr(SPRN_LPID);
if (task) {
struct mm_struct *mm = get_task_mm(task);
if (mm == NULL)
return -EINVAL;
/*
* Caller is keeping a reference on mm_users for as long
* as XSL uses the memory context
*/
attr->pid = mm->context.id;
mmput(mm);
attr->tid = task->thread.tidr;
} else {
attr->pid = 0;
attr->tid = 0;
}
return 0;
}
EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
static int get_vma_info(struct mm_struct *mm, u64 addr,
u64 *vma_start, u64 *vma_end,
unsigned long *page_size)
{
struct vm_area_struct *vma = NULL;
int rc = 0;
mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
rc = -EFAULT;
goto out;
}
*page_size = vma_kernel_pagesize(vma);
*vma_start = vma->vm_start;
*vma_end = vma->vm_end;
out:
mmap_read_unlock(mm);
return rc;
}
int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
{
int rc;
u64 dar, vma_start, vma_end;
unsigned long page_size;
if (mm == NULL)
return -EFAULT;
/*
* The buffer we have to process can extend over several pages
* and may also cover several VMAs.
* We iterate over all the pages. The page size could vary
* between VMAs.
*/
rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
if (rc)
return rc;
for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
dar += page_size) {
if (dar < vma_start || dar >= vma_end) {
/*
* We don't hold mm->mmap_lock while iterating, since
* the lock is required by one of the lower-level page
* fault processing functions and it could
* create a deadlock.
*
* It means the VMAs can be altered between 2
* loop iterations and we could theoretically
* miss a page (however unlikely). But that's
* not really a problem, as the driver will
* retry access, get another page fault on the
* missing page and call us again.
*/
rc = get_vma_info(mm, dar, &vma_start, &vma_end,
&page_size);
if (rc)
return rc;
}
rc = cxl_handle_mm_fault(mm, flags, dar);
if (rc)
return -EFAULT;
}
return 0;
}
EXPORT_SYMBOL_GPL(cxllib_handle_fault);
| linux-master | drivers/misc/cxl/cxllib.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sched/task.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <misc/cxl-base.h>
#include "cxl.h"
#include "trace.h"
static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);
uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
const struct cxl_backend_ops *cxl_ops;
int cxl_afu_slbia(struct cxl_afu *afu)
{
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
pr_devel("cxl_afu_slbia issuing SLBIA command\n");
cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
return -EBUSY;
}
/* If the adapter has gone down, we can assume that we
* will PERST it and that will invalidate everything.
*/
if (!cxl_ops->link_ok(afu->adapter, afu))
return -EIO;
cpu_relax();
}
return 0;
}
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
unsigned long flags;
if (ctx->mm != mm)
return;
pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
spin_lock_irqsave(&ctx->sste_lock, flags);
trace_cxl_slbia(ctx);
memset(ctx->sstp, 0, ctx->sst_size);
spin_unlock_irqrestore(&ctx->sste_lock, flags);
mb();
cxl_afu_slbia(ctx->afu);
}
static inline void cxl_slbia_core(struct mm_struct *mm)
{
struct cxl *adapter;
struct cxl_afu *afu;
struct cxl_context *ctx;
int card, slice, id;
pr_devel("%s called\n", __func__);
spin_lock(&adapter_idr_lock);
idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
/* XXX: Make this lookup faster with link from mm to ctx */
spin_lock(&adapter->afu_list_lock);
for (slice = 0; slice < adapter->slices; slice++) {
afu = adapter->afu[slice];
if (!afu || !afu->enabled)
continue;
rcu_read_lock();
idr_for_each_entry(&afu->contexts_idr, ctx, id)
_cxl_slbia(ctx, mm);
rcu_read_unlock();
}
spin_unlock(&adapter->afu_list_lock);
}
spin_unlock(&adapter_idr_lock);
}
static struct cxl_calls cxl_calls = {
.cxl_slbia = cxl_slbia_core,
.owner = THIS_MODULE,
};
int cxl_alloc_sst(struct cxl_context *ctx)
{
unsigned long vsid;
u64 ea_mask, size, sstp0, sstp1;
sstp0 = 0;
sstp1 = 0;
ctx->sst_size = PAGE_SIZE;
ctx->sst_lru = 0;
ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
if (!ctx->sstp) {
pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
return -ENOMEM;
}
pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);
vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;
sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;
size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
WARN(1, "Impossible segment table size\n");
return -EINVAL;
}
sstp0 |= size;
if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
ea_mask = 0xfffff00ULL;
else
ea_mask = 0xffffffff00ULL;
sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */
sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
sstp1 |= (u64)ctx->sstp & ea_mask;
sstp1 |= CXL_SSTP1_An_V;
pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
(u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);
	/* Store the calculated sstp hardware pointers for later use */
ctx->sstp0 = sstp0;
ctx->sstp1 = sstp1;
return 0;
}
/* print buffer content as integers when debugging */
void cxl_dump_debug_buffer(void *buf, size_t buf_len)
{
#ifdef DEBUG
int i, *ptr;
/*
* We want to regroup up to 4 integers per line, which means they
* need to be in the same pr_devel() statement
*/
ptr = (int *) buf;
for (i = 0; i * 4 < buf_len; i += 4) {
if ((i + 3) * 4 < buf_len)
pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
ptr[i + 2], ptr[i + 3]);
else if ((i + 2) * 4 < buf_len)
pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
ptr[i + 2]);
else if ((i + 1) * 4 < buf_len)
pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
else
pr_devel("%.8x\n", ptr[i]);
}
#endif /* DEBUG */
}
/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
struct cxl *adapter;
spin_lock(&adapter_idr_lock);
if ((adapter = idr_find(&cxl_adapter_idr, num)))
get_device(&adapter->dev);
spin_unlock(&adapter_idr_lock);
return adapter;
}
static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
int i;
idr_preload(GFP_KERNEL);
spin_lock(&adapter_idr_lock);
i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
spin_unlock(&adapter_idr_lock);
idr_preload_end();
if (i < 0)
return i;
adapter->adapter_num = i;
return 0;
}
void cxl_remove_adapter_nr(struct cxl *adapter)
{
idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}
struct cxl *cxl_alloc_adapter(void)
{
struct cxl *adapter;
if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
return NULL;
spin_lock_init(&adapter->afu_list_lock);
if (cxl_alloc_adapter_nr(adapter))
goto err1;
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
/* start with context lock taken */
atomic_set(&adapter->contexts_num, -1);
return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
kfree(adapter);
return NULL;
}
struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
struct cxl_afu *afu;
if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
return NULL;
afu->adapter = adapter;
afu->dev.parent = &adapter->dev;
afu->dev.release = cxl_ops->release_afu;
afu->slice = slice;
idr_init(&afu->contexts_idr);
mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock);
atomic_set(&afu->configured_state, -1);
afu->prefault_mode = CXL_PREFAULT_NONE;
afu->irqs_max = afu->adapter->user_irqs;
return afu;
}
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
if (afu->modes_supported & CXL_MODE_DIRECTED)
return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);
if (afu->modes_supported & CXL_MODE_DEDICATED)
return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);
dev_warn(&afu->dev, "No supported programming modes available\n");
/* We don't fail this so the user can inspect sysfs */
return 0;
}
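/*
 * Adapter context accounting: contexts_num is -1 while the adapter
 * context lock is held, 0 when unlocked with no active contexts, and
 * otherwise counts the active contexts. Taking a context reference
 * fails with -EBUSY while the lock is held.
 */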
int cxl_adapter_context_get(struct cxl *adapter)
{
int rc;
rc = atomic_inc_unless_negative(&adapter->contexts_num);
return rc ? 0 : -EBUSY;
}
void cxl_adapter_context_put(struct cxl *adapter)
{
atomic_dec_if_positive(&adapter->contexts_num);
}
int cxl_adapter_context_lock(struct cxl *adapter)
{
int rc;
/* no active contexts -> contexts_num == 0 */
rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
return rc ? -EBUSY : 0;
}
void cxl_adapter_context_unlock(struct cxl *adapter)
{
int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
/*
* contexts lock taken -> contexts_num == -1
* If not true then show a warning and force reset the lock.
* This will happen when context_unlock was requested without
* doing a context_lock.
*/
if (val != -1) {
atomic_set(&adapter->contexts_num, 0);
WARN(1, "Adapter context unlocked with %d active contexts",
val);
}
}
static int __init init_cxl(void)
{
int rc = 0;
if (!tlbie_capable)
return -EINVAL;
if ((rc = cxl_file_init()))
return rc;
cxl_debugfs_init();
/*
	 * we don't register the callback on P9. The SLB callback is only
	 * used for the PSL8 MMU and CX4.
*/
if (cxl_is_power8()) {
rc = register_cxl_calls(&cxl_calls);
if (rc)
goto err;
}
if (cpu_has_feature(CPU_FTR_HVMODE)) {
cxl_ops = &cxl_native_ops;
rc = pci_register_driver(&cxl_pci_driver);
}
#ifdef CONFIG_PPC_PSERIES
else {
cxl_ops = &cxl_guest_ops;
rc = platform_driver_register(&cxl_of_driver);
}
#endif
if (rc)
goto err1;
return 0;
err1:
if (cxl_is_power8())
unregister_cxl_calls(&cxl_calls);
err:
cxl_debugfs_exit();
cxl_file_exit();
return rc;
}
static void exit_cxl(void)
{
if (cpu_has_feature(CPU_FTR_HVMODE))
pci_unregister_driver(&cxl_pci_driver);
#ifdef CONFIG_PPC_PSERIES
else
platform_driver_unregister(&cxl_of_driver);
#endif
cxl_debugfs_exit();
cxl_file_exit();
if (cxl_is_power8())
unregister_cxl_calls(&cxl_calls);
idr_destroy(&cxl_adapter_idr);
}
module_init(init_cxl);
module_exit(exit_cxl);
MODULE_DESCRIPTION("IBM Coherent Accelerator");
MODULE_AUTHOR("Ian Munsie <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/cxl/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>
#include "cxl.h"
#include "trace.h"
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
u64 result, u64 mask, bool enabled)
{
u64 AFU_Cntl;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
spin_lock(&afu->afu_cntl_lock);
pr_devel("AFU command starting: %llx\n", command);
trace_cxl_afu_ctrl(afu, command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
while ((AFU_Cntl & mask) != result) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
rc = -EBUSY;
goto out;
}
if (!cxl_ops->link_ok(afu->adapter, afu)) {
afu->enabled = enabled;
rc = -EIO;
goto out;
}
pr_devel_ratelimited("AFU control... (0x%016llx)\n",
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
}
if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
/*
* Workaround for a bug in the XSL used in the Mellanox CX4
* that fails to clear the RA bit after an AFU reset,
* preventing subsequent AFU resets from working.
*/
cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
}
pr_devel("AFU command complete: %llx\n", command);
afu->enabled = enabled;
out:
trace_cxl_afu_ctrl_done(afu, command, rc);
spin_unlock(&afu->afu_cntl_lock);
return rc;
}
static int afu_enable(struct cxl_afu *afu)
{
pr_devel("AFU enable request\n");
return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
CXL_AFU_Cntl_An_ES_Enabled,
CXL_AFU_Cntl_An_ES_MASK, true);
}
int cxl_afu_disable(struct cxl_afu *afu)
{
pr_devel("AFU disable request\n");
return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_ES_MASK, false);
}
/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
int rc;
u64 serr;
pr_devel("AFU reset request\n");
rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
false);
/*
* Re-enable any masked interrupts when the AFU is not
* activated to avoid side effects after attaching a process
* in dedicated mode.
*/
if (afu->current_mode == 0) {
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
}
return rc;
}
static int native_afu_check_and_enable(struct cxl_afu *afu)
{
if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Refusing to enable afu while link down!\n");
return -EIO;
}
if (afu->enabled)
return 0;
return afu_enable(afu);
}
int cxl_psl_purge(struct cxl_afu *afu)
{
u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
u64 dsisr, dar;
u64 start, end;
u64 trans_fault = 0x0ULL;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
pr_devel("PSL purge request\n");
if (cxl_is_power8())
trans_fault = CXL_PSL_DSISR_TRANS;
if (cxl_is_power9())
trans_fault = CXL_PSL9_DSISR_An_TF;
if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
rc = -EIO;
goto out;
}
if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
WARN(1, "psl_purge request while AFU not disabled!\n");
cxl_afu_disable(afu);
}
cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
start = local_clock();
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
== CXL_PSL_SCNTL_An_Ps_Pending) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
rc = -EBUSY;
goto out;
}
if (!cxl_ops->link_ok(afu->adapter, afu)) {
rc = -EIO;
goto out;
}
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
PSL_CNTL, dsisr);
if (dsisr & trans_fault) {
dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
dsisr, dar);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
} else if (dsisr) {
dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
dsisr);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
} else {
cpu_relax();
}
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
}
end = local_clock();
pr_devel("PSL purged in %lld ns\n", end - start);
cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
return rc;
}
static int spa_max_procs(int spa_size)
{
/*
* From the CAIA:
* end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
* Most of that junk is really just an overly-complicated way of saying
* the last 256 bytes are __aligned(128), so it's really:
* end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
* and
* end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
* so
* sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
* Ignore the alignment (which is safe in this case as long as we are
* careful with our rounding) and solve for n:
*/
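	/*
	 * Working through that algebra:
	 *   sizeof(SPA) = (n + 4) * 128 + n * 8 + 256 = 136 * n + 768
	 * hence n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17.
	 */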
return ((spa_size / 8) - 96) / 17;
}
static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
unsigned spa_size;
/* Work out how many pages to allocate */
afu->native->spa_order = -1;
do {
afu->native->spa_order++;
spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
if (spa_size > 0x100000) {
dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
afu->native->spa_max_procs, afu->native->spa_size);
if (mode != CXL_MODE_DEDICATED)
afu->num_procs = afu->native->spa_max_procs;
break;
}
afu->native->spa_size = spa_size;
afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
} while (afu->native->spa_max_procs < afu->num_procs);
if (!(afu->native->spa = (struct cxl_process_element *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
return -ENOMEM;
}
pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
return 0;
}
static void attach_spa(struct cxl_afu *afu)
{
u64 spap;
afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
((afu->native->spa_max_procs + 3) * 128));
spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
spap |= CXL_PSL_SPAP_V;
pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
afu->native->spa, afu->native->spa_max_procs,
afu->native->sw_command_status, spap);
cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
void cxl_release_spa(struct cxl_afu *afu)
{
if (afu->native->spa) {
free_pages((unsigned long) afu->native->spa,
afu->native->spa_order);
afu->native->spa = NULL;
}
}
/*
* Invalidation of all ERAT entries is no longer required by CAIA2. Use
* only for debug.
*/
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
u64 ierat;
pr_devel("CXL adapter - invalidation of all ERAT entries\n");
/* Invalidates all ERAT entries for Radix or HPT */
ierat = CXL_XSL9_IERAT_IALL;
if (radix_enabled())
ierat |= CXL_XSL9_IERAT_INVR;
cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);
while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&adapter->dev,
"WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
return -EBUSY;
}
if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
return 0;
}
int cxl_invalidate_all_psl8(struct cxl *adapter)
{
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
pr_devel("CXL adapter wide TLBIA & SLBIA\n");
cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
return -EBUSY;
}
if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
return -EBUSY;
}
if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
return 0;
}
int cxl_data_cache_flush(struct cxl *adapter)
{
u64 reg;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
/*
	 * Do a data cache flush only if the data cache is available.
	 * On PSL9D the data cache is absent, so the flush operation
	 * would time out.
*/
if (adapter->native->no_data_cache) {
pr_devel("No PSL data cache. Ignoring cache flush req.\n");
return 0;
}
pr_devel("Flushing data cache\n");
reg = cxl_p1_read(adapter, CXL_PSL_Control);
reg |= CXL_PSL_Control_Fr;
cxl_p1_write(adapter, CXL_PSL_Control, reg);
reg = cxl_p1_read(adapter, CXL_PSL_Control);
while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
return -EBUSY;
}
if (!cxl_ops->link_ok(adapter, NULL)) {
dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
return -EIO;
}
cpu_relax();
reg = cxl_p1_read(adapter, CXL_PSL_Control);
}
reg &= ~CXL_PSL_Control_Fr;
cxl_p1_write(adapter, CXL_PSL_Control, reg);
return 0;
}
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
int rc;
/* 1. Disable SSTP by writing 0 to SSTP1[V] */
cxl_p2n_write(afu, CXL_SSTP1_An, 0);
/* 2. Invalidate all SLB entries */
if ((rc = cxl_afu_slbia(afu)))
return rc;
/* 3. Set SSTP0_An */
cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
/* 4. Set SSTP1_An */
cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
return 0;
}
/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
struct cxl *adapter = ctx->afu->adapter;
u64 slbia;
WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
cxl_p1_write(adapter, CXL_PSL_LBISEL,
((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
be32_to_cpu(ctx->elem->lpid));
cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
while (1) {
if (!cxl_ops->link_ok(adapter, NULL))
break;
slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
if (!(slbia & CXL_TLB_SLB_P))
break;
cpu_relax();
}
}
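/*
 * Issue a link list command (add/terminate/remove) for a process element
 * and wait for the PSL to acknowledge it: the new software state is
 * written to the PE, the command is posted via CXL_PSL_LLCMD_An, and
 * sw_command_status is polled until the PSL echoes the command and PE
 * handle, the link drops, or the timeout expires.
 */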
static int do_process_element_cmd(struct cxl_context *ctx,
u64 cmd, u64 pe_state)
{
u64 state;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
trace_cxl_llcmd(ctx, cmd);
WARN_ON(!ctx->afu->enabled);
ctx->elem->software_state = cpu_to_be32(pe_state);
smp_wmb();
*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
smp_mb();
cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
while (1) {
if (time_after_eq(jiffies, timeout)) {
dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
rc = -EBUSY;
goto out;
}
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
rc = -EIO;
goto out;
}
state = be64_to_cpup(ctx->afu->native->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
rc = -1;
goto out;
}
if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
(cmd | (cmd >> 16) | ctx->pe))
break;
/*
* The command won't finish in the PSL if there are
* outstanding DSIs. Hence we need to yield here in
* case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
*/
schedule();
}
out:
trace_cxl_llcmd_done(ctx, cmd, rc);
return rc;
}
static int add_process_element(struct cxl_context *ctx)
{
int rc = 0;
mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
ctx->pe_inserted = true;
pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
static int terminate_process_element(struct cxl_context *ctx)
{
int rc = 0;
/* fast path terminate if it's already invalid */
if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
return rc;
mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
/* We could be asked to terminate when the hw is down. That
* should always succeed: it's not running if the hw has gone
* away and is being reset.
*/
if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
ctx->elem->software_state = 0; /* Remove Valid bit */
pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
static int remove_process_element(struct cxl_context *ctx)
{
int rc = 0;
mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
/* We could be asked to remove when the hw is down. Again, if
* the hw is down, the PE is gone, so we succeed.
*/
if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
if (!rc)
ctx->pe_inserted = false;
if (cxl_is_power8())
slb_invalid(ctx);
pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
void cxl_assign_psn_space(struct cxl_context *ctx)
{
if (!ctx->afu->pp_size || ctx->master) {
ctx->psn_phys = ctx->afu->psn_phys;
ctx->psn_size = ctx->afu->adapter->ps_size;
} else {
ctx->psn_phys = ctx->afu->psn_phys +
(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
ctx->psn_size = ctx->afu->pp_size;
}
}
static int activate_afu_directed(struct cxl_afu *afu)
{
int rc;
dev_info(&afu->dev, "Activating AFU directed mode\n");
afu->num_procs = afu->max_procs_virtualised;
if (afu->native->spa == NULL) {
if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
return -ENOMEM;
}
attach_spa(afu);
cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
if (cxl_is_power8())
cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
afu->current_mode = CXL_MODE_DIRECTED;
if ((rc = cxl_chardev_m_afu_add(afu)))
return rc;
if ((rc = cxl_sysfs_afu_m_add(afu)))
goto err;
if ((rc = cxl_chardev_s_afu_add(afu)))
goto err1;
return 0;
err1:
cxl_sysfs_afu_m_remove(afu);
err:
cxl_chardev_afu_remove(afu);
return rc;
}
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
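/*
 * Build the CXL_PSL_SR_An value for a context: endianness, master vs.
 * problem state, relocation, hypervisor state and, on POWER9, whether
 * translation uses the radix or hashed page table format.
 */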
u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
u64 sr = 0;
set_endian(sr);
if (master)
sr |= CXL_PSL_SR_An_MP;
if (mfspr(SPRN_LPCR) & LPCR_TC)
sr |= CXL_PSL_SR_An_TC;
if (kernel) {
if (!real_mode)
sr |= CXL_PSL_SR_An_R;
sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
} else {
sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
if (radix_enabled())
sr |= CXL_PSL_SR_An_HV;
else
sr &= ~(CXL_PSL_SR_An_HV);
if (!test_tsk_thread_flag(current, TIF_32BIT))
sr |= CXL_PSL_SR_An_SF;
}
if (p9) {
if (radix_enabled())
sr |= CXL_PSL_SR_An_XLAT_ror;
else
sr |= CXL_PSL_SR_An_XLAT_hpt;
}
return sr;
}
static u64 calculate_sr(struct cxl_context *ctx)
{
return cxl_calculate_sr(ctx->master, ctx->kernel, false,
cxl_is_power9());
}
static void update_ivtes_directed(struct cxl_context *ctx)
{
bool need_update = (ctx->status == STARTED);
int r;
if (need_update) {
WARN_ON(terminate_process_element(ctx));
WARN_ON(remove_process_element(ctx));
}
for (r = 0; r < CXL_IRQ_RANGES; r++) {
ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
}
/*
* Theoretically we could use the update llcmd, instead of a
* terminate/remove/add (or if an atomic update was required we could
* do a suspend/update/resume), however it seems there might be issues
* with the update llcmd on some cards (including those using an XSL on
* an ASIC) so for now it's safest to go with the commands that are
* known to work. In the future if we come across a situation where the
* card may be performing transactions using the same PE while we are
* doing this update we might need to revisit this.
*/
if (need_update)
WARN_ON(add_process_element(ctx));
}
static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
u32 pid;
int rc;
cxl_assign_psn_space(ctx);
ctx->elem->ctxtime = 0; /* disable */
ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
ctx->elem->haurp = 0; /* disable */
if (ctx->kernel)
pid = 0;
else {
if (ctx->mm == NULL) {
pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
__func__, ctx->pe, pid_nr(ctx->pid));
return -EINVAL;
}
pid = ctx->mm->context.id;
}
/* Assign a unique TIDR (thread id) for the current thread */
if (!(ctx->tidr) && (ctx->assign_tidr)) {
rc = set_thread_tidr(current);
if (rc)
return -ENODEV;
ctx->tidr = current->thread.tidr;
pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
}
ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
ctx->elem->common.pid = cpu_to_be32(pid);
ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
ctx->elem->common.csrp = 0; /* disable */
cxl_prefault(ctx, wed);
/*
* Ensure we have the multiplexed PSL interrupt set up to take faults
* for kernel contexts that may not have allocated any AFU IRQs at all:
*/
if (ctx->irqs.range[0] == 0) {
ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
ctx->irqs.range[0] = 1;
}
ctx->elem->common.amr = cpu_to_be64(amr);
ctx->elem->common.wed = cpu_to_be64(wed);
return 0;
}
int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
int result;
/* fill the process element entry */
result = process_element_entry_psl9(ctx, wed, amr);
if (result)
return result;
update_ivtes_directed(ctx);
/* first guy needs to enable */
result = cxl_ops->afu_check_and_enable(ctx->afu);
if (result)
return result;
return add_process_element(ctx);
}
int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
u32 pid;
int result;
cxl_assign_psn_space(ctx);
ctx->elem->ctxtime = 0; /* disable */
ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
ctx->elem->haurp = 0; /* disable */
ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));
pid = current->pid;
if (ctx->kernel)
pid = 0;
ctx->elem->common.tid = 0;
ctx->elem->common.pid = cpu_to_be32(pid);
ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
ctx->elem->common.csrp = 0; /* disable */
ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
ctx->elem->common.u.psl8.aurp1 = 0; /* disable */
cxl_prefault(ctx, wed);
ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
/*
* Ensure we have the multiplexed PSL interrupt set up to take faults
* for kernel contexts that may not have allocated any AFU IRQs at all:
*/
if (ctx->irqs.range[0] == 0) {
ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
ctx->irqs.range[0] = 1;
}
update_ivtes_directed(ctx);
ctx->elem->common.amr = cpu_to_be64(amr);
ctx->elem->common.wed = cpu_to_be64(wed);
/* first guy needs to enable */
if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
return result;
return add_process_element(ctx);
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Deactivating AFU directed mode\n");
afu->current_mode = 0;
afu->num_procs = 0;
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
/*
* The CAIA section 2.2.1 indicates that the procedure for starting and
* stopping an AFU in AFU directed mode is AFU specific, which is not
* ideal since this code is generic and with one exception has no
* knowledge of the AFU. This is in contrast to the procedure for
* disabling a dedicated process AFU, which is documented to just
* require a reset. The architecture does indicate that both an AFU
* reset and an AFU disable should result in the AFU being disabled and
* we do both followed by a PSL purge for safety.
*
* Notably we used to have some issues with the disable sequence on PSL
* cards, which is why we ended up using this heavy weight procedure in
* the first place, however a bug was discovered that had rendered the
* disable operation ineffective, so it is conceivable that was the
* sole explanation for those difficulties. Careful regression testing
* is recommended if anyone attempts to remove or reorder these
* operations.
*
* The XSL on the Mellanox CX4 behaves a little differently from the
* PSL based cards and will time out an AFU reset if the AFU is still
* enabled. That card is special in that we do have a means to identify
* it from this code, so in that case we skip the reset and just use a
* disable/purge to avoid the timeout and corresponding noise in the
* kernel log.
*/
if (afu->adapter->native->sl_ops->needs_reset_before_disable)
cxl_ops->afu_reset(afu);
cxl_afu_disable(afu);
cxl_psl_purge(afu);
return 0;
}
int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Activating dedicated process mode\n");
/*
* If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
* XSL and AFU are programmed to work with a single context.
* The context information should be configured in the SPA area
* index 0 (so PSL_SPAP must be configured before enabling the
* AFU).
*/
afu->num_procs = 1;
if (afu->native->spa == NULL) {
if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
return -ENOMEM;
}
attach_spa(afu);
cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
afu->current_mode = CXL_MODE_DEDICATED;
return cxl_chardev_d_afu_add(afu);
}
int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Activating dedicated process mode\n");
cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */
afu->current_mode = CXL_MODE_DEDICATED;
afu->num_procs = 1;
return cxl_chardev_d_afu_add(afu);
}
void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
int r;
for (r = 0; r < CXL_IRQ_RANGES; r++) {
ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
}
}
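/*
 * On PSL8 the per-context IVTEs live in two slice registers rather than
 * in the process element: the four 16-bit IRQ offsets are packed into
 * CXL_PSL_IVTE_Offset_An and the four ranges into CXL_PSL_IVTE_Limit_An,
 * with entry 0 in the most significant 16 bits.
 */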
void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
struct cxl_afu *afu = ctx->afu;
cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
(((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
(((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
(((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
((u64)ctx->irqs.offset[3] & 0xffff));
cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
(((u64)ctx->irqs.range[0] & 0xffff) << 48) |
(((u64)ctx->irqs.range[1] & 0xffff) << 32) |
(((u64)ctx->irqs.range[2] & 0xffff) << 16) |
((u64)ctx->irqs.range[3] & 0xffff));
}
int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_afu *afu = ctx->afu;
int result;
/* fill the process element entry */
result = process_element_entry_psl9(ctx, wed, amr);
if (result)
return result;
if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
/*
* Ideally we should do a wmb() here to make sure the changes to the
* PE are visible to the card before we call afu_enable.
* On ppc64 though all mmios are preceded by a 'sync' instruction hence
	 * we don't need one here.
*/
result = cxl_ops->afu_reset(afu);
if (result)
return result;
return afu_enable(afu);
}
int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_afu *afu = ctx->afu;
u64 pid;
int rc;
pid = (u64)current->pid << 32;
if (ctx->kernel)
pid = 0;
cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
return rc;
cxl_prefault(ctx, wed);
if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
/* master only context for dedicated */
cxl_assign_psn_space(ctx);
if ((rc = cxl_ops->afu_reset(afu)))
return rc;
cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
return afu_enable(afu);
}
static int deactivate_dedicated_process(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Deactivating dedicated process mode\n");
afu->current_mode = 0;
afu->num_procs = 0;
cxl_chardev_afu_remove(afu);
return 0;
}
static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
if (mode == CXL_MODE_DIRECTED)
return deactivate_afu_directed(afu);
if (mode == CXL_MODE_DEDICATED)
return deactivate_dedicated_process(afu);
return 0;
}
static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
if (!mode)
return 0;
if (!(mode & afu->modes_supported))
return -EINVAL;
if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Device link is down, refusing to activate!\n");
return -EIO;
}
if (mode == CXL_MODE_DIRECTED)
return activate_afu_directed(afu);
if ((mode == CXL_MODE_DEDICATED) &&
(afu->adapter->native->sl_ops->activate_dedicated_process))
return afu->adapter->native->sl_ops->activate_dedicated_process(afu);
return -EINVAL;
}
static int native_attach_process(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr)
{
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
WARN(1, "Device link is down, refusing to attach process!\n");
return -EIO;
}
ctx->kernel = kernel;
if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
(ctx->afu->adapter->native->sl_ops->attach_afu_directed))
return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
(ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
return -EINVAL;
}
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
/*
* The CAIA section 2.1.1 indicates that we need to do an AFU reset to
* stop the AFU in dedicated mode (we therefore do not make that
* optional like we do in the afu directed path). It does not indicate
* that we need to do an explicit disable (which should occur
* implicitly as part of the reset) or purge, but we do these as well
* to be on the safe side.
*
* Notably we used to have some issues with the disable sequence
* (before the sequence was spelled out in the architecture) which is
* why we were so heavy weight in the first place, however a bug was
* discovered that had rendered the disable operation ineffective, so
* it is conceivable that was the sole explanation for those
* difficulties. Point is, we should be careful and do some regression
* testing if we ever attempt to remove any part of this procedure.
*/
cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu);
return 0;
}
static void native_update_ivtes(struct cxl_context *ctx)
{
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return update_ivtes_directed(ctx);
if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
(ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
WARN(1, "native_update_ivtes: Bad mode\n");
}
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
return 0;
if (terminate_process_element(ctx))
return -1;
if (remove_process_element(ctx))
return -1;
return 0;
}
static int native_detach_process(struct cxl_context *ctx)
{
trace_cxl_detach(ctx);
if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
return detach_process_native_dedicated(ctx);
return detach_process_native_afu_directed(ctx);
}
static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
/* If the adapter has gone away, we can't get any meaningful
* information.
*/
if (!cxl_ops->link_ok(afu->adapter, afu))
return -EIO;
info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
if (cxl_is_power8())
info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
info->proc_handle = 0;
return 0;
}
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
u64 fir1, serr;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
cxl_afu_decode_psl_serr(ctx->afu, serr);
}
}
void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
u64 fir1, fir2, fir_slice, serr, afu_debug;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
cxl_afu_decode_psl_serr(ctx->afu, serr);
}
dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}
static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
u64 dsisr, u64 errstat)
{
dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
}
return cxl_ops->ack_irq(ctx, 0, errstat);
}
static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
return true;
if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
return true;
return false;
}
irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
if (cxl_is_translation_fault(afu, irq_info->dsisr))
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
return IRQ_HANDLED;
}
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
struct cxl_afu *afu = data;
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
int ph, ret = IRQ_HANDLED, res;
/* check if eeh kicked in while the interrupt was in flight */
if (unlikely(phreg == ~0ULL)) {
dev_warn(&afu->dev,
"Ignoring slice interrupt(%d) due to fenced card",
irq);
return IRQ_HANDLED;
}
/* Mask the pe-handle from register value */
ph = phreg & 0xffff;
if ((res = native_get_irq_info(afu, &irq_info))) {
WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
if (afu->adapter->native->sl_ops->fail_irq)
return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
return ret;
}
rcu_read_lock();
ctx = idr_find(&afu->contexts_idr, ph);
if (ctx) {
if (afu->adapter->native->sl_ops->handle_interrupt)
ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
" %016llx\n(Possible AFU HW issue - was a term/remove acked"
" with outstanding transactions?)\n", ph, irq_info.dsisr,
irq_info.dar);
if (afu->adapter->native->sl_ops->fail_irq)
ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
return ret;
}
static void native_irq_wait(struct cxl_context *ctx)
{
u64 dsisr;
int timeout = 1000;
int ph;
/*
* Wait until no further interrupts are presented by the PSL
* for this context.
*/
while (timeout--) {
ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
if (ph != ctx->pe)
return;
dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
if (cxl_is_power8() &&
((dsisr & CXL_PSL_DSISR_PENDING) == 0))
return;
if (cxl_is_power9() &&
((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
return;
/*
* We are waiting for the workqueue to process our
		 * irq, so we need to let it run here.
*/
msleep(1);
}
dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
" DSISR %016llx!\n", ph, dsisr);
return;
}
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
u64 errstat, serr, afu_error, dsisr;
u64 fir_slice, afu_debug, irq_mask;
/*
* slice err interrupt is only used with full PSL (no XSL)
*/
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
cxl_afu_decode_psl_serr(afu, serr);
if (cxl_is_power8()) {
fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}
dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
/* mask off the IRQ so it won't retrigger until the AFU is reset */
irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
serr |= irq_mask;
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
return IRQ_HANDLED;
}
void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
u64 fir1;
fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}
void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
u64 fir1, fir2;
fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
dev_crit(&adapter->dev,
"PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
{
struct cxl *adapter = data;
u64 err_ivte;
WARN(1, "CXL ERROR interrupt %i\n", irq);
err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
if (adapter->native->sl_ops->debugfs_stop_trace) {
dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
adapter->native->sl_ops->debugfs_stop_trace(adapter);
}
if (adapter->native->sl_ops->err_irq_dump_registers)
adapter->native->sl_ops->err_irq_dump_registers(adapter);
return IRQ_HANDLED;
}
int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
int rc;
adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
dev_name(&adapter->dev));
if (!adapter->irq_name)
return -ENOMEM;
if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
&adapter->native->err_hwirq,
&adapter->native->err_virq,
adapter->irq_name))) {
kfree(adapter->irq_name);
adapter->irq_name = NULL;
return rc;
}
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
return 0;
}
void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
if (adapter->native->err_virq == 0 ||
adapter->native->err_virq !=
irq_find_mapping(NULL, adapter->native->err_hwirq))
return;
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
cxl_unmap_irq(adapter->native->err_virq, adapter);
cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
kfree(adapter->irq_name);
adapter->native->err_virq = 0;
}
int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
u64 serr;
int rc;
afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
dev_name(&afu->dev));
if (!afu->err_irq_name)
return -ENOMEM;
if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
&afu->serr_hwirq,
&afu->serr_virq, afu->err_irq_name))) {
kfree(afu->err_irq_name);
afu->err_irq_name = NULL;
return rc;
}
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
if (cxl_is_power8())
serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
if (cxl_is_power9()) {
/*
* By default, all errors are masked. So don't set all masks.
		 * Slice errors will be transferred.
*/
serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
}
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
return 0;
}
void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
if (afu->serr_virq == 0 ||
afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
return;
cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
cxl_unmap_irq(afu->serr_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
kfree(afu->err_irq_name);
afu->serr_virq = 0;
}
int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
int rc;
afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
dev_name(&afu->dev));
if (!afu->psl_irq_name)
return -ENOMEM;
if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
afu->psl_irq_name))) {
kfree(afu->psl_irq_name);
afu->psl_irq_name = NULL;
}
return rc;
}
void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
if (afu->native->psl_virq == 0 ||
afu->native->psl_virq !=
irq_find_mapping(NULL, afu->native->psl_hwirq))
return;
cxl_unmap_irq(afu->native->psl_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
kfree(afu->psl_irq_name);
afu->native->psl_virq = 0;
}
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
u64 dsisr;
pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
/* Clear PSL_DSISR[PE] */
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
/* Write 1s to clear error status bits */
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
trace_cxl_psl_irq_ack(ctx, tfc);
if (tfc)
cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
if (psl_reset_mask)
recover_psl_err(ctx->afu, psl_reset_mask);
return 0;
}
int cxl_check_error(struct cxl_afu *afu)
{
return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
static bool native_support_attributes(const char *attr_name,
enum cxl_attrs type)
{
return true;
}
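/*
 * AFU configuration record accessors. The records are exposed in the
 * AFU descriptor MMIO area, one record of crs_len bytes per index,
 * starting at crs_offset.
 */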
static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
return -EIO;
if (unlikely(off >= afu->crs_len))
return -ERANGE;
*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
(cr * afu->crs_len) + off);
return 0;
}
static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
return -EIO;
if (unlikely(off >= afu->crs_len))
return -ERANGE;
*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
(cr * afu->crs_len) + off);
return 0;
}
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
u64 aligned_off = off & ~0x3L;
u32 val;
int rc;
rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
if (!rc)
*out = (val >> ((off & 0x3) * 8)) & 0xffff;
return rc;
}
static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
u64 aligned_off = off & ~0x3L;
u32 val;
int rc;
rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
if (!rc)
*out = (val >> ((off & 0x3) * 8)) & 0xff;
return rc;
}
static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
return -EIO;
if (unlikely(off >= afu->crs_len))
return -ERANGE;
out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
(cr * afu->crs_len) + off, in);
return 0;
}
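/*
 * Sub-word configuration record writes are done as a read-modify-write
 * of the aligned 32-bit word containing the target offset.
 */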
static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
u64 aligned_off = off & ~0x3L;
u32 val32, mask, shift;
int rc;
rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
if (rc)
return rc;
shift = (off & 0x3) * 8;
WARN_ON(shift == 24);
mask = 0xffff << shift;
val32 = (val32 & ~mask) | (in << shift);
rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
return rc;
}
static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
u64 aligned_off = off & ~0x3L;
u32 val32, mask, shift;
int rc;
rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
if (rc)
return rc;
shift = (off & 0x3) * 8;
mask = 0xff << shift;
val32 = (val32 & ~mask) | (in << shift);
rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
return rc;
}
const struct cxl_backend_ops cxl_native_ops = {
.module = THIS_MODULE,
.adapter_reset = cxl_pci_reset,
.alloc_one_irq = cxl_pci_alloc_one_irq,
.release_one_irq = cxl_pci_release_one_irq,
.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
.release_irq_ranges = cxl_pci_release_irq_ranges,
.setup_irq = cxl_pci_setup_irq,
.handle_psl_slice_error = native_handle_psl_slice_error,
.psl_interrupt = NULL,
.ack_irq = native_ack_irq,
.irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
.update_ivtes = native_update_ivtes,
.support_attributes = native_support_attributes,
.link_ok = cxl_adapter_link_ok,
.release_afu = cxl_pci_release_afu,
.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
.afu_check_and_enable = native_afu_check_and_enable,
.afu_activate_mode = native_afu_activate_mode,
.afu_deactivate_mode = native_afu_deactivate_mode,
.afu_reset = native_afu_reset,
.afu_cr_read8 = native_afu_cr_read8,
.afu_cr_read16 = native_afu_cr_read16,
.afu_cr_read32 = native_afu_cr_read32,
.afu_cr_read64 = native_afu_cr_read64,
.afu_cr_write8 = native_afu_cr_write8,
.afu_cr_write16 = native_afu_cr_write16,
.afu_cr_write32 = native_afu_cr_write32,
.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};
| linux-master | drivers/misc/cxl/native.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015 IBM Corp.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "cxl.h"
static const __be32 *read_prop_string(const struct device_node *np,
const char *prop_name)
{
const __be32 *prop;
prop = of_get_property(np, prop_name, NULL);
if (cxl_verbose && prop)
pr_info("%s: %s\n", prop_name, (char *) prop);
return prop;
}
static const __be32 *read_prop_dword(const struct device_node *np,
const char *prop_name, u32 *val)
{
const __be32 *prop;
prop = of_get_property(np, prop_name, NULL);
if (prop)
*val = be32_to_cpu(prop[0]);
if (cxl_verbose && prop)
pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
return prop;
}
static const __be64 *read_prop64_dword(const struct device_node *np,
const char *prop_name, u64 *val)
{
const __be64 *prop;
prop = of_get_property(np, prop_name, NULL);
if (prop)
*val = be64_to_cpu(prop[0]);
if (cxl_verbose && prop)
pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
return prop;
}
static int read_handle(struct device_node *np, u64 *handle)
{
const __be32 *prop;
u64 size;
/* Get address and size of the node */
prop = of_get_address(np, 0, &size, NULL);
	if (!prop || size)
return -EINVAL;
/* Helper to read a big number; size is in cells (not bytes) */
*handle = of_read_number(prop, of_n_addr_cells(np));
return 0;
}
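/*
 * Parse the (type, address, size) triplets of the given property:
 * type 0 is the unit address (AFU handle), type 1 the p2 area and
 * type 2 the problem state area.
 */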
static int read_phys_addr(struct device_node *np, char *prop_name,
struct cxl_afu *afu)
{
int i, len, entry_size, naddr, nsize, type;
u64 addr, size;
const __be32 *prop;
naddr = of_n_addr_cells(np);
nsize = of_n_size_cells(np);
prop = of_get_property(np, prop_name, &len);
if (prop) {
entry_size = naddr + nsize;
for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
type = be32_to_cpu(prop[0]);
addr = of_read_number(prop, naddr);
size = of_read_number(&prop[naddr], nsize);
switch (type) {
case 0: /* unit address */
afu->guest->handle = addr;
break;
case 1: /* p2 area */
afu->guest->p2n_phys += addr;
afu->guest->p2n_size = size;
break;
case 2: /* problem state area */
afu->psn_phys += addr;
afu->adapter->ps_size = size;
break;
default:
pr_err("Invalid address type %d found in %s property of AFU\n",
type, prop_name);
return -EINVAL;
}
if (cxl_verbose)
pr_info("%s: %#x %#llx (size %#llx)\n",
prop_name, type, addr, size);
}
}
return 0;
}
static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
{
char vpd[256];
int rc;
size_t len = sizeof(vpd);
memset(vpd, 0, len);
if (adapter)
rc = cxl_guest_read_adapter_vpd(adapter, vpd, len);
else
rc = cxl_guest_read_afu_vpd(afu, vpd, len);
if (rc > 0) {
cxl_dump_debug_buffer(vpd, rc);
rc = 0;
}
return rc;
}
int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
{
if (read_handle(afu_np, &afu->guest->handle))
return -EINVAL;
pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
return 0;
}
int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
{
int i, len, rc;
char *p;
const __be32 *prop;
u16 device_id, vendor_id;
u32 val = 0, class_code;
/* Properties are read in the same order as listed in PAPR */
if (cxl_verbose) {
pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");
prop = of_get_property(np, "compatible", &len);
i = 0;
while (i < len) {
p = (char *) prop + i;
pr_info("compatible: %s\n", p);
i += strlen(p) + 1;
}
read_prop_string(np, "name");
}
rc = read_phys_addr(np, "reg", afu);
if (rc)
return rc;
rc = read_phys_addr(np, "assigned-addresses", afu);
if (rc)
return rc;
if (afu->psn_phys == 0)
afu->psa = false;
else
afu->psa = true;
if (cxl_verbose) {
read_prop_string(np, "ibm,loc-code");
read_prop_string(np, "device_type");
}
read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);
if (cxl_verbose) {
read_prop_dword(np, "ibm,scratchpad-size", &val);
read_prop_dword(np, "ibm,programmable", &val);
read_prop_string(np, "ibm,phandle");
read_vpd(NULL, afu);
}
read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
afu->irqs_max = afu->guest->max_ints;
prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
if (prop) {
/* One extra interrupt for the PSL interrupt is already
* included. Remove it now to keep only AFU interrupts and
* match the native case.
*/
afu->pp_irqs--;
}
if (cxl_verbose) {
read_prop_dword(np, "ibm,max-ints", &val);
read_prop_dword(np, "ibm,vpd-size", &val);
}
read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
afu->eb_offset = 0;
if (cxl_verbose)
read_prop_dword(np, "ibm,config-record-type", &val);
read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
afu->crs_offset = 0;
read_prop_dword(np, "ibm,#config-records", &afu->crs_num);
if (cxl_verbose) {
for (i = 0; i < afu->crs_num; i++) {
rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
&device_id);
if (!rc)
pr_info("record %d - device-id: %#x\n",
i, device_id);
rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
&vendor_id);
if (!rc)
pr_info("record %d - vendor-id: %#x\n",
i, vendor_id);
rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
&class_code);
if (!rc) {
class_code >>= 8;
pr_info("record %d - class-code: %#x\n",
i, class_code);
}
}
read_prop_dword(np, "ibm,function-number", &val);
read_prop_dword(np, "ibm,privileged-function", &val);
read_prop_dword(np, "vendor-id", &val);
read_prop_dword(np, "device-id", &val);
read_prop_dword(np, "revision-id", &val);
read_prop_dword(np, "class-code", &val);
read_prop_dword(np, "subsystem-vendor-id", &val);
read_prop_dword(np, "subsystem-id", &val);
}
/*
* if "ibm,process-mmio" doesn't exist then per-process mmio is
* not supported
*/
val = 0;
prop = read_prop_dword(np, "ibm,process-mmio", &val);
if (prop && val == 1)
afu->pp_psa = true;
else
afu->pp_psa = false;
if (cxl_verbose) {
read_prop_dword(np, "ibm,supports-aur", &val);
read_prop_dword(np, "ibm,supports-csrp", &val);
read_prop_dword(np, "ibm,supports-prr", &val);
}
prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
if (prop)
afu->serr_hwirq = val;
pr_devel("AFU handle: %#llx\n", afu->guest->handle);
pr_devel("p2n_phys: %#llx (size %#llx)\n",
afu->guest->p2n_phys, afu->guest->p2n_size);
pr_devel("psn_phys: %#llx (size %#llx)\n",
afu->psn_phys, afu->adapter->ps_size);
pr_devel("Max number of processes virtualised=%i\n",
afu->max_procs_virtualised);
pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
afu->irqs_max);
pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);
return 0;
}
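/*
 * Parse the "interrupt-ranges" property into the adapter's list of
 * available hardware IRQ ranges, allocating one bitmap per range and
 * keeping track of the lowest offset as the base.
 */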
static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
{
const __be32 *ranges;
int len, nranges, i;
struct irq_avail *cur;
ranges = of_get_property(np, "interrupt-ranges", &len);
if (ranges == NULL || len < (2 * sizeof(int)))
return -EINVAL;
/*
* encoded array of two cells per entry, each cell encoded as
* with encode-int
*/
nranges = len / (2 * sizeof(int));
if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
return -EINVAL;
adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail),
GFP_KERNEL);
if (adapter->guest->irq_avail == NULL)
return -ENOMEM;
adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
for (i = 0; i < nranges; i++) {
cur = &adapter->guest->irq_avail[i];
cur->offset = be32_to_cpu(ranges[i * 2]);
cur->range = be32_to_cpu(ranges[i * 2 + 1]);
cur->bitmap = bitmap_zalloc(cur->range, GFP_KERNEL);
if (cur->bitmap == NULL)
goto err;
if (cur->offset < adapter->guest->irq_base_offset)
adapter->guest->irq_base_offset = cur->offset;
if (cxl_verbose)
pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
cur->offset, cur->offset + cur->range - 1,
cur->range);
}
adapter->guest->irq_nranges = nranges;
spin_lock_init(&adapter->guest->irq_alloc_lock);
return 0;
err:
for (i--; i >= 0; i--) {
cur = &adapter->guest->irq_avail[i];
bitmap_free(cur->bitmap);
}
kfree(adapter->guest->irq_avail);
adapter->guest->irq_avail = NULL;
return -ENOMEM;
}
int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
{
if (read_handle(np, &adapter->guest->handle))
return -EINVAL;
pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
return 0;
}
int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
{
int rc, len, naddr, i;
char *p;
const __be32 *prop;
u32 val = 0;
/* Properties are read in the same order as listed in PAPR */
naddr = of_n_addr_cells(np);
if (cxl_verbose) {
pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");
read_prop_dword(np, "#address-cells", &val);
read_prop_dword(np, "#size-cells", &val);
prop = of_get_property(np, "compatible", &len);
i = 0;
while (i < len) {
p = (char *) prop + i;
pr_info("compatible: %s\n", p);
i += strlen(p) + 1;
}
read_prop_string(np, "name");
read_prop_string(np, "model");
prop = of_get_property(np, "reg", NULL);
if (prop) {
pr_info("reg: addr:%#llx size:%#x\n",
of_read_number(prop, naddr),
be32_to_cpu(prop[naddr]));
}
read_prop_string(np, "ibm,loc-code");
}
if ((rc = read_adapter_irq_config(adapter, np)))
return rc;
if (cxl_verbose) {
read_prop_string(np, "device_type");
read_prop_string(np, "ibm,phandle");
}
prop = read_prop_dword(np, "ibm,caia-version", &val);
if (prop) {
adapter->caia_major = (val & 0xFF00) >> 8;
adapter->caia_minor = val & 0xFF;
}
prop = read_prop_dword(np, "ibm,psl-revision", &val);
if (prop)
adapter->psl_rev = val;
prop = read_prop_string(np, "status");
if (prop) {
adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
if (adapter->guest->status == NULL)
return -ENOMEM;
}
prop = read_prop_dword(np, "vendor-id", &val);
if (prop)
adapter->guest->vendor = val;
prop = read_prop_dword(np, "device-id", &val);
if (prop)
adapter->guest->device = val;
if (cxl_verbose) {
read_prop_dword(np, "ibm,privileged-facility", &val);
read_prop_dword(np, "revision-id", &val);
read_prop_dword(np, "class-code", &val);
}
prop = read_prop_dword(np, "subsystem-vendor-id", &val);
if (prop)
adapter->guest->subsystem_vendor = val;
prop = read_prop_dword(np, "subsystem-id", &val);
if (prop)
adapter->guest->subsystem = val;
if (cxl_verbose)
read_vpd(adapter, NULL);
return 0;
}
static int cxl_of_remove(struct platform_device *pdev)
{
struct cxl *adapter;
int afu;
adapter = dev_get_drvdata(&pdev->dev);
for (afu = 0; afu < adapter->slices; afu++)
cxl_guest_remove_afu(adapter->afu[afu]);
cxl_guest_remove_adapter(adapter);
return 0;
}
static void cxl_of_shutdown(struct platform_device *pdev)
{
cxl_of_remove(pdev);
}
int cxl_of_probe(struct platform_device *pdev)
{
struct device_node *np = NULL;
struct device_node *afu_np = NULL;
struct cxl *adapter = NULL;
int ret;
int slice = 0, slice_ok = 0;
pr_devel("in %s\n", __func__);
np = pdev->dev.of_node;
if (np == NULL)
return -ENODEV;
/* init adapter */
adapter = cxl_guest_init_adapter(np, pdev);
if (IS_ERR(adapter)) {
dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
return PTR_ERR(adapter);
}
/* init afu */
for_each_child_of_node(np, afu_np) {
if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
slice, ret);
else
slice_ok++;
slice++;
}
if (slice_ok == 0) {
dev_info(&pdev->dev, "No active AFU");
adapter->slices = 0;
}
return 0;
}
static const struct of_device_id cxl_of_match[] = {
{ .compatible = "ibm,coherent-platform-facility",},
{},
};
MODULE_DEVICE_TABLE(of, cxl_of_match);
struct platform_driver cxl_of_driver = {
.driver = {
.name = "cxl_of",
.of_match_table = cxl_of_match,
.owner = THIS_MODULE
},
.probe = cxl_of_probe,
.remove = cxl_of_remove,
.shutdown = cxl_of_shutdown,
};
| linux-master | drivers/misc/cxl/of.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
#include "cxl.h"
/*
* Allocates space for a CXL context.
*/
struct cxl_context *cxl_context_alloc(void)
{
return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}
/*
* Initialises a CXL context.
*/
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
ctx->afu = afu;
ctx->master = master;
ctx->pid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
ctx->mapping = NULL;
ctx->tidr = 0;
ctx->assign_tidr = false;
if (cxl_is_power8()) {
spin_lock_init(&ctx->sste_lock);
/*
* Allocate the segment table before we put it in the IDR so that we
* can always access it when dereferenced from IDR. For the same
* reason, the segment table is only destroyed after the context is
* removed from the IDR. Access to this in the IOCTL is protected by
* Linux filesystem semantics (can't IOCTL until open is complete).
*/
i = cxl_alloc_sst(ctx);
if (i)
return i;
}
INIT_WORK(&ctx->fault_work, cxl_handle_fault);
init_waitqueue_head(&ctx->wq);
spin_lock_init(&ctx->lock);
ctx->irq_bitmap = NULL;
ctx->pending_irq = false;
ctx->pending_fault = false;
ctx->pending_afu_err = false;
INIT_LIST_HEAD(&ctx->irq_names);
/*
* When we have to destroy all contexts in cxl_context_detach_all() we
* end up with afu_release_irqs() called from inside a
* idr_for_each_entry(). Hence we need to make sure that anything
* dereferenced from this IDR is ok before we allocate the IDR here.
* This clears out the IRQ ranges to ensure this.
*/
for (i = 0; i < CXL_IRQ_RANGES; i++)
ctx->irqs.range[i] = 0;
mutex_init(&ctx->status_mutex);
ctx->status = OPENED;
/*
	 * Allocating the IDR! We'd better make sure everything that will be
	 * dereferenced from it is already set up.
*/
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
if (i < 0)
return i;
ctx->pe = i;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
ctx->elem = &ctx->afu->native->spa[i];
ctx->external_pe = ctx->pe;
} else {
ctx->external_pe = -1; /* assigned when attaching */
}
ctx->pe_inserted = false;
/*
	 * take a ref on the afu so that it stays alive at least until
* this context is reclaimed inside reclaim_ctx.
*/
cxl_afu_get(afu);
return 0;
}
void cxl_context_set_mapping(struct cxl_context *ctx,
struct address_space *mapping)
{
mutex_lock(&ctx->mapping_lock);
ctx->mapping = mapping;
mutex_unlock(&ctx->mapping_lock);
}
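/*
 * Fault handler for problem state area mappings. If the context is not
 * started, either back the mapping with a 0xff-filled page (when the
 * user requested mmio_err_ff) or return SIGBUS; otherwise insert the
 * pfn for the faulting offset.
 */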
static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct cxl_context *ctx = vma->vm_file->private_data;
u64 area, offset;
vm_fault_t ret;
offset = vmf->pgoff << PAGE_SHIFT;
pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
__func__, ctx->pe, vmf->address, offset);
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
if (offset >= ctx->afu->adapter->ps_size)
return VM_FAULT_SIGBUS;
} else {
area = ctx->psn_phys;
if (offset >= ctx->psn_size)
return VM_FAULT_SIGBUS;
}
mutex_lock(&ctx->status_mutex);
if (ctx->status != STARTED) {
mutex_unlock(&ctx->status_mutex);
pr_devel("%s: Context not started, failing problem state access\n", __func__);
if (ctx->mmio_err_ff) {
if (!ctx->ff_page) {
ctx->ff_page = alloc_page(GFP_USER);
if (!ctx->ff_page)
return VM_FAULT_OOM;
memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
}
get_page(ctx->ff_page);
vmf->page = ctx->ff_page;
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
return 0;
}
return VM_FAULT_SIGBUS;
}
ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
return ret;
}
static const struct vm_operations_struct cxl_mmap_vmops = {
.fault = cxl_mmap_fault,
};
/*
* Map a per-context mmio space into the given vma.
*/
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
u64 start = vma->vm_pgoff << PAGE_SHIFT;
u64 len = vma->vm_end - vma->vm_start;
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
if (start + len > ctx->afu->adapter->ps_size)
return -EINVAL;
if (cxl_is_power9()) {
/*
* Make sure there is a valid problem state
* area space for this AFU.
*/
if (ctx->master && !ctx->afu->psa) {
pr_devel("AFU doesn't support mmio space\n");
return -EINVAL;
}
/* Can't mmap until the AFU is enabled */
if (!ctx->afu->enabled)
return -EBUSY;
}
} else {
if (start + len > ctx->psn_size)
return -EINVAL;
/* Make sure there is a valid per process space for this AFU */
if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
pr_devel("AFU doesn't support mmio space\n");
return -EINVAL;
}
/* Can't mmap until the AFU is enabled */
if (!ctx->afu->enabled)
return -EBUSY;
}
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &cxl_mmap_vmops;
return 0;
}
/*
* Detach a context from the hardware. This disables interrupts and doesn't
* return until all outstanding interrupts for this context have completed. The
* hardware should no longer access *ctx after this has returned.
*/
int __detach_context(struct cxl_context *ctx)
{
enum cxl_context_status status;
mutex_lock(&ctx->status_mutex);
status = ctx->status;
ctx->status = CLOSED;
mutex_unlock(&ctx->status_mutex);
if (status != STARTED)
return -EBUSY;
/* Only warn if we detached while the link was OK.
* If detach fails when hw is down, we don't care.
*/
WARN_ON(cxl_ops->detach_process(ctx) &&
cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
/*
* Wait until no further interrupts are presented by the PSL
* for this context.
*/
if (cxl_ops->irq_wait)
cxl_ops->irq_wait(ctx);
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
cxl_ctx_put();
/* Decrease the attached context count on the adapter */
cxl_adapter_context_put(ctx->afu->adapter);
/* Decrease the mm count on the context */
cxl_context_mm_count_put(ctx);
if (ctx->mm)
mm_context_remove_copro(ctx->mm);
ctx->mm = NULL;
return 0;
}
/*
* Detach the given context from the AFU. This doesn't actually
* free the context but it should stop the context running in hardware
* (ie. prevent this context from generating any further interrupts
* so that it can be freed).
*/
void cxl_context_detach(struct cxl_context *ctx)
{
int rc;
rc = __detach_context(ctx);
if (rc)
return;
afu_release_irqs(ctx, ctx);
wake_up_all(&ctx->wq);
}
/*
* Detach all contexts on the given AFU.
*/
void cxl_context_detach_all(struct cxl_afu *afu)
{
struct cxl_context *ctx;
int tmp;
mutex_lock(&afu->contexts_lock);
idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
/*
* Anything done in here needs to be setup before the IDR is
* created and torn down after the IDR removed
*/
cxl_context_detach(ctx);
/*
* We are force detaching - remove any active PSA mappings so
* userspace cannot interfere with the card if it comes back.
* Easiest way to exercise this is to unbind and rebind the
* driver via sysfs while it is in use.
*/
mutex_lock(&ctx->mapping_lock);
if (ctx->mapping)
unmap_mapping_range(ctx->mapping, 0, 0, 1);
mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
static void reclaim_ctx(struct rcu_head *rcu)
{
struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
if (cxl_is_power8())
free_page((u64)ctx->sstp);
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
bitmap_free(ctx->irq_bitmap);
/* Drop ref to the afu device taken during cxl_context_init */
cxl_afu_put(ctx->afu);
kfree(ctx);
}
void cxl_context_free(struct cxl_context *ctx)
{
if (ctx->kernelapi && ctx->mapping)
cxl_release_mapping(ctx);
mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
mutex_unlock(&ctx->afu->contexts_lock);
call_rcu(&ctx->rcu, reclaim_ctx);
}
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
if (ctx->mm)
mmgrab(ctx->mm);
}
void cxl_context_mm_count_put(struct cxl_context *ctx)
{
if (ctx->mm)
mmdrop(ctx->mm);
}
| linux-master | drivers/misc/cxl/context.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
#include "cxl.h"
#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
static dev_t cxl_dev;
static struct class *cxl_class;
static int __afu_open(struct inode *inode, struct file *file, bool master)
{
struct cxl *adapter;
struct cxl_afu *afu;
struct cxl_context *ctx;
int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
int slice = CXL_DEVT_AFU(inode->i_rdev);
int rc = -ENODEV;
pr_devel("afu_open afu%i.%i\n", slice, adapter_num);
if (!(adapter = get_cxl_adapter(adapter_num)))
return -ENODEV;
if (slice > adapter->slices)
goto err_put_adapter;
spin_lock(&adapter->afu_list_lock);
if (!(afu = adapter->afu[slice])) {
spin_unlock(&adapter->afu_list_lock);
goto err_put_adapter;
}
/*
* taking a ref to the afu so that it doesn't go away
	 * for the rest of the function. This ref is released before
* we return.
*/
cxl_afu_get(afu);
spin_unlock(&adapter->afu_list_lock);
if (!afu->current_mode)
goto err_put_afu;
if (!cxl_ops->link_ok(adapter, afu)) {
rc = -EIO;
goto err_put_afu;
}
if (!(ctx = cxl_context_alloc())) {
rc = -ENOMEM;
goto err_put_afu;
}
rc = cxl_context_init(ctx, afu, master);
if (rc)
goto err_put_afu;
cxl_context_set_mapping(ctx, inode->i_mapping);
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
/* indicate success */
rc = 0;
err_put_afu:
/* release the ref taken earlier */
cxl_afu_put(afu);
err_put_adapter:
put_device(&adapter->dev);
return rc;
}
int afu_open(struct inode *inode, struct file *file)
{
return __afu_open(inode, file, false);
}
static int afu_master_open(struct inode *inode, struct file *file)
{
return __afu_open(inode, file, true);
}
int afu_release(struct inode *inode, struct file *file)
{
struct cxl_context *ctx = file->private_data;
pr_devel("%s: closing cxl file descriptor. pe: %i\n",
__func__, ctx->pe);
cxl_context_detach(ctx);
/*
* Delete the context's mapping pointer, unless it's created by the
* kernel API, in which case leave it so it can be freed by reclaim_ctx()
*/
if (!ctx->kernelapi) {
mutex_lock(&ctx->mapping_lock);
ctx->mapping = NULL;
mutex_unlock(&ctx->mapping_lock);
}
/*
	 * At this point all bottom halves have finished and we should be
* getting no more IRQs from the hardware for this context. Once it's
* removed from the IDR (and RCU synchronised) it's safe to free the
* sstp and context.
*/
cxl_context_free(ctx);
return 0;
}
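/*
 * CXL_IOCTL_START_WORK: validate the user's work structure, allocate
 * the AFU interrupts, take references on the pid and mm, and attach
 * the context to the hardware. On success the context moves to the
 * STARTED state.
 */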
static long afu_ioctl_start_work(struct cxl_context *ctx,
struct cxl_ioctl_start_work __user *uwork)
{
struct cxl_ioctl_start_work work;
u64 amr = 0;
int rc;
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
/* Do this outside the status_mutex to avoid a circular dependency with
* the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork, sizeof(work)))
return -EFAULT;
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
rc = -EIO;
goto out;
}
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid
*/
if (work.reserved1 || work.reserved2 || work.reserved3 ||
work.reserved4 || work.reserved5 ||
(work.flags & ~CXL_START_WORK_ALL)) {
rc = -EINVAL;
goto out;
}
if (!(work.flags & CXL_START_WORK_NUM_IRQS))
work.num_interrupts = ctx->afu->pp_irqs;
else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
(work.num_interrupts > ctx->afu->irqs_max)) {
rc = -EINVAL;
goto out;
}
if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
goto out;
if (work.flags & CXL_START_WORK_AMR)
amr = work.amr & mfspr(SPRN_UAMOR);
if (work.flags & CXL_START_WORK_TID)
ctx->assign_tidr = true;
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
* Increment the mapped context count for adapter. This also checks
* if adapter_context_lock is taken.
*/
rc = cxl_adapter_context_get(ctx->afu->adapter);
if (rc) {
afu_release_irqs(ctx, ctx);
goto out;
}
/*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
* that performs this ioctl and not the process that opened the file.
* Also we grab the PID of the group leader so that if the task that
* has performed the attach operation exits the mm context of the
* process is still accessible.
*/
ctx->pid = get_task_pid(current, PIDTYPE_PID);
/* acquire a reference to the task's mm */
ctx->mm = get_task_mm(current);
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
if (ctx->mm) {
/* decrement the use count from above */
mmput(ctx->mm);
/* make TLBIs for this context global */
mm_context_add_copro(ctx->mm);
}
/*
* Increment driver use count. Enables global TLBIs for hash
* and callbacks to handle the segment table
*/
cxl_ctx_get();
/*
* A barrier is needed to make sure all TLBIs are global
* before we attach and the context starts being used by the
* adapter.
*
* Needed after mm_context_add_copro() for radix and
* cxl_ctx_get() for hash/p8.
*
* The barrier should really be mb(), since it involves a
* device. However, it's only useful when we have local
* vs. global TLBIs, i.e SMP=y. So keep smp_mb().
*/
smp_mb();
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
cxl_adapter_context_put(ctx->afu->adapter);
put_pid(ctx->pid);
ctx->pid = NULL;
cxl_ctx_put();
cxl_context_mm_count_put(ctx);
if (ctx->mm)
mm_context_remove_copro(ctx->mm);
goto out;
}
rc = 0;
if (work.flags & CXL_START_WORK_TID) {
work.tid = ctx->tidr;
if (copy_to_user(uwork, &work, sizeof(work)))
rc = -EFAULT;
}
ctx->status = STARTED;
out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
static long afu_ioctl_process_element(struct cxl_context *ctx,
int __user *upe)
{
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
return -EFAULT;
return 0;
}
static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
struct cxl_afu_id __user *upafuid)
{
struct cxl_afu_id afuid = { 0 };
afuid.card_id = ctx->afu->adapter->adapter_num;
afuid.afu_offset = ctx->afu->slice;
afuid.afu_mode = ctx->afu->current_mode;
/* set the flag bit in case the afu is a slave */
if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
afuid.flags |= CXL_AFUID_FLAG_SLAVE;
if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
return -EFAULT;
return 0;
}
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct cxl_context *ctx = file->private_data;
if (ctx->status == CLOSED)
return -EIO;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
pr_devel("afu_ioctl\n");
switch (cmd) {
case CXL_IOCTL_START_WORK:
return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
case CXL_IOCTL_GET_PROCESS_ELEMENT:
return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
case CXL_IOCTL_GET_AFU_ID:
return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
arg);
}
return -EINVAL;
}
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return afu_ioctl(file, cmd, arg);
}
int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
struct cxl_context *ctx = file->private_data;
/* AFU must be started before we can MMIO */
if (ctx->status != STARTED)
return -EIO;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
return cxl_context_iomap(ctx, vm);
}
static inline bool ctx_event_pending(struct cxl_context *ctx)
{
if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
return true;
if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
return true;
return false;
}
__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
__poll_t mask = 0;
unsigned long flags;
poll_wait(file, &ctx->wq, poll);
pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
spin_lock_irqsave(&ctx->lock, flags);
if (ctx_event_pending(ctx))
mask |= EPOLLIN | EPOLLRDNORM;
else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending
*/
mask |= EPOLLERR;
spin_unlock_irqrestore(&ctx->lock, flags);
pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
return mask;
}
static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
char __user *buf,
struct cxl_event *event,
struct cxl_event_afu_driver_reserved *pl)
{
/* Check event */
if (!pl) {
ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
return -EFAULT;
}
/* Check event size */
event->header.size += pl->data_size;
if (event->header.size > CXL_READ_MIN_SIZE) {
ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
return -EFAULT;
}
/* Copy event header */
if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
return -EFAULT;
}
/* Copy event data */
buf += sizeof(struct cxl_event_header);
if (copy_to_user(buf, &pl->data, pl->data_size)) {
ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
return -EFAULT;
}
ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
return event->header.size;
}
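/*
 * Deliver one struct cxl_event to userspace, sleeping until an AFU
 * interrupt, data storage fault, AFU error or driver-specific event
 * is pending (or the context is closed).
 */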
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct cxl_context *ctx = file->private_data;
struct cxl_event_afu_driver_reserved *pl = NULL;
struct cxl_event event;
unsigned long flags;
int rc;
DEFINE_WAIT(wait);
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
if (count < CXL_READ_MIN_SIZE)
return -EINVAL;
spin_lock_irqsave(&ctx->lock, flags);
for (;;) {
prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
break;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
rc = -EIO;
goto out;
}
if (file->f_flags & O_NONBLOCK) {
rc = -EAGAIN;
goto out;
}
if (signal_pending(current)) {
rc = -ERESTARTSYS;
goto out;
}
spin_unlock_irqrestore(&ctx->lock, flags);
pr_devel("afu_read going to sleep...\n");
schedule();
pr_devel("afu_read woken up\n");
spin_lock_irqsave(&ctx->lock, flags);
}
finish_wait(&ctx->wq, &wait);
memset(&event, 0, sizeof(event));
event.header.process_element = ctx->pe;
event.header.size = sizeof(struct cxl_event_header);
if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
pr_devel("afu_read delivering AFU driver specific event\n");
pl = ctx->afu_driver_ops->fetch_event(ctx);
atomic_dec(&ctx->afu_driver_events);
event.header.type = CXL_EVENT_AFU_DRIVER;
} else if (ctx->pending_irq) {
pr_devel("afu_read delivering AFU interrupt\n");
event.header.size += sizeof(struct cxl_event_afu_interrupt);
event.header.type = CXL_EVENT_AFU_INTERRUPT;
event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
ctx->pending_irq = false;
} else if (ctx->pending_fault) {
pr_devel("afu_read delivering data storage fault\n");
event.header.size += sizeof(struct cxl_event_data_storage);
event.header.type = CXL_EVENT_DATA_STORAGE;
event.fault.addr = ctx->fault_addr;
event.fault.dsisr = ctx->fault_dsisr;
ctx->pending_fault = false;
} else if (ctx->pending_afu_err) {
pr_devel("afu_read delivering afu error\n");
event.header.size += sizeof(struct cxl_event_afu_error);
event.header.type = CXL_EVENT_AFU_ERROR;
event.afu_error.error = ctx->afu_err;
ctx->pending_afu_err = false;
} else if (ctx->status == CLOSED) {
pr_devel("afu_read fatal error\n");
spin_unlock_irqrestore(&ctx->lock, flags);
return -EIO;
} else
WARN(1, "afu_read must be buggy\n");
spin_unlock_irqrestore(&ctx->lock, flags);
if (event.header.type == CXL_EVENT_AFU_DRIVER)
return afu_driver_event_copy(ctx, buf, &event, pl);
if (copy_to_user(buf, &event, event.header.size))
return -EFAULT;
return event.header.size;
out:
finish_wait(&ctx->wq, &wait);
spin_unlock_irqrestore(&ctx->lock, flags);
return rc;
}
/*
* Note: if this is updated, we need to update api.c to patch the new ones in
* too
*/
const struct file_operations afu_fops = {
.owner = THIS_MODULE,
.open = afu_open,
.poll = afu_poll,
.read = afu_read,
.release = afu_release,
.unlocked_ioctl = afu_ioctl,
.compat_ioctl = afu_compat_ioctl,
.mmap = afu_mmap,
};
static const struct file_operations afu_master_fops = {
.owner = THIS_MODULE,
.open = afu_master_open,
.poll = afu_poll,
.read = afu_read,
.release = afu_release,
.unlocked_ioctl = afu_ioctl,
.compat_ioctl = afu_compat_ioctl,
.mmap = afu_mmap,
};
static char *cxl_devnode(const struct device *dev, umode_t *mode)
{
if (cpu_has_feature(CPU_FTR_HVMODE) &&
CXL_DEVT_IS_CARD(dev->devt)) {
/*
* These minor numbers will eventually be used to program the
* PSL and AFUs once we have dynamic reprogramming support
*/
return NULL;
}
return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
extern struct class *cxl_class;
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
struct device **chardev, char *postfix, char *desc,
const struct file_operations *fops)
{
struct device *dev;
int rc;
cdev_init(cdev, fops);
rc = cdev_add(cdev, devt, 1);
if (rc) {
dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
return rc;
}
dev = device_create(cxl_class, &afu->dev, devt, afu,
"afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
goto err;
}
*chardev = dev;
return 0;
err:
cdev_del(cdev);
return rc;
}
int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
&afu->chardev_d, "d", "dedicated",
&afu_master_fops); /* Uses master fops */
}
int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
&afu->chardev_m, "m", "master",
&afu_master_fops);
}
int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
&afu->chardev_s, "s", "shared",
&afu_fops);
}
void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
if (afu->chardev_d) {
cdev_del(&afu->afu_cdev_d);
device_unregister(afu->chardev_d);
afu->chardev_d = NULL;
}
if (afu->chardev_m) {
cdev_del(&afu->afu_cdev_m);
device_unregister(afu->chardev_m);
afu->chardev_m = NULL;
}
if (afu->chardev_s) {
cdev_del(&afu->afu_cdev_s);
device_unregister(afu->chardev_s);
afu->chardev_s = NULL;
}
}
int cxl_register_afu(struct cxl_afu *afu)
{
afu->dev.class = cxl_class;
return device_register(&afu->dev);
}
int cxl_register_adapter(struct cxl *adapter)
{
adapter->dev.class = cxl_class;
/*
* Future: When we support dynamically reprogramming the PSL & AFU we
* will expose the interface to do that via a chardev:
* adapter->dev.devt = CXL_CARD_MKDEV(adapter);
*/
return device_register(&adapter->dev);
}
dev_t cxl_get_dev(void)
{
return cxl_dev;
}
int __init cxl_file_init(void)
{
int rc;
/*
* If these change we really need to update API. Either change some
* flags or update API version number CXL_API_VERSION.
*/
BUILD_BUG_ON(CXL_API_VERSION != 3);
BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);
if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
pr_err("Unable to allocate CXL major number: %i\n", rc);
return rc;
}
pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));
cxl_class = class_create("cxl");
if (IS_ERR(cxl_class)) {
pr_err("Unable to create CXL class\n");
rc = PTR_ERR(cxl_class);
goto err;
}
cxl_class->devnode = cxl_devnode;
return 0;
err:
unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
return rc;
}
void cxl_file_exit(void)
{
unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
class_destroy(cxl_class);
}
| linux-master | drivers/misc/cxl/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
return PCI_PROBE_NORMAL;
}
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
return -ENODEV;
}
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
/*
	 * MSI should never be set but we still need to provide this
	 * callback.
*/
}
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *phb;
struct cxl_afu *afu;
struct cxl_context *ctx;
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
return false;
}
dev->dev.archdata.dma_offset = PAGE_OFFSET;
/*
	 * Allocate a context to do cxl things with. If we eventually do real
* DMA ops, we'll need a default context to attach them to
*/
ctx = cxl_dev_context_init(dev);
if (IS_ERR(ctx))
return false;
dev->dev.archdata.cxl_ctx = ctx;
return (cxl_ops->afu_check_and_enable(afu) == 0);
}
static void cxl_pci_disable_device(struct pci_dev *dev)
{
struct cxl_context *ctx = cxl_get_context(dev);
if (ctx) {
if (ctx->status == STARTED) {
dev_err(&dev->dev, "Default context started\n");
return;
}
dev->dev.archdata.cxl_ctx = NULL;
cxl_release_context(ctx);
}
}
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
/* Should we do an AFU reset here ? */
}
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
return (bus << 8) + devfn;
}
static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
return phb ? phb->private_data : NULL;
}
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
atomic_dec_if_positive(&afu->configured_state);
}
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
return atomic_inc_unless_negative(&afu->configured_state);
}
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
struct cxl_afu *afu, int *_record)
{
int record;
record = cxl_pcie_cfg_record(bus->number, devfn);
if (record > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
*_record = record;
return 0;
}
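/*
 * Config space accesses on the virtual PHB are serviced from the AFU
 * configuration record selected by the bus/devfn.
 */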
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
int rc, record;
struct cxl_afu *afu;
u8 val8;
u16 val16;
u32 val32;
afu = pci_bus_to_afu(bus);
/* Grab a reader lock on afu. */
if (afu == NULL || !cxl_afu_configured_get(afu))
return PCIBIOS_DEVICE_NOT_FOUND;
rc = cxl_pcie_config_info(bus, devfn, afu, &record);
if (rc)
goto out;
switch (len) {
case 1:
rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
*val = val8;
break;
case 2:
rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
*val = val16;
break;
case 4:
rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
*val = val32;
break;
default:
WARN_ON(1);
}
out:
cxl_afu_configured_put(afu);
return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
int rc, record;
struct cxl_afu *afu;
afu = pci_bus_to_afu(bus);
/* Grab a reader lock on afu. */
if (afu == NULL || !cxl_afu_configured_get(afu))
return PCIBIOS_DEVICE_NOT_FOUND;
rc = cxl_pcie_config_info(bus, devfn, afu, &record);
if (rc)
goto out;
switch (len) {
case 1:
rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
break;
case 2:
rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
break;
case 4:
rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
break;
default:
WARN_ON(1);
}
out:
cxl_afu_configured_put(afu);
return rc ? PCIBIOS_SET_FAILED : 0;
}
static struct pci_ops cxl_pcie_pci_ops =
{
.read = cxl_pcie_read_config,
.write = cxl_pcie_write_config,
};
static struct pci_controller_ops cxl_pci_controller_ops =
{
.probe_mode = cxl_pci_probe_mode,
.enable_device_hook = cxl_pci_enable_device_hook,
.disable_device = cxl_pci_disable_device,
.release_device = cxl_pci_disable_device,
.reset_secondary_bus = cxl_pci_reset_secondary_bus,
.setup_msi_irqs = cxl_setup_msi_irqs,
.teardown_msi_irqs = cxl_teardown_msi_irqs,
};
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
struct pci_controller *phb;
struct device_node *vphb_dn;
struct device *parent;
/*
* If there are no AFU configuration records we won't have anything to
* expose under the vPHB, so skip creating one, returning success since
* this is still a valid case. This will also opt us out of EEH
* handling since we won't have anything special to do if there are no
* kernel drivers attached to the vPHB, and EEH handling is not yet
* supported in the peer model.
*/
if (!afu->crs_num)
return 0;
/* The parent device is the adapter. Reuse the device node of
* the adapter.
* We don't seem to care what device node is used for the vPHB,
* but tools such as lsvpd walk up the device parents looking
* for a valid location code, so we might as well show devices
* attached to the adapter as being located on that adapter.
*/
parent = afu->adapter->dev.parent;
vphb_dn = parent->of_node;
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(vphb_dn);
if (!phb)
return -ENODEV;
/* Setup parent in sysfs */
phb->parent = parent;
/* Setup the PHB using arch provided callback */
phb->ops = &cxl_pcie_pci_ops;
phb->cfg_addr = NULL;
phb->cfg_data = NULL;
phb->private_data = afu;
phb->controller_ops = cxl_pci_controller_ops;
/* Scan the bus */
pcibios_scan_phb(phb);
if (phb->bus == NULL)
return -ENXIO;
/* Set release hook on root bus */
pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
pcibios_free_controller_deferred,
(void *) phb);
/* Claim resources. This might need some rework as well depending
* whether we are doing probe-only or not, like assigning unassigned
* resources etc...
*/
pcibios_claim_one_bus(phb->bus);
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
afu->phb = phb;
return 0;
}
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
struct pci_controller *phb;
/* If there is no configuration record we won't have one of these */
if (!afu || !afu->phb)
return;
phb = afu->phb;
afu->phb = NULL;
pci_remove_root_bus(phb->bus);
/*
* We don't free phb here - that's handled by
* pcibios_free_controller_deferred()
*/
}
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
struct pci_controller *phb;
phb = pci_bus_to_host(dev->bus);
return (phb->ops == &cxl_pcie_pci_ops);
}
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
struct pci_controller *phb;
phb = pci_bus_to_host(dev->bus);
return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
| linux-master | drivers/misc/cxl/vphb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2014 IBM Corp.
*/
#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>
#include "cxl.h"
#include "trace.h"
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
(sste->esid_data == cpu_to_be64(slb->esid)));
}
/*
* This finds a free SSTE for the given SLB, or returns NULL if it's already in
* the segment table.
*/
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
struct copro_slb *slb)
{
struct cxl_sste *primary, *sste, *ret = NULL;
unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
unsigned int entry;
unsigned int hash;
if (slb->vsid & SLB_VSID_B_1T)
hash = (slb->esid >> SID_SHIFT_1T) & mask;
else /* 256M */
hash = (slb->esid >> SID_SHIFT) & mask;
primary = ctx->sstp + (hash << 3);
for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
ret = sste;
if (sste_matches(sste, slb))
return NULL;
}
if (ret)
return ret;
/* Nothing free, select an entry to cast out */
ret = primary + ctx->sst_lru;
ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
return ret;
}
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
/* mask is the group index, we search primary and secondary here. */
struct cxl_sste *sste;
unsigned long flags;
spin_lock_irqsave(&ctx->sste_lock, flags);
sste = find_free_sste(ctx, slb);
if (!sste)
goto out_unlock;
pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
sste - ctx->sstp, slb->vsid, slb->esid);
trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
sste->vsid_data = cpu_to_be64(slb->vsid);
sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
spin_unlock_irqrestore(&ctx->sste_lock, flags);
}
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
u64 ea)
{
struct copro_slb slb = {0,0};
int rc;
if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
cxl_load_segment(ctx, &slb);
}
return rc;
}
static void cxl_ack_ae(struct cxl_context *ctx)
{
unsigned long flags;
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
spin_lock_irqsave(&ctx->lock, flags);
ctx->pending_fault = true;
ctx->fault_addr = ctx->dar;
ctx->fault_dsisr = ctx->dsisr;
spin_unlock_irqrestore(&ctx->lock, flags);
wake_up_all(&ctx->wq);
}
static int cxl_handle_segment_miss(struct cxl_context *ctx,
struct mm_struct *mm, u64 ea)
{
int rc;
pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
trace_cxl_ste_miss(ctx, ea);
if ((rc = cxl_fault_segment(ctx, mm, ea)))
cxl_ack_ae(ctx);
else {
mb(); /* Order seg table write to TFC MMIO write */
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
return IRQ_HANDLED;
}
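/*
 * Resolve a translation fault on behalf of the AFU: fault the page in
 * through copro_handle_mm_fault() and, on hash MMU, preload the HPTE
 * so the retried access can succeed.
 */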
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
vm_fault_t flt = 0;
int result;
unsigned long access, flags, inv_flags = 0;
/*
* Add the fault handling cpu to task mm cpumask so that we
* can do a safe lockless page table walk when inserting the
	 * hash page table entry. This function gets called with a
* valid mm for user space addresses. Hence using the if (mm)
* check is sufficient here.
*/
if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
/*
* We need to make sure we walk the table only after
* we update the cpumask. The other side of the barrier
* is explained in serialize_against_pte_lookup()
*/
smp_mb();
}
if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
pr_devel("copro_handle_mm_fault failed: %#x\n", result);
return result;
}
if (!radix_enabled()) {
/*
* update_mmu_cache() will not have loaded the hash since current->trap
* is not a 0x400 or 0x300, so just call hash_page_mm() here.
*/
access = _PAGE_PRESENT | _PAGE_READ;
if (dsisr & CXL_PSL_DSISR_An_S)
access |= _PAGE_WRITE;
if (!mm && (get_region_id(dar) != USER_REGION_ID))
access |= _PAGE_PRIVILEGED;
if (dsisr & DSISR_NOHPTE)
inv_flags |= HPTE_NOHPTE_UPDATE;
local_irq_save(flags);
hash_page_mm(mm, dar, access, 0x300, inv_flags);
local_irq_restore(flags);
}
return 0;
}
static void cxl_handle_page_fault(struct cxl_context *ctx,
struct mm_struct *mm,
u64 dsisr, u64 dar)
{
trace_cxl_pte_miss(ctx, dsisr, dar);
if (cxl_handle_mm_fault(mm, dsisr, dar)) {
cxl_ack_ae(ctx);
} else {
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
}
/*
* Returns the mm_struct corresponding to the context ctx.
* mm_users == 0, the context may be in the process of being closed.
*/
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
if (ctx->mm == NULL)
return NULL;
if (!mmget_not_zero(ctx->mm))
return NULL;
return ctx->mm;
}
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
return true;
return false;
}
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
return true;
if (cxl_is_power9())
return true;
return false;
}
void cxl_handle_fault(struct work_struct *fault_work)
{
struct cxl_context *ctx =
container_of(fault_work, struct cxl_context, fault_work);
u64 dsisr = ctx->dsisr;
u64 dar = ctx->dar;
struct mm_struct *mm = NULL;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
/* Most likely explanation is harmless - a dedicated
* process has detached and these were cleared by the
* PSL purge, but warn about it just in case
*/
dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
return;
}
}
/* Early return if the context is being / has been detached */
if (ctx->status == CLOSED) {
cxl_ack_ae(ctx);
return;
}
pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
if (!ctx->kernel) {
mm = get_mem_context(ctx);
if (mm == NULL) {
pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
__func__, ctx->pe, pid_nr(ctx->pid));
cxl_ack_ae(ctx);
return;
} else {
pr_devel("Handling page fault for pe=%d pid=%i\n",
ctx->pe, pid_nr(ctx->pid));
}
}
if (cxl_is_segment_miss(ctx, dsisr))
cxl_handle_segment_miss(ctx, mm, dar);
else if (cxl_is_page_fault(ctx, dsisr))
cxl_handle_page_fault(ctx, mm, dsisr, dar);
else
WARN(1, "cxl_handle_fault has nothing to handle\n");
if (mm)
mmput(mm);
}
static u64 next_segment(u64 ea, u64 vsid)
{
if (vsid & SLB_VSID_B_1T)
ea |= (1ULL << 40) - 1;
else
ea |= (1ULL << 28) - 1;
return ea + 1;
}
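/*
 * Walk all VMAs of the mm and load a segment table entry for each
 * segment they span, so the AFU does not fault on first touch.
 */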
static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
u64 ea, last_esid = 0;
struct copro_slb slb;
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int rc;
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
rc = copro_calculate_slb(mm, ea, &slb);
if (rc)
continue;
if (last_esid == slb.esid)
continue;
cxl_load_segment(ctx, &slb);
last_esid = slb.esid;
}
}
mmap_read_unlock(mm);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
struct mm_struct *mm = get_mem_context(ctx);
if (mm == NULL) {
pr_devel("cxl_prefault unable to get mm %i\n",
pid_nr(ctx->pid));
return;
}
switch (ctx->afu->prefault_mode) {
case CXL_PREFAULT_WED:
cxl_fault_segment(ctx, mm, wed);
break;
case CXL_PREFAULT_ALL:
cxl_prefault_vma(ctx, mm);
break;
default:
break;
}
mmput(mm);
}
| linux-master | drivers/misc/cxl/fault.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) partition support.
*
* This is the part of XPC that detects the presence/absence of
* other partitions. It provides a heartbeat and monitors the
* heartbeats of other partitions.
*
*/
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include "xpc.h"
#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static unsigned long *xpc_part_nasids;
unsigned long *xpc_mach_nasids;
static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */
int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */
struct xpc_partition *xpc_partitions;
/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
*base = kmalloc(size, flags);
if (*base == NULL)
return NULL;
if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base;
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kmalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL)
return NULL;
return (void *)L1_CACHE_ALIGN((u64)*base);
}
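/*
 * Illustrative usage sketch (hypothetical caller, not built): the aligned
 * pointer returned above is used for data access, while the cookie written
 * to *base is what must eventually be passed to kfree().
 */
#if 0
static void example_cacheline_buffer(size_t len)
{
	void *base;
	void *buf = xpc_kmalloc_cacheline_aligned(len, GFP_KERNEL, &base);

	if (buf == NULL)
		return;
	memset(buf, 0, len);	/* use the aligned pointer for data */
	kfree(base);		/* but free the original allocation */
}
#endif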
/*
* Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error.
*/
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
enum xp_retval ret;
u64 cookie = 0;
unsigned long rp_pa = nasid; /* seed with nasid */
size_t len = 0;
size_t buf_len = 0;
void *buf = NULL;
void *buf_base = NULL;
enum xp_retval (*get_partition_rsvd_page_pa)
(void *, u64 *, unsigned long *, size_t *) =
xpc_arch_ops.get_partition_rsvd_page_pa;
while (1) {
/* !!! rp_pa will need to be _gpa on UV.
* ??? So do we save it into the architecture specific parts
* ??? of the xpc_partition structure? Do we rename this
* ??? function or have two versions? Rename rp_pa for UV to
* ??? rp_gpa?
*/
ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
"address=0x%016lx, len=0x%016lx\n", ret,
(unsigned long)cookie, rp_pa, len);
if (ret != xpNeedMoreInfo)
break;
if (len > buf_len) {
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len);
ret = xpNoMemory;
break;
}
}
ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
}
}
kfree(buf_base);
if (ret != xpSuccess)
rp_pa = 0;
dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
return rp_pa;
}
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
* communications.
*/
int
xpc_setup_rsvd_page(void)
{
int ret;
struct xpc_rsvd_page *rp;
unsigned long rp_pa;
unsigned long new_ts_jiffies;
/* get the local reserved page's address */
preempt_disable();
rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
preempt_enable();
if (rp_pa == 0) {
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
rp->SAL_partid &= 0xff;
}
BUG_ON(rp->SAL_partid != xp_partition_id);
if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
dev_err(xpc_part, "the reserved page's partid of %d is outside "
"supported range (< 0 || >= %d)\n", rp->SAL_partid,
xp_max_npartitions);
return -EINVAL;
}
rp->version = XPC_RP_VERSION;
rp->max_npartitions = xp_max_npartitions;
/* establish the actual sizes of the nasid masks */
if (rp->SAL_version == 1) {
/* SAL_version 1 didn't set the nasids_size field */
rp->SAL_nasids_size = 128;
}
xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
BITS_PER_BYTE);
/* setup the pointers to the various items in the reserved page */
xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
ret = xpc_arch_ops.setup_rsvd_page(rp);
if (ret != 0)
return ret;
/*
* Set timestamp of when reserved page was setup by XPC.
* This signifies to the remote partition that our reserved
* page is initialized.
*/
new_ts_jiffies = jiffies;
if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
new_ts_jiffies++;
rp->ts_jiffies = new_ts_jiffies;
xpc_rsvd_page = rp;
return 0;
}
void
xpc_teardown_rsvd_page(void)
{
/* a zero timestamp indicates our rsvd page is not initialized */
xpc_rsvd_page->ts_jiffies = 0;
}
/*
* Get a copy of a portion of the remote partition's rsvd page.
*
* remote_rp points to a buffer that is cacheline aligned for BTE copies and
* is large enough to contain a copy of their reserved page header and
* part_nasids mask.
*/
enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{
int l;
enum xp_retval ret;
/* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
if (*remote_rp_pa == 0)
return xpNoRsvdPageAddr;
/* pull over the reserved page header and part_nasids mask */
ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
if (ret != xpSuccess)
return ret;
if (discovered_nasids != NULL) {
unsigned long *remote_part_nasids =
XPC_RP_PART_NASIDS(remote_rp);
for (l = 0; l < xpc_nasid_mask_nlongs; l++)
discovered_nasids[l] |= remote_part_nasids[l];
}
/* zero timestamp indicates the reserved page has not been setup */
if (remote_rp->ts_jiffies == 0)
return xpRsvdPageNotSet;
if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpBadVersion;
}
/* check that both remote and local partids are valid for each side */
if (remote_rp->SAL_partid < 0 ||
remote_rp->SAL_partid >= xp_max_npartitions ||
remote_rp->max_npartitions <= xp_partition_id) {
return xpInvalidPartid;
}
if (remote_rp->SAL_partid == xp_partition_id)
return xpLocalPartid;
return xpSuccess;
}
/*
* See if the other side has responded to a partition deactivate request
* from us. Though we requested the remote partition to deactivate with regard
* to us, we really only need to wait for the other side to disengage from us.
*/
static int __xpc_partition_disengaged(struct xpc_partition *part,
bool from_timer)
{
short partid = XPC_PARTID(part);
int disengaged;
disengaged = !xpc_arch_ops.partition_engaged(partid);
if (part->disengage_timeout) {
if (!disengaged) {
if (time_is_after_jiffies(part->disengage_timeout)) {
/* timelimit hasn't been reached yet */
return 0;
}
/*
* Other side hasn't responded to our deactivate
* request in a timely fashion, so assume it's dead.
*/
dev_info(xpc_part, "deactivate request to remote "
"partition %d timed out\n", partid);
xpc_disengage_timedout = 1;
xpc_arch_ops.assume_partition_disengaged(partid);
disengaged = 1;
}
part->disengage_timeout = 0;
/* Cancel the timer function if not called from it */
if (!from_timer)
del_timer_sync(&part->disengage_timer);
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
part->act_state != XPC_P_AS_INACTIVE);
if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
xpc_arch_ops.cancel_partition_deactivation_request(part);
}
return disengaged;
}
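/*
 * Wrappers around __xpc_partition_disengaged() for callers outside of and
 * within the disengage timer function respectively.
 */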
int xpc_partition_disengaged(struct xpc_partition *part)
{
return __xpc_partition_disengaged(part, false);
}
int xpc_partition_disengaged_from_timer(struct xpc_partition *part)
{
return __xpc_partition_disengaged(part, true);
}
/*
* Mark specified partition as active.
*/
enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
unsigned long irq_flags;
enum xp_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_AS_ACTIVATING) {
part->act_state = XPC_P_AS_ACTIVE;
ret = xpSuccess;
} else {
DBUG_ON(part->reason == xpSuccess);
ret = part->reason;
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return ret;
}
/*
* Start the process of deactivating the specified partition.
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
enum xp_retval reason)
{
unsigned long irq_flags;
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_AS_INACTIVE) {
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
xpc_arch_ops.request_partition_reactivation(part);
}
return;
}
if (part->act_state == XPC_P_AS_DEACTIVATING) {
if ((part->reason == xpUnloading && reason != xpUnloading) ||
reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return;
}
part->act_state = XPC_P_AS_DEACTIVATING;
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
/* ask remote partition to deactivate with regard to us */
xpc_arch_ops.request_partition_deactivation(part);
/* set a timelimit on the disengage phase of the deactivation request */
part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
part->disengage_timer.expires = part->disengage_timeout;
add_timer(&part->disengage_timer);
dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
XPC_PARTID(part), reason);
xpc_partition_going_down(part, reason);
}
/*
* Mark specified partition as inactive.
*/
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
unsigned long irq_flags;
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_AS_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
}
/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
* mask contains a bit for each even nasid in the entire machine.
*
* Using those two bit arrays, we can determine which nasids are
* known in the machine. Each should also have a reserved page
* initialized if they are available for partitioning.
*/
void
xpc_discovery(void)
{
void *remote_rp_base;
struct xpc_rsvd_page *remote_rp;
unsigned long remote_rp_pa;
int region;
int region_size;
int max_regions;
int nasid;
unsigned long *discovered_nasids;
enum xp_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xpc_nasid_mask_nbytes,
GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL)
return;
discovered_nasids = kcalloc(xpc_nasid_mask_nlongs, sizeof(long),
GFP_KERNEL);
if (discovered_nasids == NULL) {
kfree(remote_rp_base);
return;
}
/*
* The term 'region' in this context refers to the minimum number of
* nodes that can comprise an access protection grouping. The access
* protection is in regards to memory, IOI and IPI.
*/
region_size = xp_region_size;
if (is_uv_system())
max_regions = 256;
else {
max_regions = 64;
switch (region_size) {
case 128:
max_regions *= 2;
fallthrough;
case 64:
max_regions *= 2;
fallthrough;
case 32:
max_regions *= 2;
region_size = 16;
}
}
for (region = 0; region < max_regions; region++) {
if (xpc_exiting)
break;
dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * region_size * 2);
nasid < ((region + 1) * region_size * 2); nasid += 2) {
if (xpc_exiting)
break;
dev_dbg(xpc_part, "checking nasid %d\n", nasid);
if (test_bit(nasid / 2, xpc_part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping "
"region\n", nasid);
break;
}
if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
dev_dbg(xpc_part, "PROM indicates Nasid %d was "
"not on Numa-Link network at reset\n",
nasid);
continue;
}
if (test_bit(nasid / 2, discovered_nasids)) {
dev_dbg(xpc_part, "Nasid %d is part of a "
"partition which was previously "
"discovered\n", nasid);
continue;
}
/* pull over the rsvd page header & part_nasids mask */
ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rp_pa);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
ret);
if (ret == xpLocalPartid)
break;
continue;
}
xpc_arch_ops.request_partition_activation(remote_rp,
remote_rp_pa, nasid);
}
}
kfree(discovered_nasids);
kfree(remote_rp_base);
}
/*
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
*/
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
struct xpc_partition *part;
unsigned long part_nasid_pa;
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0)
return xpPartitionDown;
memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
xpc_nasid_mask_nbytes);
}
| linux-master | drivers/misc/sgi-xp/xpc_partition.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition (XP) uv-based functions.
*
* Architecture specific implementation of common functions.
*
*/
#include <linux/device.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#elif defined CONFIG_IA64_SGI_UV
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/grukservices.h"
#include "xp.h"
/*
* Convert a virtual memory address to a physical memory address.
*/
static unsigned long
xp_pa_uv(void *addr)
{
return uv_gpa(addr);
}
/*
* Convert a global physical to socket physical address.
*/
static unsigned long
xp_socket_pa_uv(unsigned long gpa)
{
return uv_gpa_to_soc_phys_ram(gpa);
}
static enum xp_retval
xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
BUG_ON(len != 8);
ret = gru_read_gpa(dst_va, src_gpa);
if (ret == 0)
return xpSuccess;
dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
"len=%ld\n", dst_gpa, src_gpa, len);
return xpGruCopyError;
}
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
if (uv_gpa_in_mmr_space(src_gpa))
return xp_remote_mmr_read(dst_gpa, src_gpa, len);
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
"len=%ld\n", dst_gpa, src_gpa, len);
return xpGruCopyError;
}
static int
xp_cpu_to_nasid_uv(int cpuid)
{
/* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */
return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
}
static enum xp_retval
xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
{
int ret;
#if defined CONFIG_X86_64
ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
if (ret != BIOS_STATUS_SUCCESS) {
dev_err(xp, "uv_bios_change_memprotect(,, "
"UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
return xpBiosError;
}
#elif defined CONFIG_IA64_SGI_UV
u64 nasid_array;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
&nasid_array);
if (ret != 0) {
dev_err(xp, "sn_change_memprotect(,, "
"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
return xpSalError;
}
#else
#error not a supported configuration
#endif
return xpSuccess;
}
static enum xp_retval
xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
{
int ret;
#if defined CONFIG_X86_64
ret = uv_bios_change_memprotect(phys_addr, size,
UV_MEMPROT_RESTRICT_ACCESS);
if (ret != BIOS_STATUS_SUCCESS) {
dev_err(xp, "uv_bios_change_memprotect(,, "
"UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
return xpBiosError;
}
#elif defined CONFIG_IA64_SGI_UV
u64 nasid_array;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
&nasid_array);
if (ret != 0) {
dev_err(xp, "sn_change_memprotect(,, "
"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
return xpSalError;
}
#else
#error not a supported configuration
#endif
return xpSuccess;
}
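/*
 * Install the uv-specific implementations of the xp interfaces and set the
 * maximum number of partitions supported on uv systems.
 */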
enum xp_retval
xp_init_uv(void)
{
WARN_ON(!is_uv_system());
if (!is_uv_system())
return xpUnsupported;
xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
#ifdef CONFIG_X86
xp_partition_id = sn_partition_id;
xp_region_size = sn_region_size;
#endif
xp_pa = xp_pa_uv;
xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;
xp_restrict_memprotect = xp_restrict_memprotect_uv;
return xpSuccess;
}
void
xp_exit_uv(void)
{
WARN_ON(!is_uv_system());
}
| linux-master | drivers/misc/sgi-xp/xp_uv.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) channel support.
*
* This is the part of XPC that manages the channels and
* sends/receives messages across them to/from other partitions.
*
*/
#include <linux/device.h>
#include "xpc.h"
/*
* Process a connect message from a remote partition.
*
* Note: xpc_process_connect() is expecting to be called with the
* spin_lock_irqsave held and will leave it locked upon return.
*/
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
/* nothing more to do for now */
return;
}
DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
if (!(ch->flags & XPC_C_SETUP)) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
ret = xpc_arch_ops.setup_msg_structures(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
else
ch->flags |= XPC_C_SETUP;
if (ch->flags & XPC_C_DISCONNECTING)
return;
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
ch->flags |= XPC_C_OPENREPLY;
xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENREPLY))
return;
if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENCOMPLETE))
return;
dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid);
ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
}
/*
* spin_lock_irqsave() is expected to be held on entry.
*/
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
/* make sure all activity has settled down first */
if (atomic_read(&ch->kthreads_assigned) > 0 ||
atomic_read(&ch->references) > 0) {
return;
}
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
if (xpc_arch_ops.partition_engaged(ch->partid))
return;
} else {
/* as long as the other side is up do the full protocol */
if (!(ch->flags & XPC_C_RCLOSEREQUEST))
return;
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_RCLOSEREPLY))
return;
}
/* wake those waiting for notify completion */
if (atomic_read(&ch->n_to_notify) > 0) {
/* we do callout while holding ch->lock, callout can't block */
xpc_arch_ops.notify_senders_of_disconnect(ch);
}
/* both sides are disconnected now */
if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
xpc_disconnect_callout(ch, xpDisconnected);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
/* it's now safe to free the channel's message queues */
xpc_arch_ops.teardown_msg_structures(ch);
ch->func = NULL;
ch->key = NULL;
ch->entry_size = 0;
ch->local_nentries = 0;
ch->remote_nentries = 0;
ch->kthreads_assigned_limit = 0;
ch->kthreads_idle_limit = 0;
/*
* Mark the channel disconnected and clear all other flags, including
* XPC_C_SETUP (because of call to
* xpc_arch_ops.teardown_msg_structures()) but not including
* XPC_C_WDISCONNECT (if it was set).
*/
ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
atomic_dec(&part->nchannels_active);
if (channel_was_connected) {
dev_info(xpc_chan, "channel %d to partition %d disconnected, "
"reason=%d\n", ch->number, ch->partid, ch->reason);
}
if (ch->flags & XPC_C_WDISCONNECT) {
/* we won't lose the CPU since we're holding ch->lock */
complete(&ch->wdisconnect_wait);
} else if (ch->delayed_chctl_flags) {
if (part->act_state != XPC_P_AS_DEACTIVATING) {
/* time to take action on any delayed chctl flags */
spin_lock(&part->chctl_lock);
part->chctl.flags[ch->number] |=
ch->delayed_chctl_flags;
spin_unlock(&part->chctl_lock);
}
ch->delayed_chctl_flags = 0;
}
}
/*
* Process a change in the channel's remote connection state.
*/
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
u8 chctl_flags)
{
unsigned long irq_flags;
struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
enum xp_retval reason;
enum xp_retval ret;
int create_kthread = 0;
spin_lock_irqsave(&ch->lock, irq_flags);
again:
if ((ch->flags & XPC_C_DISCONNECTED) &&
(ch->flags & XPC_C_WDISCONNECT)) {
/*
* Delay processing chctl flags until thread waiting disconnect
* has had a chance to see that the channel is disconnected.
*/
ch->delayed_chctl_flags |= chctl_flags;
goto out;
}
if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
"from partid=%d, channel=%d\n", args->reason,
ch->partid, ch->number);
/*
* If RCLOSEREQUEST is set, we're probably waiting for
* RCLOSEREPLY. We should find it and a ROPENREQUEST packed
* with this RCLOSEREQUEST in the chctl_flags.
*/
if (ch->flags & XPC_C_RCLOSEREQUEST) {
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
ch->flags |= XPC_C_RCLOSEREPLY;
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
goto again;
}
if (ch->flags & XPC_C_DISCONNECTED) {
if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
if (part->chctl.flags[ch_number] &
XPC_CHCTL_OPENREQUEST) {
DBUG_ON(ch->delayed_chctl_flags != 0);
spin_lock(&part->chctl_lock);
part->chctl.flags[ch_number] |=
XPC_CHCTL_CLOSEREQUEST;
spin_unlock(&part->chctl_lock);
}
goto out;
}
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
}
chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
XPC_CHCTL_OPENCOMPLETE);
/*
* The meaningful CLOSEREQUEST connection state fields are:
* reason = reason connection is to be closed
*/
ch->flags |= XPC_C_RCLOSEREQUEST;
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
if (reason <= xpSuccess || reason > xpUnknownReason)
reason = xpUnknownReason;
else if (reason == xpUnregistering)
reason = xpOtherUnregistering;
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
goto out;
}
xpc_process_disconnect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {
dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
"%d, channel=%d\n", ch->partid, ch->number);
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
if (part->chctl.flags[ch_number] &
XPC_CHCTL_CLOSEREQUEST) {
DBUG_ON(ch->delayed_chctl_flags != 0);
spin_lock(&part->chctl_lock);
part->chctl.flags[ch_number] |=
XPC_CHCTL_CLOSEREPLY;
spin_unlock(&part->chctl_lock);
}
goto out;
}
ch->flags |= XPC_C_RCLOSEREPLY;
if (ch->flags & XPC_C_CLOSEREPLY) {
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
}
}
if (chctl_flags & XPC_CHCTL_OPENREQUEST) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
"local_nentries=%d) received from partid=%d, "
"channel=%d\n", args->entry_size, args->local_nentries,
ch->partid, ch->number);
if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
goto out;
}
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
goto out;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST)));
DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_OPENREPLY | XPC_C_CONNECTED));
/*
* The meaningful OPENREQUEST connection state fields are:
* entry_size = size of channel's messages in bytes
* local_nentries = remote partition's local_nentries
*/
if (args->entry_size == 0 || args->local_nentries == 0) {
/* assume OPENREQUEST was delayed by mistake */
goto out;
}
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
ch->remote_nentries = args->local_nentries;
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->entry_size != ch->entry_size) {
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
goto out;
}
} else {
ch->entry_size = args->entry_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
}
xpc_process_connect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_OPENREPLY) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
"0x%lx, local_nentries=%d, remote_nentries=%d) "
"received from partid=%d, channel=%d\n",
args->local_msgqueue_pa, args->local_nentries,
args->remote_nentries, ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
goto out;
if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
DBUG_ON(ch->flags & XPC_C_CONNECTED);
/*
* The meaningful OPENREPLY connection state fields are:
* local_msgqueue_pa = physical address of remote
* partition's local_msgqueue
* local_nentries = remote partition's local_nentries
* remote_nentries = remote partition's remote_nentries
*/
DBUG_ON(args->local_msgqueue_pa == 0);
DBUG_ON(args->local_nentries == 0);
DBUG_ON(args->remote_nentries == 0);
ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
args->local_msgqueue_pa);
if (ret != xpSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
goto out;
}
ch->flags |= XPC_C_ROPENREPLY;
if (args->local_nentries < ch->remote_nentries) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
"remote_nentries=%d, old remote_nentries=%d, "
"partid=%d, channel=%d\n",
args->local_nentries, ch->remote_nentries,
ch->partid, ch->number);
ch->remote_nentries = args->local_nentries;
}
if (args->remote_nentries < ch->local_nentries) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
"local_nentries=%d, old local_nentries=%d, "
"partid=%d, channel=%d\n",
args->remote_nentries, ch->local_nentries,
ch->partid, ch->number);
ch->local_nentries = args->remote_nentries;
}
xpc_process_connect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
"partid=%d, channel=%d\n", ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
goto out;
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_OPENREPLY)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
ch->flags |= XPC_C_ROPENCOMPLETE;
xpc_process_connect(ch, &irq_flags);
create_kthread = 1;
}
out:
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (create_kthread)
xpc_create_kthreads(ch, 1, 0);
}
/*
* Attempt to establish a channel connection to a remote partition.
*/
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
if (mutex_trylock(®istration->mutex) == 0)
return xpRetry;
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(®istration->mutex);
return xpUnregistered;
}
spin_lock_irqsave(&ch->lock, irq_flags);
DBUG_ON(ch->flags & XPC_C_CONNECTED);
DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
if (ch->flags & XPC_C_DISCONNECTING) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
mutex_unlock(®istration->mutex);
return ch->reason;
}
/* add info from the channel connect registration to the channel */
ch->kthreads_assigned_limit = registration->assigned_limit;
ch->kthreads_idle_limit = registration->idle_limit;
DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
ch->func = registration->func;
DBUG_ON(registration->func == NULL);
ch->key = registration->key;
ch->local_nentries = registration->nentries;
if (ch->flags & XPC_C_ROPENREQUEST) {
if (registration->entry_size != ch->entry_size) {
/* the local and remote sides aren't the same */
/*
* Because XPC_DISCONNECT_CHANNEL() can block we're
* forced to up the registration sema before we unlock
* the channel lock. But that's okay here because we're
* done with the part that required the registration
* sema. XPC_DISCONNECT_CHANNEL() requires that the
* channel lock be locked and will unlock and relock
* the channel lock as needed.
*/
mutex_unlock(®istration->mutex);
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpUnequalMsgSizes;
}
} else {
ch->entry_size = registration->entry_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
}
mutex_unlock(®istration->mutex);
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
xpc_process_connect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
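/*
 * Act on the channel control flags most recently sent to us by the specified
 * partition: process any open/close requests, connect or disconnect channels
 * as needed, and hand message-related flags off to the arch layer.
 */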
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
unsigned long irq_flags;
union xpc_channel_ctl_flags chctl;
struct xpc_channel *ch;
int ch_number;
u32 ch_flags;
chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
/*
* Initiate channel connections for registered channels.
*
* For each connected channel that has pending messages activate idle
* kthreads and/or create new kthreads as needed.
*/
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
/*
* Process any open or close related chctl flags, and then deal
* with connecting or disconnecting the channel as required.
*/
if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
xpc_process_openclose_chctl_flags(part, ch_number,
chctl.flags[ch_number]);
}
ch_flags = ch->flags; /* need an atomic snapshot of flags */
if (ch_flags & XPC_C_DISCONNECTING) {
spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_disconnect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
continue;
}
if (part->act_state == XPC_P_AS_DEACTIVATING)
continue;
if (!(ch_flags & XPC_C_CONNECTED)) {
if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP);
(void)xpc_connect_channel(ch);
}
continue;
}
/*
* Process any message related chctl flags, this may involve
* the activation of kthreads to deliver any pending messages
* sent from the other partition.
*/
if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
}
}
/*
* XPC's heartbeat code calls this function to inform XPC that a partition is
* going down. XPC responds by tearing down the XPartition Communication
* infrastructure used for the just downed partition.
*
* XPC's heartbeat code will never call this function and xpc_partition_up()
* at the same time. Nor will it ever make multiple calls to either function
* at the same time.
*/
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
unsigned long irq_flags;
int ch_number;
struct xpc_channel *ch;
dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
XPC_PARTID(part), reason);
if (!xpc_part_ref(part)) {
/* infrastructure for this partition isn't currently set up */
return;
}
/* disconnect channels associated with the partition going down */
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
}
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
/*
* Called by XP at the time of channel connection registration to cause
* XPC to establish connections to all currently active partitions.
*/
void
xpc_initiate_connect(int ch_number)
{
short partid;
struct xpc_partition *part;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
/*
* Initiate the establishment of a connection on the
* newly registered channel to the remote partition.
*/
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
}
}
void
xpc_connected_callout(struct xpc_channel *ch)
{
/* let the registerer know that a connection has been established */
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
ch->func(xpConnected, ch->partid, ch->number,
(void *)(u64)ch->local_nentries, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
}
/*
* Called by XP at the time of channel connection unregistration to cause
* XPC to teardown all current connections for the specified channel.
*
 * Before returning, xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
* assured that they will not be receiving any more callouts from XPC to the
* function they registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_initiate_disconnect(int ch_number)
{
unsigned long irq_flags;
short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
/* initiate the channel disconnect for every active partition */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
if (!(ch->flags & XPC_C_DISCONNECTED)) {
ch->flags |= XPC_C_WDISCONNECT;
XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
&irq_flags);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
}
}
xpc_disconnect_wait(ch_number);
}
/*
* To disconnect a channel, and reflect it back to all who may be waiting.
*
* An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
* xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
* xpc_disconnect_wait().
*
* THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
enum xp_retval reason, unsigned long *irq_flags)
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
lockdep_assert_held(&ch->lock);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
reason, line, ch->partid, ch->number);
XPC_SET_REASON(ch, reason, line);
ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
/* some of these may not have been set */
ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED);
xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);
if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED;
spin_unlock_irqrestore(&ch->lock, *irq_flags);
/* wake all idle kthreads so they can exit */
if (atomic_read(&ch->kthreads_idle) > 0) {
wake_up_all(&ch->idle_wq);
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
/* start a kthread that will do the xpDisconnecting callout */
xpc_create_kthreads(ch, 1, 1);
}
/* wake those waiting to allocate an entry from the local msg queue */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
/*
* Let the channel's registerer know that the channel is being
* disconnected. We don't want to do this if the registerer was never
* informed of a connection being made.
*/
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
"channel=%d\n", reason, ch->partid, ch->number);
ch->func(reason, ch->partid, ch->number, NULL, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
"channel=%d\n", reason, ch->partid, ch->number);
}
}
/*
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
*/
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xp_retval ret;
DEFINE_WAIT(wait);
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
return ch->reason;
}
atomic_inc(&ch->n_on_msg_allocate_wq);
prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
ret = schedule_timeout(1);
finish_wait(&ch->msg_allocate_wq, &wait);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
DBUG_ON(ch->reason == xpInterrupted);
} else if (ret == 0) {
ret = xpTimeout;
} else {
ret = xpInterrupted;
}
return ret;
}
/*
* Send a message that contains the user's payload on the specified channel
* connected to the specified partition.
*
* NOTE that this routine can sleep waiting for a message entry to become
* available. To not sleep, pass in the XPC_NOWAIT flag.
*
* Once sent, this routine will not wait for the message to be received, nor
* will notification be given when it does happen.
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* flags - see xp.h for valid flags.
* payload - pointer to the payload which is to be sent.
* payload_size - size of the payload in bytes.
*/
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
u16 payload_size)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xp_retval ret = xpUnknownReason;
dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
partid, ch_number);
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(payload == NULL);
if (xpc_part_ref(part)) {
ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
flags, payload, payload_size, 0, NULL, NULL);
xpc_part_deref(part);
}
return ret;
}
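/*
 * Illustrative only (hypothetical channel number and payload layout): a
 * non-blocking send of a small payload on channel 0 of the given partition.
 */
#if 0
static enum xp_retval example_send(short partid)
{
	struct {
		u32 opcode;
		u32 arg;
	} msg = { .opcode = 1, .arg = 42 };

	return xpc_initiate_send(partid, 0, XPC_NOWAIT, &msg, sizeof(msg));
}
#endif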
/*
* Send a message that contains the user's payload on the specified channel
* connected to the specified partition.
*
* NOTE that this routine can sleep waiting for a message entry to become
* available. To not sleep, pass in the XPC_NOWAIT flag.
*
* This routine will not wait for the message to be sent or received.
*
* Once the remote end of the channel has received the message, the function
* passed as an argument to xpc_initiate_send_notify() will be called. This
* allows the sender to free up or re-use any buffers referenced by the
* message, but does NOT mean the message has been processed at the remote
* end by a receiver.
*
* If this routine returns an error, the caller's function will NOT be called.
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* flags - see xp.h for valid flags.
* payload - pointer to the payload which is to be sent.
* payload_size - size of the payload in bytes.
* func - function to call with asynchronous notification of message
* receipt. THIS FUNCTION MUST BE NON-BLOCKING.
* key - user-defined key to be passed to the function when it's called.
*/
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
u16 payload_size, xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xp_retval ret = xpUnknownReason;
dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
partid, ch_number);
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(payload == NULL);
DBUG_ON(func == NULL);
if (xpc_part_ref(part)) {
ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
flags, payload, payload_size, XPC_N_CALL, func, key);
xpc_part_deref(part);
}
return ret;
}
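/*
 * Illustrative only (hypothetical names; assumes the xpc_notify_func
 * prototype declared in xp.h): a send whose completion is reported through
 * a non-blocking callback.
 */
#if 0
static void example_sent(enum xp_retval reason, short partid, int ch_number,
			 void *key)
{
	/* must not block; the payload buffer may now be reused or freed */
}

static enum xp_retval example_send_notify(short partid, void *payload,
					  u16 size)
{
	return xpc_initiate_send_notify(partid, 0, XPC_NOWAIT, payload, size,
					example_sent, NULL);
}
#endif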
/*
* Deliver a message's payload to its intended recipient.
*/
void
xpc_deliver_payload(struct xpc_channel *ch)
{
void *payload;
payload = xpc_arch_ops.get_deliverable_payload(ch);
if (payload != NULL) {
/*
* This ref is taken to protect the payload itself from being
* freed before the user is finished with it, which the user
* indicates by calling xpc_initiate_received().
*/
xpc_msgqueue_ref(ch);
atomic_inc(&ch->kthreads_active);
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
"partid=%d channel=%d\n", payload, ch->partid,
ch->number);
/* deliver the message to its intended recipient */
ch->func(xpMsgReceived, ch->partid, ch->number, payload,
ch->key);
dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
"partid=%d channel=%d\n", payload, ch->partid,
ch->number);
}
atomic_dec(&ch->kthreads_active);
}
}
/*
* Acknowledge receipt of a delivered message's payload.
*
* This function, although called by users, does not call xpc_part_ref() to
* ensure that the partition infrastructure is in place. It relies on the
* fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # message received on.
* payload - pointer to the payload area allocated via
* xpc_initiate_send() or xpc_initiate_send_notify().
*/
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
xpc_arch_ops.received_payload(ch, payload);
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
xpc_msgqueue_deref(ch);
}
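/*
 * Illustrative only (hypothetical channel function; assumes the
 * xpc_channel_func prototype declared in xp.h): a registerer's callback
 * that consumes a delivered payload and then acknowledges it.
 */
#if 0
static void example_channel_func(enum xp_retval reason, short partid,
				 int ch_number, void *data, void *key)
{
	if (reason == xpMsgReceived) {
		/* ... consume the payload pointed to by data ... */
		xpc_initiate_received(partid, ch_number, data);
	}
}
#endif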
| linux-master | drivers/misc/sgi-xp/xpc_channel.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) support - standard version.
*
* XPC provides a message passing capability that crosses partition
* boundaries. This module is made up of two parts:
*
* partition This part detects the presence/absence of other
* partitions. It provides a heartbeat and monitors
* the heartbeats of other partitions.
*
* channel This part manages the channels and sends/receives
* messages across them to/from other partitions.
*
* There are a couple of additional functions residing in XP, which
* provide an interface to XPC for its users.
*
*
* Caveats:
*
* . Currently on sn2, we have no way to determine which nasid an IRQ
* came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
* followed by an IPI. The amo indicates where data is to be pulled
* from, so after the IPI arrives, the remote partition checks the amo
* word. The IPI can actually arrive before the amo however, so other
* code must periodically check for this case. Also, remote amo
* operations do not reliably time out. Thus we do a remote PIO read
* solely to know whether the remote partition is down and whether we
* should stop sending IPIs to it. This remote PIO read operation is
* set up in a special nofault region so SAL knows to ignore (and
* cleanup) any errors due to the remote amo write, PIO read, and/or
* PIO write operations.
*
* If/when new hardware solves this IPI problem, we should abandon
* the current approach.
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"
#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif
/* define two XPC debug device structures to be used with dev_dbg() et al */
static struct device_driver xpc_dbg_name = {
.name = "xpc"
};
static struct device xpc_part_dbg_subname = {
.init_name = "", /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
static struct device xpc_chan_dbg_subname = {
.init_name = "", /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;
static int xpc_kdebug_ignore;
/* systune related variables for /proc/sys directories */
static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;
static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;
int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit; /* = 0 */
static int xpc_disengage_max_timelimit = 120;
static struct ctl_table xpc_sys_xpc_hb[] = {
{
.procname = "hb_interval",
.data = &xpc_hb_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_hb_min_interval,
.extra2 = &xpc_hb_max_interval},
{
.procname = "hb_check_interval",
.data = &xpc_hb_check_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval},
{}
};
static struct ctl_table xpc_sys_xpc[] = {
{
.procname = "disengage_timelimit",
.data = &xpc_disengage_timelimit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_disengage_min_timelimit,
.extra2 = &xpc_disengage_max_timelimit},
{}
};
static struct ctl_table_header *xpc_sysctl;
static struct ctl_table_header *xpc_sysctl_hb;
/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;
/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);
/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);
/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);
static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
.notifier_call = xpc_system_reboot,
};
static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
.notifier_call = xpc_system_die,
};
struct xpc_arch_operations xpc_arch_ops;
/*
* Timer function to enforce the timelimit on the partition disengage.
*/
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
struct xpc_partition *part = from_timer(part, t, disengage_timer);
DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
xpc_partition_disengaged_from_timer(part);
DBUG_ON(part->disengage_timeout != 0);
DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}
/*
 * Timer to produce the heartbeat. The timer structure's function is
* already set when this is initially called. A tunable is used to
* specify when the next timeout should occur.
*/
static void
xpc_hb_beater(struct timer_list *unused)
{
xpc_arch_ops.increment_heartbeat();
if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
wake_up_interruptible(&xpc_activate_IRQ_wq);
xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
add_timer(&xpc_hb_timer);
}
static void
xpc_start_hb_beater(void)
{
xpc_arch_ops.heartbeat_init();
timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
xpc_hb_beater(NULL);
}
static void
xpc_stop_hb_beater(void)
{
del_timer_sync(&xpc_hb_timer);
xpc_arch_ops.heartbeat_exit();
}
/*
* At periodic intervals, scan through all active partitions and ensure
* their heartbeat is still active. If not, the partition is deactivated.
*/
static void
xpc_check_remote_hb(void)
{
struct xpc_partition *part;
short partid;
enum xp_retval ret;
for (partid = 0; partid < xp_max_npartitions; partid++) {
if (xpc_exiting)
break;
if (partid == xp_partition_id)
continue;
part = &xpc_partitions[partid];
if (part->act_state == XPC_P_AS_INACTIVE ||
part->act_state == XPC_P_AS_DEACTIVATING) {
continue;
}
ret = xpc_arch_ops.get_remote_heartbeat(part);
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(part, ret);
}
}
/*
* This thread is responsible for nearly all of the partition
* activation/deactivation.
*/
static int
xpc_hb_checker(void *ignore)
{
int force_IRQ = 0;
/* this thread was marked active by xpc_hb_init() */
set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
/* set our heartbeating to other partitions into motion */
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
xpc_start_hb_beater();
while (!xpc_exiting) {
dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
"been received\n",
(int)(xpc_hb_check_timeout - jiffies),
xpc_activate_IRQ_rcvd);
/* checking of remote heartbeats is skewed by IRQ handling */
if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
xpc_hb_check_timeout = jiffies +
(xpc_hb_check_interval * HZ);
dev_dbg(xpc_part, "checking remote heartbeats\n");
xpc_check_remote_hb();
}
/* check for outstanding IRQs */
if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
force_IRQ = 0;
dev_dbg(xpc_part, "processing activate IRQs "
"received\n");
xpc_arch_ops.process_activate_IRQ_rcvd();
}
/* wait for IRQ or timeout */
(void)wait_event_interruptible(xpc_activate_IRQ_wq,
(time_is_before_eq_jiffies(
xpc_hb_check_timeout) ||
xpc_activate_IRQ_rcvd > 0 ||
xpc_exiting));
}
xpc_stop_hb_beater();
dev_dbg(xpc_part, "heartbeat checker is exiting\n");
/* mark this thread as having exited */
complete(&xpc_hb_checker_exited);
return 0;
}
/*
* This thread will attempt to discover other partitions to activate
* based on info provided by SAL. This new thread is short lived and
* will exit once discovery is complete.
*/
static int
xpc_initiate_discovery(void *ignore)
{
xpc_discovery();
dev_dbg(xpc_part, "discovery thread is exiting\n");
/* mark this thread as having exited */
complete(&xpc_discovery_exited);
return 0;
}
/*
* The first kthread assigned to a newly activated partition is the one
* created by XPC HB with which it calls xpc_activating(). XPC hangs on to
* that kthread until the partition is brought down, at which time that kthread
* returns back to XPC HB. (The return of that kthread will signify to XPC HB
* that XPC has dismantled all communication infrastructure for the associated
* partition.) This kthread becomes the channel manager for that partition.
*
* Each active partition has a channel manager, who, besides connecting and
* disconnecting channels, will ensure that each of the partition's connected
* channels has the required number of assigned kthreads to get the work done.
*/
static void
xpc_channel_mgr(struct xpc_partition *part)
{
while (part->act_state != XPC_P_AS_DEACTIVATING ||
atomic_read(&part->nchannels_active) > 0 ||
!xpc_partition_disengaged(part)) {
xpc_process_sent_chctl_flags(part);
/*
* Wait until we've been requested to activate kthreads or
* all of the channel's message queues have been torn down or
* a signal is pending.
*
		 * The channel_mgr_requests count is set to 1 after being
		 * awakened. This is done to prevent the channel mgr from
		 * making one pass through the loop for each request, since
		 * it will be servicing all the requests in one pass. The
		 * reason it's set to 1 instead of 0 is so that other
		 * kthreads will know that the channel mgr is running and
		 * won't bother trying to wake it up.
*/
atomic_dec(&part->channel_mgr_requests);
(void)wait_event_interruptible(part->channel_mgr_wq,
(atomic_read(&part->channel_mgr_requests) > 0 ||
part->chctl.all_flags != 0 ||
(part->act_state == XPC_P_AS_DEACTIVATING &&
atomic_read(&part->nchannels_active) == 0 &&
xpc_partition_disengaged(part))));
atomic_set(&part->channel_mgr_requests, 1);
}
}
/*
* Guarantee that the kzalloc'd memory is cacheline aligned.
*/
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
*base = kzalloc(size, flags);
if (*base == NULL)
return NULL;
if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base;
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kzalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL)
return NULL;
return (void *)L1_CACHE_ALIGN((u64)*base);
}
/*
* Setup the channel structures necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
enum xp_retval ret;
int ch_number;
struct xpc_channel *ch;
short partid = XPC_PARTID(part);
/*
* Allocate all of the channel structures as a contiguous chunk of
* memory.
*/
DBUG_ON(part->channels != NULL);
part->channels = kcalloc(XPC_MAX_NCHANNELS,
sizeof(struct xpc_channel),
GFP_KERNEL);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
return xpNoMemory;
}
/* allocate the remote open and close args */
part->remote_openclose_args =
xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
GFP_KERNEL, &part->
remote_openclose_args_base);
if (part->remote_openclose_args == NULL) {
dev_err(xpc_chan, "can't get memory for remote connect args\n");
ret = xpNoMemory;
goto out_1;
}
part->chctl.all_flags = 0;
spin_lock_init(&part->chctl_lock);
atomic_set(&part->channel_mgr_requests, 1);
init_waitqueue_head(&part->channel_mgr_wq);
part->nchannels = XPC_MAX_NCHANNELS;
atomic_set(&part->nchannels_active, 0);
atomic_set(&part->nchannels_engaged, 0);
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
ch->partid = partid;
ch->number = ch_number;
ch->flags = XPC_C_DISCONNECTED;
atomic_set(&ch->kthreads_assigned, 0);
atomic_set(&ch->kthreads_idle, 0);
atomic_set(&ch->kthreads_active, 0);
atomic_set(&ch->references, 0);
atomic_set(&ch->n_to_notify, 0);
spin_lock_init(&ch->lock);
init_completion(&ch->wdisconnect_wait);
atomic_set(&ch->n_on_msg_allocate_wq, 0);
init_waitqueue_head(&ch->msg_allocate_wq);
init_waitqueue_head(&ch->idle_wq);
}
ret = xpc_arch_ops.setup_ch_structures(part);
if (ret != xpSuccess)
goto out_2;
/*
* With the setting of the partition setup_state to XPC_P_SS_SETUP,
* we're declaring that this partition is ready to go.
*/
part->setup_state = XPC_P_SS_SETUP;
return xpSuccess;
/* setup of ch structures failed */
out_2:
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
out_1:
kfree(part->channels);
part->channels = NULL;
return ret;
}
/*
* Teardown the channel structures necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
DBUG_ON(atomic_read(&part->nchannels_active) != 0);
/*
* Make this partition inaccessible to local processes by marking it
* as no longer setup. Then wait before proceeding with the teardown
* until all existing references cease.
*/
DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
part->setup_state = XPC_P_SS_WTEARDOWN;
wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
/* now we can begin tearing down the infrastructure */
xpc_arch_ops.teardown_ch_structures(part);
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
kfree(part->channels);
part->channels = NULL;
part->setup_state = XPC_P_SS_TORNDOWN;
}
/*
* When XPC HB determines that a partition has come up, it will create a new
* kthread and that kthread will call this function to attempt to set up the
* basic infrastructure used for Cross Partition Communication with the newly
* upped partition.
*
 * The kthread that was created by XPC HB and which set up the XPC
* infrastructure will remain assigned to the partition becoming the channel
* manager for that partition until the partition is deactivating, at which
* time the kthread will teardown the XPC infrastructure and then exit.
*/
static int
xpc_activating(void *__partid)
{
short partid = (u64)__partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_AS_DEACTIVATING) {
part->act_state = XPC_P_AS_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
/* indicate the thread is activating */
DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
part->act_state = XPC_P_AS_ACTIVATING;
XPC_SET_REASON(part, 0, 0);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
dev_dbg(xpc_part, "activating partition %d\n", partid);
xpc_arch_ops.allow_hb(partid);
if (xpc_setup_ch_structures(part) == xpSuccess) {
(void)xpc_part_ref(part); /* this will always succeed */
if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
xpc_mark_partition_active(part);
xpc_channel_mgr(part);
/* won't return until partition is deactivating */
}
xpc_part_deref(part);
xpc_teardown_ch_structures(part);
}
xpc_arch_ops.disallow_hb(partid);
xpc_mark_partition_inactive(part);
if (part->reason == xpReactivating) {
/* interrupting ourselves results in activating partition */
xpc_arch_ops.request_partition_reactivation(part);
}
return 0;
}
void
xpc_activate_partition(struct xpc_partition *part)
{
short partid = XPC_PARTID(part);
unsigned long irq_flags;
struct task_struct *kthread;
spin_lock_irqsave(&part->act_lock, irq_flags);
DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
part->act_state = XPC_P_AS_ACTIVATION_REQ;
XPC_SET_REASON(part, xpCloneKThread, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
partid);
if (IS_ERR(kthread)) {
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
}
}
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
int idle = atomic_read(&ch->kthreads_idle);
int assigned = atomic_read(&ch->kthreads_assigned);
int wakeup;
DBUG_ON(needed <= 0);
if (idle > 0) {
wakeup = (needed > idle) ? idle : needed;
needed -= wakeup;
dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
"channel=%d\n", wakeup, ch->partid, ch->number);
/* only wakeup the requested number of kthreads */
wake_up_nr(&ch->idle_wq, wakeup);
}
if (needed <= 0)
return;
if (needed + assigned > ch->kthreads_assigned_limit) {
needed = ch->kthreads_assigned_limit - assigned;
if (needed <= 0)
return;
}
dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
needed, ch->partid, ch->number);
xpc_create_kthreads(ch, needed, 0);
}
/*
* This function is where XPC's kthreads wait for messages to deliver.
*/
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
int (*n_of_deliverable_payloads) (struct xpc_channel *) =
xpc_arch_ops.n_of_deliverable_payloads;
do {
/* deliver messages to their intended recipients */
while (n_of_deliverable_payloads(ch) > 0 &&
!(ch->flags & XPC_C_DISCONNECTING)) {
xpc_deliver_payload(ch);
}
if (atomic_inc_return(&ch->kthreads_idle) >
ch->kthreads_idle_limit) {
/* too many idle kthreads on this channel */
atomic_dec(&ch->kthreads_idle);
break;
}
dev_dbg(xpc_chan, "idle kthread calling "
"wait_event_interruptible_exclusive()\n");
(void)wait_event_interruptible_exclusive(ch->idle_wq,
(n_of_deliverable_payloads(ch) > 0 ||
(ch->flags & XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle);
} while (!(ch->flags & XPC_C_DISCONNECTING));
}
static int
xpc_kthread_start(void *args)
{
short partid = XPC_UNPACK_ARG1(args);
u16 ch_number = XPC_UNPACK_ARG2(args);
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
int n_needed;
unsigned long irq_flags;
int (*n_of_deliverable_payloads) (struct xpc_channel *) =
xpc_arch_ops.n_of_deliverable_payloads;
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
partid, ch_number);
ch = &part->channels[ch_number];
if (!(ch->flags & XPC_C_DISCONNECTING)) {
/* let registerer know that connection has been established */
spin_lock_irqsave(&ch->lock, irq_flags);
if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
ch->flags |= XPC_C_CONNECTEDCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_connected_callout(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
spin_unlock_irqrestore(&ch->lock, irq_flags);
/*
* It is possible that while the callout was being
* made that the remote partition sent some messages.
* If that is the case, we may need to activate
* additional kthreads to help deliver them. We only
* need one less than total #of messages to deliver.
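			 * (the kthread making this callout will deliver one
			 * of them itself once it calls xpc_kthread_waitmsgs()).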
*/
n_needed = n_of_deliverable_payloads(ch) - 1;
if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
xpc_activate_kthreads(ch, n_needed);
} else {
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
xpc_kthread_waitmsgs(part, ch);
}
/* let registerer know that connection is disconnecting */
spin_lock_irqsave(&ch->lock, irq_flags);
if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_disconnect_callout(ch, xpDisconnecting);
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
xpc_arch_ops.indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
partid, ch_number);
xpc_part_deref(part);
return 0;
}
/*
* For each partition that XPC has established communications with, there is
* a minimum of one kernel thread assigned to perform any operation that
* may potentially sleep or block (basically the callouts to the asynchronous
* functions registered via xpc_connect()).
*
* Additional kthreads are created and destroyed by XPC as the workload
* demands.
*
* A kthread is assigned to one of the active channels that exists for a given
* partition.
*/
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
int ignore_disconnecting)
{
unsigned long irq_flags;
u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
struct xpc_partition *part = &xpc_partitions[ch->partid];
struct task_struct *kthread;
void (*indicate_partition_disengaged) (struct xpc_partition *) =
xpc_arch_ops.indicate_partition_disengaged;
while (needed-- > 0) {
/*
* The following is done on behalf of the newly created
* kthread. That kthread is responsible for doing the
* counterpart to the following before it exits.
*/
if (ignore_disconnecting) {
if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
/* kthreads assigned had gone to zero */
BUG_ON(!(ch->flags &
XPC_C_DISCONNECTINGCALLOUT_MADE));
break;
}
} else if (ch->flags & XPC_C_DISCONNECTING) {
break;
} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
atomic_inc_return(&part->nchannels_engaged) == 1) {
xpc_arch_ops.indicate_partition_engaged(part);
}
(void)xpc_part_ref(part);
xpc_msgqueue_ref(ch);
kthread = kthread_run(xpc_kthread_start, (void *)args,
"xpc%02dc%d", ch->partid, ch->number);
if (IS_ERR(kthread)) {
/* the fork failed */
/*
* NOTE: if (ignore_disconnecting &&
* !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
* then we'll deadlock if all other kthreads assigned
* to this channel are blocked in the channel's
* registerer, because the only thing that will unblock
* them is the xpDisconnecting callout that this
* failed kthread_run() would have made.
*/
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
if (atomic_read(&ch->kthreads_assigned) <
ch->kthreads_idle_limit) {
/*
* Flag this as an error only if we have an
* insufficient #of kthreads for the channel
* to function.
*/
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
break;
}
}
}
void
xpc_disconnect_wait(int ch_number)
{
unsigned long irq_flags;
short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
int wakeup_channel_mgr;
/* now wait for all callouts to the caller's function to cease */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (!xpc_part_ref(part))
continue;
ch = &part->channels[ch_number];
if (!(ch->flags & XPC_C_WDISCONNECT)) {
xpc_part_deref(part);
continue;
}
wait_for_completion(&ch->wdisconnect_wait);
spin_lock_irqsave(&ch->lock, irq_flags);
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
wakeup_channel_mgr = 0;
if (ch->delayed_chctl_flags) {
if (part->act_state != XPC_P_AS_DEACTIVATING) {
spin_lock(&part->chctl_lock);
part->chctl.flags[ch->number] |=
ch->delayed_chctl_flags;
spin_unlock(&part->chctl_lock);
wakeup_channel_mgr = 1;
}
ch->delayed_chctl_flags = 0;
}
ch->flags &= ~XPC_C_WDISCONNECT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (wakeup_channel_mgr)
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
}
static int
xpc_setup_partitions(void)
{
short partid;
struct xpc_partition *part;
xpc_partitions = kcalloc(xp_max_npartitions,
sizeof(struct xpc_partition),
GFP_KERNEL);
if (xpc_partitions == NULL) {
dev_err(xpc_part, "can't get memory for partition structure\n");
return -ENOMEM;
}
/*
* The first few fields of each entry of xpc_partitions[] need to
* be initialized now so that calls to xpc_connect() and
* xpc_disconnect() can be made prior to the activation of any remote
* partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
* ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
* PARTITION HAS BEEN ACTIVATED.
*/
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
part->activate_IRQ_rcvd = 0;
spin_lock_init(&part->act_lock);
part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, 0, 0);
timer_setup(&part->disengage_timer,
xpc_timeout_partition_disengage, 0);
part->setup_state = XPC_P_SS_UNSET;
init_waitqueue_head(&part->teardown_wq);
atomic_set(&part->references, 0);
}
return xpc_arch_ops.setup_partitions();
}
static void
xpc_teardown_partitions(void)
{
xpc_arch_ops.teardown_partitions();
kfree(xpc_partitions);
}
static void
xpc_do_exit(enum xp_retval reason)
{
short partid;
int active_part_count, printed_waiting_msg = 0;
struct xpc_partition *part;
unsigned long printmsg_time, disengage_timeout = 0;
/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
DBUG_ON(xpc_exiting == 1);
/*
* Let the heartbeat checker thread and the discovery thread
* (if one is running) know that they should exit. Also wake up
* the heartbeat checker thread in case it's sleeping.
*/
xpc_exiting = 1;
wake_up_interruptible(&xpc_activate_IRQ_wq);
/* wait for the discovery thread to exit */
wait_for_completion(&xpc_discovery_exited);
/* wait for the heartbeat checker thread to exit */
wait_for_completion(&xpc_hb_checker_exited);
/* sleep for a 1/3 of a second or so */
(void)msleep_interruptible(300);
/* wait for all partitions to become inactive */
printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
xpc_disengage_timedout = 0;
do {
active_part_count = 0;
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_AS_INACTIVE) {
continue;
}
active_part_count++;
XPC_DEACTIVATE_PARTITION(part, reason);
if (part->disengage_timeout > disengage_timeout)
disengage_timeout = part->disengage_timeout;
}
if (xpc_arch_ops.any_partition_engaged()) {
if (time_is_before_jiffies(printmsg_time)) {
dev_info(xpc_part, "waiting for remote "
"partitions to deactivate, timeout in "
"%ld seconds\n", (disengage_timeout -
jiffies) / HZ);
printmsg_time = jiffies +
(XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
printed_waiting_msg = 1;
}
} else if (active_part_count > 0) {
if (printed_waiting_msg) {
dev_info(xpc_part, "waiting for local partition"
" to deactivate\n");
printed_waiting_msg = 0;
}
} else {
if (!xpc_disengage_timedout) {
dev_info(xpc_part, "all partitions have "
"deactivated\n");
}
break;
}
/* sleep for a 1/3 of a second or so */
(void)msleep_interruptible(300);
} while (1);
DBUG_ON(xpc_arch_ops.any_partition_engaged());
xpc_teardown_rsvd_page();
if (reason == xpUnloading) {
(void)unregister_die_notifier(&xpc_die_notifier);
(void)unregister_reboot_notifier(&xpc_reboot_notifier);
}
/* clear the interface to XPC's functions */
xpc_clear_interface();
if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl);
if (xpc_sysctl_hb)
unregister_sysctl_table(xpc_sysctl_hb);
xpc_teardown_partitions();
if (is_uv_system())
xpc_exit_uv();
}
/*
* This function is called when the system is being rebooted.
*/
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
enum xp_retval reason;
switch (event) {
case SYS_RESTART:
reason = xpSystemReboot;
break;
case SYS_HALT:
reason = xpSystemHalt;
break;
case SYS_POWER_OFF:
reason = xpSystemPoweroff;
break;
default:
reason = xpSystemGoingDown;
}
xpc_do_exit(reason);
return NOTIFY_DONE;
}
/* Used to only allow one cpu to complete disconnect */
static unsigned int xpc_die_disconnecting;
/*
* Notify other partitions to deactivate from us by first disengaging from all
* references to our memory.
*/
static void
xpc_die_deactivate(void)
{
struct xpc_partition *part;
short partid;
int any_engaged;
long keep_waiting;
long wait_to_print;
if (cmpxchg(&xpc_die_disconnecting, 0, 1))
return;
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_arch_ops.partition_engaged(partid) ||
part->act_state != XPC_P_AS_INACTIVE) {
xpc_arch_ops.request_partition_deactivation(part);
xpc_arch_ops.indicate_partition_disengaged(part);
}
}
/*
* Though we requested that all other partitions deactivate from us,
* we only wait until they've all disengaged or we've reached the
* defined timelimit.
*
* Given that one iteration through the following while-loop takes
* approximately 200 microseconds, calculate the #of loops to take
* before bailing and the #of loops before printing a waiting message.
*/
keep_waiting = xpc_disengage_timelimit * 1000 * 5;
wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
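	/*
	 * e.g. a disengage timelimit of 90 seconds works out to
	 * 90 * 1000 * 5 = 450,000 iterations of ~200us each (~90 seconds).
	 */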
while (1) {
any_engaged = xpc_arch_ops.any_partition_engaged();
if (!any_engaged) {
dev_info(xpc_part, "all partitions have deactivated\n");
break;
}
if (!keep_waiting--) {
for (partid = 0; partid < xp_max_npartitions;
partid++) {
if (xpc_arch_ops.partition_engaged(partid)) {
dev_info(xpc_part, "deactivate from "
"remote partition %d timed "
"out\n", partid);
}
}
break;
}
if (!wait_to_print--) {
dev_info(xpc_part, "waiting for remote partitions to "
"deactivate, timeout in %ld seconds\n",
keep_waiting / (1000 * 5));
wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
1000 * 5;
}
udelay(200);
}
}
/*
* This function is called when the system is being restarted or halted due
* to some sort of system failure. If this is the case we need to notify the
* other partitions to disengage from all references to our memory.
* This function can also be called when our heartbeater could be offlined
* for a time. In this case we need to notify other partitions to not worry
* about the lack of a heartbeat.
*/
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64 /* !!! temporary kludge */
switch (event) {
case DIE_MACHINE_RESTART:
case DIE_MACHINE_HALT:
xpc_die_deactivate();
break;
case DIE_KDEBUG_ENTER:
/* Should lack of heartbeat be ignored by other partitions? */
if (!xpc_kdebug_ignore)
break;
fallthrough;
case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER:
xpc_arch_ops.offline_heartbeat();
break;
case DIE_KDEBUG_LEAVE:
/* Is lack of heartbeat being ignored by other partitions? */
if (!xpc_kdebug_ignore)
break;
fallthrough;
case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE:
xpc_arch_ops.online_heartbeat();
break;
}
#else
struct die_args *die_args = _die_args;
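	/*
	 * A double fault (X86_TRAP_DF), or an x87/SIMD floating-point
	 * exception (X86_TRAP_MF/X86_TRAP_XF) taken in kernel mode, is
	 * treated as fatal and triggers deactivation.
	 */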
switch (event) {
case DIE_TRAP:
if (die_args->trapnr == X86_TRAP_DF)
xpc_die_deactivate();
if (((die_args->trapnr == X86_TRAP_MF) ||
(die_args->trapnr == X86_TRAP_XF)) &&
!user_mode(die_args->regs))
xpc_die_deactivate();
break;
case DIE_INT3:
case DIE_DEBUG:
break;
case DIE_OOPS:
case DIE_GPF:
default:
xpc_die_deactivate();
}
#endif
return NOTIFY_DONE;
}
static int __init
xpc_init(void)
{
int ret;
struct task_struct *kthread;
dev_set_name(xpc_part, "part");
dev_set_name(xpc_chan, "chan");
if (is_uv_system()) {
ret = xpc_init_uv();
} else {
ret = -ENODEV;
}
if (ret != 0)
return ret;
ret = xpc_setup_partitions();
if (ret != 0) {
dev_err(xpc_part, "can't get memory for partition structure\n");
goto out_1;
}
xpc_sysctl = register_sysctl("xpc", xpc_sys_xpc);
xpc_sysctl_hb = register_sysctl("xpc/hb", xpc_sys_xpc_hb);
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
* communications.
*/
ret = xpc_setup_rsvd_page();
if (ret != 0) {
dev_err(xpc_part, "can't setup our reserved page\n");
goto out_2;
}
/* add ourselves to the reboot_notifier_list */
ret = register_reboot_notifier(&xpc_reboot_notifier);
if (ret != 0)
dev_warn(xpc_part, "can't register reboot notifier\n");
/* add ourselves to the die_notifier list */
ret = register_die_notifier(&xpc_die_notifier);
if (ret != 0)
dev_warn(xpc_part, "can't register die notifier\n");
/*
* The real work-horse behind xpc. This processes incoming
* interrupts and monitors remote heartbeats.
*/
kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
if (IS_ERR(kthread)) {
dev_err(xpc_part, "failed while forking hb check thread\n");
ret = -EBUSY;
goto out_3;
}
/*
* Startup a thread that will attempt to discover other partitions to
* activate based on info provided by SAL. This new thread is short
* lived and will exit once discovery is complete.
*/
kthread = kthread_run(xpc_initiate_discovery, NULL,
XPC_DISCOVERY_THREAD_NAME);
if (IS_ERR(kthread)) {
dev_err(xpc_part, "failed while forking discovery thread\n");
/* mark this new thread as a non-starter */
complete(&xpc_discovery_exited);
xpc_do_exit(xpUnloading);
return -EBUSY;
}
/* set the interface to point at XPC's functions */
xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
xpc_initiate_send, xpc_initiate_send_notify,
xpc_initiate_received, xpc_initiate_partid_to_nasids);
return 0;
/* initialization was not successful */
out_3:
xpc_teardown_rsvd_page();
(void)unregister_die_notifier(&xpc_die_notifier);
(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
if (xpc_sysctl_hb)
unregister_sysctl_table(xpc_sysctl_hb);
if (xpc_sysctl)
unregister_sysctl_table(xpc_sysctl);
xpc_teardown_partitions();
out_1:
if (is_uv_system())
xpc_exit_uv();
return ret;
}
module_init(xpc_init);
static void __exit
xpc_exit(void)
{
xpc_do_exit(xpUnloading);
}
module_exit(xpc_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");
module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
"heartbeat increments.");
module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
"heartbeat checks.");
module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
"for disengage to complete.");
module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
"other partitions when dropping into kdebug.");
| linux-master | drivers/misc/sgi-xp/xpc_main.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition (XP) base.
*
* XP provides a base from which its users can interact
* with XPC, yet not be dependent on XPC.
*
*/
#include <linux/module.h>
#include <linux/device.h>
#include "xp.h"
/* define the XP debug device structures to be used with dev_dbg() et al */
static struct device_driver xp_dbg_name = {
.name = "xp"
};
static struct device xp_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xp_dbg_name
};
struct device *xp = &xp_dbg_subname;
/* max #of partitions possible */
short xp_max_npartitions;
EXPORT_SYMBOL_GPL(xp_max_npartitions);
short xp_partition_id;
EXPORT_SYMBOL_GPL(xp_partition_id);
u8 xp_region_size;
EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
unsigned long (*xp_socket_pa) (unsigned long gpa);
EXPORT_SYMBOL_GPL(xp_socket_pa);
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
int (*xp_cpu_to_nasid) (int cpuid);
EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
unsigned long size);
EXPORT_SYMBOL_GPL(xp_expand_memprotect);
enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
unsigned long size);
EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
EXPORT_SYMBOL_GPL(xpc_registrations);
/*
* Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
*/
struct xpc_interface xpc_interface = { };
EXPORT_SYMBOL_GPL(xpc_interface);
/*
* XPC calls this when it (the XPC module) has been loaded.
*/
void
xpc_set_interface(void (*connect) (int),
void (*disconnect) (int),
enum xp_retval (*send) (short, int, u32, void *, u16),
enum xp_retval (*send_notify) (short, int, u32, void *, u16,
xpc_notify_func, void *),
void (*received) (short, int, void *),
enum xp_retval (*partid_to_nasids) (short, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
xpc_interface.send = send;
xpc_interface.send_notify = send_notify;
xpc_interface.received = received;
xpc_interface.partid_to_nasids = partid_to_nasids;
}
EXPORT_SYMBOL_GPL(xpc_set_interface);
/*
* XPC calls this when it (the XPC module) is being unloaded.
*/
void
xpc_clear_interface(void)
{
memset(&xpc_interface, 0, sizeof(xpc_interface));
}
EXPORT_SYMBOL_GPL(xpc_clear_interface);
/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
*
* Arguments:
*
* ch_number - channel # to register for connection.
* func - function to call for asynchronous notification of channel
* state changes (i.e., connection, disconnection, error) and
* the arrival of incoming messages.
* key - pointer to optional user-defined value that gets passed back
* to the user on any callouts made to func.
* payload_size - size in bytes of the XPC message's payload area which
* contains a user-defined message. The user should make
* this large enough to hold their largest message.
* nentries - max #of XPC message entries a message queue can contain.
* The actual number, which is determined when a connection
 * is established and may be less than requested, will be
* passed to the user via the xpConnected callout.
* assigned_limit - max number of kthreads allowed to be processing
* messages (per connection) at any given instant.
* idle_limit - max number of kthreads allowed to be idle at any given
* instant.
*/
enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
u16 nentries, u32 assigned_limit, u32 idle_limit)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
DBUG_ON(payload_size == 0 || nentries == 0);
DBUG_ON(func == NULL);
DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
return xpPayloadTooBig;
registration = &xpc_registrations[ch_number];
	if (mutex_lock_interruptible(&registration->mutex) != 0)
return xpInterrupted;
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
		mutex_unlock(&registration->mutex);
return xpAlreadyRegistered;
}
/* register the channel for connection */
registration->entry_size = XPC_MSG_SIZE(payload_size);
registration->nentries = nentries;
registration->assigned_limit = assigned_limit;
registration->idle_limit = idle_limit;
registration->key = key;
registration->func = func;
	mutex_unlock(&registration->mutex);
if (xpc_interface.connect)
xpc_interface.connect(ch_number);
return xpSuccess;
}
EXPORT_SYMBOL_GPL(xpc_connect);
/*
* Remove the registration for automatic connection of the specified channel
* when a partition comes up.
*
 * Before returning, this xpc_disconnect() will wait until all connections on
 * the specified channel have been closed/torn down. So the caller can be assured
* that they will not be receiving any more callouts from XPC to their
* function registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_disconnect(int ch_number)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
registration = &xpc_registrations[ch_number];
/*
* We've decided not to make this a down_interruptible(), since we
* figured XPC's users will just turn around and call xpc_disconnect()
* again anyways, so we might as well wait, if need be.
*/
	mutex_lock(&registration->mutex);
/* if !XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func == NULL) {
		mutex_unlock(&registration->mutex);
return;
}
/* remove the connection registration for the specified channel */
registration->func = NULL;
registration->key = NULL;
registration->nentries = 0;
registration->entry_size = 0;
registration->assigned_limit = 0;
registration->idle_limit = 0;
if (xpc_interface.disconnect)
xpc_interface.disconnect(ch_number);
mutex_unlock(®istration->mutex);
return;
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
static int __init
xp_init(void)
{
enum xp_retval ret;
int ch_number;
/* initialize the connection registration mutex */
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
if (is_uv_system())
ret = xp_init_uv();
else
ret = 0;
if (ret != xpSuccess)
return ret;
return 0;
}
module_init(xp_init);
static void __exit
xp_exit(void)
{
if (is_uv_system())
xp_exit_uv();
}
module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/sgi-xp/xp_main.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Cross Partition Network Interface (XPNET) support
*
* XPNET provides a virtual network layered on top of the Cross
* Partition communication layer.
*
* XPNET provides direct point-to-point and broadcast-like support
* for an ethernet-like device. The ethernet broadcast medium is
* replaced with a point-to-point message structure which passes
* pointers to a DMA-capable block that a remote partition should
* retrieve and pass to the upper level networking layer.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "xp.h"
/*
* The message payload transferred by XPC.
*
* buf_pa is the physical address where the DMA should pull from.
*
* NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
* cacheline boundary. To accomplish this, we record the number of
* bytes from the beginning of the first cacheline to the first useful
* byte of the skb (leadin_ignore) and the number of bytes from the
* last useful byte of the skb to the end of the last cacheline
* (tailout_ignore).
*
* size is the number of bytes to transfer which includes the skb->len
 * (useful bytes of the sender's skb) plus the leadin and tailout
*/
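/*
 * Rough buffer layout implied by the fields above:
 *
 *   |<------------------------- size ------------------------->|
 *   | leadin_ignore | useful skb data (skb->len) | tailout_ignore |
 *   ^
 *   buf_pa (cacheline aligned)
 */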
struct xpnet_message {
u16 version; /* Version for this message */
u16 embedded_bytes; /* #of bytes embedded in XPC message */
u32 magic; /* Special number indicating this is xpnet */
unsigned long buf_pa; /* phys address of buffer to retrieve */
u32 size; /* #of bytes in buffer */
u8 leadin_ignore; /* #of bytes to ignore at the beginning */
u8 tailout_ignore; /* #of bytes to ignore at the end */
unsigned char data; /* body of small packets */
};
/*
* Determine the size of our message, the cacheline aligned size,
* and then the number of message will request from XPC.
*
* XPC expects each message to exist in an individual cacheline.
*/
#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE
#define XPNET_MSG_DATA_MAX \
(XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
/*
* Version number of XPNET implementation. XPNET can always talk to versions
 * with same major #, and never talk to versions with a different major #.
*/
#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
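/* e.g. XPNET_VERSION_EMBED == _XPNET_VERSION(1, 1) == 0x11 (major 1, minor 1) */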
#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
    && (_m->magic == XPNET_MAGIC))
#define XPNET_DEVICE_NAME "xp0"
/*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the
* notification function is called, we use the cookie to decide
* whether all outstanding message sends have completed. The skb can
* then be released.
*/
struct xpnet_pending_msg {
struct sk_buff *skb;
atomic_t use_count;
};
static struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
* our bitmask of partitions to which we broadcast.
*/
static unsigned long *xpnet_broadcast_partitions;
/* protect above */
static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/*
* Since the Block Transfer Engine (BTE) is being used for the transfer
* and it relies upon cache-line size transfers, we need to reserve at
* least one cache-line for head and tail alignment. The BTE is
* limited to 8MB transfers.
*
* Testing has shown that changing MTU to greater than 64KB has no effect
* on TCP as the two sides negotiate a Max Segment Size that is limited
 * to 64K. Other protocols may use packets greater than this, but for
* now, the default is 64KB.
*/
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
/* 68 comes from min TCP+IP+MAC header */
#define XPNET_MIN_MTU 68
/* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL)
/*
* The partid is encapsulated in the MAC address beginning in the following
* octet and it consists of two octets.
*/
#define XPNET_PARTID_OCTET 2
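/*
 * e.g. partid 0x0102 yields the MAC 02:00:01:02:00:00 - octet 2 holds the
 * high byte and octet 3 the low byte of the partid (see xpnet_init() and
 * xpnet_dev_hard_start_xmit()).
 */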
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
static struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
static struct device xpnet_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
static struct device *xpnet = &xpnet_dbg_subname;
/*
 * Packet was received by XPC and forwarded to us.
*/
static void
xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{
struct sk_buff *skb;
void *dst;
enum xp_retval ret;
if (!XPNET_VALID_MSG(msg)) {
/*
* Packet with a different XPC version. Ignore.
*/
xpc_received(partid, channel, (void *)msg);
xpnet_device->stats.rx_errors++;
return;
}
dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
/* reserve an extra cache line */
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
if (!skb) {
dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
msg->size + L1_CACHE_BYTES);
xpc_received(partid, channel, (void *)msg);
xpnet_device->stats.rx_errors++;
return;
}
/*
* The allocated skb has some reserved space.
* In order to use xp_remote_memcpy(), we need to get the
* skb->data pointer moved forward.
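	 * This moves skb->data to leadin_ignore bytes past the next cacheline
	 * boundary, i.e. to the same offset within a cacheline that the
	 * sender's data had, so the cacheline-aligned remote copy below lands
	 * the first useful byte exactly at skb->data.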
*/
skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
(L1_CACHE_BYTES - 1)) +
msg->leadin_ignore));
/*
* Update the tail pointer to indicate data actually
* transferred.
*/
skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
/*
* Move the data over from the other side.
*/
if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
(msg->embedded_bytes != 0)) {
dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
"%lu)\n", skb->data, &msg->data,
(size_t)msg->embedded_bytes);
skb_copy_to_linear_data(skb, &msg->data,
(size_t)msg->embedded_bytes);
} else {
dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
"xp_remote_memcpy(0x%p, 0x%p, %u)\n", dst,
(void *)msg->buf_pa, msg->size);
ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
if (ret != xpSuccess) {
/*
* !!! Need better way of cleaning skb. Currently skb
* !!! appears in_use and we can't just call
* !!! dev_kfree_skb.
*/
dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%x) "
"returned error=0x%x\n", dst,
(void *)msg->buf_pa, msg->size, ret);
xpc_received(partid, channel, (void *)msg);
xpnet_device->stats.rx_errors++;
return;
}
}
dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len);
skb->protocol = eth_type_trans(skb, xpnet_device);
skb->ip_summed = CHECKSUM_UNNECESSARY;
dev_dbg(xpnet, "passing skb to network layer\n"
"\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n",
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len);
xpnet_device->stats.rx_packets++;
xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
netif_rx(skb);
xpc_received(partid, channel, (void *)msg);
}
/*
* This is the handler which XPC calls during any sort of change in
* state or message reception on a connection.
*/
static void
xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
void *data, void *key)
{
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(channel != XPC_NET_CHANNEL);
switch (reason) {
case xpMsgReceived: /* message received */
DBUG_ON(data == NULL);
xpnet_receive(partid, channel, (struct xpnet_message *)data);
break;
case xpConnected: /* connection completed to a partition */
spin_lock_bh(&xpnet_broadcast_lock);
__set_bit(partid, xpnet_broadcast_partitions);
spin_unlock_bh(&xpnet_broadcast_lock);
netif_carrier_on(xpnet_device);
dev_dbg(xpnet, "%s connected to partition %d\n",
xpnet_device->name, partid);
break;
default:
spin_lock_bh(&xpnet_broadcast_lock);
__clear_bit(partid, xpnet_broadcast_partitions);
spin_unlock_bh(&xpnet_broadcast_lock);
if (bitmap_empty(xpnet_broadcast_partitions,
xp_max_npartitions)) {
netif_carrier_off(xpnet_device);
}
dev_dbg(xpnet, "%s disconnected from partition %d\n",
xpnet_device->name, partid);
break;
}
}
static int
xpnet_dev_open(struct net_device *dev)
{
enum xp_retval ret;
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
(unsigned long)XPNET_MSG_SIZE,
(unsigned long)XPNET_MSG_NENTRIES,
(unsigned long)XPNET_MAX_KTHREADS,
(unsigned long)XPNET_MAX_IDLE_KTHREADS);
ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
if (ret != xpSuccess) {
dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
"ret=%d\n", dev->name, ret);
return -ENOMEM;
}
dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
return 0;
}
static int
xpnet_dev_stop(struct net_device *dev)
{
xpc_disconnect(XPC_NET_CHANNEL);
dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
return 0;
}
/*
* Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with
* our side. When all recipients are done processing, we
* release the skb and then release our pending message structure.
*/
static void
xpnet_send_completed(enum xp_retval reason, short partid, int channel,
void *__qm)
{
struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
DBUG_ON(queued_msg == NULL);
dev_dbg(xpnet, "message to %d notified with reason %d\n",
partid, reason);
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
(void *)queued_msg->skb->head);
dev_kfree_skb_any(queued_msg->skb);
kfree(queued_msg);
}
}
static void
xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
{
u8 msg_buffer[XPNET_MSG_SIZE];
struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
u16 msg_size = sizeof(struct xpnet_message);
enum xp_retval ret;
msg->embedded_bytes = embedded_bytes;
if (unlikely(embedded_bytes != 0)) {
msg->version = XPNET_VERSION_EMBED;
dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
&msg->data, skb->data, (size_t)embedded_bytes);
skb_copy_from_linear_data(skb, &msg->data,
(size_t)embedded_bytes);
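		/* the first byte of 'data' is already counted in sizeof(struct xpnet_message) */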
msg_size += embedded_bytes - 1;
} else {
msg->version = XPNET_VERSION;
}
msg->magic = XPNET_MAGIC;
msg->size = end_addr - start_addr;
msg->leadin_ignore = (u64)skb->data - start_addr;
msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
msg->buf_pa = xp_pa((void *)start_addr);
dev_dbg(xpnet, "sending XPC message to %d:%d\n"
"msg->buf_pa=0x%lx, msg->size=%u, "
"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
atomic_inc(&queued_msg->use_count);
ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
msg_size, xpnet_send_completed, queued_msg);
if (unlikely(ret != xpSuccess))
atomic_dec(&queued_msg->use_count);
}
/*
* Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions
* which have connected with us and are targets of this packet.
*
* MAC-NOTE: For the XPNET driver, the MAC address contains the
* destination partid. If the destination partid octets are 0xffff,
* this packet is to be broadcast to all connected partitions.
*/
static netdev_tx_t
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xpnet_pending_msg *queued_msg;
u64 start_addr, end_addr;
short dest_partid;
u16 embedded_bytes = 0;
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
skb->len);
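	/*
	 * A leading octet of 0x33 marks an IPv6 multicast MAC (33:33:...);
	 * XPNET does not support multicast, so silently drop the packet.
	 */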
if (skb->data[0] == 0x33) {
dev_kfree_skb(skb);
return NETDEV_TX_OK; /* nothing needed to be done */
}
/*
* The xpnet_pending_msg tracks how many outstanding
* xpc_send_notifies are relying on this skb. When none
* remain, release the skb.
*/
queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
if (queued_msg == NULL) {
dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
"packet\n", sizeof(struct xpnet_pending_msg));
dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* get the beginning of the first cacheline and end of last */
start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
/* calculate how many bytes to embed in the XPC message */
if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
/* skb->data does fit so embed */
embedded_bytes = skb->len;
}
/*
* Since the send occurs asynchronously, we set the count to one
* and begin sending. Any sends that happen to complete before
* we are done sending will not free the skb. We will be left
* with that task during exit. This also handles the case of
* a packet destined for a partition which is no longer up.
*/
atomic_set(&queued_msg->use_count, 1);
queued_msg->skb = skb;
if (skb->data[0] == 0xff) {
/* we are being asked to broadcast to all partitions */
for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
xp_max_npartitions) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
embedded_bytes, dest_partid);
}
} else {
dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;
if (dest_partid >= 0 &&
dest_partid < xp_max_npartitions &&
test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
embedded_bytes, dest_partid);
}
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_kfree_skb(skb);
kfree(queued_msg);
}
return NETDEV_TX_OK;
}
/*
* Deal with transmit timeouts coming from the network layer.
*/
static void
xpnet_dev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
dev->stats.tx_errors++;
}
static const struct net_device_ops xpnet_netdev_ops = {
.ndo_open = xpnet_dev_open,
.ndo_stop = xpnet_dev_stop,
.ndo_start_xmit = xpnet_dev_hard_start_xmit,
.ndo_tx_timeout = xpnet_dev_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init
xpnet_init(void)
{
u8 addr[ETH_ALEN];
int result;
if (!is_uv_system())
return -ENODEV;
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
xpnet_broadcast_partitions = bitmap_zalloc(xp_max_npartitions,
GFP_KERNEL);
if (xpnet_broadcast_partitions == NULL)
return -ENOMEM;
/*
* use ether_setup() to init the majority of our device
* structure and then override the necessary pieces.
*/
xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, NET_NAME_UNKNOWN,
ether_setup);
if (xpnet_device == NULL) {
bitmap_free(xpnet_broadcast_partitions);
return -ENOMEM;
}
netif_carrier_off(xpnet_device);
xpnet_device->netdev_ops = &xpnet_netdev_ops;
xpnet_device->mtu = XPNET_DEF_MTU;
xpnet_device->min_mtu = XPNET_MIN_MTU;
xpnet_device->max_mtu = XPNET_MAX_MTU;
memset(addr, 0, sizeof(addr));
/*
* Multicast assumes the LSB of the first octet is set for multicast
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
addr[0] = 0x02; /* locally administered, no OUI */
addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
eth_hw_addr_set(xpnet_device, addr);
/*
* ether_setup() sets this to a multicast device. We are
* really not supporting multicast at this time.
*/
xpnet_device->flags &= ~IFF_MULTICAST;
/*
* No need to checksum as it is a DMA transfer. The BTE will
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
free_netdev(xpnet_device);
bitmap_free(xpnet_broadcast_partitions);
}
return result;
}
module_init(xpnet_init);
static void __exit
xpnet_exit(void)
{
dev_info(xpnet, "unregistering network device %s\n",
		 xpnet_device->name);
unregister_netdev(xpnet_device);
free_netdev(xpnet_device);
bitmap_free(xpnet_broadcast_partitions);
}
module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/sgi-xp/xpnet.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) uv-based functions.
*
* Architecture specific implementation of common functions.
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
__u64 vector : 8,
delivery_mode : 3,
dest_mode : 1,
delivery_status : 1,
polarity : 1,
__reserved_1 : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15,
dest : 32;
};
#define sn_partition_id 0
#endif
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
static int xpc_mq_node = NUMA_NO_NODE;
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
spin_lock_init(&part_uv->flags_lock);
part_uv->remote_act_state = XPC_P_AS_INACTIVE;
}
return 0;
}
static void
xpc_teardown_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
unsigned long irq_flags;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
if (part_uv->cached_activate_gru_mq_desc != NULL) {
mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
kfree(part_uv->cached_activate_gru_mq_desc);
part_uv->cached_activate_gru_mq_desc = NULL;
mutex_unlock(&part_uv->
cached_activate_gru_mq_desc_mutex);
}
}
}
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
UV_AFFINITY_CPU);
if (mq->irq < 0)
return mq->irq;
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
#elif defined CONFIG_IA64_SGI_UV
if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
mq->irq = SGI_XPC_ACTIVATE;
else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
mq->irq = SGI_XPC_NOTIFY;
else
return -EINVAL;
mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
#error not a supported configuration
#endif
return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
uv_teardown_irq(mq->irq);
#elif defined CONFIG_IA64_SGI_UV
int mmr_pnode;
unsigned long mmr_value;
mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mmr_value = 1UL << 16;
uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
#if defined CONFIG_IA64_SGI_UV
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
#elif defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
"ret=%d\n", ret);
return ret;
}
#else
#error not a supported configuration
#endif
mq->watchlist_num = ret;
return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
irq_handler_t irq_handler)
{
enum xp_retval xp_ret;
int ret;
int nid;
int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
struct uv_IO_APIC_route_entry *mmr_value;
mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
if (mq == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
"a xpc_gru_mq_uv structure\n");
ret = -ENOMEM;
goto out_0;
}
mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
GFP_KERNEL);
if (mq->gru_mq_desc == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
"a gru_message_queue_desc structure\n");
ret = -ENOMEM;
goto out_1;
}
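	/*
	 * Round the requested size up to a whole number of pages; mq->order
	 * becomes log2 of the allocation size in bytes, so the rounded-up
	 * size is simply 1UL << mq->order.
	 */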
pg_order = get_order(mq_size);
mq->order = pg_order + PAGE_SHIFT;
mq_size = 1UL << mq->order;
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
page = __alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
ret = -ENOMEM;
goto out_2;
}
mq->address = page_address(page);
/* enable generation of irq when GRU mq operation occurs to this mq */
ret = xpc_gru_mq_watchlist_alloc_uv(mq);
if (ret != 0)
goto out_3;
ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
if (ret != 0)
goto out_4;
ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
if (ret != 0) {
dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
mq->irq, -ret);
goto out_5;
}
nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
ret = -EINVAL;
goto out_6;
}
/* allow other partitions to access this GRU mq */
xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
if (xp_ret != xpSuccess) {
ret = -EACCES;
goto out_6;
}
return mq;
/* something went wrong */
out_6:
free_irq(mq->irq, NULL);
out_5:
xpc_release_gru_mq_irq_uv(mq);
out_4:
xpc_gru_mq_watchlist_free_uv(mq);
out_3:
free_pages((unsigned long)mq->address, pg_order);
out_2:
kfree(mq->gru_mq_desc);
out_1:
kfree(mq);
out_0:
return ERR_PTR(ret);
}
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
unsigned int mq_size;
int pg_order;
int ret;
/* disallow other partitions to access GRU mq */
mq_size = 1UL << mq->order;
ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
BUG_ON(ret != xpSuccess);
/* unregister irq handler and release mq irq/vector mapping */
free_irq(mq->irq, NULL);
xpc_release_gru_mq_irq_uv(mq);
/* disable generation of irq when GRU mq op occurs to this mq */
xpc_gru_mq_watchlist_free_uv(mq);
pg_order = mq->order - PAGE_SHIFT;
free_pages((unsigned long)mq->address, pg_order);
kfree(mq);
}
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
size_t msg_size)
{
enum xp_retval xp_ret;
int ret;
while (1) {
ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
if (ret == MQE_OK) {
xp_ret = xpSuccess;
break;
}
if (ret == MQE_QUEUE_FULL) {
dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
"error=MQE_QUEUE_FULL\n");
/* !!! handle QLimit reached; delay & try again */
/* ??? Do we add a limit to the number of retries? */
(void)msleep_interruptible(10);
} else if (ret == MQE_CONGESTION) {
dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
"error=MQE_CONGESTION\n");
/* !!! handle LB Overflow; simply try again */
/* ??? Do we add a limit to the number of retries? */
} else {
/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
dev_err(xpc_chan, "gru_send_message_gpa() returned "
"error=%d\n", ret);
xp_ret = xpGruSendMqError;
break;
}
}
return xp_ret;
}
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
unsigned long irq_flags;
short partid;
struct xpc_partition *part;
u8 act_state_req;
DBUG_ON(xpc_activate_IRQ_rcvd == 0);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part = &xpc_partitions[partid];
if (part->sn.uv.act_state_req == 0)
continue;
xpc_activate_IRQ_rcvd--;
BUG_ON(xpc_activate_IRQ_rcvd < 0);
act_state_req = part->sn.uv.act_state_req;
part->sn.uv.act_state_req = 0;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_activate_partition(part);
else if (part->act_state == XPC_P_AS_DEACTIVATING)
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_activate_partition(part);
else
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
} else {
BUG();
}
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (xpc_activate_IRQ_rcvd == 0)
break;
}
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct xpc_openclose_args *args;
part_uv->remote_act_state = msg_hdr->act_state;
switch (msg_hdr->type) {
case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
/* syncing of remote_act_state was just done above */
break;
case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_activate_req_uv *msg;
/*
* ??? Do we deal here with ts_jiffies being different
* ??? if act_state != XPC_P_AS_INACTIVE instead of
* ??? below?
*/
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_activate_req_uv, hdr);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
part_uv->heartbeat_gpa = msg->heartbeat_gpa;
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
break;
}
case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_deactivate_req_uv *msg;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_deactivate_req_uv, hdr);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = msg->reason;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
return;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->reason = msg->reason;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->entry_size = msg->entry_size;
args->local_nentries = msg->local_nentries;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->remote_nentries = msg->remote_nentries;
args->local_nentries = msg->local_nentries;
args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
}
fallthrough;
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
break;
case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
break;
default:
dev_err(xpc_part, "received unknown activate_mq msg type=%d "
"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
/* get hb checker to deactivate from the remote partition */
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = xpBadMsgType;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
return;
}
if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
part->remote_rp_ts_jiffies != 0) {
/*
* ??? Does what we do here need to be sensitive to
* ??? act_state or remote_act_state?
*/
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
}
}
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
struct xpc_activate_mq_msghdr_uv *msg_hdr;
short partid;
struct xpc_partition *part;
int wakeup_hb_checker = 0;
int part_referenced;
while (1) {
msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
if (msg_hdr == NULL)
break;
partid = msg_hdr->partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
"received invalid partid=0x%x in message\n",
partid);
} else {
part = &xpc_partitions[partid];
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
}
gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
}
if (wakeup_hb_checker)
wake_up_interruptible(&xpc_activate_IRQ_wq);
return IRQ_HANDLED;
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
unsigned long gru_mq_desc_gpa)
{
enum xp_retval ret;
ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
sizeof(struct gru_message_queue_desc));
if (ret == xpSuccess)
gru_mq_desc->mq = NULL;
return ret;
}
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
int msg_type)
{
struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct gru_message_queue_desc *gru_mq_desc;
unsigned long irq_flags;
enum xp_retval ret;
DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
msg_hdr->type = msg_type;
msg_hdr->partid = xp_partition_id;
msg_hdr->act_state = part->act_state;
msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
if (gru_mq_desc == NULL) {
gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
GFP_ATOMIC);
if (gru_mq_desc == NULL) {
ret = xpNoMemory;
goto done;
}
part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
}
ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
part_uv->
activate_gru_mq_desc_gpa);
if (ret != xpSuccess)
goto done;
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
msg_size);
if (ret != xpSuccess) {
smp_rmb(); /* ensure a fresh copy of part_uv->flags */
if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
goto again;
}
done:
mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
return ret;
}
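/*
 * Illustrative note on xpc_send_activate_IRQ_uv() above (editorial sketch,
 * not driver code): the remote GRU message-queue descriptor is cached
 * lazily.  When the cached-descriptor flag is clear, the descriptor is
 * (re)fetched via xp_remote_memcpy() before sending; if the send then
 * fails and another path has meanwhile cleared the flag (e.g. because the
 * remote partition advertised a new descriptor gpa), the code loops back
 * to "again" to refresh the cache and retry, all serialized by
 * cached_activate_gru_mq_desc_mutex.
 */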
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
size_t msg_size, int msg_type)
{
enum xp_retval ret;
ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
if (unlikely(ret != xpSuccess))
XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
void *msg, size_t msg_size, int msg_type)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xp_retval ret;
ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
if (unlikely(ret != xpSuccess)) {
if (irq_flags != NULL)
spin_unlock_irqrestore(&ch->lock, *irq_flags);
XPC_DEACTIVATE_PARTITION(part, ret);
if (irq_flags != NULL)
spin_lock_irqsave(&ch->lock, *irq_flags);
}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
unsigned long irq_flags;
struct xpc_partition_uv *part_uv = &part->sn.uv;
/*
* !!! Make our side think that the remote partition sent an activate
* !!! mq message our way by doing what the activate IRQ handler would
* !!! do had one really been sent.
*/
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = act_state_req;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
wake_up_interruptible(&xpc_activate_IRQ_wq);
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
size_t *len)
{
s64 status;
enum xp_retval ret;
#if defined CONFIG_X86_64
status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
(u64 *)len);
if (status == BIOS_STATUS_SUCCESS)
ret = xpSuccess;
else if (status == BIOS_STATUS_MORE_PASSES)
ret = xpNeedMoreInfo;
else
ret = xpBiosError;
#elif defined CONFIG_IA64_SGI_UV
status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
if (status == SALRET_OK)
ret = xpSuccess;
else if (status == SALRET_MORE_PASSES)
ret = xpNeedMoreInfo;
else
ret = xpSalError;
#else
#error not a supported configuration
#endif
return ret;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
xpc_heartbeat_uv =
&xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
rp->sn.uv.activate_gru_mq_desc_gpa =
uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
return 0;
}
static void
xpc_allow_hb_uv(short partid)
{
}
static void
xpc_disallow_hb_uv(short partid)
{
}
static void
xpc_disallow_all_hbs_uv(void)
{
}
static void
xpc_increment_heartbeat_uv(void)
{
xpc_heartbeat_uv->value++;
}
static void
xpc_offline_heartbeat_uv(void)
{
xpc_increment_heartbeat_uv();
xpc_heartbeat_uv->offline = 1;
}
static void
xpc_online_heartbeat_uv(void)
{
xpc_increment_heartbeat_uv();
xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_init_uv(void)
{
xpc_heartbeat_uv->value = 1;
xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_exit_uv(void)
{
xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
enum xp_retval ret;
ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
part_uv->heartbeat_gpa,
sizeof(struct xpc_heartbeat_uv));
if (ret != xpSuccess)
return ret;
if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
!part_uv->cached_heartbeat.offline) {
ret = xpNoHeartbeat;
} else {
part->last_heartbeat = part_uv->cached_heartbeat.value;
}
return ret;
}
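/*
 * Illustrative sketch only, not part of this driver: a hypothetical caller
 * of the heartbeat check above.  The real heartbeat checker lives in the
 * XPC core and is assumed here to simply deactivate a partition whose
 * cached heartbeat could not be fetched or has stopped advancing.
 */
static void __maybe_unused xpc_check_remote_hb_sketch_uv(struct xpc_partition *part)
{
	enum xp_retval ret;

	ret = xpc_get_remote_heartbeat_uv(part); /* remote memcpy + compare */
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(part, ret); /* e.g. xpNoHeartbeat */
}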
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
unsigned long remote_rp_gpa, int nasid)
{
short partid = remote_rp->SAL_partid;
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_activate_mq_msg_activate_req_uv msg;
part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
part->sn.uv.activate_gru_mq_desc_gpa =
remote_rp->sn.uv.activate_gru_mq_desc_gpa;
/*
* ??? Is it a good idea to make this conditional on what is
* ??? potentially stale state information?
*/
if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
msg.rp_gpa = uv_gpa(xpc_rsvd_page);
msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
msg.activate_gru_mq_desc_gpa =
xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
}
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_deactivate_req_uv msg;
/*
* ??? Is it a good idea to make this conditional on what is
* ??? potentially stale state information?
*/
if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
msg.reason = part->reason;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
}
}
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
}
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
head->first = NULL;
head->last = NULL;
spin_lock_init(&head->lock);
head->n_entries = 0;
}
static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
unsigned long irq_flags;
struct xpc_fifo_entry_uv *first;
spin_lock_irqsave(&head->lock, irq_flags);
first = head->first;
if (head->first != NULL) {
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
head->n_entries--;
BUG_ON(head->n_entries < 0);
first->next = NULL;
}
spin_unlock_irqrestore(&head->lock, irq_flags);
return first;
}
static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
struct xpc_fifo_entry_uv *last)
{
unsigned long irq_flags;
last->next = NULL;
spin_lock_irqsave(&head->lock, irq_flags);
if (head->last != NULL)
head->last->next = last;
else
head->first = last;
head->last = last;
head->n_entries++;
spin_unlock_irqrestore(&head->lock, irq_flags);
}
static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
return head->n_entries;
}
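/*
 * Illustrative sketch only, not part of this driver: how the spinlock-
 * protected FIFO helpers above chain together.  In the real code the
 * entries are embedded in larger structures (e.g. xpc_send_msg_slot_uv)
 * and recovered with container_of(); the names used here are hypothetical.
 */
static void __maybe_unused xpc_fifo_usage_sketch_uv(void)
{
	struct xpc_fifo_head_uv demo_head;
	struct xpc_fifo_entry_uv e1, e2;
	struct xpc_fifo_entry_uv *entry;

	xpc_init_fifo_uv(&demo_head);		/* empty list, n_entries == 0 */
	xpc_put_fifo_entry_uv(&demo_head, &e1);	/* FIFO now holds e1 */
	xpc_put_fifo_entry_uv(&demo_head, &e2);	/* ... with e2 behind it */

	entry = xpc_get_fifo_entry_uv(&demo_head);	/* returns &e1 */
	entry = xpc_get_fifo_entry_uv(&demo_head);	/* returns &e2 */
	entry = xpc_get_fifo_entry_uv(&demo_head);	/* empty again -> NULL */
	(void)entry;
}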
/*
* Set up the channel structures that are UV specific.
*/
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
struct xpc_channel_uv *ch_uv;
int ch_number;
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch_uv = &part->channels[ch_number].sn.uv;
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
xpc_init_fifo_uv(&ch_uv->recv_msg_list);
}
return xpSuccess;
}
/*
* Tear down the channel structures that are UV specific.
*/
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
/*
* We send a sync msg to get the remote partition's remote_act_state
* updated to our current act_state which at this point should
* be XPC_P_AS_ACTIVATING.
*/
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
(part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
/* wait a 1/4 of a second or so */
(void)msleep_interruptible(250);
if (part->act_state == XPC_P_AS_DEACTIVATING)
return part->reason;
}
return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
unsigned long irq_flags;
union xpc_channel_ctl_flags chctl;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
chctl = part->chctl;
if (chctl.all_flags != 0)
part->chctl.all_flags = 0;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
return chctl.all_flags;
}
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
struct xpc_send_msg_slot_uv *msg_slot;
unsigned long irq_flags;
int nentries;
int entry;
size_t nbytes;
for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
if (ch_uv->send_msg_slots == NULL)
continue;
for (entry = 0; entry < nentries; entry++) {
msg_slot = &ch_uv->send_msg_slots[entry];
msg_slot->msg_slot_number = entry;
xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
&msg_slot->next);
}
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->local_nentries)
ch->local_nentries = nentries;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
struct xpc_notify_mq_msg_uv *msg_slot;
unsigned long irq_flags;
int nentries;
int entry;
size_t nbytes;
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->entry_size;
ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
if (ch_uv->recv_msg_slots == NULL)
continue;
for (entry = 0; entry < nentries; entry++) {
msg_slot = ch_uv->recv_msg_slots +
entry * ch->entry_size;
msg_slot->hdr.msg_slot_number = entry;
}
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->remote_nentries)
ch->remote_nentries = nentries;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
return xpNoMemory;
}
/*
* Allocate msg_slots associated with the channel.
*/
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
enum xp_retval ret;
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
DBUG_ON(ch->flags & XPC_C_SETUP);
ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
GFP_KERNEL);
if (ch_uv->cached_notify_gru_mq_desc == NULL)
return xpNoMemory;
ret = xpc_allocate_send_msg_slot_uv(ch);
if (ret == xpSuccess) {
ret = xpc_allocate_recv_msg_slot_uv(ch);
if (ret != xpSuccess) {
kfree(ch_uv->send_msg_slots);
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
}
}
return ret;
}
/*
* Free up msg_slots and clear other state that was set up for the specified
* channel.
*/
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
lockdep_assert_held(&ch->lock);
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
if (ch->flags & XPC_C_SETUP) {
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
kfree(ch_uv->send_msg_slots);
xpc_init_fifo_uv(&ch_uv->recv_msg_list);
kfree(ch_uv->recv_msg_slots);
}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
msg.ch_number = ch->number;
msg.reason = ch->reason;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}
static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_closereply_uv msg;
msg.ch_number = ch->number;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}
static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
msg.ch_number = ch->number;
msg.entry_size = ch->entry_size;
msg.local_nentries = ch->local_nentries;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}
static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_openreply_uv msg;
msg.ch_number = ch->number;
msg.local_nentries = ch->local_nentries;
msg.remote_nentries = ch->remote_nentries;
msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}
static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
msg.ch_number = ch->number;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
unsigned long irq_flags;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
unsigned long gru_mq_desc_gpa)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}
static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}
static void
xpc_assume_partition_disengaged_uv(short partid)
{
struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
unsigned long irq_flags;
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
static int
xpc_partition_engaged_uv(short partid)
{
return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}
static int
xpc_any_partition_engaged_uv(void)
{
struct xpc_partition_uv *part_uv;
short partid;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
return 1;
}
return 0;
}
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
enum xp_retval ret;
struct xpc_send_msg_slot_uv *msg_slot;
struct xpc_fifo_entry_uv *entry;
while (1) {
entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
if (entry != NULL)
break;
if (flags & XPC_NOWAIT)
return xpNoWait;
ret = xpc_allocate_msg_wait(ch);
if (ret != xpInterrupted && ret != xpTimeout)
return ret;
}
msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
*address_of_msg_slot = msg_slot;
return xpSuccess;
}
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
struct xpc_send_msg_slot_uv *msg_slot)
{
xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
/* wakeup anyone waiting for a free msg slot */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
}
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
struct xpc_send_msg_slot_uv *msg_slot,
enum xp_retval reason)
{
xpc_notify_func func = msg_slot->func;
if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
atomic_dec(&ch->n_to_notify);
dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
msg_slot->msg_slot_number, ch->partid, ch->number);
func(reason, ch->partid, ch->number, msg_slot->key);
dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
msg_slot->msg_slot_number, ch->partid, ch->number);
}
}
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
struct xpc_notify_mq_msg_uv *msg)
{
struct xpc_send_msg_slot_uv *msg_slot;
int entry = msg->hdr.msg_slot_number % ch->local_nentries;
msg_slot = &ch->sn.uv.send_msg_slots[entry];
BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
msg_slot->msg_slot_number += ch->local_nentries;
if (msg_slot->func != NULL)
xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
xpc_free_msg_slot_uv(ch, msg_slot);
}
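/*
 * Illustrative note on the ACK handling above (sketch, not driver code):
 * message-slot numbers grow monotonically while the send_msg_slots array
 * is reused modulo ch->local_nentries.  With local_nentries == 4, for
 * example, slot numbers 0, 1, 2, 3, 4, 5, ... map onto array entries
 * 0, 1, 2, 3, 0, 1, ..., and each ACK bumps the slot's number by
 * local_nentries so it matches the next message sent from that same entry.
 */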
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
struct xpc_notify_mq_msg_uv *msg)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct xpc_channel *ch;
struct xpc_channel_uv *ch_uv;
struct xpc_notify_mq_msg_uv *msg_slot;
unsigned long irq_flags;
int ch_number = msg->hdr.ch_number;
if (unlikely(ch_number >= part->nchannels)) {
dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
"channel number=0x%x in message from partid=%d\n",
ch_number, XPC_PARTID(part));
/* get hb checker to deactivate from the remote partition */
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = xpBadChannelNumber;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
wake_up_interruptible(&xpc_activate_IRQ_wq);
return;
}
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
return;
}
/* see if we're really dealing with an ACK for a previously sent msg */
if (msg->hdr.size == 0) {
xpc_handle_notify_mq_ack_uv(ch, msg);
xpc_msgqueue_deref(ch);
return;
}
/* we're dealing with a normal message sent via the notify_mq */
ch_uv = &ch->sn.uv;
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
/*
* If there is an existing idle kthread get it to deliver
* the payload, otherwise we'll have to get the channel mgr
* for this partition to create a kthread to do the delivery.
*/
if (atomic_read(&ch->kthreads_idle) > 0)
wake_up_nr(&ch->idle_wq, 1);
else
xpc_send_chctl_local_msgrequest_uv(part, ch->number);
}
xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
struct xpc_notify_mq_msg_uv *msg;
short partid;
struct xpc_partition *part;
while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
NULL) {
partid = msg->hdr.partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
"invalid partid=0x%x in message\n", partid);
} else {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
xpc_handle_notify_mq_msg_uv(part, msg);
xpc_part_deref(part);
}
}
gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
}
return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
struct xpc_channel *ch = &part->channels[ch_number];
int ndeliverable_payloads;
xpc_msgqueue_ref(ch);
ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
if (ndeliverable_payloads > 0 &&
(ch->flags & XPC_C_CONNECTED) &&
(ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
xpc_activate_kthreads(ch, ndeliverable_payloads);
}
xpc_msgqueue_deref(ch);
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
u16 payload_size, u8 notify_type, xpc_notify_func func,
void *key)
{
enum xp_retval ret = xpSuccess;
struct xpc_send_msg_slot_uv *msg_slot = NULL;
struct xpc_notify_mq_msg_uv *msg;
u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
size_t msg_size;
DBUG_ON(notify_type != XPC_N_CALL);
msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
if (msg_size > ch->entry_size)
return xpPayloadTooBig;
xpc_msgqueue_ref(ch);
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
goto out_1;
}
if (!(ch->flags & XPC_C_CONNECTED)) {
ret = xpNotConnected;
goto out_1;
}
ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
if (ret != xpSuccess)
goto out_1;
if (func != NULL) {
atomic_inc(&ch->n_to_notify);
msg_slot->key = key;
smp_wmb(); /* a non-NULL func must hit memory after the key */
msg_slot->func = func;
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
goto out_2;
}
}
msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
msg->hdr.partid = xp_partition_id;
msg->hdr.ch_number = ch->number;
msg->hdr.size = msg_size;
msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
memcpy(&msg->payload, payload, payload_size);
ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
msg_size);
if (ret == xpSuccess)
goto out_1;
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
if (func != NULL) {
/*
* Try to NULL the msg_slot's func field. If we fail, then
* xpc_notify_senders_of_disconnect_uv() beat us to it, in which
* case we need to pretend we succeeded in sending the message,
* since the user will already get a callout for the disconnect
* error from xpc_notify_senders_of_disconnect_uv(), and also
* getting an error returned here would confuse them. Additionally, since
* in this case the channel is being disconnected we don't need
* to put the msg_slot back on the free list.
*/
if (cmpxchg(&msg_slot->func, func, NULL) != func) {
ret = xpSuccess;
goto out_1;
}
msg_slot->key = NULL;
atomic_dec(&ch->n_to_notify);
}
xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
xpc_msgqueue_deref(ch);
return ret;
}
/*
* Tell the callers of xpc_send_notify() that the status of their payloads
* is unknown because the channel is now disconnecting.
*
* We don't worry about putting these msg_slots on the free list since the
* msg_slots themselves are about to be kfree'd.
*/
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
struct xpc_send_msg_slot_uv *msg_slot;
int entry;
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
for (entry = 0; entry < ch->local_nentries; entry++) {
if (atomic_read(&ch->n_to_notify) == 0)
break;
msg_slot = &ch->sn.uv.send_msg_slots[entry];
if (msg_slot->func != NULL)
xpc_notify_sender_uv(ch, msg_slot, ch->reason);
}
}
/*
* Get the next deliverable message's payload.
*/
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
struct xpc_fifo_entry_uv *entry;
struct xpc_notify_mq_msg_uv *msg;
void *payload = NULL;
if (!(ch->flags & XPC_C_DISCONNECTING)) {
entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
if (entry != NULL) {
msg = container_of(entry, struct xpc_notify_mq_msg_uv,
hdr.u.next);
payload = &msg->payload;
}
}
return payload;
}
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
struct xpc_notify_mq_msg_uv *msg;
enum xp_retval ret;
msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
/* return an ACK to the sender of this message */
msg->hdr.partid = xp_partition_id;
msg->hdr.size = 0; /* size of zero indicates this is an ACK */
ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
static const struct xpc_arch_operations xpc_arch_ops_uv = {
.setup_partitions = xpc_setup_partitions_uv,
.teardown_partitions = xpc_teardown_partitions_uv,
.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
.setup_rsvd_page = xpc_setup_rsvd_page_uv,
.allow_hb = xpc_allow_hb_uv,
.disallow_hb = xpc_disallow_hb_uv,
.disallow_all_hbs = xpc_disallow_all_hbs_uv,
.increment_heartbeat = xpc_increment_heartbeat_uv,
.offline_heartbeat = xpc_offline_heartbeat_uv,
.online_heartbeat = xpc_online_heartbeat_uv,
.heartbeat_init = xpc_heartbeat_init_uv,
.heartbeat_exit = xpc_heartbeat_exit_uv,
.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
.request_partition_activation =
xpc_request_partition_activation_uv,
.request_partition_reactivation =
xpc_request_partition_reactivation_uv,
.request_partition_deactivation =
xpc_request_partition_deactivation_uv,
.cancel_partition_deactivation_request =
xpc_cancel_partition_deactivation_request_uv,
.setup_ch_structures = xpc_setup_ch_structures_uv,
.teardown_ch_structures = xpc_teardown_ch_structures_uv,
.make_first_contact = xpc_make_first_contact_uv,
.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
.send_chctl_closereply = xpc_send_chctl_closereply_uv,
.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
.send_chctl_openreply = xpc_send_chctl_openreply_uv,
.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
.setup_msg_structures = xpc_setup_msg_structures_uv,
.teardown_msg_structures = xpc_teardown_msg_structures_uv,
.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
.partition_engaged = xpc_partition_engaged_uv,
.any_partition_engaged = xpc_any_partition_engaged_uv,
.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
.send_payload = xpc_send_payload_uv,
.get_deliverable_payload = xpc_get_deliverable_payload_uv,
.received_payload = xpc_received_payload_uv,
.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
static int
xpc_init_mq_node(int nid)
{
int cpu;
cpus_read_lock();
for_each_cpu(cpu, cpumask_of_node(nid)) {
xpc_activate_mq_uv =
xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
XPC_ACTIVATE_IRQ_NAME,
xpc_handle_activate_IRQ_uv);
if (!IS_ERR(xpc_activate_mq_uv))
break;
}
if (IS_ERR(xpc_activate_mq_uv)) {
cpus_read_unlock();
return PTR_ERR(xpc_activate_mq_uv);
}
for_each_cpu(cpu, cpumask_of_node(nid)) {
xpc_notify_mq_uv =
xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
XPC_NOTIFY_IRQ_NAME,
xpc_handle_notify_IRQ_uv);
if (!IS_ERR(xpc_notify_mq_uv))
break;
}
if (IS_ERR(xpc_notify_mq_uv)) {
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
cpus_read_unlock();
return PTR_ERR(xpc_notify_mq_uv);
}
cpus_read_unlock();
return 0;
}
int
xpc_init_uv(void)
{
int nid;
int ret = 0;
xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
XPC_MSG_HDR_MAX_SIZE);
return -E2BIG;
}
if (xpc_mq_node < 0)
for_each_online_node(nid) {
ret = xpc_init_mq_node(nid);
if (!ret)
break;
}
else
ret = xpc_init_mq_node(xpc_mq_node);
if (ret < 0)
dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
-ret);
return ret;
}
void
xpc_exit_uv(void)
{
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
| linux-master | drivers/misc/sgi-xp/xpc_uv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Silicon Labs C2 port Linux support for Eurotech Duramar 2150
*
* Copyright (c) 2008 Rodolfo Giometti <[email protected]>
* Copyright (c) 2008 Eurotech S.p.A. <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/c2port.h>
#define DATA_PORT 0x325
#define DIR_PORT 0x326
#define C2D (1 << 0)
#define C2CK (1 << 1)
static DEFINE_MUTEX(update_lock);
/*
* C2 port operations
*/
static void duramar2150_c2port_access(struct c2port_device *dev, int status)
{
u8 v;
mutex_lock(&update_lock);
v = inb(DIR_PORT);
/* 0 = input, 1 = output */
if (status)
outb(v | (C2D | C2CK), DIR_PORT);
else
/* When access is "off" it is important that both lines are set
* as inputs or hi-impedance */
outb(v & ~(C2D | C2CK), DIR_PORT);
mutex_unlock(&update_lock);
}
static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir)
{
u8 v;
mutex_lock(&update_lock);
v = inb(DIR_PORT);
if (dir)
outb(v & ~C2D, DIR_PORT);
else
outb(v | C2D, DIR_PORT);
mutex_unlock(&update_lock);
}
static int duramar2150_c2port_c2d_get(struct c2port_device *dev)
{
return inb(DATA_PORT) & C2D;
}
static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status)
{
u8 v;
mutex_lock(&update_lock);
v = inb(DATA_PORT);
if (status)
outb(v | C2D, DATA_PORT);
else
outb(v & ~C2D, DATA_PORT);
mutex_unlock(&update_lock);
}
static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status)
{
u8 v;
mutex_lock(&update_lock);
v = inb(DATA_PORT);
if (status)
outb(v | C2CK, DATA_PORT);
else
outb(v & ~C2CK, DATA_PORT);
mutex_unlock(&update_lock);
}
static struct c2port_ops duramar2150_c2port_ops = {
.block_size = 512, /* bytes */
.blocks_num = 30, /* total flash size: 15360 bytes */
.access = duramar2150_c2port_access,
.c2d_dir = duramar2150_c2port_c2d_dir,
.c2d_get = duramar2150_c2port_c2d_get,
.c2d_set = duramar2150_c2port_c2d_set,
.c2ck_set = duramar2150_c2port_c2ck_set,
};
static struct c2port_device *duramar2150_c2port_dev;
/*
* Module stuff
*/
static int __init duramar2150_c2port_init(void)
{
struct resource *res;
int ret = 0;
res = request_region(0x325, 2, "c2port");
if (!res)
return -EBUSY;
duramar2150_c2port_dev = c2port_device_register("uc",
&duramar2150_c2port_ops, NULL);
if (IS_ERR(duramar2150_c2port_dev)) {
ret = PTR_ERR(duramar2150_c2port_dev);
goto free_region;
}
return 0;
free_region:
release_region(0x325, 2);
return ret;
}
static void __exit duramar2150_c2port_exit(void)
{
/* Set up the GPIOs as inputs by default (access = 0) */
duramar2150_c2port_access(duramar2150_c2port_dev, 0);
c2port_device_unregister(duramar2150_c2port_dev);
release_region(0x325, 2);
}
module_init(duramar2150_c2port_init);
module_exit(duramar2150_c2port_exit);
MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/c2port/c2port-duramar2150.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Silicon Labs C2 port core Linux support
*
* Copyright (c) 2007 Rodolfo Giometti <[email protected]>
* Copyright (c) 2007 Eurotech S.p.A. <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/c2port.h>
#define DRIVER_NAME "c2port"
#define DRIVER_VERSION "0.51.0"
static DEFINE_SPINLOCK(c2port_idr_lock);
static DEFINE_IDR(c2port_idr);
/*
* Local variables
*/
static struct class *c2port_class;
/*
* C2 registers & commands defines
*/
/* C2 registers */
#define C2PORT_DEVICEID 0x00
#define C2PORT_REVID 0x01
#define C2PORT_FPCTL 0x02
#define C2PORT_FPDAT 0xB4
/* C2 interface commands */
#define C2PORT_GET_VERSION 0x01
#define C2PORT_DEVICE_ERASE 0x03
#define C2PORT_BLOCK_READ 0x06
#define C2PORT_BLOCK_WRITE 0x07
#define C2PORT_PAGE_ERASE 0x08
/* C2 status return codes */
#define C2PORT_INVALID_COMMAND 0x00
#define C2PORT_COMMAND_FAILED 0x02
#define C2PORT_COMMAND_OK 0x0d
/*
* C2 port low level signal managements
*/
static void c2port_reset(struct c2port_device *dev)
{
struct c2port_ops *ops = dev->ops;
/* To reset the device we have to keep the clock line low for at least
* 20us.
*/
local_irq_disable();
ops->c2ck_set(dev, 0);
udelay(25);
ops->c2ck_set(dev, 1);
local_irq_enable();
udelay(1);
}
static void c2port_strobe_ck(struct c2port_device *dev)
{
struct c2port_ops *ops = dev->ops;
/* During the hi-low-hi transition we disable local IRQs to avoid
* interruptions, since the C2 port specification says that it must be
* shorter than 5us, otherwise the microcontroller may consider
* it as a reset signal!
*/
local_irq_disable();
ops->c2ck_set(dev, 0);
udelay(1);
ops->c2ck_set(dev, 1);
local_irq_enable();
udelay(1);
}
/*
* C2 port basic functions
*/
static void c2port_write_ar(struct c2port_device *dev, u8 addr)
{
struct c2port_ops *ops = dev->ops;
int i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (11b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
/* ADDRESS field */
for (i = 0; i < 8; i++) {
ops->c2d_set(dev, addr & 0x01);
c2port_strobe_ck(dev);
addr >>= 1;
}
/* STOP field */
ops->c2d_dir(dev, 1);
c2port_strobe_ck(dev);
}
static int c2port_read_ar(struct c2port_device *dev, u8 *addr)
{
struct c2port_ops *ops = dev->ops;
int i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (10b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
/* ADDRESS field */
ops->c2d_dir(dev, 1);
*addr = 0;
for (i = 0; i < 8; i++) {
*addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
*addr |= 0x80;
}
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_write_dr(struct c2port_device *dev, u8 data)
{
struct c2port_ops *ops = dev->ops;
int timeout, i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (01b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 1);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* LENGTH field (00b, LSB first -> 1 byte) */
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* DATA field */
for (i = 0; i < 8; i++) {
ops->c2d_set(dev, data & 0x01);
c2port_strobe_ck(dev);
data >>= 1;
}
/* WAIT field */
ops->c2d_dir(dev, 1);
timeout = 20;
do {
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_read_dr(struct c2port_device *dev, u8 *data)
{
struct c2port_ops *ops = dev->ops;
int timeout, i;
/* START field */
c2port_strobe_ck(dev);
/* INS field (00b, LSB first) */
ops->c2d_dir(dev, 0);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* LENGTH field (00b, LSB first -> 1 byte) */
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
ops->c2d_set(dev, 0);
c2port_strobe_ck(dev);
/* WAIT field */
ops->c2d_dir(dev, 1);
timeout = 20;
do {
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
/* DATA field */
*data = 0;
for (i = 0; i < 8; i++) {
*data >>= 1; /* shift in 8-bit DATA field LSB first */
c2port_strobe_ck(dev);
if (ops->c2d_get(dev))
*data |= 0x80;
}
/* STOP field */
c2port_strobe_ck(dev);
return 0;
}
static int c2port_poll_in_busy(struct c2port_device *dev)
{
u8 addr;
int ret, timeout = 20;
do {
ret = (c2port_read_ar(dev, &addr));
if (ret < 0)
return -EIO;
if (!(addr & 0x02))
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
return 0;
}
static int c2port_poll_out_ready(struct c2port_device *dev)
{
u8 addr;
int ret, timeout = 10000; /* erasing flash needs a long time... */
do {
ret = (c2port_read_ar(dev, &addr));
if (ret < 0)
return -EIO;
if (addr & 0x01)
break;
udelay(1);
} while (--timeout > 0);
if (timeout == 0)
return -EIO;
return 0;
}
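/*
 * Illustrative sketch only, not used by the driver: every flash command
 * implemented below (device erase, block read, block write) opens with the
 * same handshake against the programming-interface data register.  This
 * hypothetical helper merely condenses that common sequence; the real code
 * inlines it in each function.
 */
static int __maybe_unused c2port_start_command_sketch(struct c2port_device *dev,
						      u8 command)
{
	u8 status;
	int ret;

	/* Target the C2 flash programming data register */
	c2port_write_ar(dev, C2PORT_FPDAT);

	/* Send the command and wait for the input acknowledge */
	ret = c2port_write_dr(dev, command);
	if (ret < 0)
		return ret;
	ret = c2port_poll_in_busy(dev);
	if (ret < 0)
		return ret;

	/* Wait for and read back the programming interface status */
	ret = c2port_poll_out_ready(dev);
	if (ret < 0)
		return ret;
	ret = c2port_read_dr(dev, &status);
	if (ret < 0)
		return ret;

	return status == C2PORT_COMMAND_OK ? 0 : -EBUSY;
}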
/*
* sysfs methods
*/
static ssize_t c2port_show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", c2dev->name);
}
static DEVICE_ATTR(name, 0444, c2port_show_name, NULL);
static ssize_t c2port_show_flash_blocks_num(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->blocks_num);
}
static DEVICE_ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL);
static ssize_t c2port_show_flash_block_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->block_size);
}
static DEVICE_ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL);
static ssize_t c2port_show_flash_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
}
static DEVICE_ATTR(flash_size, 0444, c2port_show_flash_size, NULL);
static ssize_t access_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->access);
}
static ssize_t access_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
int status, ret;
ret = sscanf(buf, "%d", &status);
if (ret != 1)
return -EINVAL;
mutex_lock(&c2dev->mutex);
c2dev->access = !!status;
/* If access is "on", the clock should be HIGH _before_ setting the line
* as an output, and the data line should be set as an INPUT anyway */
if (c2dev->access)
ops->c2ck_set(c2dev, 1);
ops->access(c2dev, c2dev->access);
if (c2dev->access)
ops->c2d_dir(c2dev, 1);
mutex_unlock(&c2dev->mutex);
return count;
}
static DEVICE_ATTR_RW(access);
static ssize_t c2port_store_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
c2port_reset(c2dev);
c2dev->flash_access = 0;
mutex_unlock(&c2dev->mutex);
return count;
}
static DEVICE_ATTR(reset, 0200, NULL, c2port_store_reset);
static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
{
u8 data;
int ret;
/* Select DEVICEID register for C2 data register accesses */
c2port_write_ar(dev, C2PORT_DEVICEID);
/* Read and return the device ID register */
ret = c2port_read_dr(dev, &data);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", data);
}
static ssize_t c2port_show_dev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
ssize_t ret;
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_show_dev_id(c2dev, buf);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(dev, "cannot read from %s\n", c2dev->name);
return ret;
}
static DEVICE_ATTR(dev_id, 0444, c2port_show_dev_id, NULL);
static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
{
u8 data;
int ret;
/* Select REVID register for C2 data register accesses */
c2port_write_ar(dev, C2PORT_REVID);
/* Read and return the revision ID register */
ret = c2port_read_dr(dev, &data);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", data);
}
static ssize_t c2port_show_rev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
ssize_t ret;
/* Check the device access status */
if (!c2dev->access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_show_rev_id(c2dev, buf);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name);
return ret;
}
static DEVICE_ATTR(rev_id, 0444, c2port_show_rev_id, NULL);
static ssize_t c2port_show_flash_access(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->flash_access);
}
static ssize_t __c2port_store_flash_access(struct c2port_device *dev,
int status)
{
int ret;
/* Check the device access status */
if (!dev->access)
return -EBUSY;
dev->flash_access = !!status;
/* If flash_access is off we have nothing to do... */
if (dev->flash_access == 0)
return 0;
/* Target the C2 flash programming control register for C2 data
* register access */
c2port_write_ar(dev, C2PORT_FPCTL);
/* Write the first keycode to enable C2 Flash programming */
ret = c2port_write_dr(dev, 0x02);
if (ret < 0)
return ret;
/* Write the second keycode to enable C2 Flash programming */
ret = c2port_write_dr(dev, 0x01);
if (ret < 0)
return ret;
/* Delay for at least 20ms to ensure the target is ready for
* C2 flash programming */
mdelay(25);
return 0;
}
static ssize_t c2port_store_flash_access(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
int status;
ssize_t ret;
ret = sscanf(buf, "%d", &status);
if (ret != 1)
return -EINVAL;
mutex_lock(&c2dev->mutex);
ret = __c2port_store_flash_access(c2dev, status);
mutex_unlock(&c2dev->mutex);
if (ret < 0) {
dev_err(c2dev->dev, "cannot enable %s flash programming\n",
c2dev->name);
return ret;
}
return count;
}
static DEVICE_ATTR(flash_access, 0644, c2port_show_flash_access,
c2port_store_flash_access);
static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
{
u8 status;
int ret;
/* Target the C2 flash programming data register for C2 data register
* access.
*/
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send device erase command */
c2port_write_dr(dev, C2PORT_DEVICE_ERASE);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send a three-byte arming sequence to enable the device erase.
* If the sequence is not received correctly, the command will be
* ignored.
* Sequence is: 0xde, 0xad, 0xa5.
*/
c2port_write_dr(dev, 0xde);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
c2port_write_dr(dev, 0xad);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
c2port_write_dr(dev, 0xa5);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
return 0;
}
static ssize_t c2port_store_flash_erase(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
int ret;
/* Check the device and flash access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_write_flash_erase(c2dev);
mutex_unlock(&c2dev->mutex);
if (ret < 0) {
dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name);
return ret;
}
return count;
}
static DEVICE_ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase);
static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
{
struct c2port_ops *ops = dev->ops;
u8 status, nread = 128;
int i, ret;
/* Check for flash end */
if (offset >= ops->block_size * ops->blocks_num)
return 0;
if (ops->block_size * ops->blocks_num - offset < nread)
nread = ops->block_size * ops->blocks_num - offset;
if (count < nread)
nread = count;
if (nread == 0)
return nread;
/* Target the C2 flash programming data register for C2 data register
* access */
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send flash block read command */
c2port_write_dr(dev, C2PORT_BLOCK_READ);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send address high byte */
c2port_write_dr(dev, offset >> 8);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address low byte */
c2port_write_dr(dev, offset & 0x00ff);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address block size */
c2port_write_dr(dev, nread);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before reading FLASH block */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Read flash block */
for (i = 0; i < nread; i++) {
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
ret = c2port_read_dr(dev, buffer+i);
if (ret < 0)
return ret;
}
return nread;
}
static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
ssize_t ret;
/* Check the device and flash access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_read_flash_data(c2dev, buffer, offset, count);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name);
return ret;
}
static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
{
struct c2port_ops *ops = dev->ops;
u8 status, nwrite = 128;
int i, ret;
if (nwrite > count)
nwrite = count;
if (ops->block_size * ops->blocks_num - offset < nwrite)
nwrite = ops->block_size * ops->blocks_num - offset;
/* Check for flash end */
if (offset >= ops->block_size * ops->blocks_num)
return -EINVAL;
/* Target the C2 flash programming data register for C2 data register
* access */
c2port_write_ar(dev, C2PORT_FPDAT);
/* Send flash block write command */
c2port_write_dr(dev, C2PORT_BLOCK_WRITE);
/* Wait for input acknowledge */
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before starting FLASH access sequence */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Send address high byte */
c2port_write_dr(dev, offset >> 8);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address low byte */
c2port_write_dr(dev, offset & 0x00ff);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Send address block size */
c2port_write_dr(dev, nwrite);
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
/* Should check status before writing FLASH block */
/* Wait for status information */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
/* Read flash programming interface status */
ret = c2port_read_dr(dev, &status);
if (ret < 0)
return ret;
if (status != C2PORT_COMMAND_OK)
return -EBUSY;
/* Write flash block */
for (i = 0; i < nwrite; i++) {
ret = c2port_write_dr(dev, *(buffer+i));
if (ret < 0)
return ret;
ret = c2port_poll_in_busy(dev);
if (ret < 0)
return ret;
}
/* Wait for last flash write to complete */
ret = c2port_poll_out_ready(dev);
if (ret < 0)
return ret;
return nwrite;
}
static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
int ret;
/* Check the device access status */
if (!c2dev->access || !c2dev->flash_access)
return -EBUSY;
mutex_lock(&c2dev->mutex);
ret = __c2port_write_flash_data(c2dev, buffer, offset, count);
mutex_unlock(&c2dev->mutex);
if (ret < 0)
dev_err(c2dev->dev, "cannot write %s flash\n", c2dev->name);
return ret;
}
/* size is computed at run-time */
static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
c2port_write_flash_data, 0);
/*
* Class attributes
*/
static struct attribute *c2port_attrs[] = {
&dev_attr_name.attr,
&dev_attr_flash_blocks_num.attr,
&dev_attr_flash_block_size.attr,
&dev_attr_flash_size.attr,
&dev_attr_access.attr,
&dev_attr_reset.attr,
&dev_attr_dev_id.attr,
&dev_attr_rev_id.attr,
&dev_attr_flash_access.attr,
&dev_attr_flash_erase.attr,
NULL,
};
static struct bin_attribute *c2port_bin_attrs[] = {
&bin_attr_flash_data,
NULL,
};
static const struct attribute_group c2port_group = {
.attrs = c2port_attrs,
.bin_attrs = c2port_bin_attrs,
};
static const struct attribute_group *c2port_groups[] = {
&c2port_group,
NULL,
};
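/*
 * Illustrative userspace sketch only (guarded out with #if 0 since it is
 * not kernel code): the sysfs attributes registered above are typically
 * driven like this to dump a target's flash.  The path assumes the first
 * registered port, which device_create() below names "c2port0"; error
 * handling is kept minimal.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int c2port0_dump_flash(int out_fd)
{
	char buf[128];
	ssize_t n;
	int fd;

	fd = open("/sys/class/c2port/c2port0/access", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1)	/* turn the C2 bus on */
		return -1;
	close(fd);

	fd = open("/sys/class/c2port/c2port0/flash_access", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1)	/* enable flash programming */
		return -1;
	close(fd);

	fd = open("/sys/class/c2port/c2port0/flash_data", O_RDONLY);
	if (fd < 0)
		return -1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		if (write(out_fd, buf, n) != n)
			break;
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif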
/*
* Exported functions
*/
struct c2port_device *c2port_device_register(char *name,
struct c2port_ops *ops, void *devdata)
{
struct c2port_device *c2dev;
int ret;
if (unlikely(!ops) || unlikely(!ops->access) || \
unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
return ERR_PTR(-EINVAL);
c2dev = kzalloc(sizeof(struct c2port_device), GFP_KERNEL);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
idr_preload(GFP_KERNEL);
spin_lock_irq(&c2port_idr_lock);
ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT);
spin_unlock_irq(&c2port_idr_lock);
idr_preload_end();
if (ret < 0)
goto error_idr_alloc;
c2dev->id = ret;
bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
if (IS_ERR(c2dev->dev)) {
ret = PTR_ERR(c2dev->dev);
goto error_device_create;
}
dev_set_drvdata(c2dev->dev, c2dev);
strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
/* By default C2 port access is off */
c2dev->access = c2dev->flash_access = 0;
ops->access(c2dev, 0);
dev_info(c2dev->dev, "C2 port %s added\n", name);
dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes "
"(%d bytes total)\n",
name, ops->blocks_num, ops->block_size,
ops->blocks_num * ops->block_size);
return c2dev;
error_device_create:
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
error_idr_alloc:
kfree(c2dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(c2port_device_register);
void c2port_device_unregister(struct c2port_device *c2dev)
{
if (!c2dev)
return;
dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
device_destroy(c2port_class, c2dev->id);
kfree(c2dev);
}
EXPORT_SYMBOL(c2port_device_unregister);
/*
* Module stuff
*/
static int __init c2port_init(void)
{
printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION
" - (C) 2007 Rodolfo Giometti\n");
c2port_class = class_create("c2port");
if (IS_ERR(c2port_class)) {
printk(KERN_ERR "c2port: failed to allocate class\n");
return PTR_ERR(c2port_class);
}
c2port_class->dev_groups = c2port_groups;
return 0;
}
static void __exit c2port_exit(void)
{
class_destroy(c2port_class);
}
module_init(c2port_init);
module_exit(c2port_exit);
MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_DESCRIPTION("Silicon Labs C2 port support v. " DRIVER_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/c2port/core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Authors: Max Asböck <[email protected]>
* Vernon Mauery <[email protected]>
*/
/* Remote mouse and keyboard event handling functions */
#include <linux/pci.h>
#include "ibmasm.h"
#include "remote.h"
#define MOUSE_X_MAX 1600
#define MOUSE_Y_MAX 1200
static const unsigned short xlate_high[XLATE_SIZE] = {
[KEY_SYM_ENTER & 0xff] = KEY_ENTER,
[KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
[KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
[KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS,
[KEY_SYM_KPDOT & 0xff] = KEY_KPDOT,
[KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS,
[KEY_SYM_KP0 & 0xff] = KEY_KP0,
[KEY_SYM_KP1 & 0xff] = KEY_KP1,
[KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2,
[KEY_SYM_KP3 & 0xff] = KEY_KP3,
[KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4,
[KEY_SYM_KP5 & 0xff] = KEY_KP5,
[KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6,
[KEY_SYM_KP7 & 0xff] = KEY_KP7,
[KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8,
[KEY_SYM_KP9 & 0xff] = KEY_KP9,
[KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE,
[KEY_SYM_TAB & 0xff] = KEY_TAB,
[KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL,
[KEY_SYM_ALT & 0xff] = KEY_LEFTALT,
[KEY_SYM_INSERT & 0xff] = KEY_INSERT,
[KEY_SYM_DELETE & 0xff] = KEY_DELETE,
[KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT,
[KEY_SYM_UARROW & 0xff] = KEY_UP,
[KEY_SYM_DARROW & 0xff] = KEY_DOWN,
[KEY_SYM_LARROW & 0xff] = KEY_LEFT,
[KEY_SYM_RARROW & 0xff] = KEY_RIGHT,
[KEY_SYM_ESCAPE & 0xff] = KEY_ESC,
[KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP,
[KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN,
[KEY_SYM_HOME & 0xff] = KEY_HOME,
[KEY_SYM_END & 0xff] = KEY_END,
[KEY_SYM_F1 & 0xff] = KEY_F1,
[KEY_SYM_F2 & 0xff] = KEY_F2,
[KEY_SYM_F3 & 0xff] = KEY_F3,
[KEY_SYM_F4 & 0xff] = KEY_F4,
[KEY_SYM_F5 & 0xff] = KEY_F5,
[KEY_SYM_F6 & 0xff] = KEY_F6,
[KEY_SYM_F7 & 0xff] = KEY_F7,
[KEY_SYM_F8 & 0xff] = KEY_F8,
[KEY_SYM_F9 & 0xff] = KEY_F9,
[KEY_SYM_F10 & 0xff] = KEY_F10,
[KEY_SYM_F11 & 0xff] = KEY_F11,
[KEY_SYM_F12 & 0xff] = KEY_F12,
[KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK,
[KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
[KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
};
static const unsigned short xlate[XLATE_SIZE] = {
[NO_KEYCODE] = KEY_RESERVED,
[KEY_SYM_SPACE] = KEY_SPACE,
[KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
[KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1,
[KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2,
[KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3,
[KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4,
[KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5,
[KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6,
[KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7,
[KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8,
[KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9,
[KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0,
[KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS,
[KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL,
[KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE,
[KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE,
[KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH,
[KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE,
[KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON,
[KEY_SYM_COMMA] = KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA,
[KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT,
[KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = KEY_SLASH,
[KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A,
[KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B,
[KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C,
[KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D,
[KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E,
[KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F,
[KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G,
[KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H,
[KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I,
[KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J,
[KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K,
[KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L,
[KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M,
[KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N,
[KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O,
[KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P,
[KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q,
[KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R,
[KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S,
[KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T,
[KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U,
[KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V,
[KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W,
[KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X,
[KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y,
[KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
};
static void print_input(struct remote_input *input)
{
if (input->type == INPUT_TYPE_MOUSE) {
unsigned char buttons = input->mouse_buttons;
dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
input->data.mouse.x, input->data.mouse.y,
(buttons) ? " -- buttons:" : "",
(buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
(buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
(buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
);
} else {
dbg("remote keypress (code, flag, down):"
"%d (0x%x) [0x%x] [0x%x]\n",
input->data.keyboard.key_code,
input->data.keyboard.key_code,
input->data.keyboard.key_flag,
input->data.keyboard.key_down
);
}
}
static void send_mouse_event(struct input_dev *dev, struct remote_input *input)
{
unsigned char buttons = input->mouse_buttons;
input_report_abs(dev, ABS_X, input->data.mouse.x);
input_report_abs(dev, ABS_Y, input->data.mouse.y);
input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT);
input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE);
input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT);
input_sync(dev);
}
static void send_keyboard_event(struct input_dev *dev,
struct remote_input *input)
{
unsigned int key;
unsigned short code = input->data.keyboard.key_code;
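	/*
	 * Key codes with the high byte set (e.g. keypad, arrow and function
	 * keys) are translated through xlate_high, indexed by the low byte
	 * only; all other codes index the plain xlate table directly.
	 */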
if (code & 0xff00)
key = xlate_high[code & 0xff];
else
key = xlate[code];
input_report_key(dev, key, input->data.keyboard.key_down);
input_sync(dev);
}
void ibmasm_handle_mouse_interrupt(struct service_processor *sp)
{
unsigned long reader;
unsigned long writer;
struct remote_input input;
reader = get_queue_reader(sp);
writer = get_queue_writer(sp);
while (reader != writer) {
memcpy_fromio(&input, get_queue_entry(sp, reader),
sizeof(struct remote_input));
print_input(&input);
if (input.type == INPUT_TYPE_MOUSE) {
send_mouse_event(sp->remote.mouse_dev, &input);
} else if (input.type == INPUT_TYPE_KEYBOARD) {
send_keyboard_event(sp->remote.keybd_dev, &input);
} else
break;
reader = advance_queue_reader(sp, reader);
writer = get_queue_writer(sp);
}
}
int ibmasm_init_remote_input_dev(struct service_processor *sp)
{
/* set up the mouse input device */
struct input_dev *mouse_dev, *keybd_dev;
struct pci_dev *pdev = to_pci_dev(sp->dev);
int error = -ENOMEM;
int i;
sp->remote.mouse_dev = mouse_dev = input_allocate_device();
sp->remote.keybd_dev = keybd_dev = input_allocate_device();
if (!mouse_dev || !keybd_dev)
goto err_free_devices;
mouse_dev->id.bustype = BUS_PCI;
mouse_dev->id.vendor = pdev->vendor;
mouse_dev->id.product = pdev->device;
mouse_dev->id.version = 1;
mouse_dev->dev.parent = sp->dev;
mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
set_bit(BTN_TOUCH, mouse_dev->keybit);
mouse_dev->name = "ibmasm RSA I remote mouse";
input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
keybd_dev->id.bustype = BUS_PCI;
keybd_dev->id.vendor = pdev->vendor;
keybd_dev->id.product = pdev->device;
keybd_dev->id.version = 2;
keybd_dev->dev.parent = sp->dev;
keybd_dev->evbit[0] = BIT_MASK(EV_KEY);
keybd_dev->name = "ibmasm RSA I remote keyboard";
for (i = 0; i < XLATE_SIZE; i++) {
if (xlate_high[i])
set_bit(xlate_high[i], keybd_dev->keybit);
if (xlate[i])
set_bit(xlate[i], keybd_dev->keybit);
}
error = input_register_device(mouse_dev);
if (error)
goto err_free_devices;
error = input_register_device(keybd_dev);
if (error)
goto err_unregister_mouse_dev;
enable_mouse_interrupts(sp);
printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number);
return 0;
err_unregister_mouse_dev:
input_unregister_device(mouse_dev);
mouse_dev = NULL; /* so we don't try to free it again below */
err_free_devices:
input_free_device(mouse_dev);
input_free_device(keybd_dev);
return error;
}
void ibmasm_free_remote_input_dev(struct service_processor *sp)
{
disable_mouse_interrupts(sp);
input_unregister_device(sp->remote.mouse_dev);
input_unregister_device(sp->remote.keybd_dev);
}
| linux-master | drivers/misc/ibmasm/remote.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*
* This driver is based on code originally written by Pete Reynolds
* and others.
*/
/*
* The ASM device driver does the following things:
*
* 1) When loaded it sends a message to the service processor,
 * indicating that an OS is running. This causes the service processor
* to send periodic heartbeats to the OS.
*
* 2) Answers the periodic heartbeats sent by the service processor.
* Failure to do so would result in system reboot.
*
* 3) Acts as a pass through for dot commands sent from user applications.
* The interface for this is the ibmasmfs file system.
*
* 4) Allows user applications to register for event notification. Events
* are sent to the driver through interrupts. They can be read from user
* space through the ibmasmfs file system.
*
* 5) Allows user space applications to send heartbeats to the service
* processor (aka reverse heartbeats). Again this happens through ibmasmfs.
*
* 6) Handles remote mouse and keyboard event interrupts and makes them
* available to user applications through ibmasmfs.
*
*/
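/*
 * Usage note (illustrative, not taken from this file): the debug level can
 * be set at load time, e.g. "modprobe ibmasm ibmasm_debug=1", or changed at
 * runtime through the writable ibmasm_debug module parameter.  The command,
 * event and heartbeat files described above only appear after ibmasmfs has
 * been mounted from user space (see ibmasmfs.c).
 */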
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include "ibmasm.h"
#include "lowlevel.h"
#include "remote.h"
int ibmasm_debug = 0;
module_param(ibmasm_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ibmasm_debug, " Set debug mode on or off");
static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result;
struct service_processor *sp;
if ((result = pci_enable_device(pdev))) {
dev_err(&pdev->dev, "Failed to enable PCI device\n");
return result;
}
if ((result = pci_request_regions(pdev, DRIVER_NAME))) {
dev_err(&pdev->dev, "Failed to allocate PCI resources\n");
goto error_resources;
}
/* vnc client won't work without bus-mastering */
pci_set_master(pdev);
sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
if (sp == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
result = -ENOMEM;
goto error_kmalloc;
}
spin_lock_init(&sp->lock);
INIT_LIST_HEAD(&sp->command_queue);
pci_set_drvdata(pdev, (void *)sp);
sp->dev = &pdev->dev;
sp->number = pdev->bus->number;
snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);
result = ibmasm_event_buffer_init(sp);
if (result) {
dev_err(sp->dev, "Failed to allocate event buffer\n");
goto error_eventbuffer;
}
result = ibmasm_heartbeat_init(sp);
if (result) {
dev_err(sp->dev, "Failed to allocate heartbeat command\n");
goto error_heartbeat;
}
sp->irq = pdev->irq;
sp->base_address = pci_ioremap_bar(pdev, 0);
if (!sp->base_address) {
dev_err(sp->dev, "Failed to ioremap pci memory\n");
result = -ENODEV;
goto error_ioremap;
}
result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp);
if (result) {
dev_err(sp->dev, "Failed to register interrupt handler\n");
goto error_request_irq;
}
enable_sp_interrupts(sp->base_address);
result = ibmasm_init_remote_input_dev(sp);
if (result) {
dev_err(sp->dev, "Failed to initialize remote queue\n");
goto error_init_remote;
}
result = ibmasm_send_driver_vpd(sp);
if (result) {
dev_err(sp->dev, "Failed to send driver VPD to service processor\n");
goto error_send_message;
}
result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP);
if (result) {
dev_err(sp->dev, "Failed to send OS state to service processor\n");
goto error_send_message;
}
ibmasmfs_add_sp(sp);
ibmasm_register_uart(sp);
return 0;
error_send_message:
ibmasm_free_remote_input_dev(sp);
error_init_remote:
disable_sp_interrupts(sp->base_address);
free_irq(sp->irq, (void *)sp);
error_request_irq:
iounmap(sp->base_address);
error_ioremap:
ibmasm_heartbeat_exit(sp);
error_heartbeat:
ibmasm_event_buffer_exit(sp);
error_eventbuffer:
kfree(sp);
error_kmalloc:
pci_release_regions(pdev);
error_resources:
pci_disable_device(pdev);
return result;
}
static void ibmasm_remove_one(struct pci_dev *pdev)
{
struct service_processor *sp = pci_get_drvdata(pdev);
dbg("Unregistering UART\n");
ibmasm_unregister_uart(sp);
dbg("Sending OS down message\n");
if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
err("failed to get response to 'Send OS State' command\n");
dbg("Disabling heartbeats\n");
ibmasm_heartbeat_exit(sp);
dbg("Disabling interrupts\n");
disable_sp_interrupts(sp->base_address);
dbg("Freeing SP irq\n");
free_irq(sp->irq, (void *)sp);
dbg("Cleaning up\n");
ibmasm_free_remote_input_dev(sp);
iounmap(sp->base_address);
ibmasm_event_buffer_exit(sp);
kfree(sp);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static struct pci_device_id ibmasm_pci_table[] =
{
{ PCI_DEVICE(VENDORID_IBM, DEVICEID_RSA) },
{},
};
static struct pci_driver ibmasm_driver = {
.name = DRIVER_NAME,
.id_table = ibmasm_pci_table,
.probe = ibmasm_init_one,
.remove = ibmasm_remove_one,
};
static void __exit ibmasm_exit (void)
{
ibmasm_unregister_panic_notifier();
ibmasmfs_unregister();
pci_unregister_driver(&ibmasm_driver);
info(DRIVER_DESC " version " DRIVER_VERSION " unloaded");
}
static int __init ibmasm_init(void)
{
int result = pci_register_driver(&ibmasm_driver);
if (result)
return result;
result = ibmasmfs_register();
if (result) {
pci_unregister_driver(&ibmasm_driver);
err("Failed to register ibmasmfs file system");
return result;
}
ibmasm_register_panic_notifier();
info(DRIVER_DESC " version " DRIVER_VERSION " loaded");
return 0;
}
module_init(ibmasm_init);
module_exit(ibmasm_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ibmasm_pci_table);
| linux-master | drivers/misc/ibmasm/module.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include "ibmasm.h"
#include "lowlevel.h"
static void exec_next_command(struct service_processor *sp);
static atomic_t command_count = ATOMIC_INIT(0);
struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size)
{
struct command *cmd;
if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
return NULL;
cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
if (cmd == NULL)
return NULL;
cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
if (cmd->buffer == NULL) {
kfree(cmd);
return NULL;
}
cmd->buffer_size = buffer_size;
kref_init(&cmd->kref);
cmd->lock = &sp->lock;
cmd->status = IBMASM_CMD_PENDING;
init_waitqueue_head(&cmd->wait);
INIT_LIST_HEAD(&cmd->queue_node);
atomic_inc(&command_count);
dbg("command count: %d\n", atomic_read(&command_count));
return cmd;
}
void ibmasm_free_command(struct kref *kref)
{
struct command *cmd = to_command(kref);
list_del(&cmd->queue_node);
atomic_dec(&command_count);
dbg("command count: %d\n", atomic_read(&command_count));
kfree(cmd->buffer);
kfree(cmd);
}
static void enqueue_command(struct service_processor *sp, struct command *cmd)
{
list_add_tail(&cmd->queue_node, &sp->command_queue);
}
static struct command *dequeue_command(struct service_processor *sp)
{
struct command *cmd;
struct list_head *next;
if (list_empty(&sp->command_queue))
return NULL;
next = sp->command_queue.next;
list_del_init(next);
cmd = list_entry(next, struct command, queue_node);
return cmd;
}
static inline void do_exec_command(struct service_processor *sp)
{
char tsbuf[32];
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
if (ibmasm_send_i2o_message(sp)) {
sp->current_command->status = IBMASM_CMD_FAILED;
wake_up(&sp->current_command->wait);
command_put(sp->current_command);
exec_next_command(sp);
}
}
/*
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
* is sent to the service processor. Once the interrupt handler gets a
* message of type command_response, the message is copied into
 * the current command's buffer.
*/
void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
{
unsigned long flags;
char tsbuf[32];
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
spin_lock_irqsave(&sp->lock, flags);
if (!sp->current_command) {
sp->current_command = cmd;
command_get(sp->current_command);
spin_unlock_irqrestore(&sp->lock, flags);
do_exec_command(sp);
} else {
enqueue_command(sp, cmd);
spin_unlock_irqrestore(&sp->lock, flags);
}
}
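/*
 * Typical caller pattern (illustrative sketch; compare
 * ibmasm_send_driver_vpd() in dot_command.c and command_file_write() in
 * ibmasmfs.c):
 *
 *	cmd = ibmasm_new_command(sp, size);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... fill cmd->buffer with a dot command ...
 *	ibmasm_exec_command(sp, cmd);
 *	ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
 *	if (cmd->status == IBMASM_CMD_COMPLETE)
 *		... consume cmd->buffer ...
 *	command_put(cmd);
 */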
static void exec_next_command(struct service_processor *sp)
{
unsigned long flags;
char tsbuf[32];
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
spin_lock_irqsave(&sp->lock, flags);
sp->current_command = dequeue_command(sp);
if (sp->current_command) {
command_get(sp->current_command);
spin_unlock_irqrestore(&sp->lock, flags);
do_exec_command(sp);
} else {
spin_unlock_irqrestore(&sp->lock, flags);
}
}
/*
* Sleep until a command has failed or a response has been received
 * and the command status has been updated by the interrupt handler.
* (see receive_response).
*/
void ibmasm_wait_for_response(struct command *cmd, int timeout)
{
wait_event_interruptible_timeout(cmd->wait,
cmd->status == IBMASM_CMD_COMPLETE ||
cmd->status == IBMASM_CMD_FAILED,
timeout * HZ);
}
/*
* receive_command_response
* called by the interrupt handler when a dot command of type command_response
* was received.
*/
void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size)
{
struct command *cmd = sp->current_command;
if (!sp->current_command)
return;
memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
cmd->status = IBMASM_CMD_COMPLETE;
wake_up(&sp->current_command->wait);
command_put(sp->current_command);
exec_next_command(sp);
}
| linux-master | drivers/misc/ibmasm/command.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
/*
* Parts of this code are based on an article by Jonathan Corbet
* that appeared in Linux Weekly News.
*/
/*
* The IBMASM file virtual filesystem. It creates the following hierarchy
* dynamically when mounted from user space:
*
* /ibmasm
* |-- 0
* | |-- command
* | |-- event
* | |-- reverse_heartbeat
* | `-- remote_video
* | |-- depth
* | |-- height
* | `-- width
* .
* .
* .
* `-- n
* |-- command
* |-- event
* |-- reverse_heartbeat
* `-- remote_video
* |-- depth
* |-- height
* `-- width
*
* For each service processor the following files are created:
*
* command: execute dot commands
* write: execute a dot command on the service processor
* read: return the result of a previously executed dot command
*
* events: listen for service processor events
* read: sleep (interruptible) until an event occurs
* write: wakeup sleeping event listener
*
* reverse_heartbeat: send a heartbeat to the service processor
* read: sleep (interruptible) until the reverse heartbeat fails
* write: wakeup sleeping heartbeat listener
*
 * remote_video/depth
 * remote_video/height
 * remote_video/width: control remote display settings
* write: set value
* read: read value
*/
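/*
 * Example user space interaction (illustrative sketch based on the
 * description above; the mount point, buffer names and sizes are
 * placeholders):
 *
 *	mount -t ibmasmfs none /ibmasm
 *	fd = open("/ibmasm/0/command", O_RDWR);
 *	write(fd, dot_cmd, dot_cmd_size);	(write = execute a dot command)
 *	read(fd, response, sizeof(response));	(read = fetch its response)
 *
 * A read on "event" blocks until the service processor signals an event;
 * writing a single byte to "event" wakes up a blocked reader.
 */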
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "ibmasm.h"
#include "remote.h"
#include "dot_command.h"
#define IBMASMFS_MAGIC 0x66726f67
static LIST_HEAD(service_processors);
static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode);
static void ibmasmfs_create_files (struct super_block *sb);
static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc);
static int ibmasmfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, ibmasmfs_fill_super);
}
static const struct fs_context_operations ibmasmfs_context_ops = {
.get_tree = ibmasmfs_get_tree,
};
static int ibmasmfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &ibmasmfs_context_ops;
return 0;
}
static const struct super_operations ibmasmfs_s_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
static struct file_system_type ibmasmfs_type = {
.owner = THIS_MODULE,
.name = "ibmasmfs",
.init_fs_context = ibmasmfs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("ibmasmfs");
static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *root;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = IBMASMFS_MAGIC;
sb->s_op = &ibmasmfs_s_ops;
sb->s_time_gran = 1;
root = ibmasmfs_make_inode (sb, S_IFDIR | 0500);
if (!root)
return -ENOMEM;
root->i_op = &simple_dir_inode_operations;
root->i_fop = ibmasmfs_dir_ops;
sb->s_root = d_make_root(root);
if (!sb->s_root)
return -ENOMEM;
ibmasmfs_create_files(sb);
return 0;
}
static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
{
struct inode *ret = new_inode(sb);
if (ret) {
ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
}
return ret;
}
static struct dentry *ibmasmfs_create_file(struct dentry *parent,
const char *name,
const struct file_operations *fops,
void *data,
int mode)
{
struct dentry *dentry;
struct inode *inode;
dentry = d_alloc_name(parent, name);
if (!dentry)
return NULL;
inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode);
if (!inode) {
dput(dentry);
return NULL;
}
inode->i_fop = fops;
inode->i_private = data;
d_add(dentry, inode);
return dentry;
}
static struct dentry *ibmasmfs_create_dir(struct dentry *parent,
const char *name)
{
struct dentry *dentry;
struct inode *inode;
dentry = d_alloc_name(parent, name);
if (!dentry)
return NULL;
inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500);
if (!inode) {
dput(dentry);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = ibmasmfs_dir_ops;
d_add(dentry, inode);
return dentry;
}
int ibmasmfs_register(void)
{
return register_filesystem(&ibmasmfs_type);
}
void ibmasmfs_unregister(void)
{
unregister_filesystem(&ibmasmfs_type);
}
void ibmasmfs_add_sp(struct service_processor *sp)
{
list_add(&sp->node, &service_processors);
}
/* struct to save state between command file operations */
struct ibmasmfs_command_data {
struct service_processor *sp;
struct command *command;
};
/* struct to save state between event file operations */
struct ibmasmfs_event_data {
struct service_processor *sp;
struct event_reader reader;
int active;
};
/* struct to save state between reverse heartbeat file operations */
struct ibmasmfs_heartbeat_data {
struct service_processor *sp;
struct reverse_heartbeat heartbeat;
int active;
};
static int command_file_open(struct inode *inode, struct file *file)
{
struct ibmasmfs_command_data *command_data;
if (!inode->i_private)
return -ENODEV;
command_data = kmalloc(sizeof(struct ibmasmfs_command_data), GFP_KERNEL);
if (!command_data)
return -ENOMEM;
command_data->command = NULL;
command_data->sp = inode->i_private;
file->private_data = command_data;
return 0;
}
static int command_file_close(struct inode *inode, struct file *file)
{
struct ibmasmfs_command_data *command_data = file->private_data;
if (command_data->command)
command_put(command_data->command);
kfree(command_data);
return 0;
}
static ssize_t command_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
struct ibmasmfs_command_data *command_data = file->private_data;
struct command *cmd;
int len;
unsigned long flags;
if (*offset < 0)
return -EINVAL;
if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
return 0;
if (*offset != 0)
return 0;
spin_lock_irqsave(&command_data->sp->lock, flags);
cmd = command_data->command;
if (cmd == NULL) {
spin_unlock_irqrestore(&command_data->sp->lock, flags);
return 0;
}
command_data->command = NULL;
spin_unlock_irqrestore(&command_data->sp->lock, flags);
if (cmd->status != IBMASM_CMD_COMPLETE) {
command_put(cmd);
return -EIO;
}
len = min(count, cmd->buffer_size);
if (copy_to_user(buf, cmd->buffer, len)) {
command_put(cmd);
return -EFAULT;
}
command_put(cmd);
return len;
}
static ssize_t command_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
{
struct ibmasmfs_command_data *command_data = file->private_data;
struct command *cmd;
unsigned long flags;
if (*offset < 0)
return -EINVAL;
if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
return 0;
if (*offset != 0)
return 0;
/* commands are executed sequentially, only one command at a time */
if (command_data->command)
return -EAGAIN;
cmd = ibmasm_new_command(command_data->sp, count);
if (!cmd)
return -ENOMEM;
if (copy_from_user(cmd->buffer, ubuff, count)) {
command_put(cmd);
return -EFAULT;
}
spin_lock_irqsave(&command_data->sp->lock, flags);
if (command_data->command) {
spin_unlock_irqrestore(&command_data->sp->lock, flags);
command_put(cmd);
return -EAGAIN;
}
command_data->command = cmd;
spin_unlock_irqrestore(&command_data->sp->lock, flags);
ibmasm_exec_command(command_data->sp, cmd);
ibmasm_wait_for_response(cmd, get_dot_command_timeout(cmd->buffer));
return count;
}
static int event_file_open(struct inode *inode, struct file *file)
{
struct ibmasmfs_event_data *event_data;
struct service_processor *sp;
if (!inode->i_private)
return -ENODEV;
sp = inode->i_private;
event_data = kmalloc(sizeof(struct ibmasmfs_event_data), GFP_KERNEL);
if (!event_data)
return -ENOMEM;
ibmasm_event_reader_register(sp, &event_data->reader);
event_data->sp = sp;
event_data->active = 0;
file->private_data = event_data;
return 0;
}
static int event_file_close(struct inode *inode, struct file *file)
{
struct ibmasmfs_event_data *event_data = file->private_data;
ibmasm_event_reader_unregister(event_data->sp, &event_data->reader);
kfree(event_data);
return 0;
}
static ssize_t event_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
struct ibmasmfs_event_data *event_data = file->private_data;
struct event_reader *reader = &event_data->reader;
struct service_processor *sp = event_data->sp;
int ret;
unsigned long flags;
if (*offset < 0)
return -EINVAL;
if (count == 0 || count > IBMASM_EVENT_MAX_SIZE)
return 0;
if (*offset != 0)
return 0;
spin_lock_irqsave(&sp->lock, flags);
if (event_data->active) {
spin_unlock_irqrestore(&sp->lock, flags);
return -EBUSY;
}
event_data->active = 1;
spin_unlock_irqrestore(&sp->lock, flags);
ret = ibmasm_get_next_event(sp, reader);
if (ret <= 0)
goto out;
if (count < reader->data_size) {
ret = -EINVAL;
goto out;
}
if (copy_to_user(buf, reader->data, reader->data_size)) {
ret = -EFAULT;
goto out;
}
ret = reader->data_size;
out:
event_data->active = 0;
return ret;
}
static ssize_t event_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
{
struct ibmasmfs_event_data *event_data = file->private_data;
if (*offset < 0)
return -EINVAL;
if (count != 1)
return 0;
if (*offset != 0)
return 0;
ibmasm_cancel_next_event(&event_data->reader);
return 0;
}
static int r_heartbeat_file_open(struct inode *inode, struct file *file)
{
struct ibmasmfs_heartbeat_data *rhbeat;
if (!inode->i_private)
return -ENODEV;
rhbeat = kmalloc(sizeof(struct ibmasmfs_heartbeat_data), GFP_KERNEL);
if (!rhbeat)
return -ENOMEM;
rhbeat->sp = inode->i_private;
rhbeat->active = 0;
ibmasm_init_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat);
file->private_data = rhbeat;
return 0;
}
static int r_heartbeat_file_close(struct inode *inode, struct file *file)
{
struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
kfree(rhbeat);
return 0;
}
static ssize_t r_heartbeat_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
unsigned long flags;
int result;
if (*offset < 0)
return -EINVAL;
if (count == 0 || count > 1024)
return 0;
if (*offset != 0)
return 0;
/* allow only one reverse heartbeat per process */
spin_lock_irqsave(&rhbeat->sp->lock, flags);
if (rhbeat->active) {
spin_unlock_irqrestore(&rhbeat->sp->lock, flags);
return -EBUSY;
}
rhbeat->active = 1;
spin_unlock_irqrestore(&rhbeat->sp->lock, flags);
result = ibmasm_start_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat);
rhbeat->active = 0;
return result;
}
static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
{
struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
if (*offset < 0)
return -EINVAL;
if (count != 1)
return 0;
if (*offset != 0)
return 0;
if (rhbeat->active)
ibmasm_stop_reverse_heartbeat(&rhbeat->heartbeat);
return 1;
}
static int remote_settings_file_close(struct inode *inode, struct file *file)
{
return 0;
}
static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
void __iomem *address = (void __iomem *)file->private_data;
int len = 0;
unsigned int value;
char lbuf[20];
value = readl(address);
len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
return simple_read_from_buffer(buf, count, offset, lbuf, len);
}
static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
{
void __iomem *address = (void __iomem *)file->private_data;
char *buff;
unsigned int value;
if (*offset < 0)
return -EINVAL;
if (count == 0 || count > 1024)
return 0;
if (*offset != 0)
return 0;
buff = kzalloc (count + 1, GFP_KERNEL);
if (!buff)
return -ENOMEM;
if (copy_from_user(buff, ubuff, count)) {
kfree(buff);
return -EFAULT;
}
value = simple_strtoul(buff, NULL, 10);
writel(value, address);
kfree(buff);
return count;
}
static const struct file_operations command_fops = {
.open = command_file_open,
.release = command_file_close,
.read = command_file_read,
.write = command_file_write,
.llseek = generic_file_llseek,
};
static const struct file_operations event_fops = {
.open = event_file_open,
.release = event_file_close,
.read = event_file_read,
.write = event_file_write,
.llseek = generic_file_llseek,
};
static const struct file_operations r_heartbeat_fops = {
.open = r_heartbeat_file_open,
.release = r_heartbeat_file_close,
.read = r_heartbeat_file_read,
.write = r_heartbeat_file_write,
.llseek = generic_file_llseek,
};
static const struct file_operations remote_settings_fops = {
.open = simple_open,
.release = remote_settings_file_close,
.read = remote_settings_file_read,
.write = remote_settings_file_write,
.llseek = generic_file_llseek,
};
static void ibmasmfs_create_files (struct super_block *sb)
{
struct list_head *entry;
struct service_processor *sp;
list_for_each(entry, &service_processors) {
struct dentry *dir;
struct dentry *remote_dir;
sp = list_entry(entry, struct service_processor, node);
dir = ibmasmfs_create_dir(sb->s_root, sp->dirname);
if (!dir)
continue;
ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
remote_dir = ibmasmfs_create_dir(dir, "remote_video");
if (!remote_dir)
continue;
ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
}
}
| linux-master | drivers/misc/ibmasm/ibmasmfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include "ibmasm.h"
#include "lowlevel.h"
#include "i2o.h"
#include "dot_command.h"
#include "remote.h"
static struct i2o_header header = I2O_HEADER_TEMPLATE;
int ibmasm_send_i2o_message(struct service_processor *sp)
{
u32 mfa;
unsigned int command_size;
struct i2o_message *message;
struct command *command = sp->current_command;
mfa = get_mfa_inbound(sp->base_address);
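	/*
	 * get_mfa_inbound() returning 0 means no inbound message frame is
	 * currently available; returning non-zero here lets
	 * do_exec_command() fail the command.
	 */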
if (!mfa)
return 1;
command_size = get_dot_command_size(command->buffer);
header.message_size = outgoing_message_size(command_size);
message = get_i2o_message(sp->base_address, mfa);
memcpy_toio(&message->header, &header, sizeof(struct i2o_header));
memcpy_toio(&message->data, command->buffer, command_size);
set_mfa_inbound(sp->base_address, mfa);
return 0;
}
irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id)
{
u32 mfa;
struct service_processor *sp = (struct service_processor *)dev_id;
void __iomem *base_address = sp->base_address;
char tsbuf[32];
if (!sp_interrupt_pending(base_address))
return IRQ_NONE;
dbg("respond to interrupt at %s\n", get_timestamp(tsbuf));
if (mouse_interrupt_pending(sp)) {
ibmasm_handle_mouse_interrupt(sp);
clear_mouse_interrupt(sp);
}
mfa = get_mfa_outbound(base_address);
if (valid_mfa(mfa)) {
struct i2o_message *msg = get_i2o_message(base_address, mfa);
ibmasm_receive_message(sp, &msg->data, incoming_data_size(msg));
} else
dbg("didn't get a valid MFA\n");
set_mfa_outbound(base_address, mfa);
dbg("finished interrupt at %s\n", get_timestamp(tsbuf));
return IRQ_HANDLED;
}
| linux-master | drivers/misc/ibmasm/lowlevel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include <linux/sched/signal.h>
#include "ibmasm.h"
#include "dot_command.h"
/*
* Reverse Heartbeat, i.e. heartbeats sent from the driver to the
* service processor.
* These heartbeats are initiated by user level programs.
*/
/* the reverse heartbeat dot command */
#pragma pack(1)
static struct {
struct dot_command_header header;
unsigned char command[3];
} rhb_dot_cmd = {
.header = {
.type = sp_read,
.command_size = 3,
.data_size = 0,
.status = 0
},
.command = { 4, 3, 6 }
};
#pragma pack()
void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
init_waitqueue_head(&rhb->wait);
rhb->stopped = 0;
}
/*
* start_reverse_heartbeat
* Loop forever, sending a reverse heartbeat dot command to the service
* processor, then sleeping. The loop comes to an end if the service
* processor fails to respond 3 times or we were interrupted.
*/
int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
struct command *cmd;
int times_failed = 0;
int result = 1;
cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd);
if (!cmd)
return -ENOMEM;
while (times_failed < 3) {
memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd);
cmd->status = IBMASM_CMD_PENDING;
ibmasm_exec_command(sp, cmd);
ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
if (cmd->status != IBMASM_CMD_COMPLETE)
times_failed++;
wait_event_interruptible_timeout(rhb->wait,
rhb->stopped,
REVERSE_HEARTBEAT_TIMEOUT * HZ);
if (signal_pending(current) || rhb->stopped) {
result = -EINTR;
break;
}
}
command_put(cmd);
rhb->stopped = 0;
return result;
}
void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb)
{
rhb->stopped = 1;
wake_up_interruptible(&rhb->wait);
}
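/*
 * User space view (sketch, matching the reverse_heartbeat file created by
 * ibmasmfs.c): a read on that file calls ibmasm_start_reverse_heartbeat()
 * and only returns after the service processor has failed to respond three
 * times or the reader was interrupted; writing a single byte calls
 * ibmasm_stop_reverse_heartbeat() to end it.
 */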
| linux-master | drivers/misc/ibmasm/r_heartbeat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include "ibmasm.h"
#include "lowlevel.h"
/*
* ASM service processor event handling routines.
*
* Events are signalled to the device drivers through interrupts.
* They have the format of dot commands, with the type field set to
* sp_event.
* The driver does not interpret the events, it simply stores them in a
* circular buffer.
*/
static void wake_up_event_readers(struct service_processor *sp)
{
struct event_reader *reader;
list_for_each_entry(reader, &sp->event_buffer->readers, node)
wake_up_interruptible(&reader->wait);
}
/*
* receive_event
* Called by the interrupt handler when a dot command of type sp_event is
* received.
* Store the event in the circular event buffer, wake up any sleeping
* event readers.
* There is no reader marker in the buffer, therefore readers are
* responsible for keeping up with the writer, or they will lose events.
*/
void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
{
struct event_buffer *buffer = sp->event_buffer;
struct ibmasm_event *event;
unsigned long flags;
data_size = min(data_size, IBMASM_EVENT_MAX_SIZE);
spin_lock_irqsave(&sp->lock, flags);
/* copy the event into the next slot in the circular buffer */
event = &buffer->events[buffer->next_index];
memcpy_fromio(event->data, data, data_size);
event->data_size = data_size;
event->serial_number = buffer->next_serial_number;
/* advance indices in the buffer */
buffer->next_index = (buffer->next_index + 1) % IBMASM_NUM_EVENTS;
buffer->next_serial_number++;
spin_unlock_irqrestore(&sp->lock, flags);
wake_up_event_readers(sp);
}
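/*
 * Worked example of the "keep up or lose events" rule (illustrative): with
 * IBMASM_NUM_EVENTS slots, once the writer's next_serial_number gets more
 * than IBMASM_NUM_EVENTS ahead of a reader's next_serial_number, the events
 * in between have been overwritten; the catch-up loop in
 * ibmasm_get_next_event() then silently resumes at the oldest event still
 * present in the buffer.
 */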
static inline int event_available(struct event_buffer *b, struct event_reader *r)
{
return (r->next_serial_number < b->next_serial_number);
}
/*
* get_next_event
* Called by event readers (initiated from user space through the file
* system).
* Sleeps until a new event is available.
*/
int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
{
struct event_buffer *buffer = sp->event_buffer;
struct ibmasm_event *event;
unsigned int index;
unsigned long flags;
reader->cancelled = 0;
if (wait_event_interruptible(reader->wait,
event_available(buffer, reader) || reader->cancelled))
return -ERESTARTSYS;
if (!event_available(buffer, reader))
return 0;
spin_lock_irqsave(&sp->lock, flags);
index = buffer->next_index;
event = &buffer->events[index];
while (event->serial_number < reader->next_serial_number) {
index = (index + 1) % IBMASM_NUM_EVENTS;
event = &buffer->events[index];
}
memcpy(reader->data, event->data, event->data_size);
reader->data_size = event->data_size;
reader->next_serial_number = event->serial_number + 1;
spin_unlock_irqrestore(&sp->lock, flags);
return event->data_size;
}
void ibmasm_cancel_next_event(struct event_reader *reader)
{
reader->cancelled = 1;
wake_up_interruptible(&reader->wait);
}
void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader)
{
unsigned long flags;
reader->next_serial_number = sp->event_buffer->next_serial_number;
init_waitqueue_head(&reader->wait);
spin_lock_irqsave(&sp->lock, flags);
list_add(&reader->node, &sp->event_buffer->readers);
spin_unlock_irqrestore(&sp->lock, flags);
}
void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader)
{
unsigned long flags;
spin_lock_irqsave(&sp->lock, flags);
list_del(&reader->node);
spin_unlock_irqrestore(&sp->lock, flags);
}
int ibmasm_event_buffer_init(struct service_processor *sp)
{
struct event_buffer *buffer;
struct ibmasm_event *event;
int i;
buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer->next_index = 0;
buffer->next_serial_number = 1;
event = buffer->events;
for (i=0; i<IBMASM_NUM_EVENTS; i++, event++)
event->serial_number = 0;
INIT_LIST_HEAD(&buffer->readers);
sp->event_buffer = buffer;
return 0;
}
void ibmasm_event_buffer_exit(struct service_processor *sp)
{
kfree(sp->event_buffer);
}
| linux-master | drivers/misc/ibmasm/event.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include "ibmasm.h"
#include "dot_command.h"
/*
* Dispatch an incoming message to the specific handler for the message.
* Called from interrupt context.
*/
void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size)
{
u32 size;
struct dot_command_header *header = (struct dot_command_header *)message;
if (message_size == 0)
return;
size = get_dot_command_size(message);
if (size == 0)
return;
if (size > message_size)
size = message_size;
switch (header->type) {
case sp_event:
ibmasm_receive_event(sp, message, size);
break;
case sp_command_response:
ibmasm_receive_command_response(sp, message, size);
break;
case sp_heartbeat:
ibmasm_receive_heartbeat(sp, message, size);
break;
default:
dev_err(sp->dev, "Received unknown message from service processor\n");
}
}
#define INIT_BUFFER_SIZE 32
/*
* send the 4.3.5.10 dot command (driver VPD) to the service processor
*/
int ibmasm_send_driver_vpd(struct service_processor *sp)
{
struct command *command;
struct dot_command_header *header;
u8 *vpd_command;
u8 *vpd_data;
int result = 0;
command = ibmasm_new_command(sp, INIT_BUFFER_SIZE);
if (command == NULL)
return -ENOMEM;
header = (struct dot_command_header *)command->buffer;
header->type = sp_write;
header->command_size = 4;
header->data_size = 16;
header->status = 0;
header->reserved = 0;
vpd_command = command->buffer + sizeof(struct dot_command_header);
vpd_command[0] = 0x4;
vpd_command[1] = 0x3;
vpd_command[2] = 0x5;
vpd_command[3] = 0xa;
vpd_data = vpd_command + header->command_size;
vpd_data[0] = 0;
strcat(vpd_data, IBMASM_DRIVER_VPD);
vpd_data[10] = 0;
vpd_data[15] = 0;
ibmasm_exec_command(sp, command);
ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
if (command->status != IBMASM_CMD_COMPLETE)
result = -ENODEV;
command_put(command);
return result;
}
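/*
 * Resulting buffer layout for the driver VPD dot command above (sketch;
 * offsets follow directly from the header fields and sizes set in this
 * function):
 *
 *	struct dot_command_header	type=sp_write, command_size=4, data_size=16
 *	4 command bytes			0x04 0x03 0x05 0x0a  ("4.3.5.10")
 *	16 data bytes			IBMASM_DRIVER_VPD string, NUL terminated
 */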
struct os_state_command {
struct dot_command_header header;
unsigned char command[3];
unsigned char data;
};
/*
* send the 4.3.6 dot command (os state) to the service processor
* During driver init this function is called with os state "up".
 * This causes the service processor to start sending heartbeats to
 * the driver.
* During driver exit the function is called with os state "down",
* causing the service processor to stop the heartbeats.
*/
int ibmasm_send_os_state(struct service_processor *sp, int os_state)
{
struct command *cmd;
struct os_state_command *os_state_cmd;
int result = 0;
cmd = ibmasm_new_command(sp, sizeof(struct os_state_command));
if (cmd == NULL)
return -ENOMEM;
os_state_cmd = (struct os_state_command *)cmd->buffer;
os_state_cmd->header.type = sp_write;
os_state_cmd->header.command_size = 3;
os_state_cmd->header.data_size = 1;
os_state_cmd->header.status = 0;
os_state_cmd->command[0] = 4;
os_state_cmd->command[1] = 3;
os_state_cmd->command[2] = 6;
os_state_cmd->data = os_state;
ibmasm_exec_command(sp, cmd);
ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
if (cmd->status != IBMASM_CMD_COMPLETE)
result = -ENODEV;
command_put(cmd);
return result;
}
| linux-master | drivers/misc/ibmasm/dot_command.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include "ibmasm.h"
#include "dot_command.h"
#include "lowlevel.h"
static int suspend_heartbeats = 0;
/*
* Once the driver indicates to the service processor that it is running
* - see send_os_state() - the service processor sends periodic heartbeats
* to the driver. The driver must respond to the heartbeats or else the OS
* will be rebooted.
* In the case of a panic the interrupt handler continues to work and thus
* continues to respond to heartbeats, making the service processor believe
* the OS is still running and thus preventing a reboot.
 * To prevent this from happening a callback is added to the panic_notifier_list.
 * Before responding to a heartbeat the driver checks if a panic has happened;
 * if so, it suspends heartbeats, causing the service processor to reboot as
 * expected.
*/
static int panic_happened(struct notifier_block *n, unsigned long val, void *v)
{
suspend_heartbeats = 1;
return 0;
}
static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };
void ibmasm_register_panic_notifier(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
}
void ibmasm_unregister_panic_notifier(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_notifier);
}
int ibmasm_heartbeat_init(struct service_processor *sp)
{
sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE);
if (sp->heartbeat == NULL)
return -ENOMEM;
return 0;
}
void ibmasm_heartbeat_exit(struct service_processor *sp)
{
char tsbuf[32];
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
suspend_heartbeats = 1;
command_put(sp->heartbeat);
}
void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size)
{
struct command *cmd = sp->heartbeat;
struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
char tsbuf[32];
dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
if (suspend_heartbeats)
return;
/* return the received dot command to sender */
cmd->status = IBMASM_CMD_PENDING;
size = min(size, cmd->buffer_size);
memcpy_fromio(cmd->buffer, message, size);
header->type = sp_write;
ibmasm_exec_command(sp, cmd);
}
| linux-master | drivers/misc/ibmasm/heartbeat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM ASM Service Processor Device Driver
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <[email protected]>
*/
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include "ibmasm.h"
#include "lowlevel.h"
void ibmasm_register_uart(struct service_processor *sp)
{
struct uart_8250_port uart;
void __iomem *iomem_base;
iomem_base = sp->base_address + SCOUT_COM_B_BASE;
/* read the uart scratch register to determine if the UART
* is dedicated to the service processor or if the OS can use it
*/
if (0 == readl(iomem_base + UART_SCR)) {
dev_info(sp->dev, "IBM SP UART not registered, owned by service processor\n");
sp->serial_line = -1;
return;
}
memset(&uart, 0, sizeof(uart));
uart.port.irq = sp->irq;
uart.port.uartclk = 3686400;
uart.port.flags = UPF_SHARE_IRQ;
uart.port.iotype = UPIO_MEM;
uart.port.membase = iomem_base;
sp->serial_line = serial8250_register_8250_port(&uart);
if (sp->serial_line < 0) {
dev_err(sp->dev, "Failed to register serial port\n");
return;
}
enable_uart_interrupts(sp->base_address);
}
void ibmasm_unregister_uart(struct service_processor *sp)
{
if (sp->serial_line < 0)
return;
disable_uart_interrupts(sp->base_address);
serial8250_unregister_port(sp->serial_line);
}
| linux-master | drivers/misc/ibmasm/uart.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hw-txe.h"
#include "client.h"
#include "hbm.h"
#include "mei-trace.h"
#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
*
* @base_addr: registers base address
* @offset: register offset
*
* Return: register value
*/
static inline u32 mei_txe_reg_read(void __iomem *base_addr,
unsigned long offset)
{
return ioread32(base_addr + offset);
}
/**
* mei_txe_reg_write - Writes 32bit data to the txe device
*
* @base_addr: registers base address
* @offset: register offset
* @value: the value to write
*/
static inline void mei_txe_reg_write(void __iomem *base_addr,
unsigned long offset, u32 value)
{
iowrite32(value, base_addr + offset);
}
/**
* mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
*
 * Reads 32bit data from the SeC BAR without checking for aliveness
*
* Return: register value
*/
static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
unsigned long offset)
{
return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
}
/**
* mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
*
 * Reads 32bit data from the SeC BAR and shouts loudly if aliveness is not set
*
* Return: register value
*/
static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
unsigned long offset)
{
WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
return mei_txe_sec_reg_read_silent(hw, offset);
}
/**
* mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
* doesn't check for aliveness
*
* @hw: the txe hardware structure
* @offset: register offset
* @value: value to write
*
 * Writes 32bit data to the SeC BAR without checking for aliveness
*/
static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
}
/**
* mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
* @value: value to write
*
 * Writes 32bit data to the SeC BAR and shouts loudly if aliveness is not set
*/
static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
mei_txe_sec_reg_write_silent(hw, offset, value);
}
/**
* mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
*
* @hw: the txe hardware structure
* @offset: offset from which to read the data
*
 * Return: the 32bit value read.
*/
static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
unsigned long offset)
{
return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
}
/**
* mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
*
* @hw: the txe hardware structure
* @offset: offset from which to write the data
 * @value: the 32bit value to write
*/
static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
}
/**
* mei_txe_aliveness_set - request for aliveness change
*
* @dev: the device structure
* @req: requested aliveness value
*
 * Requests an aliveness change and returns true if the change is
 * really needed and false if aliveness is already
 * in the requested state.
*
* Locking: called under "dev->device_lock" lock
*
 * Return: true if the request was sent
*/
static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
bool do_req = hw->aliveness != req;
dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
dev->pg_event = MEI_PG_EVENT_WAIT;
mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
}
return do_req;
}
/**
* mei_txe_aliveness_req_get - get aliveness requested register value
*
* @dev: the device structure
*
 * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit from the
 * SICR_HOST_ALIVENESS_REQ register value
*
* Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value
*/
static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg;
reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
}
/**
* mei_txe_aliveness_get - get aliveness response register value
*
* @dev: the device structure
*
* Return: HICR_HOST_ALIVENESS_RESP_ACK bit from HICR_HOST_ALIVENESS_RESP
* register
*/
static u32 mei_txe_aliveness_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg;
reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
return reg & HICR_HOST_ALIVENESS_RESP_ACK;
}
/**
* mei_txe_aliveness_poll - waits for aliveness to settle
*
* @dev: the device structure
* @expected: expected aliveness value
*
* Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
* Return: 0 if the expected value was received, -ETIME otherwise
*/
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
ktime_t stop, start;
start = ktime_get();
stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
ktime_to_us(ktime_sub(ktime_get(), start)));
return 0;
}
usleep_range(20, 50);
} while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(dev->dev, "aliveness timed out\n");
return -ETIME;
}
/**
* mei_txe_aliveness_wait - waits for aliveness to settle
*
* @dev: the device structure
* @expected: expected aliveness value
*
* Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
* Return: 0 on success and < 0 otherwise
*/
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
const unsigned long timeout =
msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
long err;
int ret;
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected)
return 0;
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(hw->wait_aliveness_resp,
dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
mutex_lock(&dev->device_lock);
hw->aliveness = mei_txe_aliveness_get(dev);
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
err, hw->aliveness, dev->pg_event);
else
dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
jiffies_to_msecs(timeout - err),
hw->aliveness, dev->pg_event);
dev->pg_event = MEI_PG_EVENT_IDLE;
return ret;
}
/**
 * mei_txe_aliveness_set_sync - sets and waits for the aliveness change to complete
*
* @dev: the device structure
* @req: requested aliveness value
*
* Return: 0 on success and < 0 otherwise
*/
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
{
if (mei_txe_aliveness_set(dev, req))
return mei_txe_aliveness_wait(dev, req);
return 0;
}
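/*
 * Aliveness handshake at a glance (summary of the helpers above, added for
 * clarity): mei_txe_aliveness_set() writes the requested value to
 * SICR_HOST_ALIVENESS_REQ_REG; mei_txe_aliveness_wait() then sleeps until
 * dev->pg_event reports MEI_PG_EVENT_RECEIVED or the timeout expires, while
 * mei_txe_aliveness_poll() busy-waits on HICR_HOST_ALIVENESS_RESP_REG
 * instead.  Both paths re-read the response register and give up after
 * SEC_ALIVENESS_WAIT_TIMEOUT milliseconds.
 */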
/**
* mei_txe_pg_in_transition - is device now in pg transition
*
* @dev: the device structure
*
* Return: true if in pg transition, false otherwise
*/
static bool mei_txe_pg_in_transition(struct mei_device *dev)
{
return dev->pg_event == MEI_PG_EVENT_WAIT;
}
/**
* mei_txe_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
*
 * Return: true if pg is supported, false otherwise
*/
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
return true;
}
/**
* mei_txe_pg_state - translate aliveness register value
* to the mei power gating state
*
* @dev: the device structure
*
* Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
*/
static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
}
/**
 * mei_txe_input_ready_interrupt_enable - enables the Input Ready Interrupt
*
* @dev: the device structure
*/
static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 hintmsk;
/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}
/**
* mei_txe_input_doorbell_set - sets bit 0 in
* SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
*
* @hw: the txe hardware structure
*/
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
/* Clear the interrupt cause */
clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}
/**
* mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
*
* @hw: the txe hardware structure
*/
static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
mei_txe_br_reg_write(hw,
SICR_SEC_IPC_OUTPUT_STATUS_REG,
SEC_IPC_OUTPUT_STATUS_RDY);
}
/**
* mei_txe_is_input_ready - check if TXE is ready for receiving data
*
* @dev: the device structure
*
* Return: true if INPUT STATUS READY bit is set
*/
static bool mei_txe_is_input_ready(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 status;
status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
return !!(SEC_IPC_INPUT_STATUS_RDY & status);
}
/**
* mei_txe_intr_clear - clear all interrupts
*
* @dev: the device structure
*/
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
SEC_IPC_HOST_INT_STATUS_PENDING);
mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}
/**
* mei_txe_intr_disable - disable all interrupts
*
* @dev: the device structure
*/
static void mei_txe_intr_disable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, HHIER_REG, 0);
mei_txe_br_reg_write(hw, HIER_REG, 0);
}
/**
* mei_txe_intr_enable - enable all interrupts
*
* @dev: the device structure
*/
static void mei_txe_intr_enable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}
/**
* mei_txe_synchronize_irq - wait for pending IRQ handlers
*
* @dev: the device structure
*/
static void mei_txe_synchronize_irq(struct mei_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
synchronize_irq(pdev->irq);
}
/**
* mei_txe_pending_interrupts - check if there are pending interrupts
*
* @dev: the device structure
*
* Checks if there are pending interrupts;
* only Aliveness, Readiness, Input Ready, and Output Doorbell are relevant.
*
* Return: true if there are pending interrupts
*/
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
TXE_INTR_ALIVENESS |
TXE_INTR_IN_READY |
TXE_INTR_OUT_DB));
if (ret) {
dev_dbg(dev->dev,
"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
!!(hw->intr_cause & TXE_INTR_IN_READY),
!!(hw->intr_cause & TXE_INTR_READINESS),
!!(hw->intr_cause & TXE_INTR_ALIVENESS),
!!(hw->intr_cause & TXE_INTR_OUT_DB));
}
return ret;
}
/**
* mei_txe_input_payload_write - write a dword to the host buffer
* at offset idx
*
* @dev: the device structure
* @idx: index in the host buffer
* @value: value
*/
static void mei_txe_input_payload_write(struct mei_device *dev,
unsigned long idx, u32 value)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
(idx * sizeof(u32)), value);
}
/**
* mei_txe_out_data_read - read dword from the device buffer
* at offset idx
*
* @dev: the device structure
* @idx: index in the device buffer
*
* Return: register value at index
*/
static u32 mei_txe_out_data_read(const struct mei_device *dev,
unsigned long idx)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return mei_txe_br_reg_read(hw,
BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
}
/* Readiness */
/**
* mei_txe_readiness_set_host_rdy - set host readiness bit
*
* @dev: the device structure
*/
static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw,
SICR_HOST_IPC_READINESS_REQ_REG,
SICR_HOST_IPC_READINESS_HOST_RDY);
}
/**
* mei_txe_readiness_clear - clear host readiness bit
*
* @dev: the device structure
*/
static void mei_txe_readiness_clear(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
SICR_HOST_IPC_READINESS_RDY_CLR);
}
/**
* mei_txe_readiness_get - Reads and returns
* the HICR_SEC_IPC_READINESS register value
*
* @dev: the device structure
*
* Return: the HICR_SEC_IPC_READINESS register value
*/
static u32 mei_txe_readiness_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
}
/**
* mei_txe_readiness_is_sec_rdy - check readiness
* for HICR_SEC_IPC_READINESS_SEC_RDY
*
* @readiness: cached readiness state
*
* Return: true if readiness bit is set
*/
static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
{
return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
}
/**
* mei_txe_hw_is_ready - check if the hw is ready
*
* @dev: the device structure
*
* Return: true if sec is ready
*/
static bool mei_txe_hw_is_ready(struct mei_device *dev)
{
u32 readiness = mei_txe_readiness_get(dev);
return mei_txe_readiness_is_sec_rdy(readiness);
}
/**
* mei_txe_host_is_ready - check if the host is ready
*
* @dev: the device structure
*
* Return: true if host is ready
*/
static inline bool mei_txe_host_is_ready(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
}
/**
* mei_txe_readiness_wait - wait till readiness settles
*
* @dev: the device structure
*
* Return: 0 on success and -ETIME on timeout
*/
static int mei_txe_readiness_wait(struct mei_device *dev)
{
if (mei_txe_hw_is_ready(dev))
return 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait for readiness failed\n");
return -ETIME;
}
dev->recvd_hw_ready = false;
return 0;
}
static const struct mei_fw_status mei_txe_fw_sts = {
.count = 2,
.status[0] = PCI_CFG_TXE_FW_STS0,
.status[1] = PCI_CFG_TXE_FW_STS1
};
/**
* mei_txe_fw_status - read fw status register from pci config space
*
* @dev: mei device
* @fw_status: fw status register values
*
* Return: 0 on success, error otherwise
*/
static int mei_txe_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
int i;
if (!fw_status)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = pci_read_config_dword(pdev, fw_src->status[i],
&fw_status->status[i]);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
return ret;
}
return 0;
}
/**
* mei_txe_hw_config - configure hardware at the start of the device
*
* @dev: the device structure
*
* Configuring hardware at the start of the device should be done only
* once, at device probe time.
*
* Return: always 0
*/
static int mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
return 0;
}
/**
* mei_txe_write - writes a message to device.
*
* @dev: the device structure
* @hdr: header of message
* @hdr_len: header length in bytes - must be a multiple of the slot size (4 bytes)
* @data: payload
* @data_len: payload length in bytes
*
* Return: 0 if success, < 0 - otherwise.
*/
static int mei_txe_write(struct mei_device *dev,
const void *hdr, size_t hdr_len,
const void *data, size_t data_len)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
const u32 *reg_buf;
u32 slots = TXE_HBUF_DEPTH;
u32 dw_cnt;
unsigned long i, j;
if (WARN_ON(!hdr || !data || hdr_len & 0x3))
return -EINVAL;
dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > slots)
return -EMSGSIZE;
if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
return -EAGAIN;
/* Enable Input Ready Interrupt. */
mei_txe_input_ready_interrupt_enable(dev);
if (!mei_txe_is_input_ready(dev)) {
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
return -EAGAIN;
}
reg_buf = hdr;
for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
mei_txe_input_payload_write(dev, i, reg_buf[i]);
reg_buf = data;
for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
mei_txe_input_payload_write(dev, i + j, reg_buf[j]);
rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
memcpy(&reg, (const u8 *)data + data_len - rem, rem);
mei_txe_input_payload_write(dev, i + j, reg);
}
/* after each write the whole buffer is consumed */
hw->slots = 0;
/* Set Input-Doorbell */
mei_txe_input_doorbell_set(hw);
return 0;
}
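/*
* Worked example (illustrative only, assuming MEI_SLOT_SIZE == 4 and a
* sufficiently deep host buffer): for hdr_len = 8 and data_len = 5 the
* function above packs the message as
*
*   dw_cnt = mei_data2slots(8 + 5) = 4 slots
*   slots 0-1 <- the two header dwords
*   slot  2   <- the first four payload bytes
*   rem = 5 & 0x3 = 1, so the last byte is copied into a zeroed u32
*   slot  3   <- that zero-padded dword
*
* Only after all slots are written is the input doorbell rung, so the
* firmware always sees a complete, slot-aligned message.
*/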
/**
* mei_txe_hbuf_depth - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
* Return: the TXE_HBUF_DEPTH
*/
static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
return TXE_HBUF_DEPTH;
}
/**
* mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
* Return: the number of currently empty slots (hw->slots)
*/
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return hw->slots;
}
/**
* mei_txe_count_full_read_slots - mimics the me device circular buffer
*
* @dev: the device structure
*
* Return: always the buffer size in dwords (TXE_HBUF_DEPTH)
*/
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
/* the read buffer has a static size */
return TXE_HBUF_DEPTH;
}
/**
* mei_txe_read_hdr - read message header which is always in 4 first bytes
*
* @dev: the device structure
*
* Return: mei message header
*/
static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
return mei_txe_out_data_read(dev, 0);
}
/**
* mei_txe_read - reads a message from the txe device.
*
* @dev: the device structure
* @buf: message buffer the message will be written to
* @len: message size to read
*
* Return: -EINVAL on wrong argument, 0 on success
*/
static int mei_txe_read(struct mei_device *dev,
unsigned char *buf, unsigned long len)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 *reg_buf, reg;
u32 rem;
u32 i;
if (WARN_ON(!buf || !len))
return -EINVAL;
reg_buf = (u32 *)buf;
rem = len & 0x3;
dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
*reg_buf++ = reg;
}
if (rem) {
reg = mei_txe_out_data_read(dev, i + 1);
memcpy(reg_buf, &reg, rem);
}
mei_txe_output_ready_set(hw);
return 0;
}
/**
* mei_txe_hw_reset - resets host and fw.
*
* @dev: the device structure
* @intr_enable: if interrupt should be enabled after reset.
*
* Return: 0 on success and < 0 in case of error
*/
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 aliveness_req;
/*
* read the input doorbell to ensure consistency between Bridge and SeC;
* the return value might be garbage
*/
(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
aliveness_req = mei_txe_aliveness_req_get(dev);
hw->aliveness = mei_txe_aliveness_get(dev);
/* Disable interrupts in this stage we will poll */
mei_txe_intr_disable(dev);
/*
* If Aliveness Request and Aliveness Response are not equal then
* wait for them to be equal
* Since we might have interrupts disabled - poll for it
*/
if (aliveness_req != hw->aliveness)
if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
return -EIO;
}
/*
* If Aliveness Request and Aliveness Response are set then clear them
*/
if (aliveness_req) {
mei_txe_aliveness_set(dev, 0);
if (mei_txe_aliveness_poll(dev, 0) < 0) {
dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
return -EIO;
}
}
/*
* Set readiness RDY_CLR bit
*/
mei_txe_readiness_clear(dev);
return 0;
}
/**
* mei_txe_hw_start - start the hardware after reset
*
* @dev: the device structure
*
* Return: 0 on success, an error code otherwise
*/
static int mei_txe_hw_start(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
int ret;
u32 hisr;
/* bring back interrupts */
mei_txe_intr_enable(dev);
ret = mei_txe_readiness_wait(dev);
if (ret < 0) {
dev_err(dev->dev, "waiting for readiness failed\n");
return ret;
}
/*
* If HISR.INT2_STS interrupt status bit is set then clear it.
*/
hisr = mei_txe_br_reg_read(hw, HISR_REG);
if (hisr & HISR_INT_2_STS)
mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
/* Clear the interrupt cause of OutputDoorbell */
clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
ret = mei_txe_aliveness_set_sync(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
return ret;
}
pm_runtime_set_active(dev->dev);
/* enable input ready interrupts:
* SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
*/
mei_txe_input_ready_interrupt_enable(dev);
/* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
mei_txe_output_ready_set(hw);
/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY */
mei_txe_readiness_set_host_rdy(dev);
return 0;
}
/**
* mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
* single bit mask and acknowledge the interrupts
*
* @dev: the device structure
* @do_ack: acknowledge interrupts
*
* Return: true if found interrupts to process.
*/
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 hisr;
u32 hhisr;
u32 ipc_isr;
u32 aliveness;
bool generated;
/* read interrupt registers */
hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
generated = (hhisr & IPC_HHIER_MSK);
if (!generated)
goto out;
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
} else {
ipc_isr = 0;
hhisr &= ~IPC_HHIER_SEC;
}
if (do_ack) {
/* Save the interrupt causes */
hw->intr_cause |= hisr & HISR_INT_STS_MSK;
if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
hw->intr_cause |= TXE_INTR_IN_READY;
mei_txe_intr_disable(dev);
/* Clear the interrupts in hierarchy:
* IPC and Bridge, then the High Level */
mei_txe_sec_reg_write_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
mei_txe_br_reg_write(hw, HISR_REG, hisr);
mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
}
out:
return generated;
}
/**
* mei_txe_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: IRQ_WAKE_THREAD if the interrupt is intended for the device,
* IRQ_NONE otherwise
*/
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = dev_id;
if (mei_txe_check_and_ack_intrs(dev, true))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
/**
* mei_txe_irq_thread_handler - txe interrupt thread
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: IRQ_HANDLED
*/
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_txe_hw *hw = to_txe_hw(dev);
struct list_head cmpl_list;
s32 slots;
int rets = 0;
dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
mei_txe_br_reg_read(hw, HHISR_REG),
mei_txe_br_reg_read(hw, HISR_REG),
mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
/* initialize our complete list */
mutex_lock(&dev->device_lock);
INIT_LIST_HEAD(&cmpl_list);
if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
mei_txe_check_and_ack_intrs(dev, true);
/* show irq events */
mei_txe_pending_interrupts(dev);
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
/* Readiness:
* Detection of TXE driver going through reset
* or TXE driver resetting the HECI interface.
*/
if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
dev_dbg(dev->dev, "Readiness Interrupt was received...\n");
/* Check if SeC is going through reset */
if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
dev_dbg(dev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
} else {
dev->recvd_hw_ready = false;
if (dev->dev_state != MEI_DEV_RESETTING) {
dev_warn(dev->dev, "FW not ready: resetting.\n");
schedule_work(&dev->reset_work);
goto end;
}
}
wake_up(&dev->wait_hw_ready);
}
/************************************************************/
/* Check interrupt cause:
* Aliveness: Detection of SeC acknowledge of host request that
* it remain alive or host cancellation of that request.
*/
if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
/* Clear the interrupt cause */
dev_dbg(dev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
dev->pg_event = MEI_PG_EVENT_RECEIVED;
if (waitqueue_active(&hw->wait_aliveness_resp))
wake_up(&hw->wait_aliveness_resp);
}
/* Output Doorbell:
* Detection of SeC having sent output to host
*/
slots = mei_count_full_read_slots(dev);
if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
/* Read from TXE */
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
if (rets &&
(dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_POWER_DOWN)) {
dev_err(dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
schedule_work(&dev->reset_work);
goto end;
}
}
/* Input Ready: Detection if host can write to SeC */
if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
dev->hbuf_is_ready = true;
hw->slots = TXE_HBUF_DEPTH;
}
if (hw->aliveness && dev->hbuf_is_ready) {
/* get the real register value */
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
rets = mei_irq_write_handler(dev, &cmpl_list);
if (rets && rets != -EMSGSIZE)
dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
mei_irq_compl_handler(dev, &cmpl_list);
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
mutex_unlock(&dev->device_lock);
mei_enable_interrupts(dev);
return IRQ_HANDLED;
}
static const struct mei_hw_ops mei_txe_hw_ops = {
.host_is_ready = mei_txe_host_is_ready,
.fw_status = mei_txe_fw_status,
.pg_state = mei_txe_pg_state,
.hw_is_ready = mei_txe_hw_is_ready,
.hw_reset = mei_txe_hw_reset,
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
.pg_in_transition = mei_txe_pg_in_transition,
.pg_is_enabled = mei_txe_pg_is_enabled,
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
.synchronize_irq = mei_txe_synchronize_irq,
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
.hbuf_depth = mei_txe_hbuf_depth,
.write = mei_txe_write,
.rdbuf_full_slots = mei_txe_count_full_read_slots,
.read_hdr = mei_txe_read_hdr,
.read = mei_txe_read,
};
/**
* mei_txe_dev_init - allocates and initializes txe hardware specific structure
*
* @pdev: pci device
*
* Return: struct mei_device * on success or NULL
*/
struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);
hw = to_txe_hw(dev);
init_waitqueue_head(&hw->wait_aliveness_resp);
return dev;
}
/**
* mei_txe_setup_satt2 - SATT2 configuration for DMA support.
*
* @dev: the device structure
* @addr: physical address start of the range
* @range: physical range size
*
* Return: 0 on success, an error code otherwise
*/
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 lo32 = lower_32_bits(addr);
u32 hi32 = upper_32_bits(addr);
u32 ctrl;
/* SATT is limited to 36 Bits */
if (hi32 & ~0xF)
return -EINVAL;
/* SATT has to be 16-byte aligned */
if (lo32 & 0xF)
return -EINVAL;
/* SATT range has to be 4Bytes aligned */
if (range & 0x4)
return -EINVAL;
/* SATT is limited to a 32 MB range */
if (range > SATT_RANGE_MAX)
return -EINVAL;
ctrl = SATT2_CTRL_VALID_MSK;
ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
range, lo32, ctrl);
return 0;
}
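/*
* Worked example (illustrative only, hypothetical values): a mapping that
* passes the checks above could be
*
*   addr  = 0x0000000ABCDE0000 -> upper_32_bits = 0xA (within 36 bits),
*                                 lower_32_bits aligned to 16 bytes
*   range = SZ_4M              -> within the 32 MB SATT_RANGE_MAX limit
*
* The CTRL register then carries SATT2_CTRL_VALID_MSK with the upper
* address bits shifted into the bridge base address field.
*/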
| linux-master | drivers/misc/mei/hw-txe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
* mei_irq_compl_handler - dispatch complete handlers
* for the completed callbacks
*
* @dev: mei device
* @cmpl_list: list of completed cbs
*/
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
struct mei_cl_cb *cb, *next;
struct mei_cl *cl;
list_for_each_entry_safe(cb, next, cmpl_list, list) {
cl = cb->cl;
list_del_init(&cb->list);
dev_dbg(dev->dev, "completing call back.\n");
mei_cl_complete(cl, cb);
}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
/**
* mei_cl_hbm_equal - check if hbm is addressed to the client
*
* @cl: host client
* @mei_hdr: header of mei client message
*
* Return: true if matches, false otherwise
*/
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr)
{
return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
mei_cl_me_id(cl) == mei_hdr->me_addr;
}
/**
* mei_irq_discard_msg - discard received message
*
* @dev: mei device
* @hdr: message header
* @discard_len: the length of the message to discard (excluding header)
*/
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
size_t discard_len)
{
if (hdr->dma_ring) {
mei_dma_ring_read(dev, NULL,
hdr->extension[dev->rd_msg_hdr_count - 2]);
discard_len = 0;
}
/*
* no need to check for size as it is guaranteed
* that length fits into rd_msg_buf
*/
mei_read_slots(dev, dev->rd_msg_buf, discard_len);
dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
MEI_HDR_PRM(hdr));
}
/**
* mei_cl_irq_read_msg - process client message
*
* @cl: reading client
* @mei_hdr: header of mei client message
* @meta: extend meta header
* @cmpl_list: completion list
*
* Return: always 0
*/
static int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_ext_meta_hdr *meta,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
struct mei_ext_hdr_vtag *vtag_hdr = NULL;
struct mei_ext_hdr_gsc_f2h *gsc_f2h = NULL;
size_t buf_sz;
u32 length;
u32 ext_len;
length = mei_hdr->length;
ext_len = 0;
if (mei_hdr->extended) {
ext_len = sizeof(*meta) + mei_slots2data(meta->size);
length -= ext_len;
}
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
if (!cb) {
if (!mei_cl_is_fixed_address(cl)) {
cl_err(dev, cl, "pending read cb not found\n");
goto discard;
}
cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
if (!cb)
goto discard;
list_add_tail(&cb->list, &cl->rd_pending);
}
if (mei_hdr->extended) {
struct mei_ext_hdr *ext = mei_ext_begin(meta);
do {
switch (ext->type) {
case MEI_EXT_HDR_VTAG:
vtag_hdr = (struct mei_ext_hdr_vtag *)ext;
break;
case MEI_EXT_HDR_GSC:
gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext;
cb->ext_hdr = kzalloc(sizeof(*gsc_f2h), GFP_KERNEL);
if (!cb->ext_hdr) {
cb->status = -ENOMEM;
goto discard;
}
break;
case MEI_EXT_HDR_NONE:
fallthrough;
default:
cl_err(dev, cl, "unknown extended header\n");
cb->status = -EPROTO;
break;
}
ext = mei_ext_next(ext);
} while (!mei_ext_last(meta, ext));
if (!vtag_hdr && !gsc_f2h) {
cl_dbg(dev, cl, "no vtag or gsc found in extended header.\n");
cb->status = -EPROTO;
goto discard;
}
}
if (vtag_hdr) {
cl_dbg(dev, cl, "vtag: %d\n", vtag_hdr->vtag);
if (cb->vtag && cb->vtag != vtag_hdr->vtag) {
cl_err(dev, cl, "mismatched tag: %d != %d\n",
cb->vtag, vtag_hdr->vtag);
cb->status = -EPROTO;
goto discard;
}
cb->vtag = vtag_hdr->vtag;
}
if (gsc_f2h) {
u32 ext_hdr_len = mei_ext_hdr_len(&gsc_f2h->hdr);
if (!dev->hbm_f_gsc_supported) {
cl_err(dev, cl, "gsc extended header is not supported\n");
cb->status = -EPROTO;
goto discard;
}
if (length) {
cl_err(dev, cl, "no data allowed in cb with gsc\n");
cb->status = -EPROTO;
goto discard;
}
if (ext_hdr_len > sizeof(*gsc_f2h)) {
cl_err(dev, cl, "gsc extended header is too big %u\n", ext_hdr_len);
cb->status = -EPROTO;
goto discard;
}
memcpy(cb->ext_hdr, gsc_f2h, ext_hdr_len);
}
if (!mei_cl_is_connected(cl)) {
cl_dbg(dev, cl, "not connected\n");
cb->status = -ENODEV;
goto discard;
}
if (mei_hdr->dma_ring)
length = mei_hdr->extension[mei_data2slots(ext_len)];
buf_sz = length + cb->buf_idx;
/* catch for integer overflow */
if (buf_sz < cb->buf_idx) {
cl_err(dev, cl, "message is too big len %d idx %zu\n",
length, cb->buf_idx);
cb->status = -EMSGSIZE;
goto discard;
}
if (cb->buf.size < buf_sz) {
cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, length, cb->buf_idx);
cb->status = -EMSGSIZE;
goto discard;
}
if (mei_hdr->dma_ring) {
mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
/* for DMA read 0 length to generate interrupt to the device */
mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
} else {
mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
}
cb->buf_idx += length;
if (mei_hdr->msg_complete) {
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, cmpl_list);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
}
return 0;
discard:
if (cb)
list_move_tail(&cb->list, cmpl_list);
mei_irq_discard_msg(dev, mei_hdr, length);
return 0;
}
/**
* mei_cl_irq_disconnect_rsp - send disconnection response message
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
list_move_tail(&cb->list, cmpl_list);
return ret;
}
/**
* mei_cl_irq_read - processes a client read related operation from the
* interrupt thread context - requests flow control credits
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
if (!list_empty(&cl->rd_pending))
return 0;
msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_flow_control_req(dev, cl);
if (ret) {
cl->status = ret;
cb->buf_idx = 0;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
list_move_tail(&cb->list, &cl->rd_pending);
return 0;
}
static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}
static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}
static inline int hdr_is_valid(u32 msg_hdr)
{
struct mei_msg_hdr *mei_hdr;
u32 expected_len = 0;
mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
if (!msg_hdr || mei_hdr->reserved)
return -EBADMSG;
if (mei_hdr->dma_ring)
expected_len += MEI_SLOT_SIZE;
if (mei_hdr->extended)
expected_len += MEI_SLOT_SIZE;
if (mei_hdr->length < expected_len)
return -EBADMSG;
return 0;
}
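/*
* Illustrative numbers for the rule above: a header with both dma_ring and
* extended set must carry at least two extra dwords in its body, so
* mei_hdr->length has to be >= 2 * MEI_SLOT_SIZE (8 bytes); a plain header
* with neither bit set has no such minimum. Anything smaller is rejected
* as -EBADMSG before the payload is read.
*/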
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
* @dev: the device structure
* @cmpl_list: list of completed callbacks
* @slots: slots to read.
*
* Return: 0 on success, <0 on failure.
*/
int mei_irq_read_handler(struct mei_device *dev,
struct list_head *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_ext_meta_hdr *meta_hdr = NULL;
struct mei_cl *cl;
int ret;
u32 hdr_size_left;
u32 hdr_size_ext;
int i;
int ext_hdr_end;
if (!dev->rd_msg_hdr[0]) {
dev->rd_msg_hdr[0] = mei_read_hdr(dev);
dev->rd_msg_hdr_count = 1;
(*slots)--;
dev_dbg(dev->dev, "slots =%08x.\n", *slots);
ret = hdr_is_valid(dev->rd_msg_hdr[0]);
if (ret) {
dev_err(dev->dev, "corrupted message header 0x%08X\n",
dev->rd_msg_hdr[0]);
goto end;
}
}
mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_slots2data(*slots) < mei_hdr->length) {
dev_err(dev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
ret = -ENODATA;
goto end;
}
ext_hdr_end = 1;
hdr_size_left = mei_hdr->length;
if (mei_hdr->extended) {
if (!dev->rd_msg_hdr[1]) {
dev->rd_msg_hdr[1] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
}
meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
if (check_add_overflow((u32)sizeof(*meta_hdr),
mei_slots2data(meta_hdr->size),
&hdr_size_ext)) {
dev_err(dev->dev, "extended message size too big %d\n",
meta_hdr->size);
return -EBADMSG;
}
if (hdr_size_left < hdr_size_ext) {
dev_err(dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
hdr_size_left -= hdr_size_ext;
ext_hdr_end = meta_hdr->size + 2;
for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
dev->rd_msg_hdr[i] = mei_read_hdr(dev);
dev_dbg(dev->dev, "extended header %d is %08x\n", i,
dev->rd_msg_hdr[i]);
dev->rd_msg_hdr_count++;
(*slots)--;
}
}
if (mei_hdr->dma_ring) {
if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
dev_err(dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]);
}
/* HBM message */
if (hdr_is_hbm(mei_hdr)) {
ret = mei_hbm_dispatch(dev, mei_hdr);
if (ret) {
dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
ret);
goto end;
}
goto reset_slots;
}
/* find recipient cl */
list_for_each_entry(cl, &dev->file_list, link) {
if (mei_cl_hbm_equal(cl, mei_hdr)) {
cl_dbg(dev, cl, "got a message\n");
ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
goto reset_slots;
}
}
/* if no recipient cl was found we assume a corrupted header */
/* A message for a not connected fixed address client
* should be silently discarded.
* On power down a client may be force cleaned;
* silently discard such messages.
*/
if (hdr_is_fixed(mei_hdr) ||
dev->dev_state == MEI_DEV_POWER_DOWN) {
mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
ret = 0;
goto reset_slots;
}
dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
ret = -EBADMSG;
goto end;
reset_slots:
/* reset the number of slots and header */
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
dev->rd_msg_hdr_count = 0;
*slots = mei_count_full_read_slots(dev);
if (*slots == -EOVERFLOW) {
/* overflow - reset */
dev_err(dev->dev, "resetting due to slots overflow.\n");
/* set the event since message has been read */
ret = -ERANGE;
goto end;
}
end:
return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
/**
* mei_irq_write_handler - dispatch write requests
* after irq received
*
* @dev: the device structure
* @cmpl_list: list of completed callbacks
*
* Return: 0 on success, <0 on failure.
*/
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
s32 slots;
int ret;
if (!mei_hbuf_acquire(dev))
return 0;
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if (slots == 0)
return -EMSGSIZE;
/* complete all waiting for write CB */
dev_dbg(dev->dev, "complete all waiting for write cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
cl = cb->cl;
cl->status = 0;
cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
list_move_tail(&cb->list, cmpl_list);
}
/* complete control write list CB */
dev_dbg(dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
cl = cb->cl;
switch (cb->fop_type) {
case MEI_FOP_DISCONNECT:
/* send disconnect message */
ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_READ:
/* send flow control message */
ret = mei_cl_irq_read(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_CONNECT:
/* connect message */
ret = mei_cl_irq_connect(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_DISCONNECT_RSP:
/* send disconnect resp */
ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_NOTIFY_START:
case MEI_FOP_NOTIFY_STOP:
ret = mei_cl_irq_notify(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_DMA_MAP:
ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_DMA_UNMAP:
ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
if (ret)
return ret;
break;
default:
BUG();
}
}
/* complete write list CB */
dev_dbg(dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
ret = mei_cl_irq_write(cl, cb, cmpl_list);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
/**
* mei_connect_timeout - connect/disconnect timeouts
*
* @cl: host client
*/
static void mei_connect_timeout(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_CONNECTING) {
if (dev->hbm_f_dot_supported) {
cl->state = MEI_FILE_DISCONNECT_REQUIRED;
wake_up(&cl->wait);
return;
}
}
mei_reset(dev);
}
#define MEI_STALL_TIMER_FREQ (2 * HZ)
/**
* mei_schedule_stall_timer - re-arm stall_timer work
*
* Schedule stall timer
*
* @dev: the device structure
*/
void mei_schedule_stall_timer(struct mei_device *dev)
{
schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
/**
* mei_timer - timer function.
*
* @work: pointer to the work_struct structure
*
*/
void mei_timer(struct work_struct *work)
{
struct mei_cl *cl;
struct mei_device *dev = container_of(work,
struct mei_device, timer_work.work);
bool reschedule_timer = false;
mutex_lock(&dev->device_lock);
/* Catch interrupt stalls during HBM init handshake */
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->hbm_state != MEI_HBM_IDLE) {
if (dev->init_clients_timer) {
if (--dev->init_clients_timer == 0) {
dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
dev->hbm_state);
mei_reset(dev);
goto out;
}
reschedule_timer = true;
}
}
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
/*** connect/disconnect timeouts ***/
list_for_each_entry(cl, &dev->file_list, link) {
if (cl->timer_count) {
if (--cl->timer_count == 0) {
dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
mei_connect_timeout(cl);
goto out;
}
reschedule_timer = true;
}
}
out:
if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
mei_schedule_stall_timer(dev);
mutex_unlock(&dev->device_lock);
}
| linux-master | drivers/misc/mei/interrupt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "client.h"
#include "hw.h"
static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused)
{
struct mei_device *dev = m->private;
struct mei_me_client *me_cl;
int i = 0;
if (!dev)
return -ENODEV;
down_read(&dev->me_clients_rwsem);
seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|vt|\n");
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
list_for_each_entry(me_cl, &dev->me_clients, list) {
if (!mei_me_cl_get(me_cl))
continue;
seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|%2d|\n",
i++, me_cl->client_id,
me_cl->props.fixed_address,
&me_cl->props.protocol_name,
me_cl->props.max_number_of_connections,
me_cl->props.max_msg_length,
me_cl->props.single_recv_buf,
kref_read(&me_cl->refcnt),
me_cl->props.vt_supported);
mei_me_cl_put(me_cl);
}
out:
up_read(&dev->me_clients_rwsem);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_meclients);
static int mei_dbgfs_active_show(struct seq_file *m, void *unused)
{
struct mei_device *dev = m->private;
struct mei_cl *cl;
int i = 0;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
seq_puts(m, " |me|host|state|rd|wr|wrq\n");
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
goto out;
list_for_each_entry(cl, &dev->file_list, link) {
seq_printf(m, "%3d|%2d|%4d|%5d|%2d|%2d|%3u\n",
i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
!list_empty(&cl->rd_completed), cl->writing_state,
cl->tx_cb_queued);
i++;
}
out:
mutex_unlock(&dev->device_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active);
static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state)
{
#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state
switch (state) {
MEI_PXP_MODE(DEFAULT);
MEI_PXP_MODE(INIT);
MEI_PXP_MODE(SETUP);
MEI_PXP_MODE(READY);
default:
return "unknown";
}
#undef MEI_PXP_MODE
}
static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
{
struct mei_device *dev = m->private;
seq_printf(m, "dev: %s\n", mei_dev_state_str(dev->dev_state));
seq_printf(m, "hbm: %s\n", mei_hbm_state_str(dev->hbm_state));
if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
dev->hbm_state <= MEI_HBM_STARTED) {
seq_puts(m, "hbm features:\n");
seq_printf(m, "\tPG: %01d\n", dev->hbm_f_pg_supported);
seq_printf(m, "\tDC: %01d\n", dev->hbm_f_dc_supported);
seq_printf(m, "\tIE: %01d\n", dev->hbm_f_ie_supported);
seq_printf(m, "\tDOT: %01d\n", dev->hbm_f_dot_supported);
seq_printf(m, "\tEV: %01d\n", dev->hbm_f_ev_supported);
seq_printf(m, "\tFA: %01d\n", dev->hbm_f_fa_supported);
seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported);
seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported);
seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported);
seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported);
seq_printf(m, "\tCD: %01d\n", dev->hbm_f_cd_supported);
}
seq_printf(m, "pg: %s, %s\n",
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
mei_pg_state_str(mei_pg_state(dev)));
seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mei_device *dev;
int ret;
dev = container_of(file->private_data,
struct mei_device, allow_fixed_address);
ret = debugfs_write_file_bool(file, user_buf, count, ppos);
if (ret < 0)
return ret;
dev->override_fixed_address = true;
return ret;
}
static const struct file_operations mei_dbgfs_allow_fa_fops = {
.open = simple_open,
.read = debugfs_read_file_bool,
.write = mei_dbgfs_write_allow_fa,
.llseek = generic_file_llseek,
};
/**
* mei_dbgfs_deregister - Remove the debugfs files and directories
*
* @dev: the mei device structure
*/
void mei_dbgfs_deregister(struct mei_device *dev)
{
if (!dev->dbgfs_dir)
return;
debugfs_remove_recursive(dev->dbgfs_dir);
dev->dbgfs_dir = NULL;
}
/**
* mei_dbgfs_register - Add the debugfs files
*
* @dev: the mei device structure
* @name: the mei device name
*/
void mei_dbgfs_register(struct mei_device *dev, const char *name)
{
struct dentry *dir;
dir = debugfs_create_dir(name, NULL);
dev->dbgfs_dir = dir;
debugfs_create_file("meclients", S_IRUSR, dir, dev,
&mei_dbgfs_meclients_fops);
debugfs_create_file("active", S_IRUSR, dir, dev,
&mei_dbgfs_active_fops);
debugfs_create_file("devstate", S_IRUSR, dir, dev,
&mei_dbgfs_devstate_fops);
debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
&dev->allow_fixed_address,
&mei_dbgfs_allow_fa_fops);
}
| linux-master | drivers/misc/mei/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
switch (state) {
MEI_DEV_STATE(INITIALIZING);
MEI_DEV_STATE(INIT_CLIENTS);
MEI_DEV_STATE(ENABLED);
MEI_DEV_STATE(RESETTING);
MEI_DEV_STATE(DISABLED);
MEI_DEV_STATE(POWERING_DOWN);
MEI_DEV_STATE(POWER_DOWN);
MEI_DEV_STATE(POWER_UP);
default:
return "unknown";
}
#undef MEI_DEV_STATE
}
const char *mei_pg_state_str(enum mei_pg_state state)
{
#define MEI_PG_STATE(state) case MEI_PG_##state: return #state
switch (state) {
MEI_PG_STATE(OFF);
MEI_PG_STATE(ON);
default:
return "unknown";
}
#undef MEI_PG_STATE
}
/**
* mei_fw_status2str - convert fw status registers to printable string
*
* @fw_status: firmware status
* @buf: string buffer at minimal size MEI_FW_STATUS_STR_SZ
* @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
*
* Return: number of bytes written or -EINVAL if the buffer is too small
*/
ssize_t mei_fw_status2str(struct mei_fw_status *fw_status,
char *buf, size_t len)
{
ssize_t cnt = 0;
int i;
buf[0] = '\0';
if (len < MEI_FW_STATUS_STR_SZ)
return -EINVAL;
for (i = 0; i < fw_status->count; i++)
cnt += scnprintf(buf + cnt, len - cnt, "%08X ",
fw_status->status[i]);
/* drop last space */
buf[cnt] = '\0';
return cnt;
}
EXPORT_SYMBOL_GPL(mei_fw_status2str);
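/*
* Illustrative output (hypothetical register values): with count == 2 and
* status[] = { 0x80000245, 0x00000000 } the buffer holds
* "80000245 00000000 " and 18 is returned, i.e. each register is printed
* as eight hex digits followed by a space.
*/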
/**
* mei_cancel_work - Cancel mei background jobs
*
* @dev: the device structure
*/
void mei_cancel_work(struct mei_device *dev)
{
cancel_work_sync(&dev->reset_work);
cancel_work_sync(&dev->bus_rescan_work);
cancel_delayed_work_sync(&dev->timer_work);
}
EXPORT_SYMBOL_GPL(mei_cancel_work);
/**
* mei_reset - resets host and fw.
*
* @dev: the device structure
*
* Return: 0 on success or < 0 if the reset hasn't succeeded
*/
int mei_reset(struct mei_device *dev)
{
enum mei_dev_state state = dev->dev_state;
bool interrupts_enabled;
int ret;
if (state != MEI_DEV_INITIALIZING &&
state != MEI_DEV_DISABLED &&
state != MEI_DEV_POWER_DOWN &&
state != MEI_DEV_POWER_UP) {
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
mei_dev_state_str(state), fw_sts_str);
}
mei_clear_interrupts(dev);
/* we're already in reset, cancel the init timer.
* If the reset was called due to an hbm protocol error
* we need to call it before hw start
* so the hbm watchdog won't kick in.
*/
mei_hbm_idle(dev);
/* enter reset flow */
interrupts_enabled = state != MEI_DEV_POWER_DOWN;
mei_set_devstate(dev, MEI_DEV_RESETTING);
dev->reset_count++;
if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return -ENODEV;
}
ret = mei_hw_reset(dev, interrupts_enabled);
/* fall through and remove the sw state even if hw reset has failed */
/* no need to clean up software state in case of power up */
if (state != MEI_DEV_INITIALIZING && state != MEI_DEV_POWER_UP)
mei_cl_all_disconnect(dev);
mei_hbm_reset(dev);
/* clean stale FW version */
dev->fw_ver_received = 0;
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
if (ret) {
dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
return ret;
}
if (state == MEI_DEV_POWER_DOWN) {
dev_dbg(dev->dev, "powering down: end of reset\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return 0;
}
ret = mei_hw_start(dev);
if (ret) {
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
dev_err(dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
return ret;
}
if (dev->dev_state != MEI_DEV_RESETTING) {
dev_dbg(dev->dev, "wrong state = %d on link start\n", dev->dev_state);
return 0;
}
dev_dbg(dev->dev, "link is established start sending messages.\n");
mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
ret = mei_hbm_start_req(dev);
if (ret) {
dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
mei_set_devstate(dev, MEI_DEV_RESETTING);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(mei_reset);
/**
* mei_start - initializes host and fw to start work.
*
* @dev: the device structure
*
* Return: 0 on success, <0 on failure.
*/
int mei_start(struct mei_device *dev)
{
int ret;
mutex_lock(&dev->device_lock);
/* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
ret = mei_hw_config(dev);
if (ret)
goto err;
dev_dbg(dev->dev, "reset in start the mei device.\n");
dev->reset_count = 0;
do {
mei_set_devstate(dev, MEI_DEV_INITIALIZING);
ret = mei_reset(dev);
if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
dev_err(dev->dev, "reset failed ret = %d", ret);
goto err;
}
} while (ret);
if (mei_hbm_start_wait(dev)) {
dev_err(dev->dev, "HBM haven't started");
goto err;
}
if (!mei_hbm_version_is_supported(dev)) {
dev_dbg(dev->dev, "MEI start failed.\n");
goto err;
}
dev_dbg(dev->dev, "link layer has been established.\n");
mutex_unlock(&dev->device_lock);
return 0;
err:
dev_err(dev->dev, "link layer initialization failed.\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(mei_start);
/**
* mei_restart - restart device after suspend
*
* @dev: the device structure
*
* Return: 0 on success or -ENODEV if the restart hasn't succeeded
*/
int mei_restart(struct mei_device *dev)
{
int err;
mutex_lock(&dev->device_lock);
mei_set_devstate(dev, MEI_DEV_POWER_UP);
dev->reset_count = 0;
err = mei_reset(dev);
mutex_unlock(&dev->device_lock);
if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
dev_err(dev->dev, "device disabled = %d\n", err);
return -ENODEV;
}
/* try to start again */
if (err)
schedule_work(&dev->reset_work);
return 0;
}
EXPORT_SYMBOL_GPL(mei_restart);
static void mei_reset_work(struct work_struct *work)
{
struct mei_device *dev =
container_of(work, struct mei_device, reset_work);
int ret;
mei_clear_interrupts(dev);
mei_synchronize_irq(dev);
mutex_lock(&dev->device_lock);
ret = mei_reset(dev);
mutex_unlock(&dev->device_lock);
if (dev->dev_state == MEI_DEV_DISABLED) {
dev_err(dev->dev, "device disabled = %d\n", ret);
return;
}
/* retry reset in case of failure */
if (ret)
schedule_work(&dev->reset_work);
}
void mei_stop(struct mei_device *dev)
{
dev_dbg(dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
mei_set_devstate(dev, MEI_DEV_POWERING_DOWN);
mutex_unlock(&dev->device_lock);
mei_cl_bus_remove_devices(dev);
mutex_lock(&dev->device_lock);
mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
mutex_unlock(&dev->device_lock);
mei_cancel_work(dev);
mei_clear_interrupts(dev);
mei_synchronize_irq(dev);
/* to catch HW-initiated reset */
mei_cancel_work(dev);
mutex_lock(&dev->device_lock);
mei_reset(dev);
/* move device to disabled state unconditionally */
mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
}
EXPORT_SYMBOL_GPL(mei_stop);
/**
* mei_write_is_idle - check if the write queues are idle
*
* @dev: the device structure
*
* Return: true if there is no pending write
*/
bool mei_write_is_idle(struct mei_device *dev)
{
bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
list_empty(&dev->ctrl_wr_list) &&
list_empty(&dev->write_list) &&
list_empty(&dev->write_waiting_list));
dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list),
list_empty(&dev->write_list),
list_empty(&dev->write_waiting_list));
return idle;
}
EXPORT_SYMBOL_GPL(mei_write_is_idle);
/**
* mei_device_init - initialize mei_device structure
*
* @dev: the mei device
* @device: the device structure
* @slow_fw: configure longer timeouts as FW is slow
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
INIT_LIST_HEAD(&dev->device_list);
INIT_LIST_HEAD(&dev->me_clients);
mutex_init(&dev->device_lock);
init_rwsem(&dev->me_clients_rwsem);
mutex_init(&dev->cl_bus_lock);
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
INIT_LIST_HEAD(&dev->write_list);
INIT_LIST_HEAD(&dev->write_waiting_list);
INIT_LIST_HEAD(&dev->ctrl_wr_list);
INIT_LIST_HEAD(&dev->ctrl_rd_list);
dev->tx_queue_limit = MEI_TX_QUEUE_LIMIT_DEFAULT;
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
INIT_WORK(&dev->reset_work, mei_reset_work);
INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
dev->pxp_mode = MEI_DEV_PXP_DEFAULT;
/*
* Reserving the first client ID
* 0: Reserved for MEI Bus Message communications
*/
bitmap_set(dev->host_clients_map, 0, 1);
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
dev->dev = device;
dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT;
dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
if (slow_fw) {
dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW);
dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW);
dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW);
} else {
dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
}
}
EXPORT_SYMBOL_GPL(mei_device_init);
| linux-master | drivers/misc/mei/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/mei.h>
#include "mei_dev.h"
/**
* mei_dmam_dscr_alloc() - allocate a managed coherent buffer
* for the dma descriptor
* @dev: mei_device
* @dscr: dma descriptor
*
* Return:
* * 0 - on success or zero allocation request
* * -EINVAL - if size is not a power of 2
* * -ENOMEM - if allocation has failed
*/
static int mei_dmam_dscr_alloc(struct mei_device *dev,
struct mei_dma_dscr *dscr)
{
if (!dscr->size)
return 0;
if (WARN_ON(!is_power_of_2(dscr->size)))
return -EINVAL;
if (dscr->vaddr)
return 0;
dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
GFP_KERNEL);
if (!dscr->vaddr)
return -ENOMEM;
return 0;
}
/**
* mei_dmam_dscr_free() - free a managed coherent buffer
* from the dma descriptor
* @dev: mei_device
* @dscr: dma descriptor
*/
static void mei_dmam_dscr_free(struct mei_device *dev,
struct mei_dma_dscr *dscr)
{
if (!dscr->vaddr)
return;
dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
dscr->vaddr = NULL;
}
/**
* mei_dmam_ring_free() - free dma ring buffers
* @dev: mei device
*/
void mei_dmam_ring_free(struct mei_device *dev)
{
int i;
for (i = 0; i < DMA_DSCR_NUM; i++)
mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}
/**
* mei_dmam_ring_alloc() - allocate dma ring buffers
* @dev: mei device
*
* Return: -ENOMEM on allocation failure 0 otherwise
*/
int mei_dmam_ring_alloc(struct mei_device *dev)
{
int i;
for (i = 0; i < DMA_DSCR_NUM; i++)
if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
goto err;
return 0;
err:
mei_dmam_ring_free(dev);
return -ENOMEM;
}
/**
* mei_dma_ring_is_allocated() - check if dma ring is allocated
* @dev: mei device
*
* Return: true if dma ring is allocated
*/
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}
/**
* mei_dma_ring_reset() - reset the dma control block
* @dev: mei device
*/
void mei_dma_ring_reset(struct mei_device *dev)
{
struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
if (!ctrl)
return;
memset(ctrl, 0, sizeof(*ctrl));
}
/**
* mei_dma_copy_from() - copy from dma ring into buffer
* @dev: mei device
* @buf: data buffer
* @offset: offset in slots.
* @n: number of slots to copy.
*/
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
u32 offset, u32 n)
{
unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
size_t b_offset = offset << 2;
size_t b_n = n << 2;
memcpy(buf, dbuf + b_offset, b_n);
return b_n;
}
/**
* mei_dma_copy_to() - copy to a buffer to the dma ring
* @dev: mei device
* @buf: data buffer
* @offset: offset in slots.
* @n: number of slots to copy.
*/
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
u32 offset, u32 n)
{
unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
size_t b_offset = offset << 2;
size_t b_n = n << 2;
memcpy(hbuf + b_offset, buf, b_n);
return b_n;
}
/**
* mei_dma_ring_read() - read data from the ring
* @dev: mei device
* @buf: buffer to read into; may be NULL in case of dropping the data.
* @len: length to read.
*/
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
u32 dbuf_depth;
u32 rd_idx, rem, slots;
if (WARN_ON(!ctrl))
return;
dev_dbg(dev->dev, "reading from dma %u bytes\n", len);
if (!len)
return;
dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
slots = mei_data2slots(len);
/* if buf is NULL we drop the packet by advancing the pointer. */
if (!buf)
goto out;
if (rd_idx + slots > dbuf_depth) {
buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
rem = slots - (dbuf_depth - rd_idx);
rd_idx = 0;
} else {
rem = slots;
}
mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
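/*
* Wrap-around example (illustrative only, hypothetical sizes): with a
* 512-byte device buffer, dbuf_depth = 128 slots. For rd_idx = 126 and
* len = 16 (slots = 4) the read above is split into
*
*   slots 126-127 (dbuf_depth - rd_idx = 2 slots)
*   slots 0-1     (rem = 4 - 2 = 2 slots)
*
* and dbuf_rd_idx is advanced by the full 4 slots. The masking with
* (dbuf_depth - 1) relies on the ring size being a power of two, which the
* allocation path enforces.
*/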
static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}
/**
* mei_dma_ring_empty_slots() - calculate the number of empty slots in the dma ring
* @dev: mei_device
*
* Return: number of empty slots
*/
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
u32 wr_idx, rd_idx, hbuf_depth, empty;
if (!mei_dma_ring_is_allocated(dev))
return 0;
if (WARN_ON(!ctrl))
return 0;
/* easier to work in slots */
hbuf_depth = mei_dma_ring_hbuf_depth(dev);
rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
if (rd_idx > wr_idx)
empty = rd_idx - wr_idx;
else
empty = hbuf_depth - (wr_idx - rd_idx);
return empty;
}
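/*
* Illustrative arithmetic (hypothetical indices) for the function above,
* with hbuf_depth = 128 slots:
*
*   rd_idx = 10,  wr_idx = 120 -> empty = 128 - (120 - 10) = 18
*   rd_idx = 120, wr_idx = 10  -> empty = 120 - 10         = 110
*
* Only the distance between the read and write indices matters, not their
* absolute values.
*/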
/**
* mei_dma_ring_write - write data to dma ring host buffer
*
* @dev: mei_device
* @buf: data to be written
* @len: data length
*/
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
u32 hbuf_depth;
u32 wr_idx, rem, slots;
if (WARN_ON(!ctrl))
return;
dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
hbuf_depth = mei_dma_ring_hbuf_depth(dev);
wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
slots = mei_data2slots(len);
if (wr_idx + slots > hbuf_depth) {
buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
rem = slots - (hbuf_depth - wr_idx);
wr_idx = 0;
} else {
rem = slots;
}
mei_dma_copy_to(dev, buf, wr_idx, rem);
WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
| linux-master | drivers/misc/mei/dma-ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "hw-me-regs.h"
#include "mei-trace.h"
/**
* mei_me_reg_read - Reads 32bit data from the mei device
*
* @hw: the me hardware structure
* @offset: offset from which to read the data
*
* Return: register value (u32)
*/
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
unsigned long offset)
{
return ioread32(hw->mem_addr + offset);
}
/**
* mei_me_reg_write - Writes 32bit data to the mei device
*
* @hw: the me hardware structure
* @offset: offset to which to write the data
* @value: register value to write (u32)
*/
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
unsigned long offset, u32 value)
{
iowrite32(value, hw->mem_addr + offset);
}
/**
* mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
* read window register
*
* @dev: the device structure
*
* Return: ME_CB_RW register value (u32)
*/
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}
/**
* mei_me_hcbww_write - write 32bit data to the host circular buffer
*
* @dev: the device structure
* @data: 32bit data to be written to the host circular buffer
*/
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}
/**
* mei_me_mecsr_read - Reads 32bit data from the ME CSR
*
* @dev: the device structure
*
* Return: ME_CSR_HA register value (u32)
*/
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
return reg;
}
/**
* mei_hcsr_read - Reads 32bit data from the host CSR
*
* @dev: the device structure
*
* Return: H_CSR register value (u32)
*/
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
return reg;
}
/**
* mei_hcsr_write - writes H_CSR register to the mei device
*
* @dev: the device structure
* @reg: new register value
*/
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
/**
* mei_hcsr_set - writes H_CSR register to the mei device,
* and ignores the H_IS bit for it is write-one-to-zero.
*
* @dev: the device structure
* @reg: new register value
*/
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
reg &= ~H_CSR_IS_MASK;
mei_hcsr_write(dev, reg);
}
/**
* mei_hcsr_set_hig - set host interrupt (set H_IG)
*
* @dev: the device structure
*/
static inline void mei_hcsr_set_hig(struct mei_device *dev)
{
u32 hcsr;
hcsr = mei_hcsr_read(dev) | H_IG;
mei_hcsr_set(dev, hcsr);
}
/**
* mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
*
* @dev: the device structure
*
* Return: H_D0I3C register value (u32)
*/
static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
{
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
return reg;
}
/**
* mei_me_d0i3c_write - writes H_D0I3C register to device
*
* @dev: the device structure
* @reg: new register value
*/
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
/**
* mei_me_trc_status - read trc status register
*
* @dev: mei device
* @trc: trc status register value
*
* Return: 0 on success, error otherwise
*/
static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (!hw->cfg->hw_trc_supported)
return -EOPNOTSUPP;
*trc = mei_me_reg_read(hw, ME_TRC);
trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
return 0;
}
/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
* @fw_status: fw status register values
*
* Return: 0 on success, error otherwise
*/
static int mei_me_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
struct mei_me_hw *hw = to_me_hw(dev);
const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
int ret;
int i;
if (!fw_status || !hw->read_fws)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = hw->read_fws(dev, fw_src->status[i],
&fw_status->status[i]);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
return ret;
}
return 0;
}
/**
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
*
* Return:
* * -EINVAL when read_fws is not set
* * 0 on success
*
*/
static int mei_me_hw_config(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr, reg;
if (WARN_ON(!hw->read_fws))
return -EINVAL;
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
	hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
hw->pg_state = MEI_PG_OFF;
if (hw->d0i3_supported) {
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3)
hw->pg_state = MEI_PG_ON;
}
return 0;
}
/**
* mei_me_pg_state - translate internal pg state
* to the mei power gating state
*
* @dev: mei device
*
* Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
*/
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
return hw->pg_state;
}
static inline u32 me_intr_src(u32 hcsr)
{
return hcsr & H_CSR_IS_MASK;
}
/**
* me_intr_disable - disables mei device interrupts
* using supplied hcsr register value.
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
*/
static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
{
hcsr &= ~H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
/**
* me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
*/
static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
{
if (me_intr_src(hcsr))
mei_hcsr_write(dev, hcsr);
}
/**
* mei_me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_clear(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
me_intr_clear(dev, hcsr);
}
/**
* mei_me_intr_enable - enables mei device interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
u32 hcsr;
if (mei_me_hw_use_polling(to_me_hw(dev)))
return;
hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
/**
* mei_me_intr_disable - disables mei device interrupts
*
* @dev: the device structure
*/
static void mei_me_intr_disable(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
me_intr_disable(dev, hcsr);
}
/**
* mei_me_synchronize_irq - wait for pending IRQ handlers
*
* @dev: the device structure
*/
static void mei_me_synchronize_irq(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (mei_me_hw_use_polling(hw))
return;
synchronize_irq(hw->irq);
}
/**
* mei_me_hw_reset_release - release device from the reset
*
* @dev: the device structure
*/
static void mei_me_hw_reset_release(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
}
/**
* mei_me_host_set_ready - enable device
*
* @dev: mei device
*/
static void mei_me_host_set_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
if (!mei_me_hw_use_polling(to_me_hw(dev)))
hcsr |= H_CSR_IE_MASK;
hcsr |= H_IG | H_RDY;
mei_hcsr_set(dev, hcsr);
}
/**
* mei_me_host_is_ready - check whether the host has turned ready
*
* @dev: mei device
* Return: bool
*/
static bool mei_me_host_is_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
return (hcsr & H_RDY) == H_RDY;
}
/**
* mei_me_hw_is_ready - check whether the me(hw) has turned ready
*
* @dev: mei device
* Return: bool
*/
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
u32 mecsr = mei_me_mecsr_read(dev);
return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}
/**
* mei_me_hw_is_resetting - check whether the me(hw) is in reset
*
* @dev: mei device
* Return: bool
*/
static bool mei_me_hw_is_resetting(struct mei_device *dev)
{
u32 mecsr = mei_me_mecsr_read(dev);
return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}
/**
* mei_gsc_pxp_check - check for gsc firmware entering pxp mode
*
* @dev: the device structure
*/
static void mei_gsc_pxp_check(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 fwsts5 = 0;
if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
return;
hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
dev->pxp_mode = MEI_DEV_PXP_READY;
} else {
dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
}
}
/**
* mei_me_hw_ready_wait - wait until the me(hw) has turned ready
* or timeout is reached
*
* @dev: mei device
* Return: 0 on success, error otherwise
*/
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait hw ready failed\n");
return -ETIME;
}
mei_gsc_pxp_check(dev);
mei_me_hw_reset_release(dev);
dev->recvd_hw_ready = false;
return 0;
}
/**
* mei_me_hw_start - hw start routine
*
* @dev: mei device
* Return: 0 on success, error otherwise
*/
static int mei_me_hw_start(struct mei_device *dev)
{
int ret = mei_me_hw_ready_wait(dev);
if (ret)
return ret;
dev_dbg(dev->dev, "hw is ready\n");
mei_me_host_set_ready(dev);
return ret;
}
/**
* mei_hbuf_filled_slots - gets number of device filled buffer slots
*
* @dev: the device structure
*
* Return: number of filled slots
*/
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
u32 hcsr;
char read_ptr, write_ptr;
hcsr = mei_hcsr_read(dev);
read_ptr = (char) ((hcsr & H_CBRP) >> 8);
write_ptr = (char) ((hcsr & H_CBWP) >> 16);
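	/*
	 * Illustrative example (added comment, not part of the original
	 * driver): the circular buffer pointers are free-running 8-bit
	 * counters, so the subtraction wraps naturally; e.g.
	 * read_ptr = 0xFE and write_ptr = 0x03 gives
	 * (unsigned char)(0x03 - 0xFE) = 5 filled slots.
	 */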
return (unsigned char) (write_ptr - read_ptr);
}
/**
* mei_me_hbuf_is_empty - checks if host buffer is empty.
*
* @dev: the device structure
*
* Return: true if empty, false - otherwise.
*/
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
return mei_hbuf_filled_slots(dev) == 0;
}
/**
 * mei_me_hbuf_empty_slots - counts empty write slots.
*
* @dev: the device structure
*
* Return: -EOVERFLOW if overflow, otherwise empty slots count
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned char filled_slots, empty_slots;
filled_slots = mei_hbuf_filled_slots(dev);
empty_slots = hw->hbuf_depth - filled_slots;
/* check for overflow */
if (filled_slots > hw->hbuf_depth)
return -EOVERFLOW;
return empty_slots;
}
/**
* mei_me_hbuf_depth - returns depth of the hw buffer.
*
* @dev: the device structure
*
* Return: size of hw buffer in slots
*/
static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
return hw->hbuf_depth;
}
/**
* mei_me_hbuf_write - writes a message to host hw buffer.
*
* @dev: the device structure
* @hdr: header of message
 * @hdr_len: header length in bytes; must be a multiple of the slot size (4 bytes)
* @data: payload
* @data_len: payload length in bytes
*
* Return: 0 if success, < 0 - otherwise.
*/
static int mei_me_hbuf_write(struct mei_device *dev,
const void *hdr, size_t hdr_len,
const void *data, size_t data_len)
{
unsigned long rem;
unsigned long i;
const u32 *reg_buf;
u32 dw_cnt;
int empty_slots;
if (WARN_ON(!hdr || hdr_len & 0x3))
return -EINVAL;
if (!data && data_len) {
dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
return -EINVAL;
}
dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
if (empty_slots < 0)
return -EOVERFLOW;
dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > (u32)empty_slots)
return -EMSGSIZE;
reg_buf = hdr;
for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
mei_me_hcbww_write(dev, reg_buf[i]);
reg_buf = data;
for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
mei_me_hcbww_write(dev, reg_buf[i]);
rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
		memcpy(&reg, (const u8 *)data + data_len - rem, rem);
mei_me_hcbww_write(dev, reg);
}
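	/*
	 * Illustrative example (added comment, not part of the original
	 * driver): for data_len = 10 the loop above writes two full
	 * 4-byte slots, rem = 10 & 0x3 = 2, and the last two bytes are
	 * copied into a zero-initialized dword, so the tail of the final
	 * slot is written out zero padded.
	 */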
mei_hcsr_set_hig(dev);
if (!mei_me_hw_is_ready(dev))
return -EIO;
return 0;
}
/**
* mei_me_count_full_read_slots - counts read full slots.
*
* @dev: the device structure
*
* Return: -EOVERFLOW if overflow, otherwise filled slots count
*/
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
u32 me_csr;
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
me_csr = mei_me_mecsr_read(dev);
buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
filled_slots = (unsigned char) (write_ptr - read_ptr);
/* check for overflow */
if (filled_slots > buffer_depth)
return -EOVERFLOW;
dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
/**
* mei_me_read_slots - reads a message from mei device.
*
* @dev: the device structure
 * @buffer: buffer into which the message is read
 * @buffer_length: number of bytes to read into the buffer
*
* Return: always 0
*/
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
*reg_buf++ = mei_me_mecbrw_read(dev);
if (buffer_length > 0) {
u32 reg = mei_me_mecbrw_read(dev);
		memcpy(reg_buf, &reg, buffer_length);
}
mei_hcsr_set_hig(dev);
return 0;
}
/**
* mei_me_pg_set - write pg enter register
*
* @dev: the device structure
*/
static void mei_me_pg_set(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
reg |= H_HPG_CSR_PGI;
trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
* mei_me_pg_unset - write pg exit register
*
* @dev: the device structure
*/
static void mei_me_pg_unset(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
reg |= H_HPG_CSR_PGIHEXR;
trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
/**
* mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
int ret;
dev->pg_event = MEI_PG_EVENT_WAIT;
ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
if (ret)
return ret;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_RECEIVED,
dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
mei_me_pg_set(dev);
ret = 0;
} else {
ret = -ETIME;
}
dev->pg_event = MEI_PG_EVENT_IDLE;
hw->pg_state = MEI_PG_ON;
return ret;
}
/**
* mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
int ret;
if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
goto reply;
dev->pg_event = MEI_PG_EVENT_WAIT;
mei_me_pg_unset(dev);
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_RECEIVED,
dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
reply:
if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
ret = -ETIME;
goto out;
}
dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
if (ret)
return ret;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
ret = 0;
else
ret = -ETIME;
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
hw->pg_state = MEI_PG_OFF;
return ret;
}
/**
* mei_me_pg_in_transition - is device now in pg transition
*
* @dev: the device structure
*
* Return: true if in pg transition, false otherwise
*/
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
return dev->pg_event >= MEI_PG_EVENT_WAIT &&
dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}
/**
* mei_me_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
*
 * Return: true if pg is supported, false otherwise
*/
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 reg = mei_me_mecsr_read(dev);
if (hw->d0i3_supported)
return true;
if ((reg & ME_PGIC_HRA) == 0)
goto notsupported;
if (!dev->hbm_f_pg_supported)
goto notsupported;
return true;
notsupported:
dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
hw->d0i3_supported,
!!(reg & ME_PGIC_HRA),
dev->version.major_version,
dev->version.minor_version,
HBM_MAJOR_VERSION_PGI,
HBM_MINOR_VERSION_PGI);
return false;
}
/**
* mei_me_d0i3_set - write d0i3 register bit on mei device.
*
* @dev: the device structure
* @intr: ask for interrupt
*
* Return: D0I3C register value
*/
static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
{
u32 reg = mei_me_d0i3c_read(dev);
reg |= H_D0I3C_I3;
if (intr)
reg |= H_D0I3C_IR;
else
reg &= ~H_D0I3C_IR;
mei_me_d0i3c_write(dev, reg);
/* read it to ensure HW consistency */
reg = mei_me_d0i3c_read(dev);
return reg;
}
/**
 * mei_me_d0i3_unset - clear the d0i3 register bit on the mei device.
*
* @dev: the device structure
*
* Return: D0I3C register value
*/
static u32 mei_me_d0i3_unset(struct mei_device *dev)
{
u32 reg = mei_me_d0i3c_read(dev);
reg &= ~H_D0I3C_I3;
reg |= H_D0I3C_IR;
mei_me_d0i3c_write(dev, reg);
/* read it to ensure HW consistency */
reg = mei_me_d0i3c_read(dev);
return reg;
}
/**
* mei_me_d0i3_enter_sync - perform d0i3 entry procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
int ret;
u32 reg;
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
dev_dbg(dev->dev, "d0i3 set not needed\n");
ret = 0;
goto on;
}
/* PGI entry procedure */
dev->pg_event = MEI_PG_EVENT_WAIT;
ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
if (ret)
/* FIXME: should we reset here? */
goto out;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_RECEIVED,
dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
ret = -ETIME;
goto out;
}
/* end PGI entry procedure */
dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
reg = mei_me_d0i3_set(dev, true);
if (!(reg & H_D0I3C_CIP)) {
dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
ret = 0;
goto on;
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
reg = mei_me_d0i3c_read(dev);
if (!(reg & H_D0I3C_I3)) {
ret = -ETIME;
goto out;
}
}
ret = 0;
on:
hw->pg_state = MEI_PG_ON;
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
return ret;
}
/**
* mei_me_d0i3_enter - perform d0i3 entry procedure
 *   (no hbm PG handshake and no waiting for confirmation;
 *    runs with interrupts disabled)
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_d0i3_enter(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 reg;
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
dev_dbg(dev->dev, "already d0i3 : set not needed\n");
goto on;
}
mei_me_d0i3_set(dev, false);
on:
hw->pg_state = MEI_PG_ON;
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(dev->dev, "d0i3 enter\n");
return 0;
}
/**
* mei_me_d0i3_exit_sync - perform d0i3 exit procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
int ret;
u32 reg;
dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
reg = mei_me_d0i3c_read(dev);
if (!(reg & H_D0I3C_I3)) {
/* we are not in d0i3, nothing to do */
dev_dbg(dev->dev, "d0i3 exit not needed\n");
ret = 0;
goto off;
}
reg = mei_me_d0i3_unset(dev);
if (!(reg & H_D0I3C_CIP)) {
dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
ret = 0;
goto off;
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
ret = -ETIME;
goto out;
}
}
ret = 0;
off:
hw->pg_state = MEI_PG_OFF;
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
return ret;
}
/**
* mei_me_pg_legacy_intr - perform legacy pg processing
* in interrupt thread handler
*
* @dev: the device structure
*/
static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
return;
dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
hw->pg_state = MEI_PG_OFF;
if (waitqueue_active(&dev->wait_pg))
wake_up(&dev->wait_pg);
}
/**
* mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
*
* @dev: the device structure
* @intr_source: interrupt source
*/
static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
(intr_source & H_D0I3C_IS)) {
dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
if (hw->pg_state == MEI_PG_ON) {
hw->pg_state = MEI_PG_OFF;
if (dev->hbm_state != MEI_HBM_IDLE) {
/*
* force H_RDY because it could be
* wiped off during PG
*/
dev_dbg(dev->dev, "d0i3 set host ready\n");
mei_me_host_set_ready(dev);
}
} else {
hw->pg_state = MEI_PG_ON;
}
wake_up(&dev->wait_pg);
}
if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
/*
* HW sent some data and we are in D0i3, so
* we got here because of HW initiated exit from D0i3.
* Start runtime pm resume sequence to exit low power state.
*/
dev_dbg(dev->dev, "d0i3 want resume\n");
mei_hbm_pg_resume(dev);
}
}
/**
* mei_me_pg_intr - perform pg processing in interrupt thread handler
*
* @dev: the device structure
* @intr_source: interrupt source
*/
static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (hw->d0i3_supported)
mei_me_d0i3_intr(dev, intr_source);
else
mei_me_pg_legacy_intr(dev);
}
/**
* mei_me_pg_enter_sync - perform runtime pm entry procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
int mei_me_pg_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (hw->d0i3_supported)
return mei_me_d0i3_enter_sync(dev);
else
return mei_me_pg_legacy_enter_sync(dev);
}
/**
* mei_me_pg_exit_sync - perform runtime pm exit procedure
*
* @dev: the device structure
*
 * Return: 0 on success, an error code otherwise
*/
int mei_me_pg_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
if (hw->d0i3_supported)
return mei_me_d0i3_exit_sync(dev);
else
return mei_me_pg_legacy_exit_sync(dev);
}
/**
* mei_me_hw_reset - resets fw via mei csr register.
*
* @dev: the device structure
* @intr_enable: if interrupt should be enabled after reset.
*
 * Return: 0 on success, an error code otherwise
*/
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
struct mei_me_hw *hw = to_me_hw(dev);
int ret;
u32 hcsr;
if (intr_enable) {
mei_me_intr_enable(dev);
if (hw->d0i3_supported) {
ret = mei_me_d0i3_exit_sync(dev);
if (ret)
return ret;
} else {
hw->pg_state = MEI_PG_OFF;
}
}
pm_runtime_set_active(dev->dev);
hcsr = mei_hcsr_read(dev);
/* H_RST may be found lit before reset is started,
* for example if preceding reset flow hasn't completed.
* In that case asserting H_RST will be ignored, therefore
* we need to clean H_RST bit to start a successful reset sequence.
*/
if ((hcsr & H_RST) == H_RST) {
dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
hcsr = mei_hcsr_read(dev);
}
hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
hcsr &= ~H_CSR_IE_MASK;
dev->recvd_hw_ready = false;
mei_hcsr_write(dev, hcsr);
/*
* Host reads the H_CSR once to ensure that the
* posted write to H_CSR completes.
*/
hcsr = mei_hcsr_read(dev);
if ((hcsr & H_RST) == 0)
dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
if ((hcsr & H_RDY) == H_RDY)
dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
if (!intr_enable) {
mei_me_hw_reset_release(dev);
if (hw->d0i3_supported) {
ret = mei_me_d0i3_enter(dev);
if (ret)
return ret;
}
}
return 0;
}
/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: irqreturn_t
*/
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *)dev_id;
u32 hcsr;
hcsr = mei_hcsr_read(dev);
if (!me_intr_src(hcsr))
return IRQ_NONE;
dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
/* disable interrupts on device */
me_intr_disable(dev, hcsr);
return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
/**
* mei_me_irq_thread_handler - function called after ISR to handle the interrupt
* processing.
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: irqreturn_t
*
*/
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct list_head cmpl_list;
s32 slots;
u32 hcsr;
int rets = 0;
dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
hcsr = mei_hcsr_read(dev);
me_intr_clear(dev, hcsr);
INIT_LIST_HEAD(&cmpl_list);
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
dev->dev_state, dev->pxp_mode);
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN)
mei_cl_all_disconnect(dev);
else if (dev->dev_state != MEI_DEV_DISABLED)
schedule_work(&dev->reset_work);
goto end;
}
if (mei_me_hw_is_resetting(dev))
mei_hcsr_set_hig(dev);
mei_me_pg_intr(dev, me_intr_src(hcsr));
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
dev_dbg(dev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
wake_up(&dev->wait_hw_ready);
} else {
dev_dbg(dev->dev, "Spurious Interrupt\n");
}
goto end;
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
dev_dbg(dev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
/* There is a race between ME write and interrupt delivery:
* Not all data is always available immediately after the
* interrupt, so try to read again on the next interrupt.
*/
if (rets == -ENODATA)
break;
if (rets) {
dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
rets, dev->dev_state);
if (dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWERING_DOWN &&
dev->dev_state != MEI_DEV_POWER_DOWN)
schedule_work(&dev->reset_work);
goto end;
}
}
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
/*
	 * During the PG handshake the only allowed write is the reply to
	 * the PG exit message, so block calling the write function
	 * while the pg event is in the PG handshake
*/
if (dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_RECEIVED) {
rets = mei_irq_write_handler(dev, &cmpl_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
mei_irq_compl_handler(dev, &cmpl_list);
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
mei_me_intr_enable(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
#define MEI_POLLING_TIMEOUT_ACTIVE 100
#define MEI_POLLING_TIMEOUT_IDLE 500
/**
* mei_me_polling_thread - interrupt register polling thread
*
* The thread monitors the interrupt source register and calls
* mei_me_irq_thread_handler() to handle the firmware
* input.
*
 * The function polls with a MEI_POLLING_TIMEOUT_ACTIVE timeout
 * when an event was detected; in the idle case the polling
 * interval is increased in MEI_POLLING_TIMEOUT_ACTIVE steps
 * up to MEI_POLLING_TIMEOUT_IDLE.
*
* @_dev: mei device
*
* Return: always 0
*/
int mei_me_polling_thread(void *_dev)
{
struct mei_device *dev = _dev;
irqreturn_t irq_ret;
long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
dev_dbg(dev->dev, "kernel thread is running\n");
while (!kthread_should_stop()) {
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
wait_event_timeout(hw->wait_active,
hw->is_active || kthread_should_stop(),
msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
if (kthread_should_stop())
break;
hcsr = mei_hcsr_read(dev);
if (me_intr_src(hcsr)) {
polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
dev_err(dev->dev, "irq_ret %d\n", irq_ret);
} else {
/*
* Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
* up to MEI_POLLING_TIMEOUT_IDLE
*/
polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
MEI_POLLING_TIMEOUT_ACTIVE,
MEI_POLLING_TIMEOUT_IDLE);
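			/*
			 * Illustrative example (added comment, not part of
			 * the original driver): while the device stays idle
			 * the polling interval grows from 100 ms in 100 ms
			 * steps until it saturates at 500 ms; a detected
			 * event drops it back to 100 ms.
			 */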
}
schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
}
return 0;
}
EXPORT_SYMBOL_GPL(mei_me_polling_thread);
static const struct mei_hw_ops mei_me_hw_ops = {
.trc_status = mei_me_trc_status,
.fw_status = mei_me_fw_status,
.pg_state = mei_me_pg_state,
.host_is_ready = mei_me_host_is_ready,
.hw_is_ready = mei_me_hw_is_ready,
.hw_reset = mei_me_hw_reset,
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
.pg_in_transition = mei_me_pg_in_transition,
.pg_is_enabled = mei_me_pg_is_enabled,
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
.synchronize_irq = mei_me_synchronize_irq,
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
.hbuf_depth = mei_me_hbuf_depth,
.write = mei_me_hbuf_write,
.rdbuf_full_slots = mei_me_count_full_read_slots,
.read_hdr = mei_me_mecbrw_read,
.read = mei_me_read_slots
};
/**
* mei_me_fw_type_nm() - check for nm sku
*
* Read ME FW Status register to check for the Node Manager (NM) Firmware.
* The NM FW is only signaled in PCI function 0.
* __Note__: Deprecated by PCH8 and newer.
*
* @pdev: pci device
*
* Return: true in case of NM firmware
*/
static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
}
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
/**
* mei_me_fw_type_sps_4() - check for sps 4.0 sku
*
* Read ME FW Status register to check for SPS Firmware.
* The SPS FW is only signaled in the PCI function 0.
* __Note__: Deprecated by SPS 5.0 and newer.
*
* @pdev: pci device
*
* Return: true in case of SPS firmware
*/
static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
}
#define MEI_CFG_FW_SPS_4 \
.quirk_probe = mei_me_fw_type_sps_4
/**
* mei_me_fw_type_sps_ign() - check for sps or ign sku
*
* Read ME FW Status register to check for SPS or IGN Firmware.
* The SPS/IGN FW is only signaled in pci function 0
*
* @pdev: pci device
*
* Return: true in case of SPS/IGN firmware
*/
static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
{
u32 reg;
u32 fw_type;
unsigned int devfn;
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}
#define MEI_CFG_KIND_ITOUCH \
.kind = "itouch"
#define MEI_CFG_TYPE_GSC \
.kind = "gsc"
#define MEI_CFG_TYPE_GSCFI \
.kind = "gscfi"
#define MEI_CFG_FW_SPS_IGN \
.quirk_probe = mei_me_fw_type_sps_ign
#define MEI_CFG_FW_VER_SUPP \
.fw_ver_supported = 1
#define MEI_CFG_ICH_HFS \
.fw_status.count = 0
#define MEI_CFG_ICH10_HFS \
.fw_status.count = 1, \
.fw_status.status[0] = PCI_CFG_HFS_1
#define MEI_CFG_PCH_HFS \
.fw_status.count = 2, \
.fw_status.status[0] = PCI_CFG_HFS_1, \
.fw_status.status[1] = PCI_CFG_HFS_2
#define MEI_CFG_PCH8_HFS \
.fw_status.count = 6, \
.fw_status.status[0] = PCI_CFG_HFS_1, \
.fw_status.status[1] = PCI_CFG_HFS_2, \
.fw_status.status[2] = PCI_CFG_HFS_3, \
.fw_status.status[3] = PCI_CFG_HFS_4, \
.fw_status.status[4] = PCI_CFG_HFS_5, \
.fw_status.status[5] = PCI_CFG_HFS_6
#define MEI_CFG_DMA_128 \
.dma_size[DMA_DSCR_HOST] = SZ_128K, \
.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
#define MEI_CFG_TRC \
.hw_trc_supported = 1
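/*
 * Illustrative note (added comment, not part of the original driver):
 * each MEI_CFG_* macro above expands to a designated-initializer
 * fragment, so a definition such as mei_me_pch15_cfg below is roughly
 * equivalent to:
 *
 *	static const struct mei_cfg mei_me_pch15_cfg = {
 *		.fw_status.count = 6,
 *		.fw_status.status[0] = PCI_CFG_HFS_1,
 *		.fw_status.status[1] = PCI_CFG_HFS_2,
 *		.fw_status.status[2] = PCI_CFG_HFS_3,
 *		.fw_status.status[3] = PCI_CFG_HFS_4,
 *		.fw_status.status[4] = PCI_CFG_HFS_5,
 *		.fw_status.status[5] = PCI_CFG_HFS_6,
 *		.fw_ver_supported = 1,
 *		.dma_size[DMA_DSCR_HOST] = SZ_128K,
 *		.dma_size[DMA_DSCR_DEVICE] = SZ_128K,
 *		.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE,
 *		.hw_trc_supported = 1,
 *	};
 */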
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
};
/* ICH devices */
static const struct mei_cfg mei_me_ich10_cfg = {
MEI_CFG_ICH10_HFS,
};
/* PCH6 devices */
static const struct mei_cfg mei_me_pch6_cfg = {
MEI_CFG_PCH_HFS,
};
/* PCH7 devices */
static const struct mei_cfg mei_me_pch7_cfg = {
MEI_CFG_PCH_HFS,
MEI_CFG_FW_VER_SUPP,
};
/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
MEI_CFG_PCH_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_NM,
};
/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
};
/* PCH8 Lynx Point and newer devices - iTouch */
static const struct mei_cfg mei_me_pch8_itouch_cfg = {
MEI_CFG_KIND_ITOUCH,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS_4,
};
/* LBG with quirk for SPS (4.0) Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS_4,
};
/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
};
/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
MEI_CFG_FW_SPS_IGN,
};
/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
* w/o DMA support.
*/
static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
MEI_CFG_KIND_ITOUCH,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS_IGN,
};
/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
MEI_CFG_TRC,
};
/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch15_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
MEI_CFG_TRC,
MEI_CFG_FW_SPS_IGN,
};
/* Graphics System Controller */
static const struct mei_cfg mei_me_gsc_cfg = {
MEI_CFG_TYPE_GSC,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
};
/* Graphics System Controller Firmware Interface */
static const struct mei_cfg mei_me_gscfi_cfg = {
MEI_CFG_TYPE_GSCFI,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
};
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
*/
static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_UNDEF_CFG] = NULL,
[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
[MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
[MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
[MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
{
BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG);
if (idx >= MEI_ME_NUM_CFG)
return NULL;
return mei_cfg_list[idx];
}
EXPORT_SYMBOL_GPL(mei_me_get_cfg);
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
* @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
* @slow_fw: configure longer timeouts as FW is slow
*
* Return: The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg, bool slow_fw)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int i;
dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
hw = to_me_hw(dev);
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
dev->kind = cfg->kind;
return dev;
}
EXPORT_SYMBOL_GPL(mei_me_dev_init);
| linux-master | drivers/misc/mei/hw-me.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
* mei_me_cl_init - initialize me client
*
* @me_cl: me client
*/
void mei_me_cl_init(struct mei_me_client *me_cl)
{
INIT_LIST_HEAD(&me_cl->list);
kref_init(&me_cl->refcnt);
}
/**
* mei_me_cl_get - increases me client refcount
*
* @me_cl: me client
*
* Locking: called under "dev->device_lock" lock
*
* Return: me client or NULL
*/
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
return me_cl;
return NULL;
}
/**
* mei_me_cl_release - free me client
*
* Locking: called under "dev->device_lock" lock
*
* @ref: me_client refcount
*/
static void mei_me_cl_release(struct kref *ref)
{
struct mei_me_client *me_cl =
container_of(ref, struct mei_me_client, refcnt);
kfree(me_cl);
}
/**
* mei_me_cl_put - decrease me client refcount and free client if necessary
*
* Locking: called under "dev->device_lock" lock
*
* @me_cl: me client
*/
void mei_me_cl_put(struct mei_me_client *me_cl)
{
if (me_cl)
kref_put(&me_cl->refcnt, mei_me_cl_release);
}
/**
* __mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
* @me_cl: me client
*
* Locking: dev->me_clients_rwsem
*/
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
if (!me_cl)
return;
list_del_init(&me_cl->list);
mei_me_cl_put(me_cl);
}
/**
* mei_me_cl_del - delete me client from the list and decrease
* reference counter
*
* @dev: mei device
* @me_cl: me client
*/
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
down_write(&dev->me_clients_rwsem);
__mei_me_cl_del(dev, me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_me_cl_add - add me client to the list
*
* @dev: mei device
* @me_cl: me client
*/
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
down_write(&dev->me_clients_rwsem);
list_add(&me_cl->list, &dev->me_clients);
up_write(&dev->me_clients_rwsem);
}
/**
* __mei_me_cl_by_uuid - locate me client by uuid
* increases ref count
*
* @dev: mei device
* @uuid: me client uuid
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid)
{
struct mei_me_client *me_cl;
const uuid_le *pn;
WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
list_for_each_entry(me_cl, &dev->me_clients, list) {
pn = &me_cl->props.protocol_name;
if (uuid_le_cmp(*uuid, *pn) == 0)
return mei_me_cl_get(me_cl);
}
return NULL;
}
/**
* mei_me_cl_by_uuid - locate me client by uuid
* increases ref count
*
* @dev: mei device
* @uuid: me client uuid
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
const uuid_le *uuid)
{
struct mei_me_client *me_cl;
down_read(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
up_read(&dev->me_clients_rwsem);
return me_cl;
}
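/*
 * Illustrative usage sketch (added comment, not part of the original
 * driver): the lookup helpers return a counted reference that the
 * caller must release with mei_me_cl_put(), e.g.:
 *
 *	me_cl = mei_me_cl_by_uuid(dev, uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	// ... use me_cl->props ...
 *	mei_me_cl_put(me_cl);
 */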
/**
* mei_me_cl_by_id - locate me client by client id
* increases ref count
*
* @dev: the device structure
* @client_id: me client id
*
* Return: me client or NULL if not found
*
* Locking: dev->me_clients_rwsem
*/
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
struct mei_me_client *__me_cl, *me_cl = NULL;
down_read(&dev->me_clients_rwsem);
list_for_each_entry(__me_cl, &dev->me_clients, list) {
if (__me_cl->client_id == client_id) {
me_cl = mei_me_cl_get(__me_cl);
break;
}
}
up_read(&dev->me_clients_rwsem);
return me_cl;
}
/**
* __mei_me_cl_by_uuid_id - locate me client by client id and uuid
* increases ref count
*
* @dev: the device structure
* @uuid: me client uuid
* @client_id: me client id
*
* Return: me client or null if not found
*
* Locking: dev->me_clients_rwsem
*/
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id)
{
struct mei_me_client *me_cl;
const uuid_le *pn;
WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
list_for_each_entry(me_cl, &dev->me_clients, list) {
pn = &me_cl->props.protocol_name;
if (uuid_le_cmp(*uuid, *pn) == 0 &&
me_cl->client_id == client_id)
return mei_me_cl_get(me_cl);
}
return NULL;
}
/**
* mei_me_cl_by_uuid_id - locate me client by client id and uuid
* increases ref count
*
* @dev: the device structure
* @uuid: me client uuid
* @client_id: me client id
*
* Return: me client or null if not found
*/
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id)
{
struct mei_me_client *me_cl;
down_read(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
up_read(&dev->me_clients_rwsem);
return me_cl;
}
/**
* mei_me_cl_rm_by_uuid - remove all me clients matching uuid
*
* @dev: the device structure
* @uuid: me client uuid
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl\n", uuid);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching the uuid and the client id
*
* @dev: the device structure
* @uuid: me client uuid
* @id: me client id
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
struct mei_me_client *me_cl;
dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_me_cl_rm_all - remove all me clients
*
* @dev: the device structure
*
* Locking: called under "dev->device_lock" lock
*/
void mei_me_cl_rm_all(struct mei_device *dev)
{
struct mei_me_client *me_cl, *next;
down_write(&dev->me_clients_rwsem);
list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
__mei_me_cl_del(dev, me_cl);
up_write(&dev->me_clients_rwsem);
}
/**
* mei_io_cb_free - free mei_cb_private related memory
*
* @cb: mei callback struct
*/
void mei_io_cb_free(struct mei_cl_cb *cb)
{
if (cb == NULL)
return;
list_del(&cb->list);
kfree(cb->buf.data);
kfree(cb->ext_hdr);
kfree(cb);
}
/**
* mei_tx_cb_enqueue - queue tx callback
*
* Locking: called under "dev->device_lock" lock
*
* @cb: mei callback struct
* @head: an instance of list to queue on
*/
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
struct list_head *head)
{
list_add_tail(&cb->list, head);
cb->cl->tx_cb_queued++;
}
/**
* mei_tx_cb_dequeue - dequeue tx callback
*
* Locking: called under "dev->device_lock" lock
*
* @cb: mei callback struct to dequeue and free
*/
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
if (!WARN_ON(cb->cl->tx_cb_queued == 0))
cb->cl->tx_cb_queued--;
mei_io_cb_free(cb);
}
/**
* mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
*
* Locking: called under "dev->device_lock" lock
*
* @cl: mei client
* @fp: pointer to file structure
*/
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->fp == fp) {
cl_vtag->pending_read = true;
return;
}
}
}
/**
* mei_io_cb_init - allocate and initialize io callback
*
* @cl: mei client
* @type: operation type
* @fp: pointer to file structure
*
* Return: mei_cl_cb pointer or NULL;
*/
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
enum mei_cb_file_ops type,
const struct file *fp)
{
struct mei_cl_cb *cb;
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
INIT_LIST_HEAD(&cb->list);
cb->fp = fp;
cb->cl = cl;
cb->buf_idx = 0;
cb->fop_type = type;
cb->vtag = 0;
cb->ext_hdr = NULL;
return cb;
}
/**
* mei_io_list_flush_cl - removes cbs belonging to the cl.
*
* @head: an instance of our list structure
* @cl: host client
*/
static void mei_io_list_flush_cl(struct list_head *head,
const struct mei_cl *cl)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
if (cl == cb->cl) {
list_del_init(&cb->list);
if (cb->fop_type == MEI_FOP_READ)
mei_io_cb_free(cb);
}
}
}
/**
* mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
*
* @head: An instance of our list structure
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*/
static void mei_io_tx_list_free_cl(struct list_head *head,
const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
if (cl == cb->cl && (!fp || fp == cb->fp))
mei_tx_cb_dequeue(cb);
}
}
/**
* mei_io_list_free_fp - free cb from a list that matches file pointer
*
* @head: io list
* @fp: file pointer (matching cb file object), may be NULL
*/
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list)
if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
}
/**
* mei_cl_free_pending - free pending cb
*
* @cl: host client
*/
static void mei_cl_free_pending(struct mei_cl *cl)
{
struct mei_cl_cb *cb;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
mei_io_cb_free(cb);
}
/**
* mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
* @cl: host client
* @length: size of the buffer
* @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
*/
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops fop_type,
const struct file *fp)
{
struct mei_cl_cb *cb;
cb = mei_io_cb_init(cl, fop_type, fp);
if (!cb)
return NULL;
if (length == 0)
return cb;
cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
if (!cb->buf.data) {
mei_io_cb_free(cb);
return NULL;
}
cb->buf.size = length;
return cb;
}
/**
* mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
* and enqueuing of the control commands cb
*
* @cl: host client
* @length: size of the buffer
* @fop_type: operation type
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
* Locking: called under "dev->device_lock" lock
*/
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops fop_type,
const struct file *fp)
{
struct mei_cl_cb *cb;
/* for RX always allocate at least client's mtu */
if (length)
length = max_t(size_t, length, mei_cl_mtu(cl));
cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
if (!cb)
return NULL;
list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
return cb;
}
/**
* mei_cl_read_cb - find this cl's callback in the read list
* for a specific file
*
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*
* Return: cb on success, NULL if cb is not found
*/
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
struct mei_cl_cb *cb;
struct mei_cl_cb *ret_cb = NULL;
spin_lock(&cl->rd_completed_lock);
list_for_each_entry(cb, &cl->rd_completed, list)
if (!fp || fp == cb->fp) {
ret_cb = cb;
break;
}
spin_unlock(&cl->rd_completed_lock);
return ret_cb;
}
/**
* mei_cl_flush_queues - flushes queue lists belonging to cl.
*
* @cl: host client
* @fp: file pointer (matching cb file object), may be NULL
*
* Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
*/
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
/* free pending and control cb only in final flush */
if (!fp) {
mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
mei_cl_free_pending(cl);
}
spin_lock(&cl->rd_completed_lock);
mei_io_list_free_fp(&cl->rd_completed, fp);
spin_unlock(&cl->rd_completed_lock);
return 0;
}
/**
* mei_cl_init - initializes cl.
*
* @cl: host client to be initialized
* @dev: mei device
*/
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
memset(cl, 0, sizeof(*cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
init_waitqueue_head(&cl->ev_wait);
INIT_LIST_HEAD(&cl->vtag_map);
spin_lock_init(&cl->rd_completed_lock);
INIT_LIST_HEAD(&cl->rd_completed);
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
cl->writing_state = MEI_IDLE;
cl->state = MEI_FILE_UNINITIALIZED;
cl->dev = dev;
}
/**
* mei_cl_allocate - allocates cl structure and sets it up.
*
* @dev: mei device
 * Return: The allocated host client structure or NULL on failure
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return NULL;
mei_cl_init(cl, dev);
return cl;
}
/**
* mei_cl_link - allocate host id in the host map
*
* @cl: host client
*
* Return: 0 on success
* -EINVAL on incorrect values
* -EMFILE if open count exceeded.
*/
int mei_cl_link(struct mei_cl *cl)
{
struct mei_device *dev;
int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
dev_err(dev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
dev->open_handle_count++;
cl->host_client_id = id;
list_add_tail(&cl->link, &dev->file_list);
set_bit(id, dev->host_clients_map);
cl->state = MEI_FILE_INITIALIZING;
cl_dbg(dev, cl, "link cl\n");
return 0;
}
/**
* mei_cl_unlink - remove host client from the list
*
* @cl: host client
*
* Return: always 0
*/
int mei_cl_unlink(struct mei_cl *cl)
{
struct mei_device *dev;
/* don't shout on error exit path */
if (!cl)
return 0;
if (WARN_ON(!cl->dev))
return 0;
dev = cl->dev;
cl_dbg(dev, cl, "unlink client");
if (cl->state == MEI_FILE_UNINITIALIZED)
return 0;
if (dev->open_handle_count > 0)
dev->open_handle_count--;
/* never clear the 0 bit */
if (cl->host_client_id)
clear_bit(cl->host_client_id, dev->host_clients_map);
list_del_init(&cl->link);
cl->state = MEI_FILE_UNINITIALIZED;
cl->writing_state = MEI_IDLE;
WARN_ON(!list_empty(&cl->rd_completed) ||
!list_empty(&cl->rd_pending) ||
!list_empty(&cl->link));
return 0;
}
void mei_host_client_init(struct mei_device *dev)
{
mei_set_devstate(dev, MEI_DEV_ENABLED);
dev->reset_count = 0;
schedule_work(&dev->bus_rescan_work);
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
pm_request_autosuspend(dev->dev);
}
/**
* mei_hbuf_acquire - try to acquire host buffer
*
* @dev: the device structure
* Return: true if host buffer was acquired
*/
bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n");
return false;
}
if (!dev->hbuf_is_ready) {
dev_dbg(dev->dev, "hbuf is not ready\n");
return false;
}
dev->hbuf_is_ready = false;
return true;
}
/**
* mei_cl_wake_all - wake up readers, writers and event waiters so
* they can be interrupted
*
* @cl: host client
*/
static void mei_cl_wake_all(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
/* synchronized under device mutex */
if (waitqueue_active(&cl->rx_wait)) {
cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->tx_wait)) {
cl_dbg(dev, cl, "Waking up writing client!\n");
wake_up_interruptible(&cl->tx_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->ev_wait)) {
cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
wake_up_interruptible(&cl->ev_wait);
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->wait)) {
cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
wake_up(&cl->wait);
}
}
/**
* mei_cl_set_disconnected - set disconnected state and clear
* associated states and resources
*
* @cl: host client
*/
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_DISCONNECTED ||
cl->state <= MEI_FILE_INITIALIZING)
return;
cl->state = MEI_FILE_DISCONNECTED;
mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
mei_cl_wake_all(cl);
cl->rx_flow_ctrl_creds = 0;
cl->tx_flow_ctrl_creds = 0;
cl->timer_count = 0;
if (!cl->me_cl)
return;
if (!WARN_ON(cl->me_cl->connect_count == 0))
cl->me_cl->connect_count--;
if (cl->me_cl->connect_count == 0)
cl->me_cl->tx_flow_ctrl_creds = 0;
mei_me_cl_put(cl->me_cl);
cl->me_cl = NULL;
}
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
if (!mei_me_cl_get(me_cl))
return -ENOENT;
/* only one connection is allowed for fixed address clients */
if (me_cl->props.fixed_address) {
if (me_cl->connect_count) {
mei_me_cl_put(me_cl);
return -EBUSY;
}
}
cl->me_cl = me_cl;
cl->state = MEI_FILE_CONNECTING;
cl->me_cl->connect_count++;
return 0;
}
/*
* mei_cl_send_disconnect - send disconnect request
*
* @cl: host client
* @cb: callback block
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
int ret;
dev = cl->dev;
ret = mei_hbm_cl_disconnect_req(dev, cl);
cl->status = ret;
if (ret) {
cl->state = MEI_FILE_DISCONNECT_REPLY;
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_cl_irq_disconnect - processes close related operation from
* interrupt thread context - send disconnect request
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_cl_send_disconnect(cl, cb);
if (ret)
list_move_tail(&cb->list, cmpl_list);
return ret;
}
/**
* __mei_cl_disconnect - disconnect host client from the me one
* internal function runtime pm has to be already acquired
*
* @cl: host client
*
* Return: 0 on success, <0 on failure.
*/
static int __mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
dev = cl->dev;
cl->state = MEI_FILE_DISCONNECTING;
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
rets = mei_cl_send_disconnect(cl, cb);
if (rets) {
cl_err(dev, cl, "failed to disconnect.\n");
goto out;
}
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl->state == MEI_FILE_DISCONNECTED,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
rets = cl->status;
if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
cl->state != MEI_FILE_DISCONNECTED) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
out:
/* we disconnect also on error */
mei_cl_set_disconnected(cl);
if (!rets)
cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_disconnect - disconnect host client from the me one
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
cl_dbg(dev, cl, "disconnecting");
if (!mei_cl_is_connected(cl))
return 0;
if (mei_cl_is_fixed_address(cl)) {
mei_cl_set_disconnected(cl);
return 0;
}
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN) {
cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
mei_cl_set_disconnected(cl);
return 0;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = __mei_cl_disconnect(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return rets;
}
/**
* mei_cl_is_other_connecting - checks if other
* client with the same me client id is connecting
*
* @cl: private data of the file object
*
 * Return: true if another client is connecting, false otherwise.
*/
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
dev = cl->dev;
list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
if (cb->fop_type == MEI_FOP_CONNECT &&
mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
return true;
}
return false;
}
/**
* mei_cl_send_connect - send connect request
*
* @cl: host client
* @cb: callback block
*
* Return: 0, OK; otherwise, error.
*/
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
int ret;
dev = cl->dev;
ret = mei_hbm_cl_connect_req(dev, cl);
cl->status = ret;
if (ret) {
cl->state = MEI_FILE_DISCONNECT_REPLY;
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_cl_irq_connect - send connect request in irq_thread context
*
* @cl: host client
* @cb: callback block
* @cmpl_list: complete list
*
* Return: 0, OK; otherwise, error.
*/
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int rets;
if (mei_cl_is_other_connecting(cl))
return 0;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
rets = mei_cl_send_connect(cl, cb);
if (rets)
list_move_tail(&cb->list, cmpl_list);
return rets;
}
/**
* mei_cl_connect - connect host client to the me one
*
* @cl: host client
* @me_cl: me client
* @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev || !me_cl))
return -ENODEV;
dev = cl->dev;
rets = mei_cl_set_connecting(cl, me_cl);
if (rets)
goto nortpm;
if (mei_cl_is_fixed_address(cl)) {
cl->state = MEI_FILE_CONNECTED;
rets = 0;
goto nortpm;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
rets = mei_cl_send_connect(cl, cb);
if (rets)
goto out;
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
* in case of failure reset will be invoked
*/
__mei_cl_disconnect(cl);
rets = -EFAULT;
goto out;
}
/* timeout or something went really wrong */
if (!cl->status)
cl->status = -EFAULT;
}
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
nortpm:
if (!mei_cl_is_connected(cl))
mei_cl_set_disconnected(cl);
return rets;
}
/**
* mei_cl_alloc_linked - allocate and link host client
*
* @dev: the device structure
*
* Return: cl on success ERR_PTR on failure
*/
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
struct mei_cl *cl;
int ret;
cl = mei_cl_allocate(dev);
if (!cl) {
ret = -ENOMEM;
goto err;
}
ret = mei_cl_link(cl);
if (ret)
goto err;
return cl;
err:
kfree(cl);
return ERR_PTR(ret);
}
/**
* mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
*
* @cl: host client
*
* Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
*/
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
if (cl->tx_flow_ctrl_creds > 0)
return 1;
if (mei_cl_is_fixed_address(cl))
return 1;
if (mei_cl_is_single_recv_buf(cl)) {
if (cl->me_cl->tx_flow_ctrl_creds > 0)
return 1;
}
return 0;
}
/**
* mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
* for a client
*
* @cl: host client
*
* Return:
* 0 on success
* -EINVAL when ctrl credits are <= 0
*/
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
if (mei_cl_is_fixed_address(cl))
return 0;
if (mei_cl_is_single_recv_buf(cl)) {
if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
cl->me_cl->tx_flow_ctrl_creds--;
} else {
if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
cl->tx_flow_ctrl_creds--;
}
return 0;
}
/**
* mei_cl_vtag_alloc - allocate and fill the vtag structure
*
* @fp: pointer to file structure
* @vtag: vm tag
*
* Return:
* * Pointer to allocated struct - on success
* * ERR_PTR(-ENOMEM) on memory allocation failure
*/
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
struct mei_cl_vtag *cl_vtag;
cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
if (!cl_vtag)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cl_vtag->list);
cl_vtag->vtag = vtag;
cl_vtag->fp = fp;
return cl_vtag;
}
/**
* mei_cl_fp_by_vtag - obtain the file pointer by vtag
*
* @cl: host client
* @vtag: virtual tag
*
* Return:
* * A file pointer - on success
* * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
*/
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list)
/* The client on bus has one fixed fp */
if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag)
return vtag_l->fp;
return ERR_PTR(-ENOENT);
}
/**
* mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
*
* @cl: host client
* @vtag: vm tag
*/
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) {
/* The client on bus has one fixed vtag map */
if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag) {
vtag_l->pending_read = false;
break;
}
}
}
/**
* mei_cl_read_vtag_add_fc - add flow control for next pending reader
* in the vtag list
*
* @cl: host client
*/
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->pending_read) {
if (mei_cl_enqueue_ctrl_wr_cb(cl,
mei_cl_mtu(cl),
MEI_FOP_READ,
cl_vtag->fp))
cl->rx_flow_ctrl_creds++;
break;
}
}
}
/**
* mei_cl_vt_support_check - check if client support vtags
*
* @cl: host client
*
* Return:
* * 0 - supported, or not connected at all
* * -EOPNOTSUPP - vtags are not supported by client
*/
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (!dev->hbm_f_vt_supported)
return -EOPNOTSUPP;
if (!cl->me_cl)
return 0;
return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}
/**
* mei_cl_add_rd_completed - add read completed callback to list with lock
* and vtag check
*
* @cl: host client
* @cb: callback block
*
*/
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
const struct file *fp;
if (!mei_cl_vt_support_check(cl)) {
fp = mei_cl_fp_by_vtag(cl, cb->vtag);
if (IS_ERR(fp)) {
/* client already disconnected, discarding */
mei_io_cb_free(cb);
return;
}
cb->fp = fp;
mei_cl_reset_read_by_vtag(cl, cb->vtag);
mei_cl_read_vtag_add_fc(cl);
}
spin_lock(&cl->rd_completed_lock);
list_add_tail(&cb->list, &cl->rd_completed);
spin_unlock(&cl->rd_completed_lock);
}
/**
* mei_cl_del_rd_completed - free read completed callback with lock
*
* @cl: host client
* @cb: callback block
*
*/
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
spin_lock(&cl->rd_completed_lock);
mei_io_cb_free(cb);
spin_unlock(&cl->rd_completed_lock);
}
/**
* mei_cl_notify_fop2req - convert fop to proper request
*
* @fop: client notification start response command
*
* Return: MEI_HBM_NOTIFICATION_START/STOP
*/
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
if (fop == MEI_FOP_NOTIFY_START)
return MEI_HBM_NOTIFICATION_START;
else
return MEI_HBM_NOTIFICATION_STOP;
}
/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
*
* @req: hbm notification request type
*
* Return: MEI_FOP_NOTIFY_START/STOP
*/
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
if (req == MEI_HBM_NOTIFICATION_START)
return MEI_FOP_NOTIFY_START;
else
return MEI_FOP_NOTIFY_STOP;
}
/**
* mei_cl_irq_notify - send notification request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
bool request;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
request = mei_cl_notify_fop2req(cb->fop_type);
ret = mei_hbm_cl_notify_req(dev, cl, request);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
/**
* mei_cl_notify_request - send notification stop/start request
*
* @cl: host client
* @fp: associate request with file
* @request: 1 for start or 0 for stop
*
* Locking: called under "dev->device_lock" lock
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_notify_request(struct mei_cl *cl,
const struct file *fp, u8 request)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
enum mei_cb_file_ops fop_type;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_ev_supported) {
cl_dbg(dev, cl, "notifications not supported\n");
return -EOPNOTSUPP;
}
if (!mei_cl_is_connected(cl))
return -ENODEV;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
fop_type = mei_cl_notify_req2fop(request);
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_notify_req(dev, cl, request)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->notify_en == request ||
cl->status ||
!mei_cl_is_connected(cl),
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->notify_en != request && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_notify - raise notification
*
* @cl: host client
*
* Locking: called under "dev->device_lock" lock
*/
void mei_cl_notify(struct mei_cl *cl)
{
struct mei_device *dev;
if (!cl || !cl->dev)
return;
dev = cl->dev;
if (!cl->notify_en)
return;
cl_dbg(dev, cl, "notify event");
cl->notify_ev = true;
if (!mei_cl_bus_notify_event(cl))
wake_up_interruptible(&cl->ev_wait);
if (cl->ev_async)
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}
/**
* mei_cl_notify_get - get or wait for notification event
*
* @cl: host client
* @block: this request is blocking
* @notify_ev: true if notification event was received
*
* Locking: called under "dev->device_lock" lock
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
struct mei_device *dev;
int rets;
*notify_ev = false;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_ev_supported) {
cl_dbg(dev, cl, "notifications not supported\n");
return -EOPNOTSUPP;
}
if (!mei_cl_is_connected(cl))
return -ENODEV;
if (cl->notify_ev)
goto out;
if (!block)
return -EAGAIN;
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
mutex_lock(&dev->device_lock);
if (rets < 0)
return rets;
out:
*notify_ev = cl->notify_ev;
cl->notify_ev = false;
return 0;
}
/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
* @length: number of bytes to read
* @fp: pointer to file structure
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!mei_cl_is_connected(cl))
return -ENODEV;
if (!mei_me_cl_is_active(cl->me_cl)) {
cl_err(dev, cl, "no such me client\n");
return -ENOTTY;
}
if (mei_cl_is_fixed_address(cl))
return 0;
/* HW currently supports only one pending read */
if (cl->rx_flow_ctrl_creds) {
mei_cl_set_read_by_fp(cl, fp);
return -EBUSY;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
if (!cb)
return -ENOMEM;
mei_cl_set_read_by_fp(cl, fp);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
rets = 0;
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
goto out;
list_move_tail(&cb->list, &cl->rd_pending);
}
cl->rx_flow_ctrl_creds++;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
nortpm:
if (rets)
mei_io_cb_free(cb);
return rets;
}
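/**
 * mei_ext_hdr_set_vtag - fill in a vtag extended header
 *
 * @ext: extended header buffer to fill
 * @vtag: vm tag
 *
 * Return: length of the vtag extended header in slots
 */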
static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
struct mei_ext_hdr_vtag *vtag_hdr = ext;
vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
vtag_hdr->vtag = vtag;
vtag_hdr->reserved = 0;
return vtag_hdr->hdr.length;
}
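/**
 * mei_ext_hdr_is_gsc - check whether an extended header is of gsc type
 *
 * @ext: extended header
 *
 * Return: true if the header is present and its type is MEI_EXT_HDR_GSC
 */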
static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
{
return ext && ext->type == MEI_EXT_HDR_GSC;
}
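/**
 * mei_ext_hdr_set_gsc - copy a gsc extended header into the message header
 *
 * @ext: destination extended header
 * @gsc_hdr: gsc extended header to copy
 *
 * Return: length field of the copied extended header
 */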
static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
{
memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
return ext->length;
}
/**
* mei_msg_hdr_init - allocate and initialize mei message header
*
* @cb: message callback structure
*
* Return: a pointer to initialized header or ERR_PTR on failure
*/
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
size_t hdr_len;
struct mei_ext_meta_hdr *meta;
struct mei_msg_hdr *mei_hdr;
bool is_ext, is_hbm, is_gsc, is_vtag;
struct mei_ext_hdr *next_ext;
if (!cb)
return ERR_PTR(-EINVAL);
/* Extended header for vtag is attached only on the first fragment */
is_vtag = (cb->vtag && cb->buf_idx == 0);
is_hbm = cb->cl->me_cl->client_id == 0;
is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
is_ext = is_vtag || is_gsc;
/* Compute extended header size */
hdr_len = sizeof(*mei_hdr);
if (!is_ext)
goto setup_hdr;
hdr_len += sizeof(*meta);
if (is_vtag)
hdr_len += sizeof(struct mei_ext_hdr_vtag);
if (is_gsc)
hdr_len += mei_ext_hdr_len(cb->ext_hdr);
setup_hdr:
mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
if (!mei_hdr)
return ERR_PTR(-ENOMEM);
mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
mei_hdr->me_addr = mei_cl_me_id(cb->cl);
mei_hdr->internal = cb->internal;
mei_hdr->extended = is_ext;
if (!is_ext)
goto out;
meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
meta->size = 0;
next_ext = (struct mei_ext_hdr *)meta->hdrs;
if (is_vtag) {
meta->count++;
meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
next_ext = mei_ext_next(next_ext);
}
if (is_gsc) {
meta->count++;
meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
next_ext = mei_ext_next(next_ext);
}
out:
mei_hdr->length = hdr_len - sizeof(*mei_hdr);
return mei_hdr;
}
/**
* mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
* Return: 0, OK; otherwise error.
*/
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr *mei_hdr = NULL;
size_t hdr_len;
size_t hbuf_len, dr_len;
size_t buf_len = 0;
size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
int rets;
bool first_chunk;
const void *data = NULL;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
buf = &cb->buf;
first_chunk = cb->buf_idx == 0;
rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
if (rets < 0)
goto err;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
return 0;
}
if (buf->data) {
buf_len = buf->size - cb->buf_idx;
data = buf->data + cb->buf_idx;
}
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
rets = -EOVERFLOW;
goto err;
}
hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
	/*
	 * Split the message only if we can write the whole host buffer;
	 * otherwise wait for the next time the host buffer is empty.
	 */
if (hdr_len + buf_len <= hbuf_len) {
data_len = buf_len;
mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
mei_hdr->dma_ring = 1;
if (buf_len > dr_len)
buf_len = dr_len;
else
mei_hdr->msg_complete = 1;
data_len = sizeof(dma_len);
dma_len = buf_len;
data = &dma_len;
} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
buf_len = hbuf_len - hdr_len;
data_len = buf_len;
} else {
kfree(mei_hdr);
return 0;
}
mei_hdr->length += data_len;
if (mei_hdr->dma_ring && buf->data)
mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
if (rets)
goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
cb->buf_idx += buf_len;
if (first_chunk) {
if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
rets = -EIO;
goto err;
}
}
if (mei_hdr->msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list);
kfree(mei_hdr);
return 0;
err:
kfree(mei_hdr);
cl->status = rets;
list_move_tail(&cb->list, cmpl_list);
return rets;
}
/**
* mei_cl_write - submit a write cb to mei device
* assumes device_lock is locked
*
* @cl: host client
* @cb: write callback with filled data
 * @timeout: send timeout in milliseconds;
 *           effective only for blocking writes, i.e. when cb->blocking is set;
 *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
*
* Return: number of bytes sent on success, <0 on failure.
*/
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr *mei_hdr = NULL;
size_t hdr_len;
size_t hbuf_len, dr_len;
size_t buf_len;
size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
ssize_t rets;
bool blocking;
const void *data;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
if (WARN_ON(!cb))
return -EINVAL;
dev = cl->dev;
buf = &cb->buf;
buf_len = buf->size;
cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
blocking = cb->blocking;
data = buf->data;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
rets = mei_cl_tx_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
rets = buf_len;
goto out;
}
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
rets = buf_len;
goto out;
}
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
rets = -EOVERFLOW;
goto out;
}
hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
if (hdr_len + buf_len <= hbuf_len) {
data_len = buf_len;
mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
mei_hdr->dma_ring = 1;
if (buf_len > dr_len)
buf_len = dr_len;
else
mei_hdr->msg_complete = 1;
data_len = sizeof(dma_len);
dma_len = buf_len;
data = &dma_len;
} else {
buf_len = hbuf_len - hdr_len;
data_len = buf_len;
}
mei_hdr->length += data_len;
if (mei_hdr->dma_ring && buf->data)
mei_dma_ring_write(dev, buf->data, buf_len);
rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
if (rets)
goto err;
rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
if (rets)
goto err;
cl->writing_state = MEI_WRITING;
cb->buf_idx = buf_len;
/* restore return value */
buf_len = buf->size;
out:
if (mei_hdr->msg_complete)
mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
else
mei_tx_cb_enqueue(cb, &dev->write_list);
cb = NULL;
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible_timeout(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE ||
(!mei_cl_is_connected(cl)),
msecs_to_jiffies(timeout));
mutex_lock(&dev->device_lock);
		/* clean all queues on timeout as something fatal happened */
if (rets == 0) {
rets = -ETIME;
mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
}
/* wait_event_interruptible returns -ERESTARTSYS */
if (rets > 0)
rets = 0;
if (rets) {
if (signal_pending(current))
rets = -EINTR;
goto err;
}
if (cl->writing_state != MEI_WRITE_COMPLETE) {
rets = -EFAULT;
goto err;
}
}
rets = buf_len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
free:
mei_io_cb_free(cb);
kfree(mei_hdr);
return rets;
}
/**
* mei_cl_complete - processes completed operation for a client
*
* @cl: private data of the file object.
* @cb: callback block.
*/
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev = cl->dev;
switch (cb->fop_type) {
case MEI_FOP_WRITE:
mei_tx_cb_dequeue(cb);
cl->writing_state = MEI_WRITE_COMPLETE;
if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
}
break;
case MEI_FOP_READ:
mei_cl_add_rd_completed(cl, cb);
if (!mei_cl_is_fixed_address(cl) &&
!WARN_ON(!cl->rx_flow_ctrl_creds))
cl->rx_flow_ctrl_creds--;
if (!mei_cl_bus_rx_event(cl))
wake_up_interruptible(&cl->rx_wait);
break;
case MEI_FOP_CONNECT:
case MEI_FOP_DISCONNECT:
case MEI_FOP_NOTIFY_STOP:
case MEI_FOP_NOTIFY_START:
case MEI_FOP_DMA_MAP:
case MEI_FOP_DMA_UNMAP:
if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
break;
case MEI_FOP_DISCONNECT_RSP:
mei_io_cb_free(cb);
mei_cl_set_disconnected(cl);
break;
default:
BUG_ON(0);
}
}
/**
* mei_cl_all_disconnect - disconnect forcefully all connected clients
*
* @dev: mei device
*/
void mei_cl_all_disconnect(struct mei_device *dev)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
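/**
 * mei_cl_dma_map_find - find a client by dma buffer id
 *
 * @dev: mei device
 * @buffer_id: dma buffer id
 *
 * Return: the client with a matching dma buffer id or NULL if not found
 */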
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
struct mei_cl *cl;
list_for_each_entry(cl, &dev->file_list, link)
if (cl->dma.buffer_id == buffer_id)
return cl;
return NULL;
}
/**
* mei_cl_irq_dma_map - send client dma map request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_dma_map_req(dev, cl);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
/**
* mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
*
* @cl: client
* @cb: callback block.
* @cmpl_list: complete list.
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
int slots;
int ret;
msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
slots = mei_hbuf_empty_slots(dev);
if (slots < 0)
return -EOVERFLOW;
if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_dma_unmap_req(dev, cl);
if (ret) {
cl->status = ret;
list_move_tail(&cb->list, cmpl_list);
return ret;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
return 0;
}
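/**
 * mei_cl_dma_alloc - allocate a coherent dma buffer for a client
 *
 * @cl: host client
 * @buf_id: id of the buffer to be mapped
 * @size: size of the buffer
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */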
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
&cl->dma.daddr, GFP_KERNEL);
if (!cl->dma.vaddr)
return -ENOMEM;
cl->dma.buffer_id = buf_id;
cl->dma.size = size;
return 0;
}
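/**
 * mei_cl_dma_free - free the client dma buffer and reset the bookkeeping
 *
 * @cl: host client
 */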
static void mei_cl_dma_free(struct mei_cl *cl)
{
cl->dma.buffer_id = 0;
dmam_free_coherent(cl->dev->dev,
cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
cl->dma.size = 0;
cl->dma.vaddr = NULL;
cl->dma.daddr = 0;
}
/**
* mei_cl_dma_alloc_and_map - send client dma map request
*
* @cl: host client
* @fp: pointer to file structure
* @buffer_id: id of the mapped buffer
* @size: size of the buffer
*
* Locking: called under "dev->device_lock" lock
*
* Return:
* * -ENODEV
* * -EINVAL
* * -EOPNOTSUPP
* * -EPROTO
* * -ENOMEM;
*/
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
u8 buffer_id, size_t size)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_cd_supported) {
cl_dbg(dev, cl, "client dma is not supported\n");
return -EOPNOTSUPP;
}
if (buffer_id == 0)
return -EINVAL;
if (mei_cl_is_connected(cl))
return -EPROTO;
if (cl->dma_mapped)
return -EPROTO;
if (mei_cl_dma_map_find(dev, buffer_id)) {
cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
cl->dma.buffer_id);
return -EPROTO;
}
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = mei_cl_dma_alloc(cl, buffer_id, size);
if (rets) {
pm_runtime_put_noidle(dev->dev);
return rets;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_dma_map_req(dev, cl)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
cl->status = 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!cl->dma_mapped && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
out:
if (rets)
mei_cl_dma_free(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
/**
* mei_cl_dma_unmap - send client dma unmap request
*
* @cl: host client
* @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
 * Return: 0 on success and error otherwise.
*/
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_cd_supported) {
cl_dbg(dev, cl, "client dma is not supported\n");
return -EOPNOTSUPP;
}
/* do not allow unmap for connected client */
if (mei_cl_is_connected(cl))
return -EPROTO;
if (!cl->dma_mapped)
return -EPROTO;
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
}
if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
rets = -ENODEV;
goto out;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
cl->status = 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->dma_mapped && !cl->status)
cl->status = -EFAULT;
rets = cl->status;
if (!rets)
mei_cl_dma_free(cl);
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
mei_io_cb_free(cb);
return rets;
}
| linux-master | drivers/misc/mei/client.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
static const char *mei_hbm_status_str(enum mei_hbm_status status)
{
#define MEI_HBM_STATUS(status) case MEI_HBMS_##status: return #status
switch (status) {
MEI_HBM_STATUS(SUCCESS);
MEI_HBM_STATUS(CLIENT_NOT_FOUND);
MEI_HBM_STATUS(ALREADY_EXISTS);
MEI_HBM_STATUS(REJECTED);
MEI_HBM_STATUS(INVALID_PARAMETER);
MEI_HBM_STATUS(NOT_ALLOWED);
MEI_HBM_STATUS(ALREADY_STARTED);
MEI_HBM_STATUS(NOT_STARTED);
default: return "unknown";
}
#undef MEI_HBM_STATUS
}
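/**
 * mei_cl_conn_status_str - convert client connect status to a printable string
 *
 * @status: client connect status
 *
 * Return: string representation of the status, "unknown" for
 *	unrecognized values
 */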
static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
{
#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status
switch (status) {
MEI_CL_CS(SUCCESS);
MEI_CL_CS(NOT_FOUND);
MEI_CL_CS(ALREADY_STARTED);
MEI_CL_CS(OUT_OF_RESOURCES);
MEI_CL_CS(MESSAGE_SMALL);
MEI_CL_CS(NOT_ALLOWED);
default: return "unknown";
}
#undef MEI_CL_CS
}
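/**
 * mei_hbm_state_str - convert hbm state to a printable string
 *
 * @state: hbm state
 *
 * Return: string representation of the state, "unknown" for
 *	unrecognized values
 */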
const char *mei_hbm_state_str(enum mei_hbm_state state)
{
#define MEI_HBM_STATE(state) case MEI_HBM_##state: return #state
switch (state) {
MEI_HBM_STATE(IDLE);
MEI_HBM_STATE(STARTING);
MEI_HBM_STATE(STARTED);
	MEI_HBM_STATE(DR_SETUP);
	MEI_HBM_STATE(CAP_SETUP);
MEI_HBM_STATE(ENUM_CLIENTS);
MEI_HBM_STATE(CLIENT_PROPERTIES);
MEI_HBM_STATE(STOPPED);
default:
return "unknown";
}
#undef MEI_HBM_STATE
}
/**
* mei_cl_conn_status_to_errno - convert client connect response
* status to error code
*
* @status: client connect response status
*
* Return: corresponding error code
*/
static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
{
switch (status) {
case MEI_CL_CONN_SUCCESS: return 0;
case MEI_CL_CONN_NOT_FOUND: return -ENOTTY;
case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
case MEI_CL_CONN_NOT_ALLOWED: return -EBUSY;
default: return -EINVAL;
}
}
/**
* mei_hbm_write_message - wrapper for sending hbm messages.
*
* @dev: mei device
* @hdr: mei header
* @data: payload
*/
static inline int mei_hbm_write_message(struct mei_device *dev,
struct mei_msg_hdr *hdr,
const void *data)
{
return mei_write_message(dev, hdr, sizeof(*hdr), data, hdr->length);
}
/**
* mei_hbm_idle - set hbm to idle state
*
* @dev: the device structure
*/
void mei_hbm_idle(struct mei_device *dev)
{
dev->init_clients_timer = 0;
dev->hbm_state = MEI_HBM_IDLE;
}
/**
 * mei_hbm_reset - reset hbm counters and bookkeeping data structures
*
* @dev: the device structure
*/
void mei_hbm_reset(struct mei_device *dev)
{
mei_me_cl_rm_all(dev);
mei_hbm_idle(dev);
}
/**
* mei_hbm_hdr - construct hbm header
*
* @mei_hdr: hbm header
* @length: payload length
*/
static inline void mei_hbm_hdr(struct mei_msg_hdr *mei_hdr, size_t length)
{
memset(mei_hdr, 0, sizeof(*mei_hdr));
mei_hdr->length = length;
mei_hdr->msg_complete = 1;
}
/**
* mei_hbm_cl_hdr - construct client hbm header
*
* @cl: client
* @hbm_cmd: host bus message command
* @buf: buffer for cl header
* @len: buffer length
*/
static inline
void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
{
struct mei_hbm_cl_cmd *cmd = buf;
memset(cmd, 0, len);
cmd->hbm_cmd = hbm_cmd;
cmd->host_addr = mei_cl_host_addr(cl);
cmd->me_addr = mei_cl_me_id(cl);
}
/**
* mei_hbm_cl_write - write simple hbm client message
*
* @dev: the device structure
* @cl: client
* @hbm_cmd: host bus message command
* @buf: message buffer
* @len: buffer length
*
* Return: 0 on success, <0 on failure.
*/
static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
u8 hbm_cmd, void *buf, size_t len)
{
struct mei_msg_hdr mei_hdr;
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
return mei_hbm_write_message(dev, &mei_hdr, buf);
}
/**
 * mei_hbm_cl_addr_equal - check if the client's address and
 *	the message address match
*
* @cl: client
* @cmd: hbm client message
*
* Return: true if addresses are the same
*/
static inline
bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)
{
return mei_cl_host_addr(cl) == cmd->host_addr &&
mei_cl_me_id(cl) == cmd->me_addr;
}
/**
* mei_hbm_cl_find_by_cmd - find recipient client
*
* @dev: the device structure
* @buf: a buffer with hbm cl command
*
* Return: the recipient client or NULL if not found
*/
static inline
struct mei_cl *mei_hbm_cl_find_by_cmd(struct mei_device *dev, void *buf)
{
struct mei_hbm_cl_cmd *cmd = (struct mei_hbm_cl_cmd *)buf;
struct mei_cl *cl;
list_for_each_entry(cl, &dev->file_list, link)
if (mei_hbm_cl_addr_equal(cl, cmd))
return cl;
return NULL;
}
/**
* mei_hbm_start_wait - wait for start response message.
*
* @dev: the device structure
*
* Return: 0 on success and < 0 on failure
*/
int mei_hbm_start_wait(struct mei_device *dev)
{
int ret;
if (dev->hbm_state > MEI_HBM_STARTING)
return 0;
mutex_unlock(&dev->device_lock);
ret = wait_event_timeout(dev->wait_hbm_start,
dev->hbm_state != MEI_HBM_STARTING,
dev->timeouts.hbm);
mutex_lock(&dev->device_lock);
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
dev->hbm_state = MEI_HBM_IDLE;
dev_err(dev->dev, "waiting for mei start failed\n");
return -ETIME;
}
return 0;
}
/**
* mei_hbm_start_req - sends start request message.
*
* @dev: the device structure
*
* Return: 0 on success and < 0 on failure
*/
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_version_request req;
int ret;
mei_hbm_reset(dev);
mei_hbm_hdr(&mei_hdr, sizeof(req));
/* host start message */
memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_START_REQ_CMD;
req.host_version.major_version = HBM_MAJOR_VERSION;
req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
return ret;
}
dev->hbm_state = MEI_HBM_STARTING;
dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_hbm_dma_setup_req() - setup DMA request
* @dev: the device structure
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_dma_setup_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_dma_setup_request req;
unsigned int i;
int ret;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
for (i = 0; i < DMA_DSCR_NUM; i++) {
phys_addr_t paddr;
paddr = dev->dr_dscr[i].daddr;
req.dma_dscr[i].addr_hi = upper_32_bits(paddr);
req.dma_dscr[i].addr_lo = lower_32_bits(paddr);
req.dma_dscr[i].size = dev->dr_dscr[i].size;
}
mei_dma_ring_reset(dev);
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
ret);
return ret;
}
dev->hbm_state = MEI_HBM_DR_SETUP;
dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_hbm_capabilities_req - request capabilities
*
* @dev: the device structure
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_capabilities_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_capability_request req;
int ret;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD;
if (dev->hbm_f_vt_supported)
req.capability_requested[0] |= HBM_CAP_VT;
if (dev->hbm_f_cd_supported)
req.capability_requested[0] |= HBM_CAP_CD;
if (dev->hbm_f_gsc_supported)
req.capability_requested[0] |= HBM_CAP_GSC;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev,
"capabilities request write failed: ret = %d.\n", ret);
return ret;
}
dev->hbm_state = MEI_HBM_CAP_SETUP;
dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_hbm_enum_clients_req - sends enumeration client request message.
*
* @dev: the device structure
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_enum_request req;
int ret;
/* enumerate clients */
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_ENUM_REQ_CMD;
req.flags |= dev->hbm_f_dc_supported ? MEI_HBM_ENUM_F_ALLOW_ADD : 0;
req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_hbm_me_cl_add - add new me client to the list
*
* @dev: the device structure
* @res: hbm property response
*
* Return: 0 on success and -ENOMEM on allocation failure
*/
static int mei_hbm_me_cl_add(struct mei_device *dev,
struct hbm_props_response *res)
{
struct mei_me_client *me_cl;
const uuid_le *uuid = &res->client_properties.protocol_name;
mei_me_cl_rm_by_uuid(dev, uuid);
me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
if (!me_cl)
return -ENOMEM;
mei_me_cl_init(me_cl);
me_cl->props = res->client_properties;
me_cl->client_id = res->me_addr;
me_cl->tx_flow_ctrl_creds = 0;
mei_me_cl_add(dev, me_cl);
return 0;
}
/**
* mei_hbm_add_cl_resp - send response to fw on client add request
*
* @dev: the device structure
* @addr: me address
* @status: response status
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
struct mei_msg_hdr mei_hdr;
struct hbm_add_client_response resp;
int ret;
dev_dbg(dev->dev, "adding client response\n");
mei_hbm_hdr(&mei_hdr, sizeof(resp));
memset(&resp, 0, sizeof(resp));
resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp.me_addr = addr;
resp.status = status;
ret = mei_hbm_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
return ret;
}
/**
* mei_hbm_fw_add_cl_req - request from the fw to add a client
*
* @dev: the device structure
* @req: add client request
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
struct hbm_add_client_request *req)
{
int ret;
u8 status = MEI_HBMS_SUCCESS;
BUILD_BUG_ON(sizeof(struct hbm_add_client_request) !=
sizeof(struct hbm_props_response));
ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
if (ret)
status = !MEI_HBMS_SUCCESS;
if (dev->dev_state == MEI_DEV_ENABLED)
schedule_work(&dev->bus_rescan_work);
return mei_hbm_add_cl_resp(dev, req->me_addr, status);
}
/**
* mei_hbm_cl_notify_req - send notification request
*
* @dev: the device structure
 * @cl: a client to send the notification request for
 * @start: true for start, false for stop
*
* Return: 0 on success and -EIO on write failure
*/
int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 start)
{
struct mei_msg_hdr mei_hdr;
struct hbm_notification_request req;
int ret;
mei_hbm_hdr(&mei_hdr, sizeof(req));
mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, sizeof(req));
req.start = start;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
return ret;
}
/**
* notify_res_to_fop - convert notification response to the proper
* notification FOP
*
* @cmd: client notification start response command
*
* Return: MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP;
*/
static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd)
{
struct hbm_notification_response *rs =
(struct hbm_notification_response *)cmd;
return mei_cl_notify_req2fop(rs->start);
}
/**
 * mei_hbm_cl_notify_start_res - update the client state
 *	according to the notify start response
*
* @dev: the device structure
* @cl: mei host client
* @cmd: client notification start response command
*/
static void mei_hbm_cl_notify_start_res(struct mei_device *dev,
struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_notification_response *rs =
(struct hbm_notification_response *)cmd;
cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
if (rs->status == MEI_HBMS_SUCCESS ||
rs->status == MEI_HBMS_ALREADY_STARTED) {
cl->notify_en = true;
cl->status = 0;
} else {
cl->status = -EINVAL;
}
}
/**
 * mei_hbm_cl_notify_stop_res - update the client state
 *	according to the notify stop response
*
* @dev: the device structure
* @cl: mei host client
* @cmd: client notification stop response command
*/
static void mei_hbm_cl_notify_stop_res(struct mei_device *dev,
struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_notification_response *rs =
(struct hbm_notification_response *)cmd;
cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
if (rs->status == MEI_HBMS_SUCCESS ||
rs->status == MEI_HBMS_NOT_STARTED) {
cl->notify_en = false;
cl->status = 0;
} else {
/* TODO: spec is not clear yet about other possible issues */
cl->status = -EINVAL;
}
}
/**
* mei_hbm_cl_notify - signal notification event
*
* @dev: the device structure
* @cmd: notification client message
*/
static void mei_hbm_cl_notify(struct mei_device *dev,
struct mei_hbm_cl_cmd *cmd)
{
struct mei_cl *cl;
cl = mei_hbm_cl_find_by_cmd(dev, cmd);
if (cl)
mei_cl_notify(cl);
}
/**
* mei_hbm_cl_dma_map_req - send client dma map request
*
* @dev: the device structure
* @cl: mei host client
*
* Return: 0 on success and -EIO on write failure
*/
int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr mei_hdr;
struct hbm_client_dma_map_request req;
int ret;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_CLIENT_DMA_MAP_REQ_CMD;
req.client_buffer_id = cl->dma.buffer_id;
req.address_lsb = lower_32_bits(cl->dma.daddr);
req.address_msb = upper_32_bits(cl->dma.daddr);
req.size = cl->dma.size;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "dma map request failed: ret = %d\n", ret);
return ret;
}
/**
* mei_hbm_cl_dma_unmap_req - send client dma unmap request
*
* @dev: the device structure
* @cl: mei host client
*
* Return: 0 on success and -EIO on write failure
*/
int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr mei_hdr;
struct hbm_client_dma_unmap_request req;
int ret;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD;
req.client_buffer_id = cl->dma.buffer_id;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret);
return ret;
}
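/**
 * mei_hbm_cl_dma_map_res - update the client state according to the
 *	dma map response and wake up the waiting client
 *
 * @dev: the device structure
 * @res: client dma response bus message
 */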
static void mei_hbm_cl_dma_map_res(struct mei_device *dev,
struct hbm_client_dma_response *res)
{
struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
cl = NULL;
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
if (cb->fop_type != MEI_FOP_DMA_MAP)
continue;
if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped)
continue;
cl = cb->cl;
break;
}
if (!cl)
return;
if (res->status) {
dev_err(dev->dev, "cl dma map failed %d\n", res->status);
cl->status = -EFAULT;
} else {
dev_dbg(dev->dev, "cl dma map succeeded\n");
cl->dma_mapped = 1;
cl->status = 0;
}
wake_up(&cl->wait);
}
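/**
 * mei_hbm_cl_dma_unmap_res - update the client state according to the
 *	dma unmap response and wake up the waiting client
 *
 * @dev: the device structure
 * @res: client dma response bus message
 */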
static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev,
struct hbm_client_dma_response *res)
{
struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
cl = NULL;
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
if (cb->fop_type != MEI_FOP_DMA_UNMAP)
continue;
if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped)
continue;
cl = cb->cl;
break;
}
if (!cl)
return;
if (res->status) {
dev_err(dev->dev, "cl dma unmap failed %d\n", res->status);
cl->status = -EFAULT;
} else {
dev_dbg(dev->dev, "cl dma unmap succeeded\n");
cl->dma_mapped = 0;
cl->status = 0;
}
wake_up(&cl->wait);
}
/**
* mei_hbm_prop_req - request property for a single client
*
* @dev: the device structure
* @start_idx: client index to start search
*
* Return: 0 on success and < 0 on failure
*/
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
struct mei_msg_hdr mei_hdr;
struct hbm_props_request req;
unsigned long addr;
int ret;
addr = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, start_idx);
/* We got all client properties */
if (addr == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
mei_host_client_init(dev);
return 0;
}
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
req.me_addr = addr;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
return ret;
}
dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
/**
* mei_hbm_pg - sends pg command
*
* @dev: the device structure
* @pg_cmd: the pg command code
*
* Return: -EIO on write failure
* -EOPNOTSUPP if the operation is not supported by the protocol
*/
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
struct mei_msg_hdr mei_hdr;
struct hbm_power_gate req;
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = pg_cmd;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
}
EXPORT_SYMBOL_GPL(mei_hbm_pg);
/**
* mei_hbm_stop_req - send stop request message
*
* @dev: mei device
*
* Return: -EIO on write failure
*/
static int mei_hbm_stop_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_stop_request req;
mei_hbm_hdr(&mei_hdr, sizeof(req));
memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
return mei_hbm_write_message(dev, &mei_hdr, &req);
}
/**
* mei_hbm_cl_flow_control_req - sends flow control request.
*
* @dev: the device structure
* @cl: client info
*
* Return: -EIO on write failure
*/
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
struct hbm_flow_control req;
cl_dbg(dev, cl, "sending flow control\n");
return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD,
&req, sizeof(req));
}
/**
* mei_hbm_add_single_tx_flow_ctrl_creds - adds single buffer credentials.
*
* @dev: the device structure
* @fctrl: flow control response bus message
*
* Return: 0 on success, < 0 otherwise
*/
static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
struct hbm_flow_control *fctrl)
{
struct mei_me_client *me_cl;
int rets;
me_cl = mei_me_cl_by_id(dev, fctrl->me_addr);
if (!me_cl) {
dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr);
return -ENOENT;
}
if (WARN_ON(me_cl->props.single_recv_buf == 0)) {
rets = -EINVAL;
goto out;
}
me_cl->tx_flow_ctrl_creds++;
dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
fctrl->me_addr, me_cl->tx_flow_ctrl_creds);
rets = 0;
out:
mei_me_cl_put(me_cl);
return rets;
}
/**
* mei_hbm_cl_tx_flow_ctrl_creds_res - flow control response from me
*
* @dev: the device structure
* @fctrl: flow control response bus message
*/
static void mei_hbm_cl_tx_flow_ctrl_creds_res(struct mei_device *dev,
struct hbm_flow_control *fctrl)
{
struct mei_cl *cl;
if (!fctrl->host_addr) {
/* single receive buffer */
mei_hbm_add_single_tx_flow_ctrl_creds(dev, fctrl);
return;
}
cl = mei_hbm_cl_find_by_cmd(dev, fctrl);
if (cl) {
cl->tx_flow_ctrl_creds++;
cl_dbg(dev, cl, "flow control creds = %d.\n",
cl->tx_flow_ctrl_creds);
}
}
/**
* mei_hbm_cl_disconnect_req - sends disconnect message to fw.
*
* @dev: the device structure
* @cl: a client to disconnect from
*
* Return: -EIO on write failure
*/
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
struct hbm_client_connect_request req;
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD,
&req, sizeof(req));
}
/**
 * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW
*
* @dev: the device structure
* @cl: a client to disconnect from
*
* Return: -EIO on write failure
*/
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
struct hbm_client_connect_response resp;
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD,
&resp, sizeof(resp));
}
/**
 * mei_hbm_cl_disconnect_res - update the client state
 *	according to the disconnect response
*
* @dev: the device structure
* @cl: mei host client
* @cmd: disconnect client response host bus message
*/
static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_client_connect_response *rs =
(struct hbm_client_connect_response *)cmd;
cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
if (rs->status == MEI_CL_DISCONN_SUCCESS)
cl->state = MEI_FILE_DISCONNECT_REPLY;
cl->status = 0;
}
/**
* mei_hbm_cl_connect_req - send connection request to specific me client
*
* @dev: the device structure
* @cl: a client to connect to
*
* Return: -EIO on write failure
*/
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
struct hbm_client_connect_request req;
return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD,
&req, sizeof(req));
}
/**
 * mei_hbm_cl_connect_res - update the client state
 *	according to the connection response
*
* @dev: the device structure
* @cl: mei host client
* @cmd: connect client response host bus message
*/
static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_client_connect_response *rs =
(struct hbm_client_connect_response *)cmd;
cl_dbg(dev, cl, "hbm: connect response status=%s\n",
mei_cl_conn_status_str(rs->status));
if (rs->status == MEI_CL_CONN_SUCCESS)
cl->state = MEI_FILE_CONNECTED;
else {
cl->state = MEI_FILE_DISCONNECT_REPLY;
if (rs->status == MEI_CL_CONN_NOT_FOUND) {
mei_me_cl_del(dev, cl->me_cl);
if (dev->dev_state == MEI_DEV_ENABLED)
schedule_work(&dev->bus_rescan_work);
}
}
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
/**
 * mei_hbm_cl_res - process hbm response received on behalf
 *	of a client
*
* @dev: the device structure
* @rs: hbm client message
* @fop_type: file operation type
*/
static void mei_hbm_cl_res(struct mei_device *dev,
struct mei_hbm_cl_cmd *rs,
enum mei_cb_file_ops fop_type)
{
struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
cl = NULL;
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
cl = cb->cl;
if (cb->fop_type != fop_type)
continue;
if (mei_hbm_cl_addr_equal(cl, rs)) {
list_del_init(&cb->list);
break;
}
}
if (!cl)
return;
switch (fop_type) {
case MEI_FOP_CONNECT:
mei_hbm_cl_connect_res(dev, cl, rs);
break;
case MEI_FOP_DISCONNECT:
mei_hbm_cl_disconnect_res(dev, cl, rs);
break;
case MEI_FOP_NOTIFY_START:
mei_hbm_cl_notify_start_res(dev, cl, rs);
break;
case MEI_FOP_NOTIFY_STOP:
mei_hbm_cl_notify_stop_res(dev, cl, rs);
break;
default:
return;
}
cl->timer_count = 0;
wake_up(&cl->wait);
}
/**
 * mei_hbm_fw_disconnect_req - disconnect request initiated by the ME firmware;
 *	the host sends a disconnect response
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
*
* Return: -ENOMEM on allocation failure
*/
static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
struct hbm_client_connect_request *disconnect_req)
{
struct mei_cl *cl;
struct mei_cl_cb *cb;
cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
if (cl) {
cl_warn(dev, cl, "fw disconnect request received\n");
cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
NULL);
if (!cb)
return -ENOMEM;
}
return 0;
}
/**
* mei_hbm_pg_enter_res - PG enter response received
*
* @dev: the device structure.
*
* Return: 0 on success, -EPROTO on state mismatch
*/
static int mei_hbm_pg_enter_res(struct mei_device *dev)
{
if (mei_pg_state(dev) != MEI_PG_OFF ||
dev->pg_event != MEI_PG_EVENT_WAIT) {
dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
dev->pg_event = MEI_PG_EVENT_RECEIVED;
wake_up(&dev->wait_pg);
return 0;
}
/**
* mei_hbm_pg_resume - process with PG resume
*
* @dev: the device structure.
*/
void mei_hbm_pg_resume(struct mei_device *dev)
{
pm_request_resume(dev->dev);
}
EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
/**
* mei_hbm_pg_exit_res - PG exit response received
*
* @dev: the device structure.
*
* Return: 0 on success, -EPROTO on state mismatch
*/
static int mei_hbm_pg_exit_res(struct mei_device *dev)
{
if (mei_pg_state(dev) != MEI_PG_ON ||
(dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_IDLE)) {
dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
switch (dev->pg_event) {
case MEI_PG_EVENT_WAIT:
dev->pg_event = MEI_PG_EVENT_RECEIVED;
wake_up(&dev->wait_pg);
break;
case MEI_PG_EVENT_IDLE:
/*
* If the driver is not waiting on this then
* this is HW initiated exit from PG.
* Start runtime pm resume sequence to exit from PG.
*/
dev->pg_event = MEI_PG_EVENT_RECEIVED;
mei_hbm_pg_resume(dev);
break;
default:
WARN(1, "hbm: pg exit response: unexpected pg event = %d\n",
dev->pg_event);
return -EPROTO;
}
return 0;
}
/**
* mei_hbm_config_features - check what hbm features and commands
* are supported by the fw
*
* @dev: the device structure
*/
static void mei_hbm_config_features(struct mei_device *dev)
{
/* Power Gating Isolation Support */
dev->hbm_f_pg_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_PGI)
dev->hbm_f_pg_supported = 1;
if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
dev->hbm_f_pg_supported = 1;
dev->hbm_f_dc_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
dev->hbm_f_dc_supported = 1;
dev->hbm_f_ie_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
dev->hbm_f_ie_supported = 1;
/* disconnect on connect timeout instead of link reset */
dev->hbm_f_dot_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
dev->hbm_f_dot_supported = 1;
/* Notification Event Support */
dev->hbm_f_ev_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
dev->hbm_f_ev_supported = 1;
/* Fixed Address Client Support */
dev->hbm_f_fa_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
/* OS ver message Support */
dev->hbm_f_os_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
dev->hbm_f_os_supported = 1;
/* DMA Ring Support */
dev->hbm_f_dr_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
(dev->version.major_version == HBM_MAJOR_VERSION_DR &&
dev->version.minor_version >= HBM_MINOR_VERSION_DR))
dev->hbm_f_dr_supported = 1;
/* VTag Support */
dev->hbm_f_vt_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_VT ||
(dev->version.major_version == HBM_MAJOR_VERSION_VT &&
dev->version.minor_version >= HBM_MINOR_VERSION_VT))
dev->hbm_f_vt_supported = 1;
/* GSC support */
if (dev->version.major_version > HBM_MAJOR_VERSION_GSC ||
(dev->version.major_version == HBM_MAJOR_VERSION_GSC &&
dev->version.minor_version >= HBM_MINOR_VERSION_GSC))
dev->hbm_f_gsc_supported = 1;
/* Capability message Support */
dev->hbm_f_cap_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_CAP ||
(dev->version.major_version == HBM_MAJOR_VERSION_CAP &&
dev->version.minor_version >= HBM_MINOR_VERSION_CAP))
dev->hbm_f_cap_supported = 1;
/* Client DMA Support */
dev->hbm_f_cd_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_CD ||
(dev->version.major_version == HBM_MAJOR_VERSION_CD &&
dev->version.minor_version >= HBM_MINOR_VERSION_CD))
dev->hbm_f_cd_supported = 1;
}
/**
* mei_hbm_version_is_supported - checks whether the driver can
* support the hbm version of the device
*
* @dev: the device structure
* Return: true if driver can support hbm version of the device
*/
bool mei_hbm_version_is_supported(struct mei_device *dev)
{
return (dev->version.major_version < HBM_MAJOR_VERSION) ||
(dev->version.major_version == HBM_MAJOR_VERSION &&
dev->version.minor_version <= HBM_MINOR_VERSION);
}
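/*
* For illustration (hypothetical numbers): with a driver built against
* HBM 2.2, a device reporting 2.1 or 1.7 passes the check above, while a
* device reporting 2.3 or 3.0 does not.
*/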
/**
* mei_hbm_dispatch - bottom half read routine after ISR to
* handle the read bus message cmd processing.
*
* @dev: the device structure
* @hdr: header of bus message
*
* Return: 0 on success and < 0 on failure
*/
int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
struct mei_bus_message *mei_msg;
struct hbm_host_version_response *version_res;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
struct hbm_dma_setup_response *dma_setup_res;
struct hbm_add_client_request *add_cl_req;
struct hbm_capability_response *capability_res;
int ret;
struct mei_hbm_cl_cmd *cl_cmd;
struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *fctrl;
struct hbm_client_dma_response *client_dma_res;
/* read the message to our buffer */
BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
cl_cmd = (struct mei_hbm_cl_cmd *)mei_msg;
/* ignore spurious messages and prevent reset nesting;
* hbm is put to idle during system reset
*/
if (dev->hbm_state == MEI_HBM_IDLE) {
dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n");
return 0;
}
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
dev_dbg(dev->dev, "hbm: start: response message received.\n");
dev->init_clients_timer = 0;
version_res = (struct hbm_host_version_response *)mei_msg;
dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
HBM_MAJOR_VERSION, HBM_MINOR_VERSION,
version_res->me_max_version.major_version,
version_res->me_max_version.minor_version);
if (version_res->host_version_supported) {
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
} else {
dev->version.major_version =
version_res->me_max_version.major_version;
dev->version.minor_version =
version_res->me_max_version.minor_version;
}
if (!mei_hbm_version_is_supported(dev)) {
dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
dev_err(dev->dev, "hbm: start: failed to send stop request\n");
return -EIO;
}
break;
}
mei_hbm_config_features(dev);
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
if (dev->hbm_f_cap_supported) {
if (mei_hbm_capabilities_req(dev))
return -EIO;
wake_up(&dev->wait_hbm_start);
break;
}
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
dev_info(dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
wake_up(&dev->wait_hbm_start);
break;
}
}
dev->hbm_f_dr_supported = 0;
mei_dmam_ring_free(dev);
if (mei_hbm_enum_clients_req(dev))
return -EIO;
wake_up(&dev->wait_hbm_start);
break;
case MEI_HBM_CAPABILITIES_RES_CMD:
dev_dbg(dev->dev, "hbm: capabilities response: message received.\n");
dev->init_clients_timer = 0;
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CAP_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
return 0;
}
dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
capability_res = (struct hbm_capability_response *)mei_msg;
if (!(capability_res->capability_granted[0] & HBM_CAP_VT))
dev->hbm_f_vt_supported = 0;
if (!(capability_res->capability_granted[0] & HBM_CAP_CD))
dev->hbm_f_cd_supported = 0;
if (!(capability_res->capability_granted[0] & HBM_CAP_GSC))
dev->hbm_f_gsc_supported = 0;
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
dev_info(dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
break;
}
}
dev->hbm_f_dr_supported = 0;
mei_dmam_ring_free(dev);
if (mei_hbm_enum_clients_req(dev))
return -EIO;
break;
case MEI_HBM_DMA_SETUP_RES_CMD:
dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
dev->init_clients_timer = 0;
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_DR_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
if (dma_setup_res->status) {
u8 status = dma_setup_res->status;
if (status == MEI_HBMS_NOT_ALLOWED) {
dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
} else {
dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
status,
mei_hbm_status_str(status));
}
dev->hbm_f_dr_supported = 0;
mei_dmam_ring_free(dev);
}
if (mei_hbm_enum_clients_req(dev))
return -EIO;
break;
case CLIENT_CONNECT_RES_CMD:
dev_dbg(dev->dev, "hbm: client connect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT);
break;
case CLIENT_DISCONNECT_RES_CMD:
dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT);
break;
case MEI_FLOW_CONTROL_CMD:
dev_dbg(dev->dev, "hbm: client flow control response: message received.\n");
fctrl = (struct hbm_flow_control *)mei_msg;
mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl);
break;
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
ret = mei_hbm_pg_enter_res(dev);
if (ret)
return ret;
break;
case MEI_PG_ISOLATION_EXIT_REQ_CMD:
dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
ret = mei_hbm_pg_exit_res(dev);
if (ret)
return ret;
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
dev_dbg(dev->dev, "hbm: properties response: message received.\n");
dev->init_clients_timer = 0;
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
props_res = (struct hbm_props_response *)mei_msg;
if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
props_res->me_addr);
} else if (props_res->status) {
dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
props_res->status,
mei_hbm_status_str(props_res->status));
return -EPROTO;
} else {
mei_hbm_me_cl_add(dev, props_res);
}
/* request property for the next client */
if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
return -EIO;
break;
case HOST_ENUM_RES_CMD:
dev_dbg(dev->dev, "hbm: enumeration response: message received\n");
dev->init_clients_timer = 0;
enum_res = (struct hbm_host_enum_response *) mei_msg;
BUILD_BUG_ON(sizeof(dev->me_clients_map)
< sizeof(enum_res->valid_addresses));
memcpy(dev->me_clients_map, enum_res->valid_addresses,
sizeof(enum_res->valid_addresses));
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}
dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
/* first property request */
if (mei_hbm_prop_req(dev, 0))
return -EIO;
break;
case HOST_STOP_RES_CMD:
dev_dbg(dev->dev, "hbm: stop response: message received\n");
dev->init_clients_timer = 0;
if (dev->hbm_state != MEI_HBM_STOPPED) {
dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
dev_info(dev->dev, "hbm: stop response: resetting.\n");
/* force the reset */
return -EPROTO;
case CLIENT_DISCONNECT_REQ_CMD:
dev_dbg(dev->dev, "hbm: disconnect request: message received\n");
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
dev_dbg(dev->dev, "hbm: stop request: message received\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
dev_err(dev->dev, "hbm: stop request: failed to send stop request\n");
return -EIO;
}
break;
case MEI_HBM_ADD_CLIENT_REQ_CMD:
dev_dbg(dev->dev, "hbm: add client request received\n");
/*
* after the host receives the enum_resp
* message, clients may be added or removed
*/
if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
dev->hbm_state >= MEI_HBM_STOPPED) {
dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
add_cl_req = (struct hbm_add_client_request *)mei_msg;
ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
if (ret) {
dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
ret);
return -EIO;
}
dev_dbg(dev->dev, "hbm: add client request processed\n");
break;
case MEI_HBM_NOTIFY_RES_CMD:
dev_dbg(dev->dev, "hbm: notify response received\n");
mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
break;
case MEI_HBM_NOTIFICATION_CMD:
dev_dbg(dev->dev, "hbm: notification\n");
mei_hbm_cl_notify(dev, cl_cmd);
break;
case MEI_HBM_CLIENT_DMA_MAP_RES_CMD:
dev_dbg(dev->dev, "hbm: client dma map response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_map_res(dev, client_dma_res);
break;
case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD:
dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_unmap_res(dev, client_dma_res);
break;
default:
WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd);
return -EPROTO;
}
return 0;
}
| linux-master | drivers/misc/mei/hbm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mei_cl_bus.h>
#include "mei_dev.h"
#include "client.h"
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
/**
* __mei_cl_send - internal client send (write)
*
* @cl: host client
* @buf: buffer to send
* @length: buffer length
* @vtag: virtual tag
* @mode: sending mode
*
* Return: written size in bytes or < 0 on error
*/
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode)
{
return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
}
/**
* __mei_cl_send_timeout - internal client send (write)
*
* @cl: host client
* @buf: buffer to send
* @length: buffer length
* @vtag: virtual tag
* @mode: sending mode
* @timeout: send timeout in milliseconds.
* effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set.
* set the timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
*
* Return: written size in bytes or < 0 on error
*/
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode, unsigned long timeout)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
bus = cl->dev;
mutex_lock(&bus->device_lock);
if (bus->dev_state != MEI_DEV_ENABLED &&
bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
/* Check if we have an ME client device */
if (!mei_me_cl_is_active(cl->me_cl)) {
rets = -ENOTTY;
goto out;
}
if (vtag) {
/* Check if vtag is supported by client */
rets = mei_cl_vt_support_check(cl);
if (rets)
goto out;
}
if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
while (cl->tx_cb_queued >= bus->tx_queue_limit) {
mutex_unlock(&bus->device_lock);
rets = wait_event_interruptible(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE ||
(!mei_cl_is_connected(cl)));
mutex_lock(&bus->device_lock);
if (rets) {
if (signal_pending(current))
rets = -EINTR;
goto out;
}
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
}
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
if (!cb) {
rets = -ENOMEM;
goto out;
}
cb->vtag = vtag;
cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
memcpy(cb->buf.data, buf, length);
/* hack we point data to header */
if (mode & MEI_CL_IO_SGL) {
cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
cb->buf.data = NULL;
cb->buf.size = 0;
}
rets = mei_cl_write(cl, cb, timeout);
if (mode & MEI_CL_IO_SGL && rets == 0)
rets = length;
out:
mutex_unlock(&bus->device_lock);
return rets;
}
/**
* __mei_cl_recv - internal client receive (read)
*
* @cl: host client
* @buf: buffer to receive
* @length: buffer length
* @mode: io mode
* @vtag: virtual tag
* @timeout: recv timeout, 0 for infinite timeout
*
* Return: read size in bytes or < 0 on error
*/
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
unsigned int mode, unsigned long timeout)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
size_t r_length;
ssize_t rets;
bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
bus = cl->dev;
mutex_lock(&bus->device_lock);
if (bus->dev_state != MEI_DEV_ENABLED &&
bus->dev_state != MEI_DEV_POWERING_DOWN) {
rets = -ENODEV;
goto out;
}
cb = mei_cl_read_cb(cl, NULL);
if (cb)
goto copy;
rets = mei_cl_read_start(cl, length, NULL);
if (rets && rets != -EBUSY)
goto out;
if (nonblock) {
rets = -EAGAIN;
goto out;
}
/* wait on event only if there is no other waiter */
/* synchronized under device mutex */
if (!waitqueue_active(&cl->rx_wait)) {
mutex_unlock(&bus->device_lock);
if (timeout) {
rets = wait_event_interruptible_timeout
(cl->rx_wait,
mei_cl_read_cb(cl, NULL) ||
(!mei_cl_is_connected(cl)),
msecs_to_jiffies(timeout));
if (rets == 0)
return -ETIME;
if (rets < 0) {
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}
} else {
if (wait_event_interruptible
(cl->rx_wait,
mei_cl_read_cb(cl, NULL) ||
(!mei_cl_is_connected(cl)))) {
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}
}
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
}
cb = mei_cl_read_cb(cl, NULL);
if (!cb) {
rets = 0;
goto out;
}
copy:
if (cb->status) {
rets = cb->status;
goto free;
}
/* for the GSC type - copy the extended header to the buffer */
if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
memcpy(buf, cb->ext_hdr, r_length);
} else {
r_length = min_t(size_t, length, cb->buf_idx);
memcpy(buf, cb->buf.data, r_length);
}
rets = r_length;
if (vtag)
*vtag = cb->vtag;
free:
mei_cl_del_rd_completed(cl, cb);
out:
mutex_unlock(&bus->device_lock);
return rets;
}
/**
* mei_cldev_send_vtag - me device send with vtag (write)
*
* @cldev: me client device
* @buf: buffer to send
* @length: buffer length
* @vtag: virtual tag
*
* Return:
* * written size in bytes
* * < 0 on error
*/
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag)
{
struct mei_cl *cl = cldev->cl;
return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
/**
* mei_cldev_recv_vtag - client receive with vtag (read)
*
* @cldev: me client device
* @buf: buffer to receive
* @length: buffer length
* @vtag: virtual tag
*
* Return:
* * read size in bytes
* * < 0 on error
*/
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag)
{
struct mei_cl *cl = cldev->cl;
return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
/**
* mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
*
* @cldev: me client device
* @buf: buffer to receive
* @length: buffer length
* @vtag: virtual tag
*
* Return:
* * read size in bytes
* * -EAGAIN if function will block.
* * < 0 on other error
*/
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
size_t length, u8 *vtag)
{
struct mei_cl *cl = cldev->cl;
return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
/**
* mei_cldev_send - me device send (write)
*
* @cldev: me client device
* @buf: buffer to send
* @length: buffer length
*
* Return:
* * written size in bytes
* * < 0 on error
*/
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);
/**
* mei_cldev_recv - client receive (read)
*
* @cldev: me client device
* @buf: buffer to receive
* @length: buffer length
*
* Return: read size in bytes or < 0 on error
*/
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
/**
* mei_cldev_recv_nonblock - non block client receive (read)
*
* @cldev: me client device
* @buf: buffer to receive
* @length: buffer length
*
* Return: read size in bytes or < 0 on error
* -EAGAIN if function will block.
*/
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length)
{
return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
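/*
* Illustrative sketch only, not used by this driver: a hypothetical bus
* client running a simple request/response transaction over the exported
* helpers above. The request bytes and reply buffer size are assumptions.
*/
static int __maybe_unused mei_cldev_example_txn(struct mei_cl_device *cldev)
{
	static const u8 req[] = { 0x01, 0x00 }; /* hypothetical request */
	u8 reply[64];
	ssize_t ret;

	/* blocking send of one complete message */
	ret = mei_cldev_send(cldev, req, sizeof(req));
	if (ret < 0)
		return ret;

	/* blocking receive of the firmware reply */
	ret = mei_cldev_recv(cldev, reply, sizeof(reply));
	if (ret < 0)
		return ret;

	return 0;
}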
/**
* mei_cl_bus_rx_work - dispatch rx event for a bus device
*
* @work: work
*/
static void mei_cl_bus_rx_work(struct work_struct *work)
{
struct mei_cl_device *cldev;
struct mei_device *bus;
cldev = container_of(work, struct mei_cl_device, rx_work);
bus = cldev->bus;
if (cldev->rx_cb)
cldev->rx_cb(cldev);
mutex_lock(&bus->device_lock);
if (mei_cl_is_connected(cldev->cl))
mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
mutex_unlock(&bus->device_lock);
}
/**
* mei_cl_bus_notif_work - dispatch FW notif event for a bus device
*
* @work: work
*/
static void mei_cl_bus_notif_work(struct work_struct *work)
{
struct mei_cl_device *cldev;
cldev = container_of(work, struct mei_cl_device, notif_work);
if (cldev->notif_cb)
cldev->notif_cb(cldev);
}
/**
* mei_cl_bus_notify_event - schedule notify cb on bus client
*
* @cl: host client
*
* Return: true if event was scheduled
* false if the client is not waiting for event
*/
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->notif_cb)
return false;
if (!cl->notify_ev)
return false;
schedule_work(&cldev->notif_work);
cl->notify_ev = false;
return true;
}
/**
* mei_cl_bus_rx_event - schedule rx event
*
* @cl: host client
*
* Return: true if event was scheduled
* false if the client is not waiting for event
*/
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->rx_cb)
return false;
schedule_work(&cldev->rx_work);
return true;
}
/**
* mei_cldev_register_rx_cb - register Rx event callback
*
* @cldev: me client device
* @rx_cb: callback function
*
* Return: 0 on success
* -EALREADY if a callback is already registered
* <0 on other errors
*/
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
struct mei_device *bus = cldev->bus;
int ret;
if (!rx_cb)
return -EINVAL;
if (cldev->rx_cb)
return -EALREADY;
cldev->rx_cb = rx_cb;
INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
mutex_lock(&bus->device_lock);
if (mei_cl_is_connected(cldev->cl))
ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
else
ret = -ENODEV;
mutex_unlock(&bus->device_lock);
if (ret && ret != -EBUSY) {
cancel_work_sync(&cldev->rx_work);
cldev->rx_cb = NULL;
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
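/*
* Illustrative sketch only, not used by this driver: a hypothetical Rx
* callback that a client driver could pass to mei_cldev_register_rx_cb()
* after enabling the device. The buffer size is an assumption.
*/
static void __maybe_unused mei_cldev_example_rx(struct mei_cl_device *cldev)
{
	u8 buf[128];
	ssize_t ret;

	/* drain the message that triggered this callback */
	ret = mei_cldev_recv(cldev, buf, sizeof(buf));
	if (ret < 0)
		dev_dbg(&cldev->dev, "example rx: recv failed %zd\n", ret);
}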
/**
* mei_cldev_register_notif_cb - register FW notification event callback
*
* @cldev: me client device
* @notif_cb: callback function
*
* Return: 0 on success
* -EALREADY if a callback is already registered
* <0 on other errors
*/
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb)
{
struct mei_device *bus = cldev->bus;
int ret;
if (!notif_cb)
return -EINVAL;
if (cldev->notif_cb)
return -EALREADY;
cldev->notif_cb = notif_cb;
INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
mutex_lock(&bus->device_lock);
ret = mei_cl_notify_request(cldev->cl, NULL, 1);
mutex_unlock(&bus->device_lock);
if (ret) {
cancel_work_sync(&cldev->notif_work);
cldev->notif_cb = NULL;
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
/**
* mei_cldev_get_drvdata - driver data getter
*
* @cldev: mei client device
*
* Return: driver private data
*/
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
/**
* mei_cldev_set_drvdata - driver data setter
*
* @cldev: mei client device
* @data: data to store
*/
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
/**
* mei_cldev_uuid - return uuid of the underlying me client
*
* @cldev: mei client device
*
* Return: me client uuid
*/
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);
/**
* mei_cldev_ver - return protocol version of the underlying me client
*
* @cldev: mei client device
*
* Return: me client protocol version
*/
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);
/**
* mei_cldev_enabled - check whether the device is enabled
*
* @cldev: mei client device
*
* Return: true if me client is initialized and connected
*/
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
/**
* mei_cl_bus_module_get - acquire module of the underlying
* hw driver.
*
* @cldev: mei client device
*
* Return: true on success; false if the module was removed.
*/
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
return try_module_get(cldev->bus->dev->driver->owner);
}
/**
* mei_cl_bus_module_put - release the underlying hw module.
*
* @cldev: mei client device
*/
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
module_put(cldev->bus->dev->driver->owner);
}
/**
* mei_cl_bus_vtag - get bus vtag entry wrapper
* The tag for bus client is always first.
*
* @cl: host client
*
* Return: bus vtag or NULL
*/
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
return list_first_entry_or_null(&cl->vtag_map,
struct mei_cl_vtag, list);
}
/**
* mei_cl_bus_vtag_alloc - add bus client entry to vtag map
*
* @cldev: me client device
*
* Return:
* * 0 on success
* * -ENOMEM if memory allocation failed
*/
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
struct mei_cl *cl = cldev->cl;
struct mei_cl_vtag *cl_vtag;
/*
* Bail out if the client does not support vtags
* or has already allocated one
*/
if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
return 0;
cl_vtag = mei_cl_vtag_alloc(NULL, 0);
if (IS_ERR(cl_vtag))
return -ENOMEM;
list_add_tail(&cl_vtag->list, &cl->vtag_map);
return 0;
}
/**
* mei_cl_bus_vtag_free - remove the bus entry from vtag map
*
* @cldev: me client device
*/
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
struct mei_cl *cl = cldev->cl;
struct mei_cl_vtag *cl_vtag;
cl_vtag = mei_cl_bus_vtag(cl);
if (!cl_vtag)
return;
list_del(&cl_vtag->list);
kfree(cl_vtag);
}
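/**
* mei_cldev_dma_map - allocate and map a DMA buffer for the client
*
* @cldev: me client device
* @buffer_id: client defined buffer id, must be non-zero
* @size: size of the buffer, must be a multiple of MEI_FW_PAGE_SIZE
*
* Return: virtual address of the mapped buffer on success, ERR_PTR on failure
*/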
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
struct mei_device *bus;
struct mei_cl *cl;
int ret;
if (!cldev || !buffer_id || !size)
return ERR_PTR(-EINVAL);
if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
MEI_FW_PAGE_SIZE);
return ERR_PTR(-EINVAL);
}
cl = cldev->cl;
bus = cldev->bus;
mutex_lock(&bus->device_lock);
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
if (ret)
mei_cl_unlink(cl);
notlinked:
mutex_unlock(&bus->device_lock);
if (ret)
return ERR_PTR(ret);
return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
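/**
* mei_cldev_dma_unmap - unmap and free the client DMA buffer
*
* @cldev: me client device
*
* Return: 0 on success and < 0 on error
*/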
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
struct mei_device *bus;
struct mei_cl *cl;
int ret;
if (!cldev)
return -EINVAL;
cl = cldev->cl;
bus = cldev->bus;
mutex_lock(&bus->device_lock);
ret = mei_cl_dma_unmap(cl, NULL);
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
mutex_unlock(&bus->device_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
/**
* mei_cldev_enable - enable me client device
* create a connection with the me client
*
* @cldev: me client device
*
* Return: 0 on success and < 0 on error
*/
int mei_cldev_enable(struct mei_cl_device *cldev)
{
struct mei_device *bus = cldev->bus;
struct mei_cl *cl;
int ret;
cl = cldev->cl;
mutex_lock(&bus->device_lock);
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
if (mei_cl_is_connected(cl)) {
ret = 0;
goto out;
}
if (!mei_me_cl_is_active(cldev->me_cl)) {
dev_err(&cldev->dev, "me client is not active\n");
ret = -ENOTTY;
goto out;
}
ret = mei_cl_bus_vtag_alloc(cldev);
if (ret)
goto out;
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
if (ret < 0) {
dev_err(&cldev->dev, "cannot connect\n");
mei_cl_bus_vtag_free(cldev);
}
out:
if (ret)
mei_cl_unlink(cl);
notlinked:
mutex_unlock(&bus->device_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
/**
* mei_cldev_unregister_callbacks - internal wrapper for unregistering
* callbacks.
*
* @cldev: client device
*/
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
if (cldev->rx_cb) {
cancel_work_sync(&cldev->rx_work);
cldev->rx_cb = NULL;
}
if (cldev->notif_cb) {
cancel_work_sync(&cldev->notif_work);
cldev->notif_cb = NULL;
}
}
/**
* mei_cldev_disable - disable me client device
* disconnect from the me client
*
* @cldev: me client device
*
* Return: 0 on success and < 0 on error
*/
int mei_cldev_disable(struct mei_cl_device *cldev)
{
struct mei_device *bus;
struct mei_cl *cl;
int err;
if (!cldev)
return -ENODEV;
cl = cldev->cl;
bus = cldev->bus;
mei_cldev_unregister_callbacks(cldev);
mutex_lock(&bus->device_lock);
mei_cl_bus_vtag_free(cldev);
if (!mei_cl_is_connected(cl)) {
dev_dbg(bus->dev, "Already disconnected\n");
err = 0;
goto out;
}
err = mei_cl_disconnect(cl);
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
out:
/* Flush queues and remove any pending read unless we have mapped DMA */
if (!cl->dma_mapped) {
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
}
mutex_unlock(&bus->device_lock);
return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
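/*
* Illustrative sketch only, not used by this driver: a typical
* enable/disable pairing in a hypothetical client driver's probe and
* remove callbacks. mei_cldev_example_rx refers to the sketch above.
*/
static int __maybe_unused mei_cldev_example_probe(struct mei_cl_device *cldev,
						  const struct mei_cl_device_id *id)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0)
		return ret;

	ret = mei_cldev_register_rx_cb(cldev, mei_cldev_example_rx);
	if (ret) {
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}

static void __maybe_unused mei_cldev_example_remove(struct mei_cl_device *cldev)
{
	mei_cldev_disable(cldev);
}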
/**
* mei_cldev_send_gsc_command - sends a gsc command, by sending
* a gsc mei message to gsc and receiving a reply from gsc
*
* @cldev: me client device
* @client_id: client id to send the command to
* @fence_id: fence id to send the command to
* @sg_in: scatter gather list containing addresses for rx message buffer
* @total_in_len: total length of data in 'in' sg, can be less than the sum of buffer sizes
* @sg_out: scatter gather list containing addresses for tx message buffer
*
* Return:
* * written size in bytes
* * < 0 on error
*/
ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
u8 client_id, u32 fence_id,
struct scatterlist *sg_in,
size_t total_in_len,
struct scatterlist *sg_out)
{
struct mei_cl *cl;
struct mei_device *bus;
ssize_t ret = 0;
struct mei_ext_hdr_gsc_h2f *ext_hdr;
size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
int sg_out_nents, sg_in_nents;
int i;
struct scatterlist *sg;
struct mei_ext_hdr_gsc_f2h rx_msg;
unsigned int sg_len;
if (!cldev || !sg_in || !sg_out)
return -EINVAL;
cl = cldev->cl;
bus = cldev->bus;
dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
if (!bus->hbm_f_gsc_supported)
return -EOPNOTSUPP;
sg_out_nents = sg_nents(sg_out);
sg_in_nents = sg_nents(sg_in);
/* at least one entry in tx and rx sgls must be present */
if (sg_out_nents <= 0 || sg_in_nents <= 0)
return -EINVAL;
buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
if (!ext_hdr)
return -ENOMEM;
/* construct the GSC message */
ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
ext_hdr->client_id = client_id;
ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
ext_hdr->fence_id = fence_id;
ext_hdr->input_address_count = sg_in_nents;
ext_hdr->output_address_count = sg_out_nents;
ext_hdr->reserved[0] = 0;
ext_hdr->reserved[1] = 0;
/* copy in-sgl to the message */
for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
total_in_len -= ext_hdr->sgl[i].length;
}
/* copy out-sgl to the message */
for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
ext_hdr->sgl[i].length = sg_len;
}
/* send the message to GSC */
ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
if (ret < 0) {
dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
goto end;
}
if (ret != buf_sz) {
dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
ret, buf_sz);
ret = -EIO;
goto end;
}
/* receive the reply from GSC, note that at this point sg_in should contain the reply */
ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
if (ret != sizeof(rx_msg)) {
dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
ret, sizeof(rx_msg));
if (ret >= 0)
ret = -EIO;
goto end;
}
/* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
ret = -EFAULT;
goto end;
}
dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
ret = rx_msg.written;
end:
kfree(ext_hdr);
return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
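/*
* Note on the request assembled above: the GSC message is a single
* mei_ext_hdr_gsc_h2f header followed by sg_in_nents input mei_gsc_sgl
* entries and then sg_out_nents output entries; the header length field
* carries the total message size in dwords.
*/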
/**
* mei_cl_device_find - find matching entry in the driver id table
*
* @cldev: me client device
* @cldrv: me client driver
*
* Return: id on success; NULL if no id matches
*/
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
const struct mei_cl_driver *cldrv)
{
const struct mei_cl_device_id *id;
const uuid_le *uuid;
u8 version;
bool match;
uuid = mei_me_cl_uuid(cldev->me_cl);
version = mei_me_cl_ver(cldev->me_cl);
id = cldrv->id_table;
while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
if (!uuid_le_cmp(*uuid, id->uuid)) {
match = true;
if (cldev->name[0])
if (strncmp(cldev->name, id->name,
sizeof(id->name)))
match = false;
if (id->version != MEI_CL_VERSION_ANY)
if (id->version != version)
match = false;
if (match)
return id;
}
id++;
}
return NULL;
}
/**
* mei_cl_device_match - device match function
*
* @dev: device
* @drv: driver
*
* Return: 1 if a matching device was found, 0 otherwise
*/
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
const struct mei_cl_device *cldev = to_mei_cl_device(dev);
const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
const struct mei_cl_device_id *found_id;
if (!cldev->do_match)
return 0;
if (!cldrv || !cldrv->id_table)
return 0;
found_id = mei_cl_device_find(cldev, cldrv);
if (found_id)
return 1;
return 0;
}
/**
* mei_cl_device_probe - bus probe function
*
* @dev: device
*
* Return: 0 on success; < 0 otherwise
*/
static int mei_cl_device_probe(struct device *dev)
{
struct mei_cl_device *cldev;
struct mei_cl_driver *cldrv;
const struct mei_cl_device_id *id;
int ret;
cldev = to_mei_cl_device(dev);
cldrv = to_mei_cl_driver(dev->driver);
if (!cldrv || !cldrv->probe)
return -ENODEV;
id = mei_cl_device_find(cldev, cldrv);
if (!id)
return -ENODEV;
if (!mei_cl_bus_module_get(cldev)) {
dev_err(&cldev->dev, "get hw module failed");
return -ENODEV;
}
ret = cldrv->probe(cldev, id);
if (ret) {
mei_cl_bus_module_put(cldev);
return ret;
}
__module_get(THIS_MODULE);
return 0;
}
/**
* mei_cl_device_remove - remove device from the bus
*
* @dev: device
*/
static void mei_cl_device_remove(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
if (cldrv->remove)
cldrv->remove(cldev);
mei_cldev_unregister_callbacks(cldev);
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);
static ssize_t version_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 version = mei_me_cl_ver(cldev->me_cl);
return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
u8 version = mei_me_cl_ver(cldev->me_cl);
return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);
static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 fixed = mei_me_cl_fixed(cldev->me_cl);
return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);
static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
bool vt = mei_me_cl_vt(cldev->me_cl);
return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);
static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);
static struct attribute *mei_cldev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_uuid.attr,
&dev_attr_version.attr,
&dev_attr_modalias.attr,
&dev_attr_max_conn.attr,
&dev_attr_fixed.attr,
&dev_attr_vtag.attr,
&dev_attr_max_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
/**
* mei_cl_device_uevent - me client bus uevent handler
*
* @dev: device
* @env: uevent environment
*
* Return: 0 on success, -ENOMEM when add_uevent_var fails
*/
static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
u8 version = mei_me_cl_ver(cldev->me_cl);
if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
return -ENOMEM;
if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
return -ENOMEM;
if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
return -ENOMEM;
if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
cldev->name, uuid, version))
return -ENOMEM;
return 0;
}
static struct bus_type mei_cl_bus_type = {
.name = "mei",
.dev_groups = mei_cldev_groups,
.match = mei_cl_device_match,
.probe = mei_cl_device_probe,
.remove = mei_cl_device_remove,
.uevent = mei_cl_device_uevent,
};
static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
if (bus)
get_device(bus->dev);
return bus;
}
static void mei_dev_bus_put(struct mei_device *bus)
{
if (bus)
put_device(bus->dev);
}
static void mei_cl_bus_dev_release(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
kfree(cldev->cl);
kfree(cldev);
}
static const struct device_type mei_cl_device_type = {
.release = mei_cl_bus_dev_release,
};
/**
* mei_cl_bus_set_name - set device name for me client device
* <controller>-<client device>
* Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
dev_set_name(&cldev->dev, "%s-%pUl",
dev_name(cldev->bus->dev),
mei_me_cl_uuid(cldev->me_cl));
}
/**
* mei_cl_bus_dev_alloc - initialize and allocate mei client device
*
* @bus: mei device
* @me_cl: me client
*
* Return: allocated device structure or NULL on allocation failure
*/
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_me_client *me_cl)
{
struct mei_cl_device *cldev;
struct mei_cl *cl;
cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
if (!cldev)
return NULL;
cl = mei_cl_allocate(bus);
if (!cl) {
kfree(cldev);
return NULL;
}
device_initialize(&cldev->dev);
cldev->dev.parent = bus->dev;
cldev->dev.bus = &mei_cl_bus_type;
cldev->dev.type = &mei_cl_device_type;
cldev->bus = mei_dev_bus_get(bus);
cldev->me_cl = mei_me_cl_get(me_cl);
cldev->cl = cl;
mei_cl_bus_set_name(cldev);
cldev->is_added = 0;
INIT_LIST_HEAD(&cldev->bus_list);
device_enable_async_suspend(&cldev->dev);
return cldev;
}
/**
* mei_cl_bus_dev_setup - setup me client device
* run fix up routines and set the device name
*
* @bus: mei device
* @cldev: me client device
*
* Return: true if the device is eligible for enumeration
*/
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
struct mei_cl_device *cldev)
{
cldev->do_match = 1;
mei_cl_bus_dev_fixup(cldev);
/* the device name can change during fix up */
if (cldev->do_match)
mei_cl_bus_set_name(cldev);
return cldev->do_match == 1;
}
/**
* mei_cl_bus_dev_add - add me client devices
*
* @cldev: me client device
*
* Return: 0 on success; < 0 on failure
*/
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
int ret;
dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
mei_me_cl_uuid(cldev->me_cl),
mei_me_cl_ver(cldev->me_cl));
ret = device_add(&cldev->dev);
if (!ret)
cldev->is_added = 1;
return ret;
}
/**
* mei_cl_bus_dev_stop - stop the driver
*
* @cldev: me client device
*/
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
cldev->do_match = 0;
if (cldev->is_added)
device_release_driver(&cldev->dev);
}
/**
* mei_cl_bus_dev_destroy - destroy me client device object
*
* @cldev: me client device
*
* Locking: called under "dev->cl_bus_lock" lock
*/
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{
WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
if (!cldev->is_added)
return;
device_del(&cldev->dev);
list_del_init(&cldev->bus_list);
cldev->is_added = 0;
put_device(&cldev->dev);
}
/**
* mei_cl_bus_remove_device - remove a device from the bus
*
* @cldev: me client device
*/
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
mei_cl_bus_dev_stop(cldev);
mei_cl_bus_dev_destroy(cldev);
}
/**
* mei_cl_bus_remove_devices - remove all devices from the bus
*
* @bus: mei device
*/
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
struct mei_cl_device *cldev, *next;
mutex_lock(&bus->cl_bus_lock);
list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
mei_cl_bus_remove_device(cldev);
mutex_unlock(&bus->cl_bus_lock);
}
/**
* mei_cl_bus_dev_init - allocate and initialize a mei client device
* based on the me client
*
* @bus: mei device
* @me_cl: me client
*
* Locking: called under "dev->cl_bus_lock" lock
*/
static void mei_cl_bus_dev_init(struct mei_device *bus,
struct mei_me_client *me_cl)
{
struct mei_cl_device *cldev;
WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
if (me_cl->bus_added)
return;
cldev = mei_cl_bus_dev_alloc(bus, me_cl);
if (!cldev)
return;
me_cl->bus_added = true;
list_add_tail(&cldev->bus_list, &bus->device_list);
}
/**
* mei_cl_bus_rescan - scan the me clients list and create
* devices for eligible clients
*
* @bus: mei device
*/
static void mei_cl_bus_rescan(struct mei_device *bus)
{
struct mei_cl_device *cldev, *n;
struct mei_me_client *me_cl;
mutex_lock(&bus->cl_bus_lock);
down_read(&bus->me_clients_rwsem);
list_for_each_entry(me_cl, &bus->me_clients, list)
mei_cl_bus_dev_init(bus, me_cl);
up_read(&bus->me_clients_rwsem);
list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
if (!mei_me_cl_is_active(cldev->me_cl)) {
mei_cl_bus_remove_device(cldev);
continue;
}
if (cldev->is_added)
continue;
if (mei_cl_bus_dev_setup(bus, cldev))
mei_cl_bus_dev_add(cldev);
else {
list_del_init(&cldev->bus_list);
put_device(&cldev->dev);
}
}
mutex_unlock(&bus->cl_bus_lock);
dev_dbg(bus->dev, "rescan end");
}
void mei_cl_bus_rescan_work(struct work_struct *work)
{
struct mei_device *bus =
container_of(work, struct mei_device, bus_rescan_work);
mei_cl_bus_rescan(bus);
}
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
struct module *owner)
{
int err;
cldrv->driver.name = cldrv->name;
cldrv->driver.owner = owner;
cldrv->driver.bus = &mei_cl_bus_type;
err = driver_register(&cldrv->driver);
if (err)
return err;
pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
driver_unregister(&cldrv->driver);
pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
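/*
* Illustrative sketch only, not used by this driver: the shape of a
* hypothetical client driver binding to this bus. The UUID is a
* placeholder and the probe/remove callbacks refer to the sketches above;
* a real client module would register the structure with
* module_mei_cl_driver().
*/
#define MEI_UUID_EXAMPLE UUID_LE(0x00000000, 0x0000, 0x0000, \
				 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01)

static const struct mei_cl_device_id mei_example_tbl[] __maybe_unused = {
	{ .uuid = MEI_UUID_EXAMPLE, .version = MEI_CL_VERSION_ANY },
	{ }
};

static struct mei_cl_driver mei_example_driver __maybe_unused = {
	.id_table = mei_example_tbl,
	.name = "mei_example",
	.probe = mei_cldev_example_probe,
	.remove = mei_cldev_example_remove,
};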
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
}
void __exit mei_cl_bus_exit(void)
{
bus_unregister(&mei_cl_bus_type);
}
| linux-master | drivers/misc/mei/bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hw-txe.h"
static const struct pci_device_id mei_txe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
/**
* mei_txe_probe - Device Initialization Routine
*
* @pdev: PCI device structure
* @ent: entry in mei_txe_pci_tbl
*
* Return: 0 on success, <0 on failure.
*/
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
int err;
/* enable pci dev */
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions and mapping IO device memory for mei driver */
err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto end;
}
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (err) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto end;
}
}
/* allocates and initializes the mei dev structure */
dev = mei_txe_dev_init(pdev);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_txe_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev);
pci_enable_msi(pdev);
/* clear spurious interrupts */
mei_clear_interrupts(dev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_txe_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_txe_irq_quick_handler,
mei_txe_irq_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto end;
}
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
err = mei_register(dev, &pdev->dev);
if (err)
goto stop;
pci_set_drvdata(pdev, dev);
/*
* MEI requires resuming from runtime suspend mode
* in order to perform the link reset flow upon system suspend.
*/
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/*
* TXE maps runtime suspend/resume to own power gating states,
* hence we need to go around native PCI runtime service which
* eventually brings the device into D3cold/hot state.
* But the TXE device cannot wake up from D3 unlike from own
* power gating. To get around PCI device native runtime pm,
* TXE uses runtime pm domain handlers which take precedence.
*/
mei_txe_set_pm_domain(dev);
pm_runtime_put_noidle(&pdev->dev);
return 0;
stop:
mei_stop(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
}
/**
* mei_txe_shutdown - Device Shutdown Routine
*
* @pdev: PCI device structure
*
* mei_txe_shutdown is called from the reboot notifier
* it's a simplified version of remove so we go down
* faster.
*/
static void mei_txe_shutdown(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = pci_get_drvdata(pdev);
if (!dev)
return;
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
}
/**
* mei_txe_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
* mei_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
static void mei_txe_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = pci_get_drvdata(pdev);
if (!dev) {
dev_err(&pdev->dev, "mei: dev == NULL\n");
return;
}
pm_runtime_get_noresume(&pdev->dev);
mei_stop(dev);
mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
mei_deregister(dev);
}
#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
return 0;
}
static int mei_txe_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int err;
dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
pci_enable_msi(pdev);
mei_clear_interrupts(dev);
/* request and enable interrupt */
if (pci_dev_msi_enabled(pdev))
err = request_threaded_irq(pdev->irq,
NULL,
mei_txe_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
else
err = request_threaded_irq(pdev->irq,
mei_txe_irq_quick_handler,
mei_txe_irq_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
pdev->irq);
return err;
}
err = mei_restart(dev);
return err;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
struct mei_device *dev;
dev_dbg(device, "rpm: txe: runtime_idle\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
return -EBUSY;
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
struct mei_device *dev;
int ret;
dev_dbg(device, "rpm: txe: runtime suspend\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
ret = mei_txe_aliveness_set_sync(dev, 0);
else
ret = -EAGAIN;
/* keep irq on we are staying in D0 */
dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
mutex_unlock(&dev->device_lock);
if (ret && ret != -EAGAIN)
schedule_work(&dev->reset_work);
return ret;
}
static int mei_txe_pm_runtime_resume(struct device *device)
{
struct mei_device *dev;
int ret;
dev_dbg(device, "rpm: txe: runtime resume\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
mei_enable_interrupts(dev);
ret = mei_txe_aliveness_set_sync(dev, 1);
mutex_unlock(&dev->device_lock);
dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
return ret;
}
/**
* mei_txe_set_pm_domain - fill and set pm domain structure for device
*
* @dev: mei_device
*/
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
}
}
/**
* mei_txe_unset_pm_domain - clean pm domain structure for device
*
* @dev: mei_device
*/
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
dev_pm_domain_set(dev->dev, NULL);
}
static const struct dev_pm_ops mei_txe_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
mei_txe_pci_resume)
SET_RUNTIME_PM_OPS(
mei_txe_pm_runtime_suspend,
mei_txe_pm_runtime_resume,
mei_txe_pm_runtime_idle)
};
#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
static struct pci_driver mei_txe_driver = {
.name = KBUILD_MODNAME,
.id_table = mei_txe_pci_tbl,
.probe = mei_txe_probe,
.remove = mei_txe_remove,
.shutdown = mei_txe_shutdown,
.driver.pm = MEI_TXE_PM_OPS,
};
module_pci_driver(mei_txe_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/mei/pci-txe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched/signal.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "client.h"
static const struct class mei_class = {
.name = "mei",
};
static dev_t mei_devt;
#define MEI_MAX_DEVS MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);
/**
* mei_open - the open function
*
* @inode: pointer to inode structure
* @file: pointer to file structure
*
* Return: 0 on success, <0 on error
*/
static int mei_open(struct inode *inode, struct file *file)
{
struct mei_device *dev;
struct mei_cl *cl;
int err;
dev = container_of(inode->i_cdev, struct mei_device, cdev);
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
err = -ENODEV;
goto err_unlock;
}
cl = mei_cl_alloc_linked(dev);
if (IS_ERR(cl)) {
err = PTR_ERR(cl);
goto err_unlock;
}
cl->fp = file;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
return nonseekable_open(inode, file);
err_unlock:
mutex_unlock(&dev->device_lock);
return err;
}
/**
* mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
*
* @cl: host client
* @fp: pointer to file structure
*
*/
static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_vtag *vtag_l, *next;
list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
if (vtag_l->fp == fp) {
list_del(&vtag_l->list);
kfree(vtag_l);
return;
}
}
}
/**
* mei_release - the release function
*
* @inode: pointer to inode structure
* @file: pointer to file structure
*
* Return: 0 on success, <0 on error
*/
static int mei_release(struct inode *inode, struct file *file)
{
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
mutex_lock(&dev->device_lock);
mei_cl_vtag_remove_by_fp(cl, file);
if (!list_empty(&cl->vtag_map)) {
cl_dbg(dev, cl, "not the last vtag\n");
mei_cl_flush_queues(cl, file);
rets = 0;
goto out;
}
rets = mei_cl_disconnect(cl);
/*
* Check again: This is necessary since disconnect releases the lock
* and another client can connect in the meantime.
*/
if (!list_empty(&cl->vtag_map)) {
cl_dbg(dev, cl, "not the last vtag after disconnect\n");
mei_cl_flush_queues(cl, file);
goto out;
}
mei_cl_flush_queues(cl, NULL);
cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
kfree(cl);
out:
file->private_data = NULL;
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_read - the read function.
*
* @file: pointer to file structure
* @ubuf: pointer to user buffer
* @length: buffer length
* @offset: data offset in buffer
*
* Return: >=0 data length on success, <0 on error
*/
static ssize_t mei_read(struct file *file, char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
struct mei_cl_cb *cb = NULL;
bool nonblock = !!(file->f_flags & O_NONBLOCK);
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto out;
}
if (length == 0) {
rets = 0;
goto out;
}
if (ubuf == NULL) {
rets = -EMSGSIZE;
goto out;
}
cb = mei_cl_read_cb(cl, file);
if (cb)
goto copy_buffer;
if (*offset > 0)
*offset = 0;
rets = mei_cl_read_start(cl, length, file);
if (rets && rets != -EBUSY) {
cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
goto out;
}
if (nonblock) {
rets = -EAGAIN;
goto out;
}
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
mei_cl_read_cb(cl, file) ||
!mei_cl_is_connected(cl))) {
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
cb = mei_cl_read_cb(cl, file);
if (!cb) {
rets = 0;
goto out;
}
copy_buffer:
/* now copy the data to user space */
if (cb->status) {
rets = cb->status;
cl_dbg(dev, cl, "read operation failed %zd\n", rets);
goto free;
}
cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
cb->buf.size, cb->buf_idx, *offset);
if (*offset >= cb->buf_idx) {
rets = 0;
goto free;
}
/*
* length is being truncated to PAGE_SIZE,
* however buf_idx may point beyond that
*/
length = min_t(size_t, length, cb->buf_idx - *offset);
if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto free;
}
rets = length;
*offset += length;
/* not all data was read, keep the cb */
if (*offset < cb->buf_idx)
goto out;
free:
mei_cl_del_rd_completed(cl, cb);
*offset = 0;
out:
cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_cl_vtag_by_fp - obtain the vtag by file pointer
*
* @cl: host client
* @fp: pointer to file structure
*
* Return: vtag value on success, otherwise 0
*/
static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
{
struct mei_cl_vtag *cl_vtag;
if (!fp)
return 0;
list_for_each_entry(cl_vtag, &cl->vtag_map, list)
if (cl_vtag->fp == fp)
return cl_vtag->vtag;
return 0;
}
/**
* mei_write - the write function.
*
* @file: pointer to file structure
* @ubuf: pointer to user buffer
* @length: buffer length
* @offset: data offset in buffer
*
* Return: >=0 data length on success, <0 on error
*/
static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb;
struct mei_device *dev;
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto out;
}
if (!mei_cl_is_connected(cl)) {
cl_err(dev, cl, "is not connected");
rets = -ENODEV;
goto out;
}
if (!mei_me_cl_is_active(cl->me_cl)) {
rets = -ENOTTY;
goto out;
}
if (length > mei_cl_mtu(cl)) {
rets = -EFBIG;
goto out;
}
if (length == 0) {
rets = 0;
goto out;
}
while (cl->tx_cb_queued >= dev->tx_queue_limit) {
if (file->f_flags & O_NONBLOCK) {
rets = -EAGAIN;
goto out;
}
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE ||
(!mei_cl_is_connected(cl)));
mutex_lock(&dev->device_lock);
if (rets) {
if (signal_pending(current))
rets = -EINTR;
goto out;
}
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
}
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
if (!cb) {
rets = -ENOMEM;
goto out;
}
cb->vtag = mei_cl_vtag_by_fp(cl, file);
rets = copy_from_user(cb->buf.data, ubuf, length);
if (rets) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
mei_io_cb_free(cb);
goto out;
}
rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
out:
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_ioctl_connect_client - the connect to fw client IOCTL function
*
* @file: private data of the file object
* @in_client_uuid: requested UUID for connection
* @client: IOCTL connect data, output parameters
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
static int mei_ioctl_connect_client(struct file *file,
const uuid_le *in_client_uuid,
struct mei_client *client)
{
struct mei_device *dev;
struct mei_me_client *me_cl;
struct mei_cl *cl;
int rets;
cl = file->private_data;
dev = cl->dev;
if (cl->state != MEI_FILE_INITIALIZING &&
cl->state != MEI_FILE_DISCONNECTED)
return -EBUSY;
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
if (!me_cl) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
in_client_uuid);
rets = -ENOTTY;
goto end;
}
if (me_cl->props.fixed_address) {
bool forbidden = dev->override_fixed_address ?
!dev->allow_fixed_address : !dev->hbm_f_fa_supported;
if (forbidden) {
dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
in_client_uuid);
rets = -ENOTTY;
goto end;
}
}
dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
me_cl->client_id);
dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
me_cl->props.protocol_version);
dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
me_cl->props.max_msg_length);
/* prepare the output buffer */
client->max_msg_length = me_cl->props.max_msg_length;
client->protocol_version = me_cl->props.protocol_version;
dev_dbg(dev->dev, "Can connect?\n");
rets = mei_cl_connect(cl, me_cl, file);
end:
mei_me_cl_put(me_cl);
return rets;
}
/**
* mei_vt_support_check - check if client supports vtags
*
* @dev: mei_device
* @uuid: client UUID
*
* Locking: called under "dev->device_lock" lock
*
* Return:
* 0 - supported
* -ENOTTY - no such client
* -EOPNOTSUPP - vtags are not supported by client
*/
static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl;
int ret;
if (!dev->hbm_f_vt_supported)
return -EOPNOTSUPP;
me_cl = mei_me_cl_by_uuid(dev, uuid);
if (!me_cl) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
uuid);
return -ENOTTY;
}
ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
mei_me_cl_put(me_cl);
return ret;
}
/**
* mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
*
* @file: private data of the file object
* @in_client_uuid: requested UUID for connection
* @client: IOCTL connect data, output parameters
* @vtag: vm tag
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
static int mei_ioctl_connect_vtag(struct file *file,
const uuid_le *in_client_uuid,
struct mei_client *client,
u8 vtag)
{
struct mei_device *dev;
struct mei_cl *cl;
struct mei_cl *pos;
struct mei_cl_vtag *cl_vtag;
cl = file->private_data;
dev = cl->dev;
dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);
switch (cl->state) {
case MEI_FILE_DISCONNECTED:
if (mei_cl_vtag_by_fp(cl, file) != vtag) {
dev_err(dev->dev, "reconnect with different vtag\n");
return -EINVAL;
}
break;
case MEI_FILE_INITIALIZING:
/* malicious connect from another thread may push vtag */
if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
dev_err(dev->dev, "vtag already filled\n");
return -EINVAL;
}
list_for_each_entry(pos, &dev->file_list, link) {
if (pos == cl)
continue;
if (!pos->me_cl)
continue;
/* only search for same UUID */
if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
continue;
/* if tag already exist try another fp */
if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
continue;
/* replace cl with acquired one */
dev_dbg(dev->dev, "replacing with existing cl\n");
mei_cl_unlink(cl);
kfree(cl);
file->private_data = pos;
cl = pos;
break;
}
cl_vtag = mei_cl_vtag_alloc(file, vtag);
if (IS_ERR(cl_vtag))
return -ENOMEM;
list_add_tail(&cl_vtag->list, &cl->vtag_map);
break;
default:
return -EBUSY;
}
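/*
* Wait for the client to leave any transitional connect/disconnect
* state before deciding whether a fresh connect is still needed.
*/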
while (cl->state != MEI_FILE_INITIALIZING &&
cl->state != MEI_FILE_DISCONNECTED &&
cl->state != MEI_FILE_CONNECTED) {
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
}
if (!mei_cl_is_connected(cl))
return mei_ioctl_connect_client(file, in_client_uuid, client);
client->max_msg_length = cl->me_cl->props.max_msg_length;
client->protocol_version = cl->me_cl->props.protocol_version;
return 0;
}
/**
* mei_ioctl_client_notify_request - propagate event notification request to client
*
* @file: pointer to file structure
* @request: 0 - disable, 1 - enable
*
* Return: 0 on success, <0 on error
*/
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
struct mei_cl *cl = file->private_data;
if (request != MEI_HBM_NOTIFICATION_START &&
request != MEI_HBM_NOTIFICATION_STOP)
return -EINVAL;
return mei_cl_notify_request(cl, file, (u8)request);
}
/**
* mei_ioctl_client_notify_get - wait for notification request
*
* @file: pointer to file structure
* @notify_get: 0 - disable, 1 - enable
*
* Return: 0 on success, <0 on error
*/
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
struct mei_cl *cl = file->private_data;
bool notify_ev;
bool block = (file->f_flags & O_NONBLOCK) == 0;
int rets;
rets = mei_cl_notify_get(cl, block, ¬ify_ev);
if (rets)
return rets;
*notify_get = notify_ev ? 1 : 0;
return 0;
}
/**
* mei_ioctl - the IOCTL function
*
* @file: pointer to file structure
* @cmd: ioctl command
* @data: pointer to mei message structure
*
* Return: 0 on success, <0 on error
*/
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
struct mei_device *dev;
struct mei_cl *cl = file->private_data;
struct mei_connect_client_data conn;
struct mei_connect_client_data_vtag conn_vtag;
const uuid_le *cl_uuid;
struct mei_client *props;
u8 vtag;
u32 notify_get, notify_req;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
goto out;
}
switch (cmd) {
case IOCTL_MEI_CONNECT_CLIENT:
dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
cl_uuid = &conn.in_client_uuid;
props = &conn.out_client_properties;
vtag = 0;
rets = mei_vt_support_check(dev, cl_uuid);
if (rets == -ENOTTY)
goto out;
if (!rets)
rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
vtag);
else
rets = mei_ioctl_connect_client(file, cl_uuid, props);
if (rets)
goto out;
/* if all is ok, copying the data back to user. */
if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
}
break;
case IOCTL_MEI_CONNECT_CLIENT_VTAG:
dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
if (copy_from_user(&conn_vtag, (char __user *)data,
sizeof(conn_vtag))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
cl_uuid = &conn_vtag.connect.in_client_uuid;
props = &conn_vtag.out_client_properties;
vtag = conn_vtag.connect.vtag;
rets = mei_vt_support_check(dev, cl_uuid);
if (rets == -EOPNOTSUPP)
dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n",
cl_uuid);
if (rets)
goto out;
if (!vtag) {
dev_dbg(dev->dev, "vtag can't be zero\n");
rets = -EINVAL;
goto out;
}
rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
if (rets)
goto out;
/* if all is ok, copying the data back to user. */
if (copy_to_user((char __user *)data, &conn_vtag,
sizeof(conn_vtag))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
}
break;
case IOCTL_MEI_NOTIFY_SET:
dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
if (copy_from_user(¬ify_req,
(char __user *)data, sizeof(notify_req))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
rets = mei_ioctl_client_notify_request(file, notify_req);
break;
case IOCTL_MEI_NOTIFY_GET:
dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
rets = mei_ioctl_client_notify_get(file, ¬ify_get);
if (rets)
goto out;
dev_dbg(dev->dev, "copy connect data to user\n");
if (copy_to_user((char __user *)data,
¬ify_get, sizeof(notify_get))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
}
break;
default:
rets = -ENOIOCTLCMD;
}
out:
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_poll - the poll function
*
* @file: pointer to file structure
* @wait: pointer to poll_table structure
*
* Return: poll mask
*/
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
__poll_t mask = 0;
bool notify_en;
if (WARN_ON(!cl || !cl->dev))
return EPOLLERR;
dev = cl->dev;
mutex_lock(&dev->device_lock);
notify_en = cl->notify_en && (req_events & EPOLLPRI);
if (dev->dev_state != MEI_DEV_ENABLED ||
!mei_cl_is_connected(cl)) {
mask = EPOLLERR;
goto out;
}
if (notify_en) {
poll_wait(file, &cl->ev_wait, wait);
if (cl->notify_ev)
mask |= EPOLLPRI;
}
if (req_events & (EPOLLIN | EPOLLRDNORM)) {
poll_wait(file, &cl->rx_wait, wait);
if (mei_cl_read_cb(cl, file))
mask |= EPOLLIN | EPOLLRDNORM;
else
mei_cl_read_start(cl, mei_cl_mtu(cl), file);
}
if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
poll_wait(file, &cl->tx_wait, wait);
if (cl->tx_cb_queued < dev->tx_queue_limit)
mask |= EPOLLOUT | EPOLLWRNORM;
}
out:
mutex_unlock(&dev->device_lock);
return mask;
}
/**
* mei_cl_is_write_queued - check if the client has pending writes.
*
* @cl: writing host client
*
* Return: true if client is writing, false otherwise.
*/
static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
list_for_each_entry(cb, &dev->write_list, list)
if (cb->cl == cl)
return true;
list_for_each_entry(cb, &dev->write_waiting_list, list)
if (cb->cl == cl)
return true;
return false;
}
/**
* mei_fsync - the fsync handler
*
* @fp: pointer to file structure
* @start: unused
* @end: unused
* @datasync: unused
*
* Return: 0 on success, -ENODEV if client is not connected
*/
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
struct mei_cl *cl = fp->private_data;
struct mei_device *dev;
int rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
dev = cl->dev;
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
while (mei_cl_is_write_queued(cl)) {
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE ||
!mei_cl_is_connected(cl));
mutex_lock(&dev->device_lock);
if (rets) {
if (signal_pending(current))
rets = -EINTR;
goto out;
}
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}
}
rets = 0;
out:
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_fasync - asynchronous io support
*
* @fd: file descriptor
* @file: pointer to file structure
* @band: band bitmap
*
* Return: negative on error,
* 0 if it made no changes,
* and positive if a process was added or deleted
*/
static int mei_fasync(int fd, struct file *file, int band)
{
struct mei_cl *cl = file->private_data;
if (!mei_cl_is_connected(cl))
return -ENODEV;
return fasync_helper(fd, file, band, &cl->ev_async);
}
/**
* trc_show - mei device trc attribute show method
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t trc_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
u32 trc;
int ret;
ret = mei_trc_status(dev, &trc);
if (ret)
return ret;
return sprintf(buf, "%08X\n", trc);
}
static DEVICE_ATTR_RO(trc);
/**
* fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t fw_status_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
struct mei_fw_status fw_status;
int err, i;
ssize_t cnt = 0;
mutex_lock(&dev->device_lock);
err = mei_fw_status(dev, &fw_status);
mutex_unlock(&dev->device_lock);
if (err) {
dev_err(device, "read fw_status error = %d\n", err);
return err;
}
for (i = 0; i < fw_status.count; i++)
cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
fw_status.status[i]);
return cnt;
}
static DEVICE_ATTR_RO(fw_status);
/**
* hbm_ver_show - display HBM protocol version negotiated with FW
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t hbm_ver_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
struct hbm_version ver;
mutex_lock(&dev->device_lock);
ver = dev->version;
mutex_unlock(&dev->device_lock);
return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);
/**
* hbm_ver_drv_show - display HBM protocol version advertised by driver
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t hbm_ver_drv_show(struct device *device,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);
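/**
* tx_queue_limit_show - display the tx queue limit
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/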
static ssize_t tx_queue_limit_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
u8 size = 0;
mutex_lock(&dev->device_lock);
size = dev->tx_queue_limit;
mutex_unlock(&dev->device_lock);
return sysfs_emit(buf, "%u\n", size);
}
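/**
* tx_queue_limit_store - set the tx queue limit
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char in buffer
* @count: buffer size
*
* Return: number of bytes processed or error
*/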
static ssize_t tx_queue_limit_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mei_device *dev = dev_get_drvdata(device);
u8 limit;
unsigned int inp;
int err;
err = kstrtouint(buf, 10, &inp);
if (err)
return err;
if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
return -EINVAL;
limit = inp;
mutex_lock(&dev->device_lock);
dev->tx_queue_limit = limit;
mutex_unlock(&dev->device_lock);
return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);
/**
* fw_ver_show - display ME FW version
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t fw_ver_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
struct mei_fw_version *ver;
ssize_t cnt = 0;
int i;
ver = dev->fw_ver;
for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
ver[i].platform, ver[i].major, ver[i].minor,
ver[i].hotfix, ver[i].buildno);
return cnt;
}
static DEVICE_ATTR_RO(fw_ver);
/**
* dev_state_show - display device state
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t dev_state_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
enum mei_dev_state dev_state;
mutex_lock(&dev->device_lock);
dev_state = dev->dev_state;
mutex_unlock(&dev->device_lock);
return sprintf(buf, "%s", mei_dev_state_str(dev_state));
}
static DEVICE_ATTR_RO(dev_state);
/**
* mei_set_devstate - set new device state and notify the dev_state sysfs file
*
* @dev: mei_device
* @state: new device state
*/
void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
struct device *clsdev;
if (dev->dev_state == state)
return;
dev->dev_state = state;
clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev);
if (clsdev) {
sysfs_notify(&clsdev->kobj, NULL, "dev_state");
put_device(clsdev);
}
}
/**
* kind_show - display device kind
*
* @device: device pointer
* @attr: attribute pointer
* @buf: char out buffer
*
* Return: number of bytes printed into buf or error
*/
static ssize_t kind_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mei_device *dev = dev_get_drvdata(device);
ssize_t ret;
if (dev->kind)
ret = sprintf(buf, "%s\n", dev->kind);
else
ret = sprintf(buf, "%s\n", "mei");
return ret;
}
static DEVICE_ATTR_RO(kind);
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
&dev_attr_hbm_ver_drv.attr,
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
&dev_attr_trc.attr,
&dev_attr_kind.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
/*
* file operations structure will be used for mei char device.
*/
static const struct file_operations mei_fops = {
.owner = THIS_MODULE,
.read = mei_read,
.unlocked_ioctl = mei_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mei_open,
.release = mei_release,
.write = mei_write,
.poll = mei_poll,
.fsync = mei_fsync,
.fasync = mei_fasync,
.llseek = no_llseek
};
/**
* mei_minor_get - obtain next free device minor number
*
* @dev: device pointer
*
* Return: allocated minor, or -ENOSPC if no free minor left
*/
static int mei_minor_get(struct mei_device *dev)
{
int ret;
mutex_lock(&mei_minor_lock);
ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
if (ret >= 0)
dev->minor = ret;
else if (ret == -ENOSPC)
dev_err(dev->dev, "too many mei devices\n");
mutex_unlock(&mei_minor_lock);
return ret;
}
/**
* mei_minor_free - mark device minor number as free
*
* @dev: device pointer
*/
static void mei_minor_free(struct mei_device *dev)
{
mutex_lock(&mei_minor_lock);
idr_remove(&mei_idr, dev->minor);
mutex_unlock(&mei_minor_lock);
}
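/**
* mei_register - register the mei character device and its sysfs attributes
*
* @dev: mei device structure
* @parent: parent device
*
* Return: 0 on success, <0 on error
*/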
int mei_register(struct mei_device *dev, struct device *parent)
{
struct device *clsdev; /* class device */
int ret, devno;
ret = mei_minor_get(dev);
if (ret < 0)
return ret;
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
cdev_init(&dev->cdev, &mei_fops);
dev->cdev.owner = parent->driver->owner;
/* Add the device */
ret = cdev_add(&dev->cdev, devno, 1);
if (ret) {
dev_err(parent, "unable to add device %d:%d\n",
MAJOR(mei_devt), dev->minor);
goto err_dev_add;
}
clsdev = device_create_with_groups(&mei_class, parent, devno,
dev, mei_groups,
"mei%d", dev->minor);
if (IS_ERR(clsdev)) {
dev_err(parent, "unable to create device %d:%d\n",
MAJOR(mei_devt), dev->minor);
ret = PTR_ERR(clsdev);
goto err_dev_create;
}
mei_dbgfs_register(dev, dev_name(clsdev));
return 0;
err_dev_create:
cdev_del(&dev->cdev);
err_dev_add:
mei_minor_free(dev);
return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
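/**
* mei_deregister - unregister the mei character device
*
* @dev: mei device structure
*/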
void mei_deregister(struct mei_device *dev)
{
int devno;
devno = dev->cdev.dev;
cdev_del(&dev->cdev);
mei_dbgfs_deregister(dev);
device_destroy(&mei_class, devno);
mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);
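/**
* mei_init - register the mei class, char device region and client bus
*
* Return: 0 on success, <0 on error
*/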
static int __init mei_init(void)
{
int ret;
ret = class_register(&mei_class);
if (ret)
return ret;
ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
if (ret < 0) {
pr_err("unable to allocate char dev region\n");
goto err_class;
}
ret = mei_cl_bus_init();
if (ret < 0) {
pr_err("unable to initialize bus\n");
goto err_chrdev;
}
return 0;
err_chrdev:
unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
class_unregister(&mei_class);
return ret;
}
static void __exit mei_exit(void)
{
unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
class_unregister(&mei_class);
mei_cl_bus_exit();
}
module_init(mei_init);
module_exit(mei_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/mei/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "mei-trace.h"
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
#endif /* __CHECKER__ */
| linux-master | drivers/misc/mei/mei-trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"
/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
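/* read a firmware status register through PCI config space */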
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
return pci_read_config_dword(pdev, where, val);
}
/**
* mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
* @pdev: PCI device structure
* @cfg: per generation config
*
* Return: true if ME Interface is valid, false otherwise
*/
static bool mei_me_quirk_probe(struct pci_dev *pdev,
const struct mei_cfg *cfg)
{
if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
return false;
}
return true;
}
/**
* mei_me_probe - Device Initialization Routine
*
* @pdev: PCI device structure
* @ent: entry in mei_me_pci_tbl
*
* Return: 0 on success, <0 on failure.
*/
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct mei_cfg *cfg;
struct mei_device *dev;
struct mei_me_hw *hw;
unsigned int irqflags;
int err;
cfg = mei_me_get_cfg(ent->driver_data);
if (!cfg)
return -ENODEV;
if (!mei_me_quirk_probe(pdev, cfg))
return -ENODEV;
/* enable pci dev */
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions and mapping IO device memory for mei driver */
err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto end;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto end;
}
/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(&pdev->dev, cfg, false);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
hw->irq = pdev->irq;
/* request and enable interrupt */
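/* MSI is exclusive, so the threaded handler can be oneshot; legacy IRQ lines may be shared */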
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
irqflags, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto end;
}
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
goto release_irq;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
err = mei_register(dev, &pdev->dev);
if (err)
goto stop;
pci_set_drvdata(pdev, dev);
/*
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/*
* ME maps runtime suspend/resume to D0i states,
* hence we need to go around native PCI runtime service which
* eventually brings the device into D3cold/hot state,
* but the mei device cannot wake up from D3 unlike from D0i3.
* To get around the PCI device native runtime pm,
* ME uses runtime pm domain handlers which take precedence
* over the driver's pm handlers.
*/
mei_me_set_pm_domain(dev);
if (mei_pg_is_enabled(dev)) {
pm_runtime_put_noidle(&pdev->dev);
if (hw->d0i3_supported)
pm_runtime_allow(&pdev->dev);
}
dev_dbg(&pdev->dev, "initialization successful.\n");
return 0;
stop:
mei_stop(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
}
/**
* mei_me_shutdown - Device Removal Routine
*
* @pdev: PCI device structure
*
* mei_me_shutdown is called from the reboot notifier.
* It is a simplified version of remove, so we go down
* faster.
*/
static void mei_me_shutdown(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = pci_get_drvdata(pdev);
if (!dev)
return;
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
}
/**
* mei_me_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
* mei_me_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
static void mei_me_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
dev = pci_get_drvdata(pdev);
if (!dev)
return;
if (mei_pg_is_enabled(dev))
pm_runtime_get_noresume(&pdev->dev);
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
mei_deregister(dev);
}
#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_prepare(struct device *device)
{
pm_runtime_resume(device);
return 0;
}
static int mei_me_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
return 0;
}
static int mei_me_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
unsigned int irqflags;
int err;
dev = pci_get_drvdata(pdev);
if (!dev)
return -ENODEV;
pci_enable_msi(pdev);
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
/* request and enable interrupt */
err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
irqflags, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
pdev->irq);
return err;
}
err = mei_restart(dev);
if (err)
return err;
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
return 0;
}
static void mei_me_pci_complete(struct device *device)
{
pm_runtime_suspend(device);
}
#else /* CONFIG_PM_SLEEP */
#define mei_me_pci_prepare NULL
#define mei_me_pci_complete NULL
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
struct mei_device *dev;
dev_dbg(device, "rpm: me: runtime_idle\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
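/* always report busy: the actual suspend is driven by the autosuspend timer */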
return -EBUSY;
}
static int mei_me_pm_runtime_suspend(struct device *device)
{
struct mei_device *dev;
int ret;
dev_dbg(device, "rpm: me: runtime suspend\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
ret = mei_me_pg_enter_sync(dev);
else
ret = -EAGAIN;
mutex_unlock(&dev->device_lock);
dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);
if (ret && ret != -EAGAIN)
schedule_work(&dev->reset_work);
return ret;
}
static int mei_me_pm_runtime_resume(struct device *device)
{
struct mei_device *dev;
int ret;
dev_dbg(device, "rpm: me: runtime resume\n");
dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
ret = mei_me_pg_exit_sync(dev);
mutex_unlock(&dev->device_lock);
dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
return ret;
}
/**
* mei_me_set_pm_domain - fill and set pm domain structure for device
*
* @dev: mei_device
*/
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;
dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
}
}
/**
* mei_me_unset_pm_domain - clean pm domain structure for device
*
* @dev: mei_device
*/
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
dev_pm_domain_set(dev->dev, NULL);
}
static const struct dev_pm_ops mei_me_pm_ops = {
.prepare = mei_me_pci_prepare,
.complete = mei_me_pci_complete,
SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
mei_me_pci_resume)
SET_RUNTIME_PM_OPS(
mei_me_pm_runtime_suspend,
mei_me_pm_runtime_resume,
mei_me_pm_runtime_idle)
};
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
static struct pci_driver mei_me_driver = {
.name = KBUILD_MODNAME,
.id_table = mei_me_pci_tbl,
.probe = mei_me_probe,
.remove = mei_me_remove,
.shutdown = mei_me_shutdown,
.driver.pm = MEI_ME_PM_OPS,
.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
module_pci_driver(mei_me_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/mei/pci-me.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
*
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/mei_aux.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/kthread.h>
#include "mei_dev.h"
#include "hw-me.h"
#include "hw-me-regs.h"
#include "mei-trace.h"
#define MEI_GSC_RPM_TIMEOUT 500
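/* firmware status (HFS) registers are read from the GSC MMIO bar at a 0xC00 offset */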
static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val)
{
struct mei_me_hw *hw = to_me_hw(dev);
*val = ioread32(hw->mem_addr + where + 0xC00);
return 0;
}
static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem)
{
u32 low = lower_32_bits(mem->start);
u32 hi = upper_32_bits(mem->start);
u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID;
iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG);
iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG);
iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG);
}
static int mei_gsc_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *aux_dev_id)
{
struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
struct mei_device *dev;
struct mei_me_hw *hw;
struct device *device;
const struct mei_cfg *cfg;
int ret;
cfg = mei_me_get_cfg(aux_dev_id->driver_data);
if (!cfg)
return -ENODEV;
device = &aux_dev->dev;
dev = mei_me_dev_init(device, cfg, adev->slow_firmware);
if (!dev) {
ret = -ENOMEM;
goto err;
}
hw = to_me_hw(dev);
hw->mem_addr = devm_ioremap_resource(device, &adev->bar);
if (IS_ERR(hw->mem_addr)) {
ret = PTR_ERR(hw->mem_addr);
goto err;
}
hw->irq = adev->irq;
hw->read_fws = mei_gsc_read_hfs;
dev_set_drvdata(device, dev);
if (adev->ext_op_mem.start) {
mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
dev->pxp_mode = MEI_DEV_PXP_INIT;
}
/* use polling */
if (mei_me_hw_use_polling(hw)) {
mei_disable_interrupts(dev);
mei_clear_interrupts(dev);
init_waitqueue_head(&hw->wait_active);
hw->is_active = true; /* start in active mode for initialization */
hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
"kmegscirqd/%s", dev_name(device));
if (IS_ERR(hw->polling_thread)) {
ret = PTR_ERR(hw->polling_thread);
dev_err(device, "unable to create kernel thread: %d\n", ret);
goto err;
}
} else {
ret = devm_request_threaded_irq(device, hw->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
IRQF_ONESHOT, KBUILD_MODNAME, dev);
if (ret) {
dev_err(device, "irq register failed %d\n", ret);
goto err;
}
}
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
pm_runtime_enable(device);
/*
* Continue to char device setup in spite of firmware handshake failure,
* in order to provide user space access to the firmware status registers
* via sysfs.
*/
if (mei_start(dev))
dev_warn(device, "init hw failure.\n");
pm_runtime_set_autosuspend_delay(device, MEI_GSC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(device);
ret = mei_register(dev, device);
if (ret)
goto register_err;
pm_runtime_put_noidle(device);
return 0;
register_err:
mei_stop(dev);
if (!mei_me_hw_use_polling(hw))
devm_free_irq(device, hw->irq, dev);
err:
dev_err(device, "probe failed: %d\n", ret);
dev_set_drvdata(device, NULL);
return ret;
}
static void mei_gsc_remove(struct auxiliary_device *aux_dev)
{
struct mei_device *dev;
struct mei_me_hw *hw;
dev = dev_get_drvdata(&aux_dev->dev);
if (!dev)
return;
hw = to_me_hw(dev);
mei_stop(dev);
if (mei_me_hw_use_polling(hw))
kthread_stop(hw->polling_thread);
mei_deregister(dev);
pm_runtime_disable(&aux_dev->dev);
mei_disable_interrupts(dev);
if (!mei_me_hw_use_polling(hw))
devm_free_irq(&aux_dev->dev, hw->irq, dev);
}
static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
mei_stop(dev);
mei_disable_interrupts(dev);
return 0;
}
static int __maybe_unused mei_gsc_pm_resume(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
struct auxiliary_device *aux_dev;
struct mei_aux_device *adev;
int err;
struct mei_me_hw *hw;
if (!dev)
return -ENODEV;
hw = to_me_hw(dev);
aux_dev = to_auxiliary_dev(device);
adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
if (adev->ext_op_mem.start) {
mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
dev->pxp_mode = MEI_DEV_PXP_INIT;
}
err = mei_restart(dev);
if (err)
return err;
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
return 0;
}
static int __maybe_unused mei_gsc_pm_runtime_idle(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
return -EBUSY;
}
static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
struct mei_me_hw *hw;
int ret;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev)) {
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_ON;
if (mei_me_hw_use_polling(hw))
hw->is_active = false;
ret = 0;
} else {
ret = -EAGAIN;
}
mutex_unlock(&dev->device_lock);
return ret;
}
static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
struct mei_me_hw *hw;
irqreturn_t irq_ret;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_OFF;
if (mei_me_hw_use_polling(hw)) {
hw->is_active = true;
wake_up(&hw->wait_active);
}
mutex_unlock(&dev->device_lock);
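/* run the interrupt thread handler once to process events pending from power gating */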
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
dev_err(dev->dev, "thread handler fail %d\n", irq_ret);
return 0;
}
static const struct dev_pm_ops mei_gsc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mei_gsc_pm_suspend,
mei_gsc_pm_resume)
SET_RUNTIME_PM_OPS(mei_gsc_pm_runtime_suspend,
mei_gsc_pm_runtime_resume,
mei_gsc_pm_runtime_idle)
};
static const struct auxiliary_device_id mei_gsc_id_table[] = {
{
.name = "i915.mei-gsc",
.driver_data = MEI_ME_GSC_CFG,
},
{
.name = "i915.mei-gscfi",
.driver_data = MEI_ME_GSCFI_CFG,
},
{
/* sentinel */
}
};
MODULE_DEVICE_TABLE(auxiliary, mei_gsc_id_table);
static struct auxiliary_driver mei_gsc_driver = {
.probe = mei_gsc_probe,
.remove = mei_gsc_remove,
.driver = {
/* auxiliary_driver_register() sets .name to be the modname */
.pm = &mei_gsc_pm_ops,
},
.id_table = mei_gsc_id_table
};
module_auxiliary_driver(mei_gsc_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS("auxiliary:i915.mei-gsc");
MODULE_ALIAS("auxiliary:i915.mei-gscfi");
MODULE_DESCRIPTION("Intel(R) Graphics System Controller");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/mei/gsc-me.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2023, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
#include "mei_dev.h"
#include "client.h"
#include "mkhi.h"
#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
#define MEI_UUID_IGSC_MKHI UUID_LE(0xE2C2AFA2, 0x3817, 0x4D19, \
0x9D, 0x95, 0x06, 0xB1, 0x6B, 0x58, 0x8A, 0x5D)
#define MEI_UUID_IGSC_MKHI_FIX UUID_LE(0x46E0C1FB, 0xA546, 0x414F, \
0x91, 0x70, 0xB7, 0xF4, 0x6D, 0x57, 0xB4, 0xAD)
#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
#define MEI_UUID_PAVP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
#define MEI_UUID_ANY NULL_UUID_LE
/**
* number_of_connections - determine whether a client can be on the bus
* according to its number of connections.
* We support only clients:
* 1. with a single connection
* 2. and fixed clients (max_number_of_connections == 0)
*
* @cldev: me clients device
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
}
/**
* blacklist - blacklist a client from the bus
*
* @cldev: me clients device
*/
static void blacklist(struct mei_cl_device *cldev)
{
cldev->do_match = 0;
}
/**
* whitelist - forcefully whitelist client
*
* @cldev: me clients device
*/
static void whitelist(struct mei_cl_device *cldev)
{
cldev->do_match = 1;
}
#define OSTYPE_LINUX 2
struct mei_os_ver {
__le16 build;
__le16 reserved1;
u8 os_type;
u8 major;
u8 minor;
u8 reserved2;
} __packed;
struct mkhi_fw_ver_block {
u16 minor;
u8 major;
u8 platform;
u16 buildno;
u16 hotfix;
} __packed;
struct mkhi_fw_ver {
struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
} __packed;
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fwcaps) + \
sizeof(struct mei_os_ver))
static int mei_osver(struct mei_cl_device *cldev)
{
const size_t size = MKHI_OSVER_BUF_LEN;
u8 buf[MKHI_OSVER_BUF_LEN];
struct mkhi_msg *req;
struct mkhi_fwcaps *fwcaps;
struct mei_os_ver *os_ver;
unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL;
memset(buf, 0, size);
req = (struct mkhi_msg *)buf;
req->hdr.group_id = MKHI_FWCAPS_GROUP_ID;
req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD;
fwcaps = (struct mkhi_fwcaps *)req->data;
fwcaps->id.rule_type = 0x0;
fwcaps->id.feature_id = MKHI_FEATURE_PTT;
fwcaps->len = sizeof(*os_ver);
os_ver = (struct mei_os_ver *)fwcaps->data;
os_ver->os_type = OSTYPE_LINUX;
return __mei_cl_send(cldev->cl, buf, size, 0, mode);
}
#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fw_ver))
#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fw_ver_block) * (__num))
static int mei_fwver(struct mei_cl_device *cldev)
{
u8 buf[MKHI_FWVER_BUF_LEN];
struct mkhi_msg req;
struct mkhi_msg *rsp;
struct mkhi_fw_ver *fwver;
int bytes_recv, ret, i;
memset(buf, 0, sizeof(buf));
req.hdr.group_id = MKHI_GEN_GROUP_ID;
req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_info(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
return ret;
}
ret = 0;
bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
cldev->bus->timeouts.mkhi_recv);
if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
/*
* Should be at least one version block,
* error out if nothing found
*/
dev_info(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv);
return -EIO;
}
rsp = (struct mkhi_msg *)buf;
fwver = (struct mkhi_fw_ver *)rsp->data;
memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
break;
dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n",
i, fwver->ver[i].platform,
fwver->ver[i].major, fwver->ver[i].minor,
fwver->ver[i].hotfix, fwver->ver[i].buildno);
cldev->bus->fw_ver[i].platform = fwver->ver[i].platform;
cldev->bus->fw_ver[i].major = fwver->ver[i].major;
cldev->bus->fw_ver[i].minor = fwver->ver[i].minor;
cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix;
cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno;
}
cldev->bus->fw_ver_received = 1;
return ret;
}
#define GFX_MEMORY_READY_TIMEOUT 200 /* timeout in milliseconds */
static int mei_gfx_memory_ready(struct mei_cl_device *cldev)
{
struct mkhi_gfx_mem_ready req = {0};
unsigned int mode = MEI_CL_IO_TX_INTERNAL | MEI_CL_IO_TX_BLOCKING;
req.hdr.group_id = MKHI_GROUP_ID_GFX;
req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;
dev_dbg(&cldev->dev, "Sending memory ready command\n");
return __mei_cl_send_timeout(cldev->cl, (u8 *)&req, sizeof(req), 0,
mode, GFX_MEMORY_READY_TIMEOUT);
}
static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
/* No need to enable the client if nothing is needed from it */
if (!cldev->bus->fw_f_fw_ver_supported &&
!cldev->bus->hbm_f_os_supported)
return;
ret = mei_cldev_enable(cldev);
if (ret)
return;
if (cldev->bus->fw_f_fw_ver_supported) {
ret = mei_fwver(cldev);
if (ret < 0)
dev_info(&cldev->dev, "FW version command failed %d\n",
ret);
}
if (cldev->bus->hbm_f_os_supported) {
ret = mei_osver(cldev);
if (ret < 0)
dev_info(&cldev->dev, "OS version command failed %d\n",
ret);
}
mei_cldev_disable(cldev);
}
static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
{
int ret;
/*
* No need to enable the client if nothing is needed from it.
* No need to fill in version if it is already filled in by the fix address client.
*/
if (!cldev->bus->fw_f_fw_ver_supported || cldev->bus->fw_ver_received)
return;
ret = mei_cldev_enable(cldev);
if (ret)
return;
ret = mei_fwver(cldev);
if (ret < 0)
dev_info(&cldev->dev, "FW version command failed %d\n", ret);
mei_cldev_disable(cldev);
}
static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev)
{
int ret;
/* No need to enable the client if nothing is needed from it */
if (!cldev->bus->fw_f_fw_ver_supported &&
cldev->bus->pxp_mode != MEI_DEV_PXP_INIT)
return;
ret = mei_cldev_enable(cldev);
if (ret)
return;
if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) {
ret = mei_gfx_memory_ready(cldev);
if (ret < 0) {
dev_err(&cldev->dev, "memory ready command failed %d\n", ret);
} else {
dev_dbg(&cldev->dev, "memory ready command sent\n");
cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP;
}
/* we go to reset after that */
goto out;
}
ret = mei_fwver(cldev);
if (ret < 0)
dev_info(&cldev->dev, "FW version command failed %d\n",
ret);
out:
mei_cldev_disable(cldev);
}
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
*
* @cldev: me clients device
*/
#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
#include <linux/pci.h>
#include "hw-me-regs.h"
static void mei_wd(struct mei_cl_device *cldev)
{
struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
if (pdev->device == MEI_DEV_ID_WPT_LP ||
pdev->device == MEI_DEV_ID_SPT ||
pdev->device == MEI_DEV_ID_SPT_H)
cldev->me_cl->props.protocol_version = 0x2;
cldev->do_match = 1;
}
#else
static inline void mei_wd(struct mei_cl_device *cldev) {}
#endif /* CONFIG_INTEL_MEI_ME */
struct mei_nfc_cmd {
u8 command;
u8 status;
u16 req_id;
u32 reserved;
u16 data_size;
u8 sub_command;
u8 data[];
} __packed;
struct mei_nfc_reply {
u8 command;
u8 status;
u16 req_id;
u32 reserved;
u16 data_size;
u8 sub_command;
u8 reply_status;
u8 data[];
} __packed;
struct mei_nfc_if_version {
u8 radio_version_sw[3];
u8 reserved[3];
u8 radio_version_hw[3];
u8 i2c_addr;
u8 fw_ivn;
u8 vendor_id;
u8 radio_type;
} __packed;
#define MEI_NFC_CMD_MAINTENANCE 0x00
#define MEI_NFC_SUBCMD_IF_VERSION 0x01
/* Vendors */
#define MEI_NFC_VENDOR_INSIDE 0x00
#define MEI_NFC_VENDOR_NXP 0x01
/* Radio types */
#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
#define MEI_NFC_VENDOR_NXP_PN544 0x01
/**
* mei_nfc_if_version - get NFC interface version
*
* @cl: host client (nfc info)
* @ver: NFC interface version to be filled in
*
* Return: 0 on success; < 0 otherwise
*/
static int mei_nfc_if_version(struct mei_cl *cl,
struct mei_nfc_if_version *ver)
{
struct mei_device *bus;
struct mei_nfc_cmd cmd = {
.command = MEI_NFC_CMD_MAINTENANCE,
.data_size = 1,
.sub_command = MEI_NFC_SUBCMD_IF_VERSION,
};
struct mei_nfc_reply *reply = NULL;
size_t if_version_length;
u8 vtag;
int bytes_recv, ret;
bus = cl->dev;
WARN_ON(mutex_is_locked(&bus->device_lock));
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
return ret;
}
/* to be sure on the stack we alloc memory */
if_version_length = sizeof(*reply) + sizeof(*ver);
reply = kzalloc(if_version_length, GFP_KERNEL);
if (!reply)
return -ENOMEM;
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
0, 0);
if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
ret = -EIO;
goto err;
}
memcpy(ver, reply->data, sizeof(*ver));
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
err:
kfree(reply);
return ret;
}
/**
* mei_nfc_radio_name - derive nfc radio name from the interface version
*
* @ver: NFC radio version
*
* Return: radio name string
*/
static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
{
if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
return "microread";
}
if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
return "pn544";
}
return NULL;
}
/**
* mei_nfc - The nfc fixup function. The function retrieves the nfc radio
* name and sets it as a device attribute so we can load
* the proper device driver for it
*
* @cldev: me client device (nfc)
*/
static void mei_nfc(struct mei_cl_device *cldev)
{
struct mei_device *bus;
struct mei_cl *cl;
struct mei_me_client *me_cl = NULL;
struct mei_nfc_if_version ver;
const char *radio_name = NULL;
int ret;
bus = cldev->bus;
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
cl = mei_cl_alloc_linked(bus);
if (IS_ERR(cl)) {
ret = PTR_ERR(cl);
cl = NULL;
dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
goto out;
}
me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
if (!me_cl) {
ret = -ENOTTY;
dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
goto out;
}
ret = mei_cl_connect(cl, me_cl, NULL);
if (ret < 0) {
dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
ret);
goto out;
}
mutex_unlock(&bus->device_lock);
ret = mei_nfc_if_version(cl, &ver);
if (ret)
goto disconnect;
radio_name = mei_nfc_radio_name(&ver);
if (!radio_name) {
ret = -ENOENT;
dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
ret);
goto disconnect;
}
dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
strscpy(cldev->name, radio_name, sizeof(cldev->name));
disconnect:
mutex_lock(&bus->device_lock);
if (mei_cl_disconnect(cl) < 0)
dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
mei_cl_flush_queues(cl, NULL);
out:
mei_cl_unlink(cl);
mutex_unlock(&bus->device_lock);
mei_me_cl_put(me_cl);
kfree(cl);
if (ret)
cldev->do_match = 0;
dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
}
/**
* vt_support - enable on-bus clients with vtag support
*
* @cldev: me clients device
*/
static void vt_support(struct mei_cl_device *cldev)
{
if (cldev->me_cl->props.vt_supported == 1)
cldev->do_match = 1;
}
/**
* pxp_is_ready - enable bus client if pxp is ready
*
* @cldev: me clients device
*/
static void pxp_is_ready(struct mei_cl_device *cldev)
{
struct mei_device *bus = cldev->bus;
switch (bus->pxp_mode) {
case MEI_DEV_PXP_READY:
case MEI_DEV_PXP_DEFAULT:
cldev->do_match = 1;
break;
default:
cldev->do_match = 0;
break;
}
}
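/*
* Fixup hooks are applied in table order; MEI_UUID_ANY entries match
* every client, specific UUIDs match only the corresponding client.
*/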
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
static struct mei_fixup {
const uuid_le uuid;
void (*hook)(struct mei_cl_device *cldev);
} mei_fixups[] = {
MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver),
MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
MEI_FIXUP(MEI_UUID_ANY, vt_support),
MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready),
};
/**
* mei_cl_bus_dev_fixup - run fixup handlers
*
* @cldev: me client device
*/
void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev)
{
struct mei_fixup *f;
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
size_t i;
for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
f = &mei_fixups[i];
if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 ||
uuid_le_cmp(f->uuid, *uuid) == 0)
f->hook(cldev);
}
}
| linux-master | drivers/misc/mei/bus-fixup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022-2023 Intel Corporation
*/
/**
* DOC: MEI_GSC_PROXY Client Driver
*
* The mei_gsc_proxy driver acts as a proxy between the GSC proxy user
* (I915) and the ME FW, forwarding messages to and from the ME FW
*/
#include <linux/component.h>
#include <linux/mei_cl_bus.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <drm/drm_connector.h>
#include <drm/i915_component.h>
#include <drm/i915_gsc_proxy_mei_interface.h>
/**
* mei_gsc_proxy_send - Sends a proxy message to ME FW.
* @dev: device corresponding to the mei_cl_device
* @buf: a message buffer to send
* @size: size of the message
* Return: bytes sent on Success, <0 on Failure
*/
static int mei_gsc_proxy_send(struct device *dev, const void *buf, size_t size)
{
ssize_t ret;
if (!dev || !buf)
return -EINVAL;
ret = mei_cldev_send(to_mei_cl_device(dev), buf, size);
if (ret < 0)
dev_dbg(dev, "mei_cldev_send failed. %zd\n", ret);
return ret;
}
/**
* mei_gsc_proxy_recv - Receives a proxy message from ME FW.
* @dev: device corresponding to the mei_cl_device
* @buf: a message buffer to contain the received message
* @size: size of the buffer
* Return: bytes received on Success, <0 on Failure
*/
static int mei_gsc_proxy_recv(struct device *dev, void *buf, size_t size)
{
ssize_t ret;
if (!dev || !buf)
return -EINVAL;
ret = mei_cldev_recv(to_mei_cl_device(dev), buf, size);
if (ret < 0)
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", ret);
return ret;
}
static const struct i915_gsc_proxy_component_ops mei_gsc_proxy_ops = {
.owner = THIS_MODULE,
.send = mei_gsc_proxy_send,
.recv = mei_gsc_proxy_recv,
};
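/*
 * Illustrative sketch (not part of the upstream driver): after the component
 * master binds, the proxy user gets this ops table together with the mei
 * device pointer (mei_component_master_bind() below fills comp_master->ops
 * and comp_master->mei_dev). A consumer call would then look roughly like
 * the following, where 'comp', 'req' and 'reply' are hypothetical names:
 *
 *	sent = comp->ops->send(comp->mei_dev, req, req_size);
 *	rcvd = comp->ops->recv(comp->mei_dev, reply, reply_size);
 *
 * Both ops return a byte count on success and a negative errno on failure.
 */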
static int mei_component_master_bind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
comp_master->ops = &mei_gsc_proxy_ops;
comp_master->mei_dev = dev;
return component_bind_all(dev, comp_master);
}
static void mei_component_master_unbind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
component_unbind_all(dev, comp_master);
}
static const struct component_master_ops mei_component_master_ops = {
.bind = mei_component_master_bind,
.unbind = mei_component_master_unbind,
};
/**
* mei_gsc_proxy_component_match - compare function for matching mei.
*
* The function checks that the device is a PCI device and an Intel VGA
* adapter, that the subcomponent is the GSC proxy, and that the parent of
* the MEI PCI device and the parent of the VGA adapter are the same PCH
* device.
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_GSC_PROXY)
* @data: compare data (parent of the MEI client device, i.e. the MEI PCI device)
*
* Return:
* * 1 - if components match
* * 0 - otherwise
*/
static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
void *data)
{
struct pci_dev *pdev;
if (!dev_is_pci(dev))
return 0;
pdev = to_pci_dev(dev);
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
pdev->vendor != PCI_VENDOR_ID_INTEL)
return 0;
if (subcomponent != I915_COMPONENT_GSC_PROXY)
return 0;
return component_compare_dev(dev->parent, ((struct device *)data)->parent);
}
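/*
 * Descriptive note (not upstream text): the match above encodes a "same
 * parent" topology check. data is the parent of the MEI client device
 * (expected to be the MEI PCI function), so a match requires:
 *
 *	PCH (common parent)
 *	+-- Intel VGA PCI device (dev, bound to i915)
 *	+-- MEI PCI device (data), parent of the mei_cl_device
 *
 * component_compare_dev(dev->parent, data->parent) is exactly that
 * comparison.
 */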
static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct i915_gsc_proxy_component *comp_master;
struct component_match *master_match = NULL;
int ret;
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
goto enable_err_exit;
}
comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
if (!comp_master) {
ret = -ENOMEM;
goto err_exit;
}
component_match_add_typed(&cldev->dev, &master_match,
mei_gsc_proxy_component_match, cldev->dev.parent);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
}
mei_cldev_set_drvdata(cldev, comp_master);
ret = component_master_add_with_match(&cldev->dev,
&mei_component_master_ops,
master_match);
if (ret < 0) {
dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
goto err_exit;
}
return 0;
err_exit:
mei_cldev_set_drvdata(cldev, NULL);
kfree(comp_master);
mei_cldev_disable(cldev);
enable_err_exit:
return ret;
}
static void mei_gsc_proxy_remove(struct mei_cl_device *cldev)
{
struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
kfree(comp_master);
mei_cldev_set_drvdata(cldev, NULL);
ret = mei_cldev_disable(cldev);
if (ret)
dev_warn(&cldev->dev, "mei_cldev_disable() failed %d\n", ret);
}
#define MEI_UUID_GSC_PROXY UUID_LE(0xf73db04, 0x97ab, 0x4125, \
0xb8, 0x93, 0xe9, 0x4, 0xad, 0xd, 0x54, 0x64)
static struct mei_cl_device_id mei_gsc_proxy_tbl[] = {
{ .uuid = MEI_UUID_GSC_PROXY, .version = MEI_CL_VERSION_ANY },
{ }
};
MODULE_DEVICE_TABLE(mei, mei_gsc_proxy_tbl);
static struct mei_cl_driver mei_gsc_proxy_driver = {
.id_table = mei_gsc_proxy_tbl,
.name = KBUILD_MODNAME,
.probe = mei_gsc_proxy_probe,
.remove = mei_gsc_proxy_remove,
};
module_mei_cl_driver(mei_gsc_proxy_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MEI GSC PROXY");
| linux-master | drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2020 - 2021 Intel Corporation
*/
/**
* DOC: MEI_PXP Client Driver
*
* The mei_pxp driver acts as a translation layer between PXP
* protocol implementer (I915) and ME FW by translating PXP
* negotiation messages to ME FW command payloads and vice versa.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
#include <linux/component.h>
#include <drm/drm_connector.h>
#include <drm/i915_component.h>
#include <drm/i915_pxp_tee_interface.h>
#include "mei_pxp.h"
/**
* mei_pxp_send_message() - Sends a PXP message to ME FW.
* @dev: device corresponding to the mei_cl_device
* @message: a message buffer to send
* @size: size of the message
* Return: 0 on Success, <0 on Failure
*/
static int
mei_pxp_send_message(struct device *dev, const void *message, size_t size)
{
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !message)
return -EINVAL;
cldev = to_mei_cl_device(dev);
byte = mei_cldev_send(cldev, message, size);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
return 0;
}
/**
* mei_pxp_receive_message() - Receives a PXP message from ME FW.
* @dev: device corresponding to the mei_cl_device
* @buffer: a message buffer to contain the received message
* @size: size of the buffer
* Return: bytes received on Success, <0 on Failure
*/
static int
mei_pxp_receive_message(struct device *dev, void *buffer, size_t size)
{
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !buffer)
return -EINVAL;
cldev = to_mei_cl_device(dev);
byte = mei_cldev_recv(cldev, buffer, size);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
return byte;
}
/**
* mei_pxp_gsc_command() - sends a gsc command, by sending
* a sgl mei message to gsc and receiving reply from gsc
*
* @dev: device corresponding to the mei_cl_device
* @client_id: client id to send the command to
* @fence_id: fence id to send the command to
* @sg_in: scatter gather list containing addresses of the message buffer sent to the firmware
* @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
* @sg_out: scatter gather list containing addresses of the buffer that receives the firmware reply
*
* Return: bytes sent on Success, <0 on Failure
*/
static ssize_t mei_pxp_gsc_command(struct device *dev, u8 client_id, u32 fence_id,
struct scatterlist *sg_in, size_t total_in_len,
struct scatterlist *sg_out)
{
struct mei_cl_device *cldev;
cldev = to_mei_cl_device(dev);
return mei_cldev_send_gsc_command(cldev, client_id, fence_id, sg_in, total_in_len, sg_out);
}
static const struct i915_pxp_component_ops mei_pxp_ops = {
.owner = THIS_MODULE,
.send = mei_pxp_send_message,
.recv = mei_pxp_receive_message,
.gsc_command = mei_pxp_gsc_command,
};
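/*
 * Descriptive note (not upstream text): unlike the GSC proxy ops, the .send
 * op above returns 0 on success (the byte count from mei_cldev_send() is
 * discarded), while .recv returns the number of bytes received. A rough
 * consumer-side sketch, with hypothetical names 'comp', 'msg' and 'reply':
 *
 *	err = comp->ops->send(comp->tee_dev, msg, msg_size);
 *	if (!err)
 *		n = comp->ops->recv(comp->tee_dev, reply, sizeof(reply));
 */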
static int mei_component_master_bind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
comp_master->ops = &mei_pxp_ops;
comp_master->tee_dev = dev;
ret = component_bind_all(dev, comp_master);
if (ret < 0)
return ret;
return 0;
}
static void mei_component_master_unbind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
component_unbind_all(dev, comp_master);
}
static const struct component_master_ops mei_component_master_ops = {
.bind = mei_component_master_bind,
.unbind = mei_component_master_unbind,
};
/**
* mei_pxp_component_match - compare function for matching mei pxp.
*
* The function checks that the driver is i915 and the subcomponent is PXP,
* and that either the grandparent of the mei_pxp device is the i915 PCI
* device itself (discrete graphics) or the grandparent of the mei_pxp
* device and the parent of the i915 device are the same PCH device
* (integrated graphics).
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_PXP)
* @data: compare data (mei pxp device)
*
* Return:
* * 1 - if components match
* * 0 - otherwise
*/
static int mei_pxp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
if (!dev)
return 0;
if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_PXP)
return 0;
base = base->parent;
if (!base) /* mei device */
return 0;
base = base->parent; /* pci device */
/* for dgfx */
if (base && dev == base)
return 1;
/* for pch */
dev = dev->parent;
return (base && dev && dev == base);
}
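/*
 * Descriptive note (not upstream text): two topologies are accepted above.
 * On discrete graphics the MEI interface is an auxiliary device created on
 * the card itself, so walking two parents up from the mei_pxp device lands
 * on the i915 PCI device ("dev == base"). On integrated graphics the walk
 * lands on the PCH, which must also be the parent of the i915 PCI device
 * ("dev->parent == base").
 */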
static int mei_pxp_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct i915_pxp_component *comp_master;
struct component_match *master_match;
int ret;
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
goto enable_err_exit;
}
comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
if (!comp_master) {
ret = -ENOMEM;
goto err_exit;
}
master_match = NULL;
component_match_add_typed(&cldev->dev, &master_match,
mei_pxp_component_match, &cldev->dev);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
}
mei_cldev_set_drvdata(cldev, comp_master);
ret = component_master_add_with_match(&cldev->dev,
&mei_component_master_ops,
master_match);
if (ret < 0) {
dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
goto err_exit;
}
return 0;
err_exit:
mei_cldev_set_drvdata(cldev, NULL);
kfree(comp_master);
mei_cldev_disable(cldev);
enable_err_exit:
return ret;
}
static void mei_pxp_remove(struct mei_cl_device *cldev)
{
struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
kfree(comp_master);
mei_cldev_set_drvdata(cldev, NULL);
ret = mei_cldev_disable(cldev);
if (ret)
dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
}
/* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID */
#define MEI_GUID_PXP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
static struct mei_cl_device_id mei_pxp_tbl[] = {
{ .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
{ }
};
MODULE_DEVICE_TABLE(mei, mei_pxp_tbl);
static struct mei_cl_driver mei_pxp_driver = {
.id_table = mei_pxp_tbl,
.name = KBUILD_MODNAME,
.probe = mei_pxp_probe,
.remove = mei_pxp_remove,
};
module_mei_cl_driver(mei_pxp_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MEI PXP");
| linux-master | drivers/misc/mei/pxp/mei_pxp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2019 Intel Corporation
*
* mei_hdcp.c: HDCP client driver for mei bus
*
* Author:
* Ramalingam C <[email protected]>
*/
/**
* DOC: MEI_HDCP Client Driver
*
* The mei_hdcp driver acts as a translation layer between HDCP 2.2
* protocol implementer (I915) and ME FW by translating HDCP2.2
* negotiation messages to ME FW command payloads and vice versa.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
#include <linux/component.h>
#include <drm/drm_connector.h>
#include <drm/i915_component.h>
#include <drm/i915_hdcp_interface.h>
#include "mei_hdcp.h"
/**
* mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @ake_data: AKE_Init msg output.
*
* Return: 0 on Success, <0 on Failure.
*/
static int
mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
struct wired_cmd_initiate_hdcp2_session_out
session_init_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !ake_data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_init_in.header.buffer_len =
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
session_init_in.port.physical_port = (u8)data->hdcp_ddi;
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
sizeof(session_init_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&session_init_out,
sizeof(session_init_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_INITIATE_HDCP2_SESSION,
session_init_out.header.status);
return -EIO;
}
ake_data->msg_id = HDCP_2_2_AKE_INIT;
ake_data->tx_caps = session_init_out.tx_caps;
memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
return 0;
}
/**
* mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate
* AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @rx_cert: AKE_Send_Cert for verification
* @km_stored: Pairing status flag output
* @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg
* @msg_sz: size of AKE_XXXXX_Km output msg
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
bool *km_stored,
struct hdcp2_ake_no_stored_km
*ek_pub_km,
size_t *msg_sz)
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
return -EINVAL;
cldev = to_mei_cl_device(dev);
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_rxcert_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in,
sizeof(verify_rxcert_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed: %zd\n", byte);
return byte;
}
if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_VERIFY_RECEIVER_CERT,
verify_rxcert_out.header.status);
return -EIO;
}
*km_stored = !!verify_rxcert_out.km_stored;
if (verify_rxcert_out.km_stored) {
ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
*msg_sz = sizeof(struct hdcp2_ake_stored_km);
} else {
ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
*msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
}
memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
sizeof(verify_rxcert_out.ekm_buff));
return 0;
}
/**
* mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @rx_hprime: AKE_Send_H_prime msg for ME FW verification
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !rx_hprime)
return -EINVAL;
cldev = to_mei_cl_device(dev);
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in,
sizeof(send_hprime_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
return -EIO;
}
return 0;
}
/**
* mei_hdcp_store_pairing_info() - Store pairing info received at ME FW
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @pairing_info: AKE_Send_Pairing_Info msg input to ME FW
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !pairing_info)
return -EINVAL;
cldev = to_mei_cl_device(dev);
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
pairing_info_in.header.buffer_len =
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in,
sizeof(pairing_info_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
WIRED_AKE_SEND_PAIRING_INFO,
pairing_info_out.header.status);
return -EIO;
}
return 0;
}
/**
* mei_hdcp_initiate_locality_check() - Prepare LC_Init
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @lc_init_data: LC_Init msg output
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !lc_init_data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
return -EIO;
}
lc_init_data->msg_id = HDCP_2_2_LC_INIT;
memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
return 0;
}
/**
* mei_hdcp_verify_lprime() - Verify lprime.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @rx_lprime: LC_Send_L_prime msg for ME FW verification
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !rx_lprime)
return -EINVAL;
cldev = to_mei_cl_device(dev);
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_lprime_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in,
sizeof(verify_lprime_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VALIDATE_LOCALITY,
verify_lprime_out.header.status);
return -EIO;
}
return 0;
}
/**
* mei_hdcp_get_session_key() - Prepare SKE_Send_Eks.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @ske_data: SKE_Send_Eks msg output from ME FW.
*
* Return: 0 on Success, <0 on Failure
*/
static int mei_hdcp_get_session_key(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data || !ske_data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_GET_SESSION_KEY, get_skey_out.header.status);
return -EIO;
}
ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
HDCP_2_2_E_DKEY_KS_LEN);
memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
return 0;
}
/**
* mei_hdcp_repeater_check_flow_prepare_ack() - Validate the Downstream topology
* and prepare rep_ack.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @rep_topology: Receiver ID List to be validated
* @rep_send_ack: repeater ack from ME FW.
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
*rep_topology,
struct hdcp2_rep_send_ack
*rep_send_ack)
{
struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_repeater_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
HDCP_2_2_SEQ_NUM_LEN);
memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
HDCP_2_2_V_PRIME_HALF_LEN);
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in,
sizeof(verify_repeater_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VERIFY_REPEATER,
verify_repeater_out.header.status);
return -EIO;
}
memcpy(rep_send_ack->v, verify_repeater_out.v,
HDCP_2_2_V_PRIME_HALF_LEN);
rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
return 0;
}
/**
* mei_hdcp_verify_mprime() - Verify mprime.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
* @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification.
*
* Return: 0 on Success, <0 on Failure
*/
static int mei_hdcp_verify_mprime(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out
verify_mprime_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
size_t cmd_size;
if (!dev || !stream_ready || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
return -EINVAL;
verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
if (!verify_mprime_in)
return -ENOMEM;
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in->streams, data->streams,
array_size(data->k, sizeof(*data->streams)));
verify_mprime_in->k = cpu_to_be16(data->k);
byte = mei_cldev_send(cldev, (u8 *)verify_mprime_in, cmd_size);
kfree(verify_mprime_in);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out,
sizeof(verify_mprime_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_REPEATER_AUTH_STREAM_REQ,
verify_mprime_out.header.status);
return -EIO;
}
return 0;
}
/**
* mei_hdcp_enable_authentication() - Mark a port as authenticated
* through ME FW
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
*
* Return: 0 on Success, <0 on Failure
*/
static int mei_hdcp_enable_authentication(struct device *dev,
struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
sizeof(enable_auth_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_ENABLE_AUTH, enable_auth_out.header.status);
return -EIO;
}
return 0;
}
/**
* mei_hdcp_close_session() - Close the Wired HDCP Tx session of ME FW per port.
* This also disables the authenticated state of the port.
* @dev: device corresponding to the mei_cl_device
* @data: Intel HW specific hdcp data
*
* Return: 0 on Success, <0 on Failure
*/
static int
mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = { { 0 } };
struct wired_cmd_close_session_out session_close_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_close_in.header.buffer_len =
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
}
byte = mei_cldev_recv(cldev, (u8 *)&session_close_out,
sizeof(session_close_out));
if (byte < 0) {
dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
return byte;
}
if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
session_close_out.header.status);
return -EIO;
}
return 0;
}
static const struct i915_hdcp_ops mei_hdcp_ops = {
.owner = THIS_MODULE,
.initiate_hdcp2_session = mei_hdcp_initiate_session,
.verify_receiver_cert_prepare_km =
mei_hdcp_verify_receiver_cert_prepare_km,
.verify_hprime = mei_hdcp_verify_hprime,
.store_pairing_info = mei_hdcp_store_pairing_info,
.initiate_locality_check = mei_hdcp_initiate_locality_check,
.verify_lprime = mei_hdcp_verify_lprime,
.get_session_key = mei_hdcp_get_session_key,
.repeater_check_flow_prepare_ack =
mei_hdcp_repeater_check_flow_prepare_ack,
.verify_mprime = mei_hdcp_verify_mprime,
.enable_hdcp_authentication = mei_hdcp_enable_authentication,
.close_hdcp_session = mei_hdcp_close_session,
};
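/*
 * Descriptive note (not upstream text): every op above follows the same wire
 * pattern against the ME FW: fill a wired_cmd_*_in structure (api_version,
 * command_id, buffer_len and the port fields), mei_cldev_send() it, then
 * mei_cldev_recv() the matching wired_cmd_*_out structure and fail with
 * -EIO unless header.status is FW_HDCP_STATUS_SUCCESS. Condensed shape of
 * that sequence, with hypothetical 'in'/'out' variables:
 *
 *	in.header.api_version = HDCP_API_VERSION;
 *	in.header.command_id = WIRED_<COMMAND>;
 *	in.header.buffer_len = WIRED_CMD_BUF_LEN_<COMMAND>_IN;
 *	byte = mei_cldev_send(cldev, (u8 *)&in, sizeof(in));
 *	if (byte < 0)
 *		return byte;
 *	byte = mei_cldev_recv(cldev, (u8 *)&out, sizeof(out));
 *	if (byte < 0)
 *		return byte;
 *	if (out.header.status != FW_HDCP_STATUS_SUCCESS)
 *		return -EIO;
 */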
static int mei_component_master_bind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev);
int ret;
dev_dbg(dev, "%s\n", __func__);
comp_arbiter->ops = &mei_hdcp_ops;
comp_arbiter->hdcp_dev = dev;
ret = component_bind_all(dev, comp_arbiter);
if (ret < 0)
return ret;
return 0;
}
static void mei_component_master_unbind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev);
dev_dbg(dev, "%s\n", __func__);
component_unbind_all(dev, comp_arbiter);
}
static const struct component_master_ops mei_component_master_ops = {
.bind = mei_component_master_bind,
.unbind = mei_component_master_unbind,
};
/**
* mei_hdcp_component_match - compare function for matching mei hdcp.
*
* The function checks that the driver is i915, that the subcomponent is HDCP,
* and that the grandparent of the mei_hdcp device and the parent of the i915
* device are the same PCH device.
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_HDCP)
* @data: compare data (mei hdcp device)
*
* Return:
* * 1 - if components match
* * 0 - otherwise
*/
static int mei_hdcp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
if (!dev->driver || strcmp(dev->driver->name, "i915") ||
subcomponent != I915_COMPONENT_HDCP)
return 0;
base = base->parent;
if (!base)
return 0;
base = base->parent;
dev = dev->parent;
return (base && dev && dev == base);
}
static int mei_hdcp_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct i915_hdcp_arbiter *comp_arbiter;
struct component_match *master_match;
int ret;
ret = mei_cldev_enable(cldev);
if (ret < 0) {
dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
goto enable_err_exit;
}
comp_arbiter = kzalloc(sizeof(*comp_arbiter), GFP_KERNEL);
if (!comp_arbiter) {
ret = -ENOMEM;
goto err_exit;
}
master_match = NULL;
component_match_add_typed(&cldev->dev, &master_match,
mei_hdcp_component_match, &cldev->dev);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
}
mei_cldev_set_drvdata(cldev, comp_arbiter);
ret = component_master_add_with_match(&cldev->dev,
&mei_component_master_ops,
master_match);
if (ret < 0) {
dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
goto err_exit;
}
return 0;
err_exit:
mei_cldev_set_drvdata(cldev, NULL);
kfree(comp_arbiter);
mei_cldev_disable(cldev);
enable_err_exit:
return ret;
}
static void mei_hdcp_remove(struct mei_cl_device *cldev)
{
struct i915_hdcp_arbiter *comp_arbiter = mei_cldev_get_drvdata(cldev);
int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
kfree(comp_arbiter);
mei_cldev_set_drvdata(cldev, NULL);
ret = mei_cldev_disable(cldev);
if (ret)
dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
}
#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
static const struct mei_cl_device_id mei_hdcp_tbl[] = {
{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
{ }
};
MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl);
static struct mei_cl_driver mei_hdcp_driver = {
.id_table = mei_hdcp_tbl,
.name = KBUILD_MODNAME,
.probe = mei_hdcp_probe,
.remove = mei_hdcp_remove,
};
module_mei_cl_driver(mei_hdcp_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MEI HDCP");
| linux-master | drivers/misc/mei/hdcp/mei_hdcp.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022 Microchip Technology Inc.
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/idr.h>
#include "mchp_pci1xxxx_gp.h"
struct aux_bus_device {
struct auxiliary_device_wrapper *aux_device_wrapper[2];
};
static DEFINE_IDA(gp_client_ida);
static const char aux_dev_otp_e2p_name[15] = "gp_otp_e2p";
static const char aux_dev_gpio_name[15] = "gp_gpio";
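/*
 * Descriptive note (not upstream text): the auxiliary bus match name is
 * "<module name>.<device name>", so the two devices created below are
 * expected to match client id_table entries such as
 * "mchp_pci1xxxx_gp.gp_otp_e2p" and "mchp_pci1xxxx_gp.gp_gpio"; the former
 * appears in the OTP/EEPROM driver's id table further down.
 */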
static void gp_auxiliary_device_release(struct device *dev)
{
struct auxiliary_device_wrapper *aux_device_wrapper =
(struct auxiliary_device_wrapper *)container_of(dev,
struct auxiliary_device_wrapper, aux_dev.dev);
ida_free(&gp_client_ida, aux_device_wrapper->aux_dev.id);
kfree(aux_device_wrapper);
}
static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct aux_bus_device *aux_bus;
int retval;
retval = pcim_enable_device(pdev);
if (retval)
return retval;
aux_bus = devm_kzalloc(&pdev->dev, sizeof(*aux_bus), GFP_KERNEL);
if (!aux_bus)
return -ENOMEM;
aux_bus->aux_device_wrapper[0] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[0]),
GFP_KERNEL);
if (!aux_bus->aux_device_wrapper[0])
return -ENOMEM;
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
goto err_ida_alloc_0;
aux_bus->aux_device_wrapper[0]->aux_dev.name = aux_dev_otp_e2p_name;
aux_bus->aux_device_wrapper[0]->aux_dev.dev.parent = &pdev->dev;
aux_bus->aux_device_wrapper[0]->aux_dev.dev.release = gp_auxiliary_device_release;
aux_bus->aux_device_wrapper[0]->aux_dev.id = retval;
aux_bus->aux_device_wrapper[0]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
aux_bus->aux_device_wrapper[0]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[0]->aux_dev);
if (retval < 0)
goto err_aux_dev_init_0;
retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[0]->aux_dev);
if (retval)
goto err_aux_dev_add_0;
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
if (!aux_bus->aux_device_wrapper[1])
return -ENOMEM;
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
goto err_ida_alloc_1;
aux_bus->aux_device_wrapper[1]->aux_dev.name = aux_dev_gpio_name;
aux_bus->aux_device_wrapper[1]->aux_dev.dev.parent = &pdev->dev;
aux_bus->aux_device_wrapper[1]->aux_dev.dev.release = gp_auxiliary_device_release;
aux_bus->aux_device_wrapper[1]->aux_dev.id = retval;
aux_bus->aux_device_wrapper[1]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
aux_bus->aux_device_wrapper[1]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (retval < 0)
goto err_aux_dev_init_1;
retval = pci_irq_vector(pdev, 0);
if (retval < 0)
goto err_aux_dev_init_1;
pdev->irq = retval;
aux_bus->aux_device_wrapper[1]->gp_aux_data.irq_num = pdev->irq;
retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[1]->aux_dev);
if (retval < 0)
goto err_aux_dev_init_1;
retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[1]->aux_dev);
if (retval)
goto err_aux_dev_add_1;
pci_set_drvdata(pdev, aux_bus);
pci_set_master(pdev);
return 0;
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
err_ida_alloc_1:
kfree(aux_bus->aux_device_wrapper[1]);
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
return retval;
}
static void gp_aux_bus_remove(struct pci_dev *pdev)
{
struct aux_bus_device *aux_bus = pci_get_drvdata(pdev);
auxiliary_device_delete(&aux_bus->aux_device_wrapper[0]->aux_dev);
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
auxiliary_device_delete(&aux_bus->aux_device_wrapper[1]->aux_dev);
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
}
static const struct pci_device_id pci1xxxx_tbl[] = {
{ PCI_DEVICE(0x1055, 0xA005) },
{ PCI_DEVICE(0x1055, 0xA015) },
{ PCI_DEVICE(0x1055, 0xA025) },
{ PCI_DEVICE(0x1055, 0xA035) },
{ PCI_DEVICE(0x1055, 0xA045) },
{ PCI_DEVICE(0x1055, 0xA055) },
{0,}
};
MODULE_DEVICE_TABLE(pci, pci1xxxx_tbl);
static struct pci_driver pci1xxxx_gp_driver = {
.name = "PCI1xxxxGP",
.id_table = pci1xxxx_tbl,
.probe = gp_aux_bus_probe,
.remove = gp_aux_bus_remove,
};
module_pci_driver(pci1xxxx_gp_driver);
MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GP expander");
MODULE_AUTHOR("Kumaravel Thiagarajan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022-2023 Microchip Technology Inc.
// PCI1xxxx OTP/EEPROM driver
#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include "mchp_pci1xxxx_gp.h"
#define AUX_DRIVER_NAME "PCI1xxxxOTPE2P"
#define EEPROM_NAME "pci1xxxx_eeprom"
#define OTP_NAME "pci1xxxx_otp"
#define PERI_PF3_SYSTEM_REG_ADDR_BASE 0x2000
#define PERI_PF3_SYSTEM_REG_LENGTH 0x4000
#define EEPROM_SIZE_BYTES 8192
#define OTP_SIZE_BYTES 8192
#define CONFIG_REG_ADDR_BASE 0
#define EEPROM_REG_ADDR_BASE 0x0E00
#define OTP_REG_ADDR_BASE 0x1000
#define MMAP_OTP_OFFSET(x) (OTP_REG_ADDR_BASE + (x))
#define MMAP_EEPROM_OFFSET(x) (EEPROM_REG_ADDR_BASE + (x))
#define MMAP_CFG_OFFSET(x) (CONFIG_REG_ADDR_BASE + (x))
#define EEPROM_CMD_REG 0x00
#define EEPROM_DATA_REG 0x04
#define EEPROM_CMD_EPC_WRITE (BIT(29) | BIT(28))
#define EEPROM_CMD_EPC_TIMEOUT_BIT BIT(17)
#define EEPROM_CMD_EPC_BUSY_BIT BIT(31)
#define STATUS_READ_DELAY_US 1
#define STATUS_READ_TIMEOUT_US 20000
#define OTP_ADDR_HIGH_OFFSET 0x04
#define OTP_ADDR_LOW_OFFSET 0x08
#define OTP_PRGM_DATA_OFFSET 0x10
#define OTP_PRGM_MODE_OFFSET 0x14
#define OTP_RD_DATA_OFFSET 0x18
#define OTP_FUNC_CMD_OFFSET 0x20
#define OTP_CMD_GO_OFFSET 0x28
#define OTP_PASS_FAIL_OFFSET 0x2C
#define OTP_STATUS_OFFSET 0x30
#define OTP_FUNC_RD_BIT BIT(0)
#define OTP_FUNC_PGM_BIT BIT(1)
#define OTP_CMD_GO_BIT BIT(0)
#define OTP_STATUS_BUSY_BIT BIT(0)
#define OTP_PGM_MODE_BYTE_BIT BIT(0)
#define OTP_FAIL_BIT BIT(0)
#define OTP_PWR_DN_BIT BIT(0)
#define OTP_PWR_DN_OFFSET 0x00
#define CFG_SYS_LOCK_OFFSET 0xA0
#define CFG_SYS_LOCK_PF3 BIT(5)
#define BYTE_LOW (GENMASK(7, 0))
#define BYTE_HIGH (GENMASK(12, 8))
struct pci1xxxx_otp_eeprom_device {
struct auxiliary_device *pdev;
void __iomem *reg_base;
struct nvmem_config nvmem_config_eeprom;
struct nvmem_device *nvmem_eeprom;
struct nvmem_config nvmem_config_otp;
struct nvmem_device *nvmem_otp;
};
static int set_sys_lock(struct pci1xxxx_otp_eeprom_device *priv)
{
void __iomem *sys_lock = priv->reg_base +
MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
u8 data;
writel(CFG_SYS_LOCK_PF3, sys_lock);
data = readl(sys_lock);
if (data != CFG_SYS_LOCK_PF3)
return -EPERM;
return 0;
}
static void release_sys_lock(struct pci1xxxx_otp_eeprom_device *priv)
{
void __iomem *sys_lock = priv->reg_base +
MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
writel(0, sys_lock);
}
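/*
 * Descriptive note (not upstream text): every EEPROM/OTP accessor below
 * brackets its register traffic with this pair:
 *
 *	ret = set_sys_lock(priv);
 *	if (ret)
 *		return ret;
 *	... access EEPROM/OTP registers ...
 *	release_sys_lock(priv);
 *
 * The lock is a hardware arbitration bit (CFG_SYS_LOCK_PF3), not a kernel
 * lock; -EPERM from set_sys_lock() means the readback did not stick,
 * presumably because another PCI function currently owns the window.
 */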
static bool is_eeprom_responsive(struct pci1xxxx_otp_eeprom_device *priv)
{
void __iomem *rb = priv->reg_base;
u32 regval;
int ret;
writel(EEPROM_CMD_EPC_TIMEOUT_BIT,
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
writel(EEPROM_CMD_EPC_BUSY_BIT,
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
/* Wait for the EPC_BUSY bit to get cleared or the timeout bit to get set */
ret = read_poll_timeout(readl, regval, !(regval & EEPROM_CMD_EPC_BUSY_BIT),
STATUS_READ_DELAY_US, STATUS_READ_TIMEOUT_US,
true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
/* Return failure if either of software or hardware timeouts happen */
if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT)))
return false;
return true;
}
static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off,
void *buf_t, size_t count)
{
struct pci1xxxx_otp_eeprom_device *priv = priv_t;
void __iomem *rb = priv->reg_base;
char *buf = buf_t;
u32 regval;
u32 byte;
int ret;
if (off >= priv->nvmem_config_eeprom.size)
return -EFAULT;
if ((off + count) > priv->nvmem_config_eeprom.size)
count = priv->nvmem_config_eeprom.size - off;
ret = set_sys_lock(priv);
if (ret)
return ret;
for (byte = 0; byte < count; byte++) {
writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb +
MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
ret = read_poll_timeout(readl, regval,
!(regval & EEPROM_CMD_EPC_BUSY_BIT),
STATUS_READ_DELAY_US,
STATUS_READ_TIMEOUT_US, true,
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) {
ret = -EIO;
goto error;
}
buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
}
ret = byte;
error:
release_sys_lock(priv);
return ret;
}
static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off,
void *value_t, size_t count)
{
struct pci1xxxx_otp_eeprom_device *priv = priv_t;
void __iomem *rb = priv->reg_base;
char *value = value_t;
u32 regval;
u32 byte;
int ret;
if (off >= priv->nvmem_config_eeprom.size)
return -EFAULT;
if ((off + count) > priv->nvmem_config_eeprom.size)
count = priv->nvmem_config_eeprom.size - off;
ret = set_sys_lock(priv);
if (ret)
return ret;
for (byte = 0; byte < count; byte++) {
writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
regval = EEPROM_CMD_EPC_TIMEOUT_BIT | EEPROM_CMD_EPC_WRITE |
(off + byte);
writel(regval, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
writel(EEPROM_CMD_EPC_BUSY_BIT | regval,
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
ret = read_poll_timeout(readl, regval,
!(regval & EEPROM_CMD_EPC_BUSY_BIT),
STATUS_READ_DELAY_US,
STATUS_READ_TIMEOUT_US, true,
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) {
ret = -EIO;
goto error;
}
}
ret = byte;
error:
release_sys_lock(priv);
return ret;
}
static void otp_device_set_address(struct pci1xxxx_otp_eeprom_device *priv,
u16 address)
{
u16 lo, hi;
lo = address & BYTE_LOW;
hi = (address & BYTE_HIGH) >> 8;
writew(lo, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_LOW_OFFSET));
writew(hi, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_HIGH_OFFSET));
}
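/*
 * Worked example (descriptive note, not upstream text): for address 0x1234,
 * BYTE_LOW keeps bits 7:0 and BYTE_HIGH keeps bits 12:8, so
 * lo = 0x1234 & 0xff = 0x34 and hi = (0x1234 & 0x1f00) >> 8 = 0x12; the two
 * halves are written to OTP_ADDR_LOW_OFFSET and OTP_ADDR_HIGH_OFFSET. With
 * an 8 KB OTP (OTP_SIZE_BYTES), 13 address bits are exactly enough.
 */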
static int pci1xxxx_otp_read(void *priv_t, unsigned int off,
void *buf_t, size_t count)
{
struct pci1xxxx_otp_eeprom_device *priv = priv_t;
void __iomem *rb = priv->reg_base;
char *buf = buf_t;
u32 regval;
u32 byte;
int ret;
u8 data;
if (off >= priv->nvmem_config_otp.size)
return -EFAULT;
if ((off + count) > priv->nvmem_config_otp.size)
count = priv->nvmem_config_otp.size - off;
ret = set_sys_lock(priv);
if (ret)
return ret;
for (byte = 0; byte < count; byte++) {
otp_device_set_address(priv, (u16)(off + byte));
data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
writel(data | OTP_FUNC_RD_BIT,
rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
writel(data | OTP_CMD_GO_BIT,
rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
ret = read_poll_timeout(readl, regval,
!(regval & OTP_STATUS_BUSY_BIT),
STATUS_READ_DELAY_US,
STATUS_READ_TIMEOUT_US, true,
rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
if (ret < 0 || data & OTP_FAIL_BIT) {
ret = -EIO;
goto error;
}
buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
}
ret = byte;
error:
release_sys_lock(priv);
return ret;
}
static int pci1xxxx_otp_write(void *priv_t, unsigned int off,
void *value_t, size_t count)
{
struct pci1xxxx_otp_eeprom_device *priv = priv_t;
void __iomem *rb = priv->reg_base;
char *value = value_t;
u32 regval;
u32 byte;
int ret;
u8 data;
if (off >= priv->nvmem_config_otp.size)
return -EFAULT;
if ((off + count) > priv->nvmem_config_otp.size)
count = priv->nvmem_config_otp.size - off;
ret = set_sys_lock(priv);
if (ret)
return ret;
for (byte = 0; byte < count; byte++) {
otp_device_set_address(priv, (u16)(off + byte));
/*
* Set OTP_PGM_MODE_BYTE command bit in OTP_PRGM_MODE register
* to enable Byte programming
*/
data = readl(rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
writel(data | OTP_PGM_MODE_BYTE_BIT,
rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
writel(*(value + byte), rb + MMAP_OTP_OFFSET(OTP_PRGM_DATA_OFFSET));
data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
writel(data | OTP_FUNC_PGM_BIT,
rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
writel(data | OTP_CMD_GO_BIT,
rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
ret = read_poll_timeout(readl, regval,
!(regval & OTP_STATUS_BUSY_BIT),
STATUS_READ_DELAY_US,
STATUS_READ_TIMEOUT_US, true,
rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
if (ret < 0 || data & OTP_FAIL_BIT) {
ret = -EIO;
goto error;
}
}
ret = byte;
error:
release_sys_lock(priv);
return ret;
}
static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *id)
{
struct auxiliary_device_wrapper *aux_dev_wrapper;
struct pci1xxxx_otp_eeprom_device *priv;
struct gp_aux_data_type *pdata;
int ret;
u8 data;
aux_dev_wrapper = container_of(aux_dev, struct auxiliary_device_wrapper,
aux_dev);
pdata = &aux_dev_wrapper->gp_aux_data;
if (!pdata)
return -EINVAL;
priv = devm_kzalloc(&aux_dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->pdev = aux_dev;
if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start +
PERI_PF3_SYSTEM_REG_ADDR_BASE,
PERI_PF3_SYSTEM_REG_LENGTH,
aux_dev->name))
return -ENOMEM;
priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start +
PERI_PF3_SYSTEM_REG_ADDR_BASE,
PERI_PF3_SYSTEM_REG_LENGTH);
if (!priv->reg_base)
return -ENOMEM;
ret = set_sys_lock(priv);
if (ret)
return ret;
/* Set OTP_PWR_DN to 0 to make OTP Operational */
data = readl(priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
writel(data & ~OTP_PWR_DN_BIT,
priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
dev_set_drvdata(&aux_dev->dev, priv);
if (is_eeprom_responsive(priv)) {
priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM;
priv->nvmem_config_eeprom.name = EEPROM_NAME;
priv->nvmem_config_eeprom.dev = &aux_dev->dev;
priv->nvmem_config_eeprom.owner = THIS_MODULE;
priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read;
priv->nvmem_config_eeprom.reg_write = pci1xxxx_eeprom_write;
priv->nvmem_config_eeprom.priv = priv;
priv->nvmem_config_eeprom.stride = 1;
priv->nvmem_config_eeprom.word_size = 1;
priv->nvmem_config_eeprom.size = EEPROM_SIZE_BYTES;
priv->nvmem_eeprom = devm_nvmem_register(&aux_dev->dev,
&priv->nvmem_config_eeprom);
if (IS_ERR(priv->nvmem_eeprom))
return PTR_ERR(priv->nvmem_eeprom);
}
release_sys_lock(priv);
priv->nvmem_config_otp.type = NVMEM_TYPE_OTP;
priv->nvmem_config_otp.name = OTP_NAME;
priv->nvmem_config_otp.dev = &aux_dev->dev;
priv->nvmem_config_otp.owner = THIS_MODULE;
priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read;
priv->nvmem_config_otp.reg_write = pci1xxxx_otp_write;
priv->nvmem_config_otp.priv = priv;
priv->nvmem_config_otp.stride = 1;
priv->nvmem_config_otp.word_size = 1;
priv->nvmem_config_otp.size = OTP_SIZE_BYTES;
priv->nvmem_otp = devm_nvmem_register(&aux_dev->dev,
&priv->nvmem_config_otp);
if (IS_ERR(priv->nvmem_otp))
return PTR_ERR(priv->nvmem_otp);
return ret;
}
static void pci1xxxx_otp_eeprom_remove(struct auxiliary_device *aux_dev)
{
struct pci1xxxx_otp_eeprom_device *priv;
void __iomem *sys_lock;
priv = dev_get_drvdata(&aux_dev->dev);
sys_lock = priv->reg_base + MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
writel(CFG_SYS_LOCK_PF3, sys_lock);
/* Shut down OTP */
writel(OTP_PWR_DN_BIT,
priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
writel(0, sys_lock);
}
static const struct auxiliary_device_id pci1xxxx_otp_eeprom_auxiliary_id_table[] = {
{.name = "mchp_pci1xxxx_gp.gp_otp_e2p"},
{},
};
MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_otp_eeprom_auxiliary_id_table);
static struct auxiliary_driver pci1xxxx_otp_eeprom_driver = {
.driver = {
.name = AUX_DRIVER_NAME,
},
.probe = pci1xxxx_otp_eeprom_probe,
.remove = pci1xxxx_otp_eeprom_remove,
.id_table = pci1xxxx_otp_eeprom_auxiliary_id_table
};
module_auxiliary_driver(pci1xxxx_otp_eeprom_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kumaravel Thiagarajan <[email protected]>");
MODULE_AUTHOR("Tharun Kumar P <[email protected]>");
MODULE_AUTHOR("Vaibhaav Ram T.L <[email protected]>");
MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx OTP EEPROM Programmer");
| linux-master | drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2022 Microchip Technology Inc.
// pci1xxxx gpio driver
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/gpio/driver.h>
#include <linux/bio.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include "mchp_pci1xxxx_gp.h"
#define PCI1XXXX_NR_PINS 93
#define PERI_GEN_RESET 0
#define OUT_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400)
#define INP_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x10)
#define OUT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x20)
#define INP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x30)
#define PULLUP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x40)
#define PULLDOWN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x50)
#define OPENDRAIN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x60)
#define WAKEMASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x70)
#define MODE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x80)
#define INTR_LO_TO_HI_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0x90)
#define INTR_HI_TO_LO_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0xA0)
#define INTR_LEVEL_CONFIG_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xB0)
#define INTR_LEVEL_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xC0)
#define INTR_STAT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xD0)
#define DEBOUNCE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xE0)
#define PIO_GLOBAL_CONFIG_OFFSET (0x400 + 0xF0)
#define PIO_PCI_CTRL_REG_OFFSET (0x400 + 0xF4)
#define INTR_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x100)
#define INTR_STATUS_OFFSET(x) (((x) * 4) + 0x400 + 0xD0)
struct pci1xxxx_gpio {
struct auxiliary_device *aux_dev;
void __iomem *reg_base;
struct gpio_chip gpio;
spinlock_t lock;
int irq_base;
};
static int pci1xxxx_gpio_get_direction(struct gpio_chip *gpio, unsigned int nr)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
u32 data;
int ret = -EINVAL;
data = readl(priv->reg_base + INP_EN_OFFSET(nr));
if (data & BIT(nr % 32)) {
ret = 1;
} else {
data = readl(priv->reg_base + OUT_EN_OFFSET(nr));
if (data & BIT(nr % 32))
ret = 0;
}
return ret;
}
static inline void pci1xxx_assign_bit(void __iomem *base_addr, unsigned int reg_offset,
unsigned int bitpos, bool set)
{
u32 data;
data = readl(base_addr + reg_offset);
if (set)
data |= BIT(bitpos);
else
data &= ~BIT(bitpos);
writel(data, base_addr + reg_offset);
}
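/*
 * Worked example (descriptive note, not upstream text): the register map
 * packs 32 GPIOs per 32-bit register, which is why every accessor pairs an
 * *_OFFSET(x) macro with an (x % 32) bit position. For GPIO 37, for example,
 * OUT_EN_OFFSET(37) = ((37 / 32) * 4) + 0x400 = 0x404 and the bit position
 * is 37 % 32 = 5, so
 *
 *	pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(37), 37 % 32, true);
 *
 * performs a read-modify-write of bit 5 in the register at offset 0x404.
 */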
static int pci1xxxx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), true);
pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), false);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int pci1xxxx_gpio_get(struct gpio_chip *gpio, unsigned int nr)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
return (readl(priv->reg_base + INP_OFFSET(nr)) >> (nr % 32)) & 1;
}
static int pci1xxxx_gpio_direction_output(struct gpio_chip *gpio,
unsigned int nr, int val)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
unsigned long flags;
u32 data;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), false);
pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), true);
data = readl(priv->reg_base + OUT_OFFSET(nr));
if (val)
data |= (1 << (nr % 32));
else
data &= ~(1 << (nr % 32));
writel(data, priv->reg_base + OUT_OFFSET(nr));
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static void pci1xxxx_gpio_set(struct gpio_chip *gpio,
unsigned int nr, int val)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, OUT_OFFSET(nr), (nr % 32), val);
spin_unlock_irqrestore(&priv->lock, flags);
}
static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
unsigned long config)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), true);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), true);
break;
case PIN_CONFIG_BIAS_DISABLE:
pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), false);
pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), false);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
default:
ret = -EOPNOTSUPP;
break;
}
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
unsigned int gpio = irqd_to_hwirq(data);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void pci1xxxx_gpio_irq_set_mask(struct irq_data *data, bool set)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
unsigned int gpio = irqd_to_hwirq(data);
unsigned long flags;
if (!set)
gpiochip_enable_irq(chip, gpio);
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, INTR_MASK_OFFSET(gpio), (gpio % 32), set);
spin_unlock_irqrestore(&priv->lock, flags);
if (set)
gpiochip_disable_irq(chip, gpio);
}
static void pci1xxxx_gpio_irq_mask(struct irq_data *data)
{
pci1xxxx_gpio_irq_set_mask(data, true);
}
static void pci1xxxx_gpio_irq_unmask(struct irq_data *data)
{
pci1xxxx_gpio_irq_set_mask(data, false);
}
static int pci1xxxx_gpio_set_type(struct irq_data *data, unsigned int trigger_type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
unsigned int gpio = irqd_to_hwirq(data);
unsigned int bitpos = gpio % 32;
if (trigger_type & IRQ_TYPE_EDGE_FALLING) {
pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
bitpos, false);
pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio),
bitpos, false);
irq_set_handler_locked(data, handle_edge_irq);
} else {
pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
bitpos, true);
}
if (trigger_type & IRQ_TYPE_EDGE_RISING) {
pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
bitpos, false);
pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
false);
irq_set_handler_locked(data, handle_edge_irq);
} else {
pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
bitpos, true);
}
if (trigger_type & IRQ_TYPE_LEVEL_LOW) {
pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
bitpos, true);
pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
bitpos, false);
pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
true);
irq_set_handler_locked(data, handle_edge_irq);
}
if (trigger_type & IRQ_TYPE_LEVEL_HIGH) {
pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
bitpos, false);
pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
bitpos, false);
pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
true);
irq_set_handler_locked(data, handle_edge_irq);
}
if ((!(trigger_type & IRQ_TYPE_LEVEL_LOW)) && (!(trigger_type & IRQ_TYPE_LEVEL_HIGH)))
pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio), bitpos, true);
return true;
}
static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
{
struct pci1xxxx_gpio *priv = dev_id;
struct gpio_chip *gc = &priv->gpio;
unsigned long int_status = 0;
unsigned long flags;
u8 pincount;
int bit;
u8 gpiobank;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, true);
spin_unlock_irqrestore(&priv->lock, flags);
for (gpiobank = 0; gpiobank < 3; gpiobank++) {
spin_lock_irqsave(&priv->lock, flags);
int_status = readl(priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
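		/*
		 * Banks 0 and 1 carry 32 pins each and bank 2 carries 29,
		 * which presumably matches PCI1XXXX_NR_PINS (32 + 32 + 29 = 93).
		 */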
if (gpiobank == 2)
pincount = 29;
else
pincount = 32;
for_each_set_bit(bit, &int_status, pincount) {
unsigned int irq;
spin_lock_irqsave(&priv->lock, flags);
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
generic_handle_irq(irq);
}
}
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, false);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
}
static const struct irq_chip pci1xxxx_gpio_irqchip = {
.name = "pci1xxxx_gpio",
.irq_ack = pci1xxxx_gpio_irq_ack,
.irq_mask = pci1xxxx_gpio_irq_mask,
.irq_unmask = pci1xxxx_gpio_irq_unmask,
.irq_set_type = pci1xxxx_gpio_set_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pci1xxxx_gpio_suspend(struct device *dev)
{
struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
16, true);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
17, false);
pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, true);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int pci1xxxx_gpio_resume(struct device *dev)
{
struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
17, true);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
16, false);
pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, false);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
{
struct gpio_chip *gchip = &priv->gpio;
struct gpio_irq_chip *girq;
int retval;
gchip->label = dev_name(&priv->aux_dev->dev);
gchip->parent = &priv->aux_dev->dev;
gchip->owner = THIS_MODULE;
gchip->direction_input = pci1xxxx_gpio_direction_input;
gchip->direction_output = pci1xxxx_gpio_direction_output;
gchip->get_direction = pci1xxxx_gpio_get_direction;
gchip->get = pci1xxxx_gpio_get;
gchip->set = pci1xxxx_gpio_set;
gchip->set_config = pci1xxxx_gpio_set_config;
gchip->dbg_show = NULL;
gchip->base = -1;
gchip->ngpio = PCI1XXXX_NR_PINS;
gchip->can_sleep = false;
retval = devm_request_threaded_irq(&priv->aux_dev->dev, irq,
NULL, pci1xxxx_gpio_irq_handler,
IRQF_ONESHOT, "PCI1xxxxGPIO", priv);
if (retval)
return retval;
girq = &priv->gpio.irq;
gpio_irq_chip_set_chip(girq, &pci1xxxx_gpio_irqchip);
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
return 0;
}
static int pci1xxxx_gpio_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *id)
{
struct auxiliary_device_wrapper *aux_dev_wrapper;
struct gp_aux_data_type *pdata;
struct pci1xxxx_gpio *priv;
int retval;
aux_dev_wrapper = (struct auxiliary_device_wrapper *)
container_of(aux_dev, struct auxiliary_device_wrapper, aux_dev);
pdata = &aux_dev_wrapper->gp_aux_data;
if (!pdata)
return -EINVAL;
priv = devm_kzalloc(&aux_dev->dev, sizeof(struct pci1xxxx_gpio), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->lock);
priv->aux_dev = aux_dev;
if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start, 0x800, aux_dev->name))
return -EBUSY;
priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start, 0x800);
if (!priv->reg_base)
return -ENOMEM;
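	/*
	 * Program the PIO global configuration register (0x400 + 0xF0). The
	 * value 0x0264 is the initial configuration used by this driver; the
	 * meaning of its individual bits is hardware specific and not spelled
	 * out here.
	 */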
writel(0x0264, (priv->reg_base + 0x400 + 0xF0));
retval = pci1xxxx_gpio_setup(priv, pdata->irq_num);
if (retval < 0)
return retval;
dev_set_drvdata(&aux_dev->dev, priv);
return devm_gpiochip_add_data(&aux_dev->dev, &priv->gpio, priv);
}
static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_gpio_pm_ops, pci1xxxx_gpio_suspend, pci1xxxx_gpio_resume);
static const struct auxiliary_device_id pci1xxxx_gpio_auxiliary_id_table[] = {
{.name = "mchp_pci1xxxx_gp.gp_gpio"},
{}
};
MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_gpio_auxiliary_id_table);
static struct auxiliary_driver pci1xxxx_gpio_driver = {
.driver = {
.name = "PCI1xxxxGPIO",
.pm = &pci1xxxx_gpio_pm_ops,
},
.probe = pci1xxxx_gpio_probe,
.id_table = pci1xxxx_gpio_auxiliary_id_table
};
module_auxiliary_driver(pci1xxxx_gpio_driver);
MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GPIO controller");
MODULE_AUTHOR("Kumaravel Thiagarajan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018-2020 Broadcom.
*/
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"
/* functions to manipulate the transport id in msg block */
#define BCM_VK_MSG_Q_SHIFT 4
#define BCM_VK_MSG_Q_MASK 0xF
#define BCM_VK_MSG_ID_MASK 0xFFF
#define BCM_VK_DMA_DRAIN_MAX_MS 2000
/* this number times q_size is the max number of msgs processed per loop */
#define BCM_VK_MSG_PROC_MAX_LOOP 2
/* module parameter */
static bool hb_mon = true;
module_param(hb_mon, bool, 0444);
MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
static int batch_log = 1;
module_param(batch_log, int, 0444);
MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");
static bool hb_mon_is_on(void)
{
return hb_mon;
}
static u32 get_q_num(const struct vk_msg_blk *msg)
{
u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;
if (q_num >= VK_MSGQ_PER_CHAN_MAX)
q_num = VK_MSGQ_NUM_DEFAULT;
return q_num;
}
static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
{
u32 trans_q;
if (q_num >= VK_MSGQ_PER_CHAN_MAX)
trans_q = VK_MSGQ_NUM_DEFAULT;
else
trans_q = q_num;
msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
}
static u32 get_msg_id(const struct vk_msg_blk *msg)
{
return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
}
static void set_msg_id(struct vk_msg_blk *msg, u32 val)
{
msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
}
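/*
 * From the masks and shifts above, trans_id packs two fields: bits [3:0]
 * carry the queue number and bits [15:4] carry the message id, e.g.
 * (illustrative) q_num 2 with msg_id 0x1A3 encodes as trans_id 0x1A32.
 */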
static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
{
return ((idx + inc) & qinfo->q_mask);
}
static
struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
u32 idx)
{
return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
}
static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
const struct bcm_vk_sync_qinfo *qinfo)
{
u32 wr_idx, rd_idx;
wr_idx = readl_relaxed(&msgq->wr_idx);
rd_idx = readl_relaxed(&msgq->rd_idx);
return ((wr_idx - rd_idx) & qinfo->q_mask);
}
static
u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
const struct bcm_vk_sync_qinfo *qinfo)
{
return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
}
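/*
 * Worked example (illustrative): with q_size = 16 (q_mask = 0xF), wr_idx = 3
 * and rd_idx = 13 give msgq_occupied = (3 - 13) & 0xF = 6 blocks in use and
 * msgq_avail_space = 16 - 6 - 1 = 9. One slot is always kept empty so that a
 * full queue can be told apart from an empty one.
 */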
/* number of retries when enqueue message fails before returning EAGAIN */
#define BCM_VK_H2VK_ENQ_RETRY 10
#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50
bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
{
return (!!atomic_read(&vk->msgq_inited));
}
void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
{
struct bcm_vk_alert *alert = &vk->host_alert;
unsigned long flags;
/* use irqsave version as this maybe called inside timer interrupt */
spin_lock_irqsave(&vk->host_alert_lock, flags);
alert->notfs |= bit_mask;
spin_unlock_irqrestore(&vk->host_alert_lock, flags);
if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
queue_work(vk->wq_thread, &vk->wq_work);
}
/*
* Heartbeat related defines
 * The heartbeat from the host is a last resort. If a stuck condition happens
 * on the card, the firmware is supposed to detect it. Therefore, the heartbeat
 * values used on the driver side are more relaxed and need to be bigger
 * than the watchdog timeout on the card. The watchdog timeout on the card
 * is 20s, with a jitter of 2s => 22s. We use a value of 27s here.
*/
#define BCM_VK_HB_TIMER_S 3
#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
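/*
 * With the values above, BCM_VK_HB_LOST_MAX = 27 / 3 = 9, so the alert in
 * bcm_vk_hb_poll() fires once more than 9 consecutive 3s polls see no uptime
 * change, i.e. roughly 30s of silence, comfortably above the 22s card
 * watchdog window described above.
 */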
static void bcm_vk_hb_poll(struct work_struct *work)
{
u32 uptime_s;
struct bcm_vk_hb_ctrl *hb = container_of(to_delayed_work(work), struct bcm_vk_hb_ctrl,
work);
struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
/* read uptime from register and compare */
uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
if (uptime_s == hb->last_uptime)
hb->lost_cnt++;
else /* reset to avoid accumulation */
hb->lost_cnt = 0;
dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
hb->last_uptime, uptime_s, hb->lost_cnt);
/*
* if the interface goes down without any activity, a value
		 * of 0xFFFFFFFF will be continuously read, and the condition
		 * will eventually be detected.
*/
hb->last_uptime = uptime_s;
} else {
/* reset heart beat lost cnt */
hb->lost_cnt = 0;
}
/* next, check if heartbeat exceeds limit */
if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
BCM_VK_HB_LOST_MAX,
BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);
bcm_vk_blk_drv_access(vk);
bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
}
/* re-arm timer */
schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
}
void bcm_vk_hb_init(struct bcm_vk *vk)
{
struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
INIT_DELAYED_WORK(&hb->work, bcm_vk_hb_poll);
schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
}
void bcm_vk_hb_deinit(struct bcm_vk *vk)
{
struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
cancel_delayed_work_sync(&hb->work);
}
static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
unsigned int start,
unsigned int nbits)
{
spin_lock(&vk->msg_id_lock);
bitmap_clear(vk->bmap, start, nbits);
spin_unlock(&vk->msg_id_lock);
}
/*
* allocate a ctx per file struct
*/
static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
{
u32 i;
struct bcm_vk_ctx *ctx = NULL;
u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);
spin_lock(&vk->ctx_lock);
/* check if it is in reset, if so, don't allow */
if (vk->reset_pid) {
dev_err(&vk->pdev->dev,
"No context allowed during reset by pid %d\n",
vk->reset_pid);
goto in_reset_exit;
}
for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
if (!vk->ctx[i].in_use) {
vk->ctx[i].in_use = true;
ctx = &vk->ctx[i];
break;
}
}
if (!ctx) {
dev_err(&vk->pdev->dev, "All context in use\n");
goto all_in_use_exit;
}
/* set the pid and insert it to hash table */
ctx->pid = pid;
ctx->hash_idx = hash_idx;
list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
/* increase kref */
kref_get(&vk->kref);
/* clear counter */
atomic_set(&ctx->pend_cnt, 0);
atomic_set(&ctx->dma_cnt, 0);
init_waitqueue_head(&ctx->rd_wq);
all_in_use_exit:
in_reset_exit:
spin_unlock(&vk->ctx_lock);
return ctx;
}
static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
{
u16 rc = VK_MSG_ID_OVERFLOW;
u16 test_bit_count = 0;
spin_lock(&vk->msg_id_lock);
while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
		/*
		 * The first time we come into this loop, msg_id will be 0
		 * and the first one tested will be 1. We skip
		 * VK_SIMPLEX_MSG_ID (0), which is reserved for one-way host2vk
		 * communication.
		 */
vk->msg_id++;
if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
vk->msg_id = 1;
if (test_bit(vk->msg_id, vk->bmap)) {
test_bit_count++;
continue;
}
rc = vk->msg_id;
bitmap_set(vk->bmap, vk->msg_id, 1);
break;
}
spin_unlock(&vk->msg_id_lock);
return rc;
}
static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
{
u32 idx;
u32 hash_idx;
pid_t pid;
struct bcm_vk_ctx *entry;
int count = 0;
if (!ctx) {
dev_err(&vk->pdev->dev, "NULL context detected\n");
return -EINVAL;
}
idx = ctx->idx;
pid = ctx->pid;
spin_lock(&vk->ctx_lock);
if (!vk->ctx[idx].in_use) {
dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
} else {
vk->ctx[idx].in_use = false;
vk->ctx[idx].miscdev = NULL;
/* Remove it from hash list and see if it is the last one. */
list_del(&ctx->node);
hash_idx = ctx->hash_idx;
list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
if (entry->pid == pid)
count++;
}
}
spin_unlock(&vk->ctx_lock);
return count;
}
static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
{
int proc_cnt;
bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
if (proc_cnt)
atomic_dec(&entry->ctx->dma_cnt);
kfree(entry->to_h_msg);
kfree(entry);
}
static void bcm_vk_drain_all_pend(struct device *dev,
struct bcm_vk_msg_chan *chan,
struct bcm_vk_ctx *ctx)
{
u32 num;
struct bcm_vk_wkent *entry, *tmp;
struct bcm_vk *vk;
struct list_head del_q;
if (ctx)
vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
INIT_LIST_HEAD(&del_q);
spin_lock(&chan->pendq_lock);
for (num = 0; num < chan->q_nr; num++) {
list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
list_move_tail(&entry->node, &del_q);
}
}
}
spin_unlock(&chan->pendq_lock);
/* batch clean up */
num = 0;
list_for_each_entry_safe(entry, tmp, &del_q, node) {
list_del(&entry->node);
num++;
if (ctx) {
struct vk_msg_blk *msg;
int bit_set;
bool responded;
u32 msg_id;
/* if it is specific ctx, log for any stuck */
msg = entry->to_v_msg;
msg_id = get_msg_id(msg);
bit_set = test_bit(msg_id, vk->bmap);
responded = entry->to_h_msg ? true : false;
if (num <= batch_log)
dev_info(dev,
"Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
msg->function_id, msg->size,
msg_id, entry->seq_num,
msg->context_id, entry->ctx->idx,
msg->cmd, msg->arg,
responded ? "T" : "F", bit_set);
if (responded)
atomic_dec(&ctx->pend_cnt);
else if (bit_set)
bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
}
bcm_vk_free_wkent(dev, entry);
}
if (num && ctx)
dev_info(dev, "Total drained items %d [fd-%d]\n",
num, ctx->idx);
}
void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
{
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}
/*
* Function to sync up the messages queue info that is provided by BAR1
*/
int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
{
struct bcm_vk_msgq __iomem *msgq;
struct device *dev = &vk->pdev->dev;
u32 msgq_off;
u32 num_q;
struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
&vk->to_h_msg_chan};
struct bcm_vk_msg_chan *chan;
int i, j;
int ret = 0;
/*
* If the driver is loaded at startup where vk OS is not up yet,
* the msgq-info may not be available until a later time. In
* this case, we skip and the sync function is supposed to be
* called again.
*/
if (!bcm_vk_msgq_marker_valid(vk)) {
dev_info(dev, "BAR1 msgq marker not initialized.\n");
return -EAGAIN;
}
msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
/* each side is always half the total */
num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
dev_err(dev,
"Advertised msgq %d error - max %d allowed\n",
num_q, VK_MSGQ_PER_CHAN_MAX);
return -EINVAL;
}
vk->to_v_msg_chan.q_nr = num_q;
vk->to_h_msg_chan.q_nr = num_q;
/* first msgq location */
msgq = vk->bar[BAR_1] + msgq_off;
/*
* if this function is called when it is already inited,
* something is wrong
*/
if (bcm_vk_drv_access_ok(vk) && !force_sync) {
dev_err(dev, "Msgq info already in sync\n");
return -EPERM;
}
for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
chan = chan_list[i];
memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));
for (j = 0; j < num_q; j++) {
struct bcm_vk_sync_qinfo *qinfo;
u32 msgq_start;
u32 msgq_size;
u32 msgq_nxt;
u32 msgq_db_offset, q_db_offset;
chan->msgq[j] = msgq;
msgq_start = readl_relaxed(&msgq->start);
msgq_size = readl_relaxed(&msgq->size);
msgq_nxt = readl_relaxed(&msgq->nxt);
msgq_db_offset = readl_relaxed(&msgq->db_offset);
q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
msgq_db_offset = q_db_offset;
else
/* fall back to default */
msgq_db_offset = VK_BAR0_Q_DB_BASE(j);
dev_info(dev,
"MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
j,
readw_relaxed(&msgq->type),
readw_relaxed(&msgq->num),
msgq_start,
msgq_db_offset,
readl_relaxed(&msgq->rd_idx),
readl_relaxed(&msgq->wr_idx),
msgq_size,
msgq_nxt);
qinfo = &chan->sync_qinfo[j];
/* formulate and record static info */
qinfo->q_start = vk->bar[BAR_1] + msgq_start;
qinfo->q_size = msgq_size;
/* set low threshold as 50% or 1/2 */
qinfo->q_low = qinfo->q_size >> 1;
qinfo->q_mask = qinfo->q_size - 1;
qinfo->q_db_offset = msgq_db_offset;
msgq++;
}
}
atomic_set(&vk->msgq_inited, 1);
return ret;
}
static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
{
u32 i;
mutex_init(&chan->msgq_mutex);
spin_lock_init(&chan->pendq_lock);
for (i = 0; i < VK_MSGQ_MAX_NR; i++)
INIT_LIST_HEAD(&chan->pendq[i]);
return 0;
}
static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
struct bcm_vk_wkent *entry)
{
struct bcm_vk_ctx *ctx;
spin_lock(&chan->pendq_lock);
list_add_tail(&entry->node, &chan->pendq[q_num]);
if (entry->to_h_msg) {
ctx = entry->ctx;
atomic_inc(&ctx->pend_cnt);
wake_up_interruptible(&ctx->rd_wq);
}
spin_unlock(&chan->pendq_lock);
}
static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
struct bcm_vk_wkent *entry,
struct _vk_data *data,
unsigned int num_planes)
{
unsigned int i;
unsigned int item_cnt = 0;
struct device *dev = &vk->pdev->dev;
struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
struct vk_msg_blk *msg = &entry->to_v_msg[0];
struct bcm_vk_msgq __iomem *msgq;
struct bcm_vk_sync_qinfo *qinfo;
u32 ib_sgl_size = 0;
u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
u32 avail;
u32 q_num;
/* check if high watermark is hit, and if so, skip */
q_num = get_q_num(msg);
msgq = chan->msgq[q_num];
qinfo = &chan->sync_qinfo[q_num];
avail = msgq_avail_space(msgq, qinfo);
if (avail < qinfo->q_low) {
dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
avail, qinfo->q_size);
return 0;
}
for (i = 0; i < num_planes; i++) {
if (data[i].address &&
(ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
item_cnt++;
memcpy(buf, entry->dma[i].sglist, data[i].size);
ib_sgl_size += data[i].size;
buf += data[i].size;
}
}
dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
item_cnt, ib_sgl_size, vk->ib_sgl_size);
/* round up size */
ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
>> VK_MSGQ_BLK_SZ_SHIFT;
return ib_sgl_size;
}
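/*
 * Note on the rounding in bcm_vk_append_ib_sgl(): ib_sgl_size is converted
 * from bytes to message blocks, rounded up. Assuming, for illustration, a
 * 16-byte block (VK_MSGQ_BLK_SZ_SHIFT = 4), an in-band SGL of 0x48 bytes
 * becomes (0x48 + 15) >> 4 = 5 blocks.
 */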
void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
{
struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];
vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
}
static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
{
static u32 seq_num;
struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
struct device *dev = &vk->pdev->dev;
struct vk_msg_blk *src = &entry->to_v_msg[0];
struct vk_msg_blk __iomem *dst;
struct bcm_vk_msgq __iomem *msgq;
struct bcm_vk_sync_qinfo *qinfo;
u32 q_num = get_q_num(src);
u32 wr_idx; /* local copy */
u32 i;
u32 avail;
u32 retry;
if (entry->to_v_blks != src->size + 1) {
dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
entry->to_v_blks,
src->size + 1,
get_msg_id(src),
src->function_id,
src->context_id);
return -EMSGSIZE;
}
msgq = chan->msgq[q_num];
qinfo = &chan->sync_qinfo[q_num];
mutex_lock(&chan->msgq_mutex);
avail = msgq_avail_space(msgq, qinfo);
/* if not enough space, return EAGAIN and let app handles it */
retry = 0;
while ((avail < entry->to_v_blks) &&
(retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
mutex_unlock(&chan->msgq_mutex);
msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
mutex_lock(&chan->msgq_mutex);
avail = msgq_avail_space(msgq, qinfo);
}
if (retry > BCM_VK_H2VK_ENQ_RETRY) {
mutex_unlock(&chan->msgq_mutex);
return -EAGAIN;
}
/* at this point, mutex is taken and there is enough space */
entry->seq_num = seq_num++; /* update debug seq number */
wr_idx = readl_relaxed(&msgq->wr_idx);
if (wr_idx >= qinfo->q_size) {
dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
wr_idx, qinfo->q_size);
bcm_vk_blk_drv_access(vk);
bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
goto idx_err;
}
dst = msgq_blk_addr(qinfo, wr_idx);
for (i = 0; i < entry->to_v_blks; i++) {
memcpy_toio(dst, src, sizeof(*dst));
src++;
wr_idx = msgq_inc(qinfo, wr_idx, 1);
dst = msgq_blk_addr(qinfo, wr_idx);
}
/* flush the write pointer */
writel(wr_idx, &msgq->wr_idx);
/* log new info for debugging */
dev_dbg(dev,
"MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
readl_relaxed(&msgq->num),
readl_relaxed(&msgq->rd_idx),
wr_idx,
entry->to_v_blks,
msgq_occupied(msgq, qinfo),
msgq_avail_space(msgq, qinfo),
readl_relaxed(&msgq->size));
/*
	 * Ring the doorbell based on queue number. 1 is added to wr_idx
	 * so that the value 0 never appears on the VK side, which keeps a
	 * real update distinguishable from the initial value.
*/
bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
idx_err:
mutex_unlock(&chan->msgq_mutex);
return 0;
}
int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
const pid_t pid, const u32 q_num)
{
int rc = 0;
struct bcm_vk_wkent *entry;
struct device *dev = &vk->pdev->dev;
/*
* check if the marker is still good. Sometimes, the PCIe interface may
	 * have gone down, and if we ship things down based on broken
	 * values, the kernel may panic.
*/
if (!bcm_vk_msgq_marker_valid(vk)) {
dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
return -EINVAL;
}
entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/* fill up necessary data */
entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
set_q_num(&entry->to_v_msg[0], q_num);
set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
entry->to_v_blks = 1; /* always 1 block */
entry->to_v_msg[0].cmd = shut_type;
entry->to_v_msg[0].arg = pid;
rc = bcm_to_v_msg_enqueue(vk, entry);
if (rc)
dev_err(dev,
"Sending shutdown message to q %d for pid %d fails.\n",
get_q_num(&entry->to_v_msg[0]), pid);
kfree(entry);
return rc;
}
static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
const u32 q_num)
{
int rc = 0;
struct device *dev = &vk->pdev->dev;
/*
	 * don't send down or do anything if the message queue is not
	 * initialized; if this is the reset session, clear it.
*/
if (!bcm_vk_drv_access_ok(vk)) {
if (vk->reset_pid == pid)
vk->reset_pid = 0;
return -EPERM;
}
dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);
/* only need to do it if it is not the reset process */
if (vk->reset_pid != pid)
rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
else
/* put reset_pid to 0 if it is exiting last session */
vk->reset_pid = 0;
return rc;
}
static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
struct bcm_vk_msg_chan *chan,
u16 q_num,
u16 msg_id)
{
struct bcm_vk_wkent *entry = NULL, *iter;
spin_lock(&chan->pendq_lock);
list_for_each_entry(iter, &chan->pendq[q_num], node) {
if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
list_del(&iter->node);
entry = iter;
bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
break;
}
}
spin_unlock(&chan->pendq_lock);
return entry;
}
s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
struct vk_msg_blk *data;
struct vk_msg_blk __iomem *src;
struct vk_msg_blk *dst;
struct bcm_vk_msgq __iomem *msgq;
struct bcm_vk_sync_qinfo *qinfo;
struct bcm_vk_wkent *entry;
u32 rd_idx, wr_idx;
u32 q_num, msg_id, j;
u32 num_blks;
s32 total = 0;
int cnt = 0;
int msg_processed = 0;
int max_msg_to_process;
bool exit_loop;
/*
* drain all the messages from the queues, and find its pending
* entry in the to_v queue, based on msg_id & q_num, and move the
* entry to the to_h pending queue, waiting for user space
* program to extract
*/
mutex_lock(&chan->msgq_mutex);
for (q_num = 0; q_num < chan->q_nr; q_num++) {
msgq = chan->msgq[q_num];
qinfo = &chan->sync_qinfo[q_num];
max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;
rd_idx = readl_relaxed(&msgq->rd_idx);
wr_idx = readl_relaxed(&msgq->wr_idx);
msg_processed = 0;
exit_loop = false;
while ((rd_idx != wr_idx) && !exit_loop) {
u8 src_size;
/*
* Make a local copy and get pointer to src blk
* The rd_idx is masked before getting the pointer to
* avoid out of bound access in case the interface goes
* down. It will end up pointing to the last block in
* the buffer, but subsequent src->size check would be
* able to catch this.
*/
src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
src_size = readb(&src->size);
if ((rd_idx >= qinfo->q_size) ||
(src_size > (qinfo->q_size - 1))) {
dev_crit(dev,
"Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
rd_idx, src_size, qinfo->q_size);
bcm_vk_blk_drv_access(vk);
bcm_vk_set_host_alert(vk,
ERR_LOG_HOST_PCIE_DWN);
goto idx_err;
}
num_blks = src_size + 1;
data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
if (data) {
/* copy messages and linearize it */
dst = data;
for (j = 0; j < num_blks; j++) {
memcpy_fromio(dst, src, sizeof(*dst));
dst++;
rd_idx = msgq_inc(qinfo, rd_idx, 1);
src = msgq_blk_addr(qinfo, rd_idx);
}
total++;
} else {
/*
* if we could not allocate memory in kernel,
* that is fatal.
*/
dev_crit(dev, "Kernel mem allocation failure.\n");
total = -ENOMEM;
goto idx_err;
}
/* flush rd pointer after a message is dequeued */
writel(rd_idx, &msgq->rd_idx);
/* log new info for debugging */
dev_dbg(dev,
"MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
readl_relaxed(&msgq->num),
rd_idx,
wr_idx,
num_blks,
msgq_occupied(msgq, qinfo),
msgq_avail_space(msgq, qinfo),
readl_relaxed(&msgq->size));
/*
* No need to search if it is an autonomous one-way
* message from driver, as these messages do not bear
* a to_v pending item. Currently, only the shutdown
* message falls into this category.
*/
if (data->function_id == VK_FID_SHUTDOWN) {
kfree(data);
continue;
}
msg_id = get_msg_id(data);
/* lookup original message in to_v direction */
entry = bcm_vk_dequeue_pending(vk,
&vk->to_v_msg_chan,
q_num,
msg_id);
/*
			 * If the response does not have a matching prior send
			 * (no pending entry), handling for it would go here.
*/
if (entry) {
entry->to_h_blks = num_blks;
entry->to_h_msg = data;
bcm_vk_append_pendq(&vk->to_h_msg_chan,
q_num, entry);
} else {
if (cnt++ < batch_log)
dev_info(dev,
"Could not find MsgId[0x%x] for resp func %d bmap %d\n",
msg_id, data->function_id,
test_bit(msg_id, vk->bmap));
kfree(data);
}
/* Fetch wr_idx to handle more back-to-back events */
wr_idx = readl(&msgq->wr_idx);
/*
			 * Cap the maximum so that, even when handling more
			 * back-to-back events, we do not hold the CPU for too
			 * long or loop forever if the rd/wr indexes are corrupted.
*/
if (++msg_processed >= max_msg_to_process) {
dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
q_num, max_msg_to_process);
exit_loop = true;
}
}
}
idx_err:
mutex_unlock(&chan->msgq_mutex);
dev_dbg(dev, "total %d drained from queues\n", total);
return total;
}
/*
* init routine for all required data structures
*/
static int bcm_vk_data_init(struct bcm_vk *vk)
{
int i;
spin_lock_init(&vk->ctx_lock);
for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
vk->ctx[i].in_use = false;
vk->ctx[i].idx = i; /* self identity */
vk->ctx[i].miscdev = NULL;
}
spin_lock_init(&vk->msg_id_lock);
spin_lock_init(&vk->host_alert_lock);
vk->msg_id = 0;
/* initialize hash table */
for (i = 0; i < VK_PID_HT_SZ; i++)
INIT_LIST_HEAD(&vk->pid_ht[i].head);
return 0;
}
irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
{
struct bcm_vk *vk = dev_id;
if (!bcm_vk_drv_access_ok(vk)) {
dev_err(&vk->pdev->dev,
"Interrupt %d received when msgq not inited\n", irq);
goto skip_schedule_work;
}
queue_work(vk->wq_thread, &vk->wq_work);
skip_schedule_work:
return IRQ_HANDLED;
}
int bcm_vk_open(struct inode *inode, struct file *p_file)
{
struct bcm_vk_ctx *ctx;
struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
struct device *dev = &vk->pdev->dev;
int rc = 0;
/* get a context and set it up for file */
ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
if (!ctx) {
dev_err(dev, "Error allocating context\n");
rc = -ENOMEM;
} else {
		/*
		 * Set up the context and replace the private data with it for
		 * other methods to use. A context is needed because multiple
		 * sessions are allowed to open the device node, and for each
		 * file open, when the upper layer queries for a response, only
		 * responses tied to that specific open should be returned.
		 * The context->idx is used for such binding.
		 */
ctx->miscdev = miscdev;
p_file->private_data = ctx;
dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
ctx->idx, ctx->pid);
}
return rc;
}
ssize_t bcm_vk_read(struct file *p_file,
char __user *buf,
size_t count,
loff_t *f_pos)
{
ssize_t rc = -ENOMSG;
struct bcm_vk_ctx *ctx = p_file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
miscdev);
struct device *dev = &vk->pdev->dev;
struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
struct bcm_vk_wkent *entry = NULL, *iter;
u32 q_num;
u32 rsp_length;
if (!bcm_vk_drv_access_ok(vk))
return -EPERM;
dev_dbg(dev, "Buf count %zu\n", count);
/*
	 * search through the pendq on the to_h chan, and return only entries
	 * that belong to the same context. Search always goes from the high to
	 * the low priority queues.
*/
spin_lock(&chan->pendq_lock);
for (q_num = 0; q_num < chan->q_nr; q_num++) {
list_for_each_entry(iter, &chan->pendq[q_num], node) {
if (iter->ctx->idx == ctx->idx) {
if (count >=
(iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
list_del(&iter->node);
atomic_dec(&ctx->pend_cnt);
entry = iter;
				} else {
					/* buffer not big enough; keep entry to report size */
					rc = -EMSGSIZE;
					entry = iter;
				}
goto read_loop_exit;
}
}
}
read_loop_exit:
spin_unlock(&chan->pendq_lock);
	if (entry && rc != -EMSGSIZE) {
/* retrieve the passed down msg_id */
set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
rc = rsp_length;
bcm_vk_free_wkent(dev, entry);
} else if (rc == -EMSGSIZE) {
struct vk_msg_blk tmp_msg = entry->to_h_msg[0];
/*
* in this case, return just the first block, so
* that app knows what size it is looking for.
*/
set_msg_id(&tmp_msg, entry->usr_msg_id);
tmp_msg.size = entry->to_h_blks - 1;
if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
rc = -EFAULT;
}
}
return rc;
}
ssize_t bcm_vk_write(struct file *p_file,
const char __user *buf,
size_t count,
loff_t *f_pos)
{
ssize_t rc;
struct bcm_vk_ctx *ctx = p_file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
miscdev);
struct bcm_vk_msgq __iomem *msgq;
struct device *dev = &vk->pdev->dev;
struct bcm_vk_wkent *entry;
u32 sgl_extra_blks;
u32 q_num;
u32 msg_size;
u32 msgq_size;
if (!bcm_vk_drv_access_ok(vk))
return -EPERM;
dev_dbg(dev, "Msg count %zu\n", count);
/* first, do sanity check where count should be multiple of basic blk */
if (count & (VK_MSGQ_BLK_SIZE - 1)) {
dev_err(dev, "Failure with size %zu not multiple of %zu\n",
count, VK_MSGQ_BLK_SIZE);
rc = -EINVAL;
goto write_err;
}
/* allocate the work entry + buffer for size count and inband sgl */
entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
GFP_KERNEL);
if (!entry) {
rc = -ENOMEM;
goto write_err;
}
/* now copy msg from user space, and then formulate the work entry */
if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
rc = -EFAULT;
goto write_free_ent;
}
entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
entry->ctx = ctx;
/* do a check on the blk size which could not exceed queue space */
q_num = get_q_num(&entry->to_v_msg[0]);
msgq = vk->to_v_msg_chan.msgq[q_num];
msgq_size = readl_relaxed(&msgq->size);
if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
> (msgq_size - 1)) {
dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
entry->to_v_blks, msgq_size - 1);
rc = -EINVAL;
goto write_free_ent;
}
/* Use internal message id */
entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
rc = bcm_vk_get_msg_id(vk);
if (rc == VK_MSG_ID_OVERFLOW) {
dev_err(dev, "msg_id overflow\n");
rc = -EOVERFLOW;
goto write_free_ent;
}
set_msg_id(&entry->to_v_msg[0], rc);
ctx->q_num = q_num;
dev_dbg(dev,
"[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
ctx->q_num, ctx->idx, entry->usr_msg_id,
get_msg_id(&entry->to_v_msg[0]));
if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
/* Convert any pointers to sg list */
unsigned int num_planes;
int dir;
struct _vk_data *data;
/*
* check if we are in reset, if so, no buffer transfer is
* allowed and return error.
*/
if (vk->reset_pid) {
dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
ctx->pid);
rc = -EACCES;
goto write_free_msgid;
}
num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
dir = DMA_FROM_DEVICE;
else
dir = DMA_TO_DEVICE;
/* Calculate vk_data location */
/* Go to end of the message */
msg_size = entry->to_v_msg[0].size;
if (msg_size > entry->to_v_blks) {
rc = -EMSGSIZE;
goto write_free_msgid;
}
data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];
/* Now back up to the start of the pointers */
data -= num_planes;
/* Convert user addresses to DMA SG List */
rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
if (rc)
goto write_free_msgid;
atomic_inc(&ctx->dma_cnt);
/* try to embed inband sgl */
sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
num_planes);
entry->to_v_blks += sgl_extra_blks;
entry->to_v_msg[0].size += sgl_extra_blks;
} else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
entry->to_v_msg[0].context_id == VK_NEW_CTX) {
/*
* Init happens in 2 stages, only the first stage contains the
* pid that needs translating.
*/
pid_t org_pid, pid;
/*
* translate the pid into the unique host space as user
* may run sessions inside containers or process
* namespaces.
*/
#define VK_MSG_PID_MASK 0xffffff00
#define VK_MSG_PID_SH 8
org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
>> VK_MSG_PID_SH;
pid = task_tgid_nr(current);
entry->to_v_msg[0].arg =
(entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
(pid << VK_MSG_PID_SH);
if (org_pid != pid)
dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
org_pid, org_pid, pid, pid);
}
/*
* store work entry to pending queue until a response is received.
* This needs to be done before enqueuing the message
*/
bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);
rc = bcm_to_v_msg_enqueue(vk, entry);
if (rc) {
dev_err(dev, "Fail to enqueue msg to to_v queue\n");
/* remove message from pending list */
entry = bcm_vk_dequeue_pending
(vk,
&vk->to_v_msg_chan,
q_num,
get_msg_id(&entry->to_v_msg[0]));
goto write_free_ent;
}
return count;
write_free_msgid:
bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
write_free_ent:
kfree(entry);
write_err:
return rc;
}
__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
{
__poll_t ret = 0;
int cnt;
struct bcm_vk_ctx *ctx = p_file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
struct device *dev = &vk->pdev->dev;
poll_wait(p_file, &ctx->rd_wq, wait);
cnt = atomic_read(&ctx->pend_cnt);
if (cnt) {
ret = (__force __poll_t)(POLLIN | POLLRDNORM);
if (cnt < 0) {
dev_err(dev, "Error cnt %d, setting back to 0", cnt);
atomic_set(&ctx->pend_cnt, 0);
}
}
return ret;
}
int bcm_vk_release(struct inode *inode, struct file *p_file)
{
int ret;
struct bcm_vk_ctx *ctx = p_file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
struct device *dev = &vk->pdev->dev;
pid_t pid = ctx->pid;
int dma_cnt;
unsigned long timeout, start_time;
/*
	 * if there are outstanding DMA transactions, we need to delay long enough
	 * to ensure that the card side has stopped touching the host buffer
	 * and its SGL list. A race condition could happen if the host app is killed
	 * abruptly, e.g. kill -9, while some DMA transfer orders are still in flight.
	 * Nothing can be done except delaying, as the host side runs in a
	 * completely async fashion.
*/
start_time = jiffies;
timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
do {
if (time_after(jiffies, timeout)) {
dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
dma_cnt, ctx->idx, pid);
break;
}
dma_cnt = atomic_read(&ctx->dma_cnt);
cpu_relax();
cond_resched();
} while (dma_cnt);
dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
ret = bcm_vk_free_ctx(vk, ctx);
if (ret == 0)
ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
else
ret = 0;
kref_put(&vk->kref, bcm_vk_release_data);
return ret;
}
int bcm_vk_msg_init(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
int ret;
if (bcm_vk_data_init(vk)) {
dev_err(dev, "Error initializing internal data structures\n");
return -EINVAL;
}
if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
dev_err(dev, "Error initializing communication channel\n");
return -EIO;
}
/* read msgq info if ready */
ret = bcm_vk_sync_msgq(vk, false);
if (ret && (ret != -EAGAIN)) {
dev_err(dev, "Error reading comm msg Q info\n");
return -EIO;
}
return 0;
}
void bcm_vk_msg_remove(struct bcm_vk *vk)
{
bcm_vk_blk_drv_access(vk);
/* drain all pending items */
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}
| linux-master | drivers/misc/bcm-vk/bcm_vk_msg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018-2020 Broadcom.
*/
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <uapi/linux/misc/bcm_vk.h>
#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"
/*
* Valkyrie has a hardware limitation of 16M transfer size.
* So limit the SGL chunks to 16M.
*/
#define BCM_VK_MAX_SGL_CHUNK SZ_16M
static int bcm_vk_dma_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int dir,
struct _vk_data *vkdata);
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);
/* Uncomment to dump SGLIST */
/* #define BCM_VK_DUMP_SGLIST */
static int bcm_vk_dma_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int direction,
struct _vk_data *vkdata)
{
dma_addr_t addr, sg_addr;
int err;
int i;
int offset;
u32 size;
u32 remaining_size;
u32 transfer_size;
u64 data;
unsigned long first, last;
struct _vk_data *sgdata;
/* Get 64-bit user address */
data = get_unaligned(&vkdata->address);
/* offset into first page */
offset = offset_in_page(data);
/* Calculate number of pages */
first = (data & PAGE_MASK) >> PAGE_SHIFT;
last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
dma->nr_pages = last - first + 1;
/* Allocate DMA pages */
dma->pages = kmalloc_array(dma->nr_pages,
sizeof(struct page *),
GFP_KERNEL);
if (!dma->pages)
return -ENOMEM;
dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
data, vkdata->size, dma->nr_pages);
dma->direction = direction;
/* Get user pages into memory */
err = get_user_pages_fast(data & PAGE_MASK,
dma->nr_pages,
direction == DMA_FROM_DEVICE,
dma->pages);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
err, dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
/* Max size of sg list is 1 per mapped page + fields at start */
dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
(sizeof(u32) * SGLIST_VKDATA_START);
/* Allocate sglist */
dma->sglist = dma_alloc_coherent(dev,
dma->sglen,
&dma->handle,
GFP_KERNEL);
if (!dma->sglist)
return -ENOMEM;
dma->sglist[SGLIST_NUM_SG] = 0;
dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
remaining_size = vkdata->size;
sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
/* Map all pages into DMA */
size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
remaining_size -= size;
sg_addr = dma_map_page(dev,
dma->pages[0],
offset,
size,
dma->direction);
transfer_size = size;
if (unlikely(dma_mapping_error(dev, sg_addr))) {
__free_page(dma->pages[0]);
return -EIO;
}
for (i = 1; i < dma->nr_pages; i++) {
size = min_t(size_t, PAGE_SIZE, remaining_size);
remaining_size -= size;
addr = dma_map_page(dev,
dma->pages[i],
0,
size,
dma->direction);
if (unlikely(dma_mapping_error(dev, addr))) {
__free_page(dma->pages[i]);
return -EIO;
}
		/*
		 * Compress the SG list entry when pages are contiguous and the
		 * combined transfer size is less than or equal to
		 * BCM_VK_MAX_SGL_CHUNK
		 */
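		/*
		 * For example (illustrative, assuming 4 KiB pages): five
		 * physically contiguous pages collapse into one 20 KiB SGL
		 * entry; a new entry is started as soon as contiguity breaks
		 * or the entry would grow past BCM_VK_MAX_SGL_CHUNK.
		 */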
if ((addr == (sg_addr + transfer_size)) &&
((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
/* pages are contiguous, add to same sg entry */
transfer_size += size;
} else {
/* pages are not contiguous, write sg entry */
sgdata->size = transfer_size;
put_unaligned(sg_addr, (u64 *)&sgdata->address);
dma->sglist[SGLIST_NUM_SG]++;
/* start new sg entry */
sgdata++;
sg_addr = addr;
transfer_size = size;
}
}
/* Write last sg list entry */
sgdata->size = transfer_size;
put_unaligned(sg_addr, (u64 *)&sgdata->address);
dma->sglist[SGLIST_NUM_SG]++;
/* Update pointers and size field to point to sglist */
put_unaligned((u64)dma->handle, &vkdata->address);
vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
(sizeof(u32) * SGLIST_VKDATA_START);
#ifdef BCM_VK_DUMP_SGLIST
dev_dbg(dev,
"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
(u64)dma->sglist,
dma->handle,
dma->sglen,
vkdata->size);
for (i = 0; i < vkdata->size / sizeof(u32); i++)
dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
#endif
return 0;
}
int bcm_vk_sg_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int dir,
struct _vk_data *vkdata,
int num)
{
int i;
int rc = -EINVAL;
/* Convert user addresses to DMA SG List */
for (i = 0; i < num; i++) {
if (vkdata[i].size && vkdata[i].address) {
/*
* If both size and address are non-zero
* then DMA alloc.
*/
rc = bcm_vk_dma_alloc(dev,
&dma[i],
dir,
&vkdata[i]);
} else if (vkdata[i].size ||
vkdata[i].address) {
/*
* If one of size and address are zero
* there is a problem.
*/
dev_err(dev,
"Invalid vkdata %x 0x%x 0x%llx\n",
i, vkdata[i].size, vkdata[i].address);
rc = -EINVAL;
} else {
/*
* If size and address are both zero
* don't convert, but return success.
*/
rc = 0;
}
if (rc)
goto fail_alloc;
}
return rc;
fail_alloc:
while (i > 0) {
i--;
if (dma[i].sglist)
bcm_vk_dma_free(dev, &dma[i]);
}
return rc;
}
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
dma_addr_t addr;
int i;
int num_sg;
u32 size;
struct _vk_data *vkdata;
dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);
/* Unmap all pages in the sglist */
num_sg = dma->sglist[SGLIST_NUM_SG];
vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
for (i = 0; i < num_sg; i++) {
size = vkdata[i].size;
addr = get_unaligned(&vkdata[i].address);
dma_unmap_page(dev, addr, size, dma->direction);
}
/* Free allocated sglist */
dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);
/* Release lock on all pages */
for (i = 0; i < dma->nr_pages; i++)
put_page(dma->pages[i]);
/* Free allocated dma pages */
kfree(dma->pages);
dma->sglist = NULL;
return 0;
}
int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
int *proc_cnt)
{
int i;
*proc_cnt = 0;
/* Unmap and free all pages and sglists */
for (i = 0; i < num; i++) {
if (dma[i].sglist) {
bcm_vk_dma_free(dev, &dma[i]);
*proc_cnt += 1;
}
}
return 0;
}
| linux-master | drivers/misc/bcm-vk/bcm_vk_sg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018-2020 Broadcom.
*/
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include "bcm_vk.h"
/* TTYVK base offset is 0x300000 into BAR1 */
#define BAR1_TTYVK_BASE_OFFSET 0x300000
/* Each TTYVK channel (TO or FROM) is 0x100000 */
#define BAR1_TTYVK_CHAN_OFFSET 0x100000
/* Each TTYVK channel has TO and FROM, hence the * 2 */
#define BAR1_TTYVK_BASE(index) (BAR1_TTYVK_BASE_OFFSET + \
((index) * BAR1_TTYVK_CHAN_OFFSET * 2))
/* TO TTYVK channel base comes before FROM for each index */
#define TO_TTYK_BASE(index) BAR1_TTYVK_BASE(index)
#define FROM_TTYK_BASE(index) (BAR1_TTYVK_BASE(index) + \
BAR1_TTYVK_CHAN_OFFSET)
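/*
 * Example layout derived from the macros above: index 0 uses 0x300000 (TO)
 * and 0x400000 (FROM); index 1 uses 0x500000 (TO) and 0x600000 (FROM), with
 * each direction occupying one BAR1_TTYVK_CHAN_OFFSET window.
 */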
struct bcm_vk_tty_chan {
u32 reserved;
u32 size;
u32 wr;
u32 rd;
u32 *data;
};
#define VK_BAR_CHAN(v, DIR, e) ((v)->DIR##_offset \
+ offsetof(struct bcm_vk_tty_chan, e))
#define VK_BAR_CHAN_SIZE(v, DIR) VK_BAR_CHAN(v, DIR, size)
#define VK_BAR_CHAN_WR(v, DIR) VK_BAR_CHAN(v, DIR, wr)
#define VK_BAR_CHAN_RD(v, DIR) VK_BAR_CHAN(v, DIR, rd)
#define VK_BAR_CHAN_DATA(v, DIR, off) (VK_BAR_CHAN(v, DIR, data) + (off))
#define VK_BAR0_REGSEG_TTY_DB_OFFSET 0x86c
/* Poll every 1/10 of second - temp hack till we use MSI interrupt */
#define SERIAL_TIMER_VALUE (HZ / 10)
static void bcm_vk_tty_poll(struct timer_list *t)
{
struct bcm_vk *vk = from_timer(vk, t, serial_timer);
queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id)
{
struct bcm_vk *vk = dev_id;
queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
return IRQ_HANDLED;
}
static void bcm_vk_tty_wq_handler(struct work_struct *work)
{
struct bcm_vk *vk = container_of(work, struct bcm_vk, tty_wq_work);
struct bcm_vk_tty *vktty;
int card_status;
int count;
unsigned char c;
int i;
int wr;
card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
if (BCM_VK_INTF_IS_DOWN(card_status))
return;
for (i = 0; i < BCM_VK_NUM_TTY; i++) {
count = 0;
/* Check the card status that the tty channel is ready */
if ((card_status & BIT(i)) == 0)
continue;
vktty = &vk->tty[i];
/* Don't increment read index if tty app is closed */
if (!vktty->is_opened)
continue;
/* Fetch the wr offset in buffer from VK */
wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, from));
/* safe to ignore until bar read gives proper size */
if (vktty->from_size == 0)
continue;
if (wr >= vktty->from_size) {
dev_err(&vk->pdev->dev,
"ERROR: wq handler ttyVK%d wr:0x%x > 0x%x\n",
i, wr, vktty->from_size);
/* Need to signal and close device in this case */
continue;
}
/*
* Simple read of circular buffer and
* insert into tty flip buffer
*/
while (vk->tty[i].rd != wr) {
c = vkread8(vk, BAR_1,
VK_BAR_CHAN_DATA(vktty, from, vktty->rd));
vktty->rd++;
if (vktty->rd >= vktty->from_size)
vktty->rd = 0;
tty_insert_flip_char(&vktty->port, c, TTY_NORMAL);
count++;
}
if (count) {
tty_flip_buffer_push(&vktty->port);
/* Update read offset from shadow register to card */
vkwrite32(vk, vktty->rd, BAR_1,
VK_BAR_CHAN_RD(vktty, from));
}
}
}
static int bcm_vk_tty_open(struct tty_struct *tty, struct file *file)
{
int card_status;
struct bcm_vk *vk;
struct bcm_vk_tty *vktty;
int index;
/* initialize the pointer in case something fails */
tty->driver_data = NULL;
vk = (struct bcm_vk *)dev_get_drvdata(tty->dev);
index = tty->index;
if (index >= BCM_VK_NUM_TTY)
return -EINVAL;
vktty = &vk->tty[index];
vktty->pid = task_pid_nr(current);
vktty->to_offset = TO_TTYK_BASE(index);
vktty->from_offset = FROM_TTYK_BASE(index);
/* Do not allow tty device to be opened if tty on card not ready */
card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
if (BCM_VK_INTF_IS_DOWN(card_status) || ((card_status & BIT(index)) == 0))
return -EBUSY;
/*
* Get shadow registers of the buffer sizes and the "to" write offset
* and "from" read offset
*/
vktty->to_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, to));
vktty->wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, to));
vktty->from_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, from));
vktty->rd = vkread32(vk, BAR_1, VK_BAR_CHAN_RD(vktty, from));
vktty->is_opened = true;
if (tty->count == 1 && !vktty->irq_enabled) {
timer_setup(&vk->serial_timer, bcm_vk_tty_poll, 0);
mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
return 0;
}
static void bcm_vk_tty_close(struct tty_struct *tty, struct file *file)
{
struct bcm_vk *vk = dev_get_drvdata(tty->dev);
if (tty->index >= BCM_VK_NUM_TTY)
return;
vk->tty[tty->index].is_opened = false;
if (tty->count == 1)
del_timer_sync(&vk->serial_timer);
}
static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val)
{
vkwrite32(vk, db_val, BAR_0,
VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET);
}
static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer,
size_t count)
{
int index;
struct bcm_vk *vk;
struct bcm_vk_tty *vktty;
int i;
index = tty->index;
vk = dev_get_drvdata(tty->dev);
vktty = &vk->tty[index];
/* Simple write each byte to circular buffer */
for (i = 0; i < count; i++) {
vkwrite8(vk, buffer[i], BAR_1,
VK_BAR_CHAN_DATA(vktty, to, vktty->wr));
vktty->wr++;
if (vktty->wr >= vktty->to_size)
vktty->wr = 0;
}
/* Update write offset from shadow register to card */
vkwrite32(vk, vktty->wr, BAR_1, VK_BAR_CHAN_WR(vktty, to));
bcm_vk_tty_doorbell(vk, 0);
return count;
}
static unsigned int bcm_vk_tty_write_room(struct tty_struct *tty)
{
struct bcm_vk *vk = dev_get_drvdata(tty->dev);
return vk->tty[tty->index].to_size - 1;
}
static const struct tty_operations serial_ops = {
.open = bcm_vk_tty_open,
.close = bcm_vk_tty_close,
.write = bcm_vk_tty_write,
.write_room = bcm_vk_tty_write_room,
};
int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
{
int i;
int err;
struct tty_driver *tty_drv;
struct device *dev = &vk->pdev->dev;
tty_drv = tty_alloc_driver
(BCM_VK_NUM_TTY,
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(tty_drv))
return PTR_ERR(tty_drv);
/* Save struct tty_driver for uninstalling the device */
vk->tty_drv = tty_drv;
/* initialize the tty driver */
tty_drv->driver_name = KBUILD_MODNAME;
tty_drv->name = kstrdup(name, GFP_KERNEL);
if (!tty_drv->name) {
err = -ENOMEM;
goto err_tty_driver_kref_put;
}
tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
tty_drv->subtype = SERIAL_TYPE_NORMAL;
tty_drv->init_termios = tty_std_termios;
tty_set_operations(tty_drv, &serial_ops);
/* register the tty driver */
err = tty_register_driver(tty_drv);
if (err) {
dev_err(dev, "tty_register_driver failed\n");
goto err_kfree_tty_name;
}
for (i = 0; i < BCM_VK_NUM_TTY; i++) {
struct device *tty_dev;
tty_port_init(&vk->tty[i].port);
tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
tty_drv, i, dev, vk,
NULL);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
goto unwind;
}
vk->tty[i].is_opened = false;
}
INIT_WORK(&vk->tty_wq_work, bcm_vk_tty_wq_handler);
vk->tty_wq_thread = create_singlethread_workqueue("tty");
if (!vk->tty_wq_thread) {
dev_err(dev, "Fail to create tty workqueue thread\n");
err = -ENOMEM;
goto unwind;
}
return 0;
unwind:
while (--i >= 0)
tty_port_unregister_device(&vk->tty[i].port, tty_drv, i);
tty_unregister_driver(tty_drv);
err_kfree_tty_name:
kfree(tty_drv->name);
tty_drv->name = NULL;
err_tty_driver_kref_put:
tty_driver_kref_put(tty_drv);
return err;
}
void bcm_vk_tty_exit(struct bcm_vk *vk)
{
int i;
del_timer_sync(&vk->serial_timer);
for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
tty_port_unregister_device(&vk->tty[i].port,
vk->tty_drv,
i);
tty_port_destroy(&vk->tty[i].port);
}
tty_unregister_driver(vk->tty_drv);
kfree(vk->tty_drv->name);
vk->tty_drv->name = NULL;
tty_driver_kref_put(vk->tty_drv);
}
void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk)
{
struct bcm_vk_tty *vktty;
int i;
for (i = 0; i < BCM_VK_NUM_TTY; ++i) {
vktty = &vk->tty[i];
if (vktty->pid)
kill_pid(find_vpid(vktty->pid), SIGKILL, 1);
}
}
void bcm_vk_tty_wq_exit(struct bcm_vk *vk)
{
cancel_work_sync(&vk->tty_wq_work);
destroy_workqueue(vk->tty_wq_thread);
}
| linux-master | drivers/misc/bcm-vk/bcm_vk_tty.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018-2020 Broadcom.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/panic_notifier.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <uapi/linux/misc/bcm_vk.h>
#include "bcm_vk.h"
#define PCI_DEVICE_ID_VALKYRIE 0x5e87
#define PCI_DEVICE_ID_VIPER 0x5e88
static DEFINE_IDA(bcm_vk_ida);
enum soc_idx {
VALKYRIE_A0 = 0,
VALKYRIE_B0,
VIPER,
VK_IDX_INVALID
};
enum img_idx {
IMG_PRI = 0,
IMG_SEC,
IMG_PER_TYPE_MAX
};
struct load_image_entry {
const u32 image_type;
const char *image_name[IMG_PER_TYPE_MAX];
};
#define NUM_BOOT_STAGES 2
/* default firmware images names */
static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = {
[VALKYRIE_A0] = {
{VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
{VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}}
},
[VALKYRIE_B0] = {
{VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}},
{VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}}
},
[VIPER] = {
{VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}},
{VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}}
},
};
/* Location of memory base addresses of interest in BAR1 */
/* Load Boot1 to start of ITCM */
#define BAR1_CODEPUSH_BASE_BOOT1 0x100000
/* Allow minimum 1s for Load Image timeout responses */
#define LOAD_IMAGE_TIMEOUT_MS (1 * MSEC_PER_SEC)
/* Image startup timeouts */
#define BOOT1_STARTUP_TIMEOUT_MS (5 * MSEC_PER_SEC)
#define BOOT2_STARTUP_TIMEOUT_MS (10 * MSEC_PER_SEC)
/* 1ms wait for checking the transfer complete status */
#define TXFR_COMPLETE_TIMEOUT_MS 1
/* MSIX usages */
#define VK_MSIX_MSGQ_MAX 3
#define VK_MSIX_NOTF_MAX 1
#define VK_MSIX_TTY_MAX BCM_VK_NUM_TTY
#define VK_MSIX_IRQ_MAX (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \
VK_MSIX_TTY_MAX)
#define VK_MSIX_IRQ_MIN_REQ (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX)
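/*
 * Vector layout assumed by bcm_vk_probe(): the first VK_MSIX_MSGQ_MAX vectors
 * service the message queues, the next one carries peer notifications, and
 * any remaining vectors (up to VK_MSIX_TTY_MAX) are handed to the tty
 * channels.
 */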
/* Number of bits set in DMA mask */
#define BCM_VK_DMA_BITS 64
/* Ucode boot wait time */
#define BCM_VK_UCODE_BOOT_US (100 * USEC_PER_MSEC)
/* 50% margin */
#define BCM_VK_UCODE_BOOT_MAX_US ((BCM_VK_UCODE_BOOT_US * 3) >> 1)
/* deinit time for the card os after receiving doorbell */
#define BCM_VK_DEINIT_TIME_MS (2 * MSEC_PER_SEC)
/*
* module parameters
*/
static bool auto_load = true;
module_param(auto_load, bool, 0444);
MODULE_PARM_DESC(auto_load,
"Load images automatically at PCIe probe time.\n");
static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
module_param(nr_scratch_pages, uint, 0444);
MODULE_PARM_DESC(nr_scratch_pages,
"Number of pre allocated DMAable coherent pages.\n");
static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN;
module_param(nr_ib_sgl_blk, uint, 0444);
MODULE_PARM_DESC(nr_ib_sgl_blk,
"Number of in-band msg blks for short SGL.\n");
/*
* alerts that could be generated from peer
*/
const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = {
{ERR_LOG_UECC, ERR_LOG_UECC, "uecc"},
{ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"},
{ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"},
{ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"},
{ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"},
{ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"},
{ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"},
{ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT,
"cop_wdog_timeout"},
{ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"},
{ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"},
{ERR_LOG_ECC, ERR_LOG_ECC, "ecc"},
{ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"},
};
/* alerts detected by the host */
const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = {
{ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"},
{ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"},
{ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"},
};
irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
{
struct bcm_vk *vk = dev_id;
if (!bcm_vk_drv_access_ok(vk)) {
dev_err(&vk->pdev->dev,
"Interrupt %d received when msgq not inited\n", irq);
goto skip_schedule_work;
}
/* if notification is not pending, set bit and schedule work */
if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
queue_work(vk->wq_thread, &vk->wq_work);
skip_schedule_work:
return IRQ_HANDLED;
}
static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
u32 reg;
u16 major, minor;
int ret = 0;
/* read interface register */
reg = vkread32(vk, BAR_0, BAR_INTF_VER);
major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
minor = reg & BAR_INTF_VER_MASK;
/*
	 * if the major number is 0, it is a pre-release and is allowed to
	 * continue; otherwise, check the versions accordingly
*/
if (!major) {
dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n",
major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
} else if (major != SEMANTIC_MAJOR) {
dev_err(dev,
"Intf major.minor=%d.%d rejected - drv %d.%d\n",
major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
ret = -EPFNOSUPPORT;
} else {
dev_dbg(dev,
"Intf major.minor=%d.%d passed - drv %d.%d\n",
major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
}
return ret;
}
static void bcm_vk_log_notf(struct bcm_vk *vk,
struct bcm_vk_alert *alert,
struct bcm_vk_entry const *entry_tab,
const u32 table_size)
{
u32 i;
u32 masked_val, latched_val;
struct bcm_vk_entry const *entry;
u32 reg;
u16 ecc_mem_err, uecc_mem_err;
struct device *dev = &vk->pdev->dev;
for (i = 0; i < table_size; i++) {
entry = &entry_tab[i];
masked_val = entry->mask & alert->notfs;
latched_val = entry->mask & alert->flags;
if (masked_val == ERR_LOG_UECC) {
/*
			 * if the count differs from the stored value and is
			 * greater than the threshold, log it.
*/
reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg,
BCM_VK_MEM_ERR_FIELD_MASK,
BCM_VK_UECC_MEM_ERR_SHIFT);
if ((uecc_mem_err != vk->alert_cnts.uecc) &&
(uecc_mem_err >= BCM_VK_UECC_THRESHOLD))
dev_info(dev,
"ALERT! %s.%d uecc RAISED - ErrCnt %d\n",
DRV_MODULE_NAME, vk->devid,
uecc_mem_err);
vk->alert_cnts.uecc = uecc_mem_err;
} else if (masked_val == ERR_LOG_ECC) {
reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg,
BCM_VK_MEM_ERR_FIELD_MASK,
BCM_VK_ECC_MEM_ERR_SHIFT);
if ((ecc_mem_err != vk->alert_cnts.ecc) &&
(ecc_mem_err >= BCM_VK_ECC_THRESHOLD))
dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n",
DRV_MODULE_NAME, vk->devid,
ecc_mem_err);
vk->alert_cnts.ecc = ecc_mem_err;
} else if (masked_val != latched_val) {
/* print a log as info */
dev_info(dev, "ALERT! %s.%d %s %s\n",
DRV_MODULE_NAME, vk->devid, entry->str,
masked_val ? "RAISED" : "CLEARED");
}
}
}
static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
{
struct bcm_vk_peer_log log;
struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
char loc_buf[BCM_VK_PEER_LOG_LINE_MAX];
int cnt;
struct device *dev = &vk->pdev->dev;
unsigned int data_offset;
memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));
dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
log.buf_size, log.mask, log.rd_idx, log.wr_idx);
if (!log_info->buf_size) {
dev_err(dev, "Peer log dump disabled - skipped!\n");
return;
}
/* perform range checking for rd/wr idx */
if ((log.rd_idx > log_info->mask) ||
(log.wr_idx > log_info->mask) ||
(log.buf_size != log_info->buf_size) ||
(log.mask != log_info->mask)) {
dev_err(dev,
"Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n",
log_info->buf_size, log.buf_size,
log_info->mask, log.mask,
log.rd_idx, log.wr_idx);
return;
}
cnt = 0;
data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0';
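	/*
	 * The peer log is a power-of-two circular buffer: mask == buf_size - 1
	 * (validated in bcm_vk_get_card_info()), so advancing rd_idx with
	 * "& log.mask" wraps it around. A line is flushed to the kernel log
	 * whenever a '\0' is seen or the local line buffer fills up.
	 */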
while (log.rd_idx != log.wr_idx) {
loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);
if ((loc_buf[cnt] == '\0') ||
(cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) {
dev_err(dev, "%s", loc_buf);
cnt = 0;
} else {
cnt++;
}
log.rd_idx = (log.rd_idx + 1) & log.mask;
}
/* update rd idx at the end */
vkwrite32(vk, log.rd_idx, BAR_2,
vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
}
void bcm_vk_handle_notf(struct bcm_vk *vk)
{
u32 reg;
struct bcm_vk_alert alert;
bool intf_down;
unsigned long flags;
/* handle peer alerts and then locally detected ones */
reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
intf_down = BCM_VK_INTF_IS_DOWN(reg);
if (!intf_down) {
vk->peer_alert.notfs = reg;
bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
ARRAY_SIZE(bcm_vk_peer_err));
vk->peer_alert.flags = vk->peer_alert.notfs;
} else {
/* turn off access */
bcm_vk_blk_drv_access(vk);
}
/* check and make copy of alert with lock and then free lock */
spin_lock_irqsave(&vk->host_alert_lock, flags);
if (intf_down)
vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
alert = vk->host_alert;
vk->host_alert.flags = vk->host_alert.notfs;
spin_unlock_irqrestore(&vk->host_alert_lock, flags);
/* call display with copy */
bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
ARRAY_SIZE(bcm_vk_host_err));
/*
	 * If it is a sys fault or heartbeat timeout, we would like to extract
	 * the log msg from the card so that we know what the last fault was
*/
if (!intf_down &&
((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
(vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
bcm_vk_dump_peer_log(vk);
}
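/*
 * Poll a BAR register until the masked value matches 'value' or the timeout
 * expires, bailing out early if the boot status reports an error. A typical
 * caller is the boot1 load path, e.g.:
 *
 *	bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN, SRAM_OPEN,
 *		    LOAD_IMAGE_TIMEOUT_MS);
 */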
static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
u64 offset, u32 mask, u32 value,
unsigned long timeout_ms)
{
struct device *dev = &vk->pdev->dev;
unsigned long start_time;
unsigned long timeout;
u32 rd_val, boot_status;
start_time = jiffies;
timeout = start_time + msecs_to_jiffies(timeout_ms);
do {
rd_val = vkread32(vk, bar, offset);
dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n",
bar, offset, rd_val);
/* check for any boot err condition */
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
if (boot_status & BOOT_ERR_MASK) {
dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n",
(boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT,
boot_status & BOOT_PROG_MASK,
jiffies_to_msecs(jiffies - start_time));
return -EFAULT;
}
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
cpu_relax();
cond_resched();
} while ((rd_val & mask) != value);
return 0;
}
static void bcm_vk_get_card_info(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
u32 offset;
int i;
u8 *dst;
struct bcm_vk_card_info *info = &vk->card_info;
/* first read the offset from spare register */
offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);
/* based on the offset, read info to internal card info structure */
dst = (u8 *)info;
for (i = 0; i < sizeof(*info); i++)
*dst++ = vkread8(vk, BAR_2, offset++);
#define CARD_INFO_LOG_FMT "version : %x\n" \
"os_tag : %s\n" \
"cmpt_tag : %s\n" \
"cpu_freq : %d MHz\n" \
"cpu_scale : %d full, %d lowest\n" \
"ddr_freq : %d MHz\n" \
"ddr_size : %d MB\n" \
"video_freq: %d MHz\n"
dev_dbg(dev, CARD_INFO_LOG_FMT, info->version, info->os_tag,
info->cmpt_tag, info->cpu_freq_mhz, info->cpu_scale[0],
info->cpu_scale[MAX_OPP - 1], info->ddr_freq_mhz,
info->ddr_size_MB, info->video_core_freq_mhz);
/*
	 * Get the peer log offset, and keep a copy of the log buffer header.
	 * It is used to validate the header before a dump, in case the BAR2
	 * memory has been corrupted.
*/
vk->peerlog_off = offset;
memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off,
sizeof(vk->peerlog_info));
/*
	 * Do range checking; if anything is out of bounds, zero the record,
	 * which guarantees that nothing will be dumped. In other words,
	 * the peer dump is disabled.
*/
if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
(vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
(vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
(vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
vk->peerlog_info.buf_size,
vk->peerlog_info.mask,
vk->peerlog_info.rd_idx,
vk->peerlog_info.wr_idx);
memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
} else {
dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
vk->peerlog_info.buf_size,
vk->peerlog_info.mask,
vk->peerlog_info.rd_idx,
vk->peerlog_info.wr_idx);
}
}
static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info;
u32 num, entry_size, offset, buf_size;
u8 *dst;
/* calculate offset which is based on peerlog offset */
buf_size = vkread32(vk, BAR_2,
vk->peerlog_off
+ offsetof(struct bcm_vk_peer_log, buf_size));
offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
+ buf_size;
/* first read the num and entry size */
num = vkread32(vk, BAR_2, offset);
entry_size = vkread32(vk, BAR_2, offset + sizeof(num));
/* check for max allowed */
if (num > BCM_VK_PROC_MON_MAX) {
dev_err(dev, "Processing monitoring entry %d exceeds max %d\n",
num, BCM_VK_PROC_MON_MAX);
return;
}
mon->num = num;
mon->entry_size = entry_size;
vk->proc_mon_off = offset;
	/* read it once to capture the static info */
dst = (u8 *)&mon->entries[0];
offset += sizeof(num) + sizeof(entry_size);
memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
}
static int bcm_vk_sync_card_info(struct bcm_vk *vk)
{
u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
/* check for marker, but allow diags mode to skip sync */
if (!bcm_vk_msgq_marker_valid(vk))
return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);
/*
	 * Write the scratch address used for DMA down to the card. For
	 * the signed image case, BAR1 is accessible only after boot2 has
	 * come up
*/
if (vk->tdma_addr) {
vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
VK_BAR1_SCRATCH_OFF_HI);
vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
VK_BAR1_SCRATCH_OFF_LO);
vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
VK_BAR1_SCRATCH_SZ_ADDR);
}
/* get static card info, only need to read once */
bcm_vk_get_card_info(vk);
/* get the proc mon info once */
bcm_vk_get_proc_mon_info(vk);
return 0;
}
void bcm_vk_blk_drv_access(struct bcm_vk *vk)
{
int i;
/*
* kill all the apps except for the process that is resetting.
* If not called during reset, reset_pid will be 0, and all will be
* killed.
*/
spin_lock(&vk->ctx_lock);
/* set msgq_inited to 0 so that all rd/wr will be blocked */
atomic_set(&vk->msgq_inited, 0);
for (i = 0; i < VK_PID_HT_SZ; i++) {
struct bcm_vk_ctx *ctx;
list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
if (ctx->pid != vk->reset_pid) {
dev_dbg(&vk->pdev->dev,
"Send kill signal to pid %d\n",
ctx->pid);
kill_pid(find_vpid(ctx->pid), SIGKILL, 1);
}
}
}
bcm_vk_tty_terminate_tty_user(vk);
spin_unlock(&vk->ctx_lock);
}
static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
dma_addr_t host_buf_addr, u32 buf_size)
{
/* update the dma address to the card */
vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
VK_BAR1_DMA_BUF_OFF_HI);
vkwrite32(vk, (u32)host_buf_addr, BAR_1,
VK_BAR1_DMA_BUF_OFF_LO);
vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
}
static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
const char *filename)
{
struct device *dev = &vk->pdev->dev;
const struct firmware *fw = NULL;
void *bufp = NULL;
size_t max_buf, offset;
int ret;
u64 offset_codepush;
u32 codepush;
u32 value;
dma_addr_t boot_dma_addr;
bool is_stdalone;
if (load_type == VK_IMAGE_TYPE_BOOT1) {
/*
		 * After POR, enable VK soft BOOTSRC so the bootrom does not
		 * clear the pushed image (the TCM memories).
*/
value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
value |= BOOTSRC_SOFT_ENABLE;
vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);
codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY;
offset_codepush = BAR_CODEPUSH_SBL;
/* Write a 1 to request SRAM open bit */
vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);
/* Wait for VK to respond */
ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS);
if (ret < 0) {
dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret);
goto err_buf_out;
}
max_buf = SZ_256K;
bufp = dma_alloc_coherent(dev,
max_buf,
&boot_dma_addr, GFP_KERNEL);
if (!bufp) {
dev_err(dev, "Error allocating 0x%zx\n", max_buf);
ret = -ENOMEM;
goto err_buf_out;
}
} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
codepush = CODEPUSH_BOOT2_ENTRY;
offset_codepush = BAR_CODEPUSH_SBI;
/* Wait for VK to respond */
ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS);
if (ret < 0) {
dev_err(dev, "boot2 wait DDR open error - ret(%d)\n",
ret);
goto err_buf_out;
}
max_buf = SZ_4M;
bufp = dma_alloc_coherent(dev,
max_buf,
&boot_dma_addr, GFP_KERNEL);
if (!bufp) {
dev_err(dev, "Error allocating 0x%zx\n", max_buf);
ret = -ENOMEM;
goto err_buf_out;
}
bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
} else {
dev_err(dev, "Error invalid image type 0x%x\n", load_type);
ret = -EINVAL;
goto err_buf_out;
}
offset = 0;
ret = request_partial_firmware_into_buf(&fw, filename, dev,
bufp, max_buf, offset);
if (ret) {
dev_err(dev, "Error %d requesting firmware file: %s\n",
ret, filename);
goto err_firmware_out;
}
dev_dbg(dev, "size=0x%zx\n", fw->size);
if (load_type == VK_IMAGE_TYPE_BOOT1)
memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
bufp,
fw->size);
dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush);
vkwrite32(vk, codepush, BAR_0, offset_codepush);
if (load_type == VK_IMAGE_TYPE_BOOT1) {
u32 boot_status;
/* wait until done */
ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
BOOT1_RUNNING,
BOOT1_RUNNING,
BOOT1_STARTUP_TIMEOUT_MS);
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) &&
(boot_status & BOOT_STDALONE_RUNNING);
if (ret && !is_stdalone) {
dev_err(dev,
"Timeout %ld ms waiting for boot1 to come up - ret(%d)\n",
BOOT1_STARTUP_TIMEOUT_MS, ret);
goto err_firmware_out;
} else if (is_stdalone) {
u32 reg;
reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
if ((reg & BOOT1_STDALONE_PROGRESS_MASK) ==
BOOT1_STDALONE_SUCCESS) {
dev_info(dev, "Boot1 standalone success\n");
ret = 0;
} else {
dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n",
BOOT1_STARTUP_TIMEOUT_MS);
ret = -EINVAL;
goto err_firmware_out;
}
}
} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
		/* The image may be larger than max_buf, so push it to the VK in chunks */
do {
/*
			 * Check for the ack from the card. Once the ack is
			 * received, all the data has been consumed by the
			 * card, so exit the loop.
*/
ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
FW_LOADER_ACK_RCVD_ALL_DATA,
FW_LOADER_ACK_RCVD_ALL_DATA,
TXFR_COMPLETE_TIMEOUT_MS);
if (ret == 0) {
dev_dbg(dev, "Exit boot2 download\n");
break;
} else if (ret == -EFAULT) {
dev_err(dev, "Error detected during ACK waiting");
goto err_firmware_out;
}
/* exit the loop, if there is no response from card */
if (time_after(jiffies, timeout)) {
dev_err(dev, "Error. No reply from card\n");
ret = -ETIMEDOUT;
goto err_firmware_out;
}
/* Wait for VK to open BAR space to copy new data */
ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
codepush, 0,
TXFR_COMPLETE_TIMEOUT_MS);
if (ret == 0) {
offset += max_buf;
ret = request_partial_firmware_into_buf
(&fw,
filename,
dev, bufp,
max_buf,
offset);
if (ret) {
dev_err(dev,
"Error %d requesting firmware file: %s offset: 0x%zx\n",
ret, filename, offset);
goto err_firmware_out;
}
dev_dbg(dev, "size=0x%zx\n", fw->size);
dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
codepush, offset_codepush);
vkwrite32(vk, codepush, BAR_0, offset_codepush);
/* reload timeout after every codepush */
timeout = jiffies +
msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
} else if (ret == -EFAULT) {
dev_err(dev, "Error detected waiting for transfer\n");
goto err_firmware_out;
}
} while (1);
/* wait for fw status bits to indicate app ready */
ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
VK_FWSTS_READY,
VK_FWSTS_READY,
BOOT2_STARTUP_TIMEOUT_MS);
if (ret < 0) {
dev_err(dev, "Boot2 not ready - ret(%d)\n", ret);
goto err_firmware_out;
}
is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
BOOT_STDALONE_RUNNING;
if (!is_stdalone) {
ret = bcm_vk_intf_ver_chk(vk);
if (ret) {
dev_err(dev, "failure in intf version check\n");
goto err_firmware_out;
}
/*
* Next, initialize Message Q if we are loading boot2.
* Do a force sync
*/
ret = bcm_vk_sync_msgq(vk, true);
if (ret) {
dev_err(dev, "Boot2 Error reading comm msg Q info\n");
ret = -EIO;
goto err_firmware_out;
}
/* sync & channel other info */
ret = bcm_vk_sync_card_info(vk);
if (ret) {
dev_err(dev, "Syncing Card Info failure\n");
goto err_firmware_out;
}
}
}
err_firmware_out:
release_firmware(fw);
err_buf_out:
if (bufp)
dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);
return ret;
}
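/*
 * Summary of the staged boot handled below: the bootROM opens the SRAM
 * (SRAM_OPEN) so boot1 can be pushed through BAR1; once boot1 reports
 * BOOT1_RUNNING and opens the DDR, boot2 is streamed in chunks through the
 * DMA buffer; boot2 finally raises VK_FWSTS_READY.
 */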
static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
{
u32 boot_status;
u32 fw_status;
u32 load_type = 0; /* default for unknown */
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN))
load_type = VK_IMAGE_TYPE_BOOT1;
else if (boot_status == BOOT1_RUNNING)
load_type = VK_IMAGE_TYPE_BOOT2;
/* Log status so that we know different stages */
dev_info(&vk->pdev->dev,
"boot-status value for next image: 0x%x : fw-status 0x%x\n",
boot_status, fw_status);
return load_type;
}
static enum soc_idx get_soc_idx(struct bcm_vk *vk)
{
struct pci_dev *pdev = vk->pdev;
enum soc_idx idx = VK_IDX_INVALID;
u32 rev;
static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 };
switch (pdev->device) {
case PCI_DEVICE_ID_VALKYRIE:
/* get the chip id to decide sub-class */
rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
if (rev < ARRAY_SIZE(vk_soc_tab)) {
idx = vk_soc_tab[rev];
} else {
/* Default to A0 firmware for all other chip revs */
idx = VALKYRIE_A0;
dev_warn(&pdev->dev,
"Rev %d not in image lookup table, default to idx=%d\n",
rev, idx);
}
break;
case PCI_DEVICE_ID_VIPER:
idx = VIPER;
break;
default:
dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
}
return idx;
}
static const char *get_load_fw_name(struct bcm_vk *vk,
const struct load_image_entry *entry)
{
const struct firmware *fw;
struct device *dev = &vk->pdev->dev;
int ret;
unsigned long dummy;
int i;
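	/*
	 * Probe each candidate name by requesting only the first few bytes of
	 * the file; the first name that can be fetched from the firmware
	 * search path is the one returned to the caller.
	 */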
for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
fw = NULL;
ret = request_partial_firmware_into_buf(&fw,
entry->image_name[i],
dev, &dummy,
sizeof(dummy),
0);
release_firmware(fw);
if (!ret)
return entry->image_name[i];
}
return NULL;
}
int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
{
int i, ret = -1;
enum soc_idx idx;
struct device *dev = &vk->pdev->dev;
u32 curr_type;
const char *curr_name;
idx = get_soc_idx(vk);
if (idx == VK_IDX_INVALID)
goto auto_load_all_exit;
/* log a message to know the relative loading order */
dev_dbg(dev, "Load All for device %d\n", vk->devid);
for (i = 0; i < NUM_BOOT_STAGES; i++) {
curr_type = image_tab[idx][i].image_type;
if (bcm_vk_next_boot_image(vk) == curr_type) {
curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
if (!curr_name) {
dev_err(dev, "No suitable firmware exists for type %d",
curr_type);
ret = -ENOENT;
goto auto_load_all_exit;
}
ret = bcm_vk_load_image_by_type(vk, curr_type,
curr_name);
dev_info(dev, "Auto load %s, ret %d\n",
curr_name, ret);
if (ret) {
dev_err(dev, "Error loading default %s\n",
curr_name);
goto auto_load_all_exit;
}
}
}
auto_load_all_exit:
return ret;
}
static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
{
if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
return -EPERM;
set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
queue_work(vk->wq_thread, &vk->wq_work);
return 0;
}
/*
* deferred work queue for draining and auto download.
*/
static void bcm_vk_wq_handler(struct work_struct *work)
{
struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
struct device *dev = &vk->pdev->dev;
s32 ret;
/* check wq offload bit map to perform various operations */
if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
		/* clear the bit right away for notification */
clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
bcm_vk_handle_notf(vk);
}
if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
bcm_vk_auto_load_all_images(vk);
/*
* at the end of operation, clear AUTO bit and pending
* bit
*/
clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
}
/* next, try to drain */
ret = bcm_to_h_msg_dequeue(vk);
if (ret == 0)
dev_dbg(dev, "Spurious trigger for workqueue\n");
else if (ret < 0)
bcm_vk_blk_drv_access(vk);
}
static long bcm_vk_load_image(struct bcm_vk *vk,
const struct vk_image __user *arg)
{
struct device *dev = &vk->pdev->dev;
const char *image_name;
struct vk_image image;
u32 next_loadable;
enum soc_idx idx;
int image_idx;
int ret = -EPERM;
if (copy_from_user(&image, arg, sizeof(image)))
return -EACCES;
if ((image.type != VK_IMAGE_TYPE_BOOT1) &&
(image.type != VK_IMAGE_TYPE_BOOT2)) {
dev_err(dev, "invalid image.type %u\n", image.type);
return ret;
}
next_loadable = bcm_vk_next_boot_image(vk);
if (next_loadable != image.type) {
dev_err(dev, "Next expected image %u, Loading %u\n",
next_loadable, image.type);
return ret;
}
/*
	 * Check if a download is already pending. For now this can only
	 * happen while the driver is being loaded, or if someone has issued
	 * another download command from another shell.
*/
if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
dev_err(dev, "Download operation already pending.\n");
return ret;
}
image_name = image.filename;
if (image_name[0] == '\0') {
/* Use default image name if NULL */
idx = get_soc_idx(vk);
if (idx == VK_IDX_INVALID)
goto err_idx;
/* Image idx starts with boot1 */
image_idx = image.type - VK_IMAGE_TYPE_BOOT1;
image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]);
if (!image_name) {
dev_err(dev, "No suitable image found for type %d",
image.type);
ret = -ENOENT;
goto err_idx;
}
} else {
/* Ensure filename is NULL terminated */
image.filename[sizeof(image.filename) - 1] = '\0';
}
ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
dev_info(dev, "Load %s, ret %d\n", image_name, ret);
err_idx:
clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
return ret;
}
static int bcm_vk_reset_successful(struct bcm_vk *vk)
{
struct device *dev = &vk->pdev->dev;
u32 fw_status, reset_reason;
int ret = -EAGAIN;
/*
	 * Reset could be triggered while the card is in one of several states:
	 *   i) in bootROM
	 *   ii) after boot1
	 *   iii) boot2 running
	 *
	 * i) & ii) - no status bits will be updated. If vkboot1 runs
	 * automatically after reset, it will set the reason to unknown.
	 * iii) - reboot reason match + deinit done.
*/
fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
/* immediate exit if interface goes down */
if (BCM_VK_INTF_IS_DOWN(fw_status)) {
dev_err(dev, "PCIe Intf Down!\n");
goto reset_exit;
}
reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK);
if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) ||
(reset_reason == VK_FWSTS_RESET_UNKNOWN))
ret = 0;
/*
	 * If some of the deinit bits are set but the done bit is not, this is
	 * a failure when the reset was triggered while boot2 was running.
*/
if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) &&
!(fw_status & VK_FWSTS_RESET_DONE))
ret = -EAGAIN;
reset_exit:
dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret);
return ret;
}
static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val)
{
vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE);
}
static int bcm_vk_trigger_reset(struct bcm_vk *vk)
{
u32 i;
u32 value, boot_status;
bool is_stdalone, is_boot2;
static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME,
BAR_INTF_VER,
BAR_CARD_VOLTAGE,
BAR_CARD_TEMPERATURE,
BAR_CARD_PWR_AND_THRE };
/* clean up before pressing the door bell */
bcm_vk_drain_msg_on_reset(vk);
vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
/* make tag '\0' terminated */
vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);
for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) {
vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
}
for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++)
vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));
memset(&vk->card_info, 0, sizeof(vk->card_info));
memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info));
memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts));
/*
	 * When a boot request fails, CODE_PUSH_OFFSET stays persistent,
	 * allowing us to debug the failure. When we call reset, we should
	 * clear CODE_PUSH_OFFSET so the ROM does not execute the boot again
	 * (and fail again) and instead waits for a new codepush. Also, if the
	 * previous boot encountered an error, the entry values need to be
	 * cleared.
*/
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
if (boot_status & BOOT_ERR_MASK) {
dev_info(&vk->pdev->dev,
"Card in boot error 0x%x, clear CODEPUSH val\n",
boot_status);
value = 0;
} else {
value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
value &= CODEPUSH_MASK;
}
vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);
/* special reset handling */
is_stdalone = boot_status & BOOT_STDALONE_RUNNING;
is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING;
if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
/*
		 * if the card is in ramdump mode, it has hit an error. Don't
		 * reset the reboot reason as it contains valid info that
		 * is important - simply use the special reset
*/
vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
return VK_BAR0_RESET_RAMPDUMP;
} else if (is_stdalone && !is_boot2) {
dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
return VK_BAR0_RESET_DB_HARD;
}
/* reset fw_status with proper reason, and press db */
vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
/* clear other necessary registers and alert records */
for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++)
vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
memset(&vk->host_alert, 0, sizeof(vk->host_alert));
memset(&vk->peer_alert, 0, sizeof(vk->peer_alert));
/* clear 4096 bits of bitmap */
bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);
return 0;
}
static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
{
struct device *dev = &vk->pdev->dev;
struct vk_reset reset;
int ret = 0;
u32 ramdump_reset;
int special_reset;
if (copy_from_user(&reset, arg, sizeof(struct vk_reset)))
return -EFAULT;
/* check if any download is in-progress, if so return error */
if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
dev_err(dev, "Download operation pending - skip reset.\n");
return -EPERM;
}
ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP;
dev_info(dev, "Issue Reset %s\n",
ramdump_reset ? "in ramdump mode" : "");
/*
* The following is the sequence of reset:
* - send card level graceful shut down
* - wait enough time for VK to handle its business, stopping DMA etc
* - kill host apps
* - Trigger interrupt with DB
*/
bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);
spin_lock(&vk->ctx_lock);
if (!vk->reset_pid) {
vk->reset_pid = task_pid_nr(current);
} else {
dev_err(dev, "Reset already launched by process pid %d\n",
vk->reset_pid);
ret = -EACCES;
}
spin_unlock(&vk->ctx_lock);
if (ret)
goto err_exit;
bcm_vk_blk_drv_access(vk);
special_reset = bcm_vk_trigger_reset(vk);
/*
* Wait enough time for card os to deinit
* and populate the reset reason.
*/
msleep(BCM_VK_DEINIT_TIME_MS);
if (special_reset) {
/* if it is special ramdump reset, return the type to user */
reset.arg2 = special_reset;
if (copy_to_user(arg, &reset, sizeof(reset)))
ret = -EFAULT;
} else {
ret = bcm_vk_reset_successful(vk);
}
err_exit:
clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
return ret;
}
static int bcm_vk_mmap(struct file *file, struct vm_area_struct *vma)
{
struct bcm_vk_ctx *ctx = file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
unsigned long pg_size;
	/* only BAR2 can be mmap'ed; it is PCI bar number 4 due to 64-bit BAR mapping */
#define VK_MMAPABLE_BAR 4
pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1)
>> PAGE_SHIFT) + 1;
if (vma->vm_pgoff + vma_pages(vma) > pg_size)
return -EINVAL;
vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR)
>> PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
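/*
 * Rough userspace sketch of the ioctl interface handled below (device node
 * name is illustrative only - the real node is the misc device registered in
 * bcm_vk_probe(); error handling omitted):
 *
 *	struct vk_image image = { .type = VK_IMAGE_TYPE_BOOT1 };
 *	struct vk_reset reset = { 0 };
 *	int fd = open("/dev/bcm_vk.0", O_RDWR);
 *
 *	ioctl(fd, VK_IOCTL_LOAD_IMAGE, &image);	 empty filename => default image
 *	ioctl(fd, VK_IOCTL_RESET, &reset);
 */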
static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret = -EINVAL;
struct bcm_vk_ctx *ctx = file->private_data;
struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
void __user *argp = (void __user *)arg;
dev_dbg(&vk->pdev->dev,
"ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
mutex_lock(&vk->mutex);
switch (cmd) {
case VK_IOCTL_LOAD_IMAGE:
ret = bcm_vk_load_image(vk, argp);
break;
case VK_IOCTL_RESET:
ret = bcm_vk_reset(vk, argp);
break;
default:
break;
}
mutex_unlock(&vk->mutex);
return ret;
}
static const struct file_operations bcm_vk_fops = {
.owner = THIS_MODULE,
.open = bcm_vk_open,
.read = bcm_vk_read,
.write = bcm_vk_write,
.poll = bcm_vk_poll,
.release = bcm_vk_release,
.mmap = bcm_vk_mmap,
.unlocked_ioctl = bcm_vk_ioctl,
};
static int bcm_vk_on_panic(struct notifier_block *nb,
unsigned long e, void *p)
{
struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb);
bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
return 0;
}
static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err;
int i;
int id;
int irq;
char name[20];
struct bcm_vk *vk;
struct device *dev = &pdev->dev;
struct miscdevice *misc_device;
u32 boot_status;
/* allocate vk structure which is tied to kref for freeing */
vk = kzalloc(sizeof(*vk), GFP_KERNEL);
if (!vk)
return -ENOMEM;
kref_init(&vk->kref);
if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) {
dev_warn(dev, "Inband SGL blk %d limited to max %d\n",
nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX);
nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX;
}
vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
mutex_init(&vk->mutex);
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Cannot enable PCI device\n");
goto err_free_exit;
}
vk->pdev = pci_dev_get(pdev);
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
/* make sure DMA is good */
err = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(BCM_VK_DMA_BITS));
if (err) {
dev_err(dev, "failed to set DMA mask\n");
goto err_disable_pdev;
}
	/* The tdma is a scratch area for some DMA testing. */
if (nr_scratch_pages) {
vk->tdma_vaddr = dma_alloc_coherent
(dev,
nr_scratch_pages * PAGE_SIZE,
&vk->tdma_addr, GFP_KERNEL);
if (!vk->tdma_vaddr) {
err = -ENOMEM;
goto err_disable_pdev;
}
}
pci_set_master(pdev);
pci_set_drvdata(pdev, vk);
irq = pci_alloc_irq_vectors(pdev,
VK_MSIX_IRQ_MIN_REQ,
VK_MSIX_IRQ_MAX,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (irq < VK_MSIX_IRQ_MIN_REQ) {
dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n",
VK_MSIX_IRQ_MIN_REQ, irq);
err = (irq >= 0) ? -EINVAL : irq;
goto err_disable_pdev;
}
if (irq != VK_MSIX_IRQ_MAX)
dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n",
irq, VK_MSIX_IRQ_MAX);
for (i = 0; i < MAX_BAR; i++) {
		/* multiply by 2 for 64-bit BAR mapping */
vk->bar[i] = pci_ioremap_bar(pdev, i * 2);
if (!vk->bar[i]) {
dev_err(dev, "failed to remap BAR%d\n", i);
err = -ENOMEM;
goto err_iounmap;
}
}
for (vk->num_irqs = 0;
vk->num_irqs < VK_MSIX_MSGQ_MAX;
vk->num_irqs++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_msgq_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk);
if (err) {
dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1);
goto err_irq;
}
}
/* one irq for notification from VK */
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_notf_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk);
if (err) {
dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1);
goto err_irq;
}
vk->num_irqs++;
for (i = 0;
(i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
i++, vk->num_irqs++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_tty_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk);
if (err) {
dev_err(dev, "failed request tty IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1);
goto err_irq;
}
bcm_vk_tty_set_irq_enabled(vk, i);
}
id = ida_alloc(&bcm_vk_ida, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "unable to get id\n");
goto err_irq;
}
vk->devid = id;
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
misc_device = &vk->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
err = -ENOMEM;
goto err_ida_remove;
}
misc_device->fops = &bcm_vk_fops,
err = misc_register(misc_device);
if (err) {
dev_err(dev, "failed to register device\n");
goto err_kfree_name;
}
INIT_WORK(&vk->wq_work, bcm_vk_wq_handler);
/* create dedicated workqueue */
vk->wq_thread = create_singlethread_workqueue(name);
if (!vk->wq_thread) {
dev_err(dev, "Fail to create workqueue thread\n");
err = -ENOMEM;
goto err_misc_deregister;
}
err = bcm_vk_msg_init(vk);
if (err) {
dev_err(dev, "failed to init msg queue info\n");
goto err_destroy_workqueue;
}
/* sync other info */
bcm_vk_sync_card_info(vk);
/* register for panic notifier */
vk->panic_nb.notifier_call = bcm_vk_on_panic;
err = atomic_notifier_chain_register(&panic_notifier_list,
&vk->panic_nb);
if (err) {
dev_err(dev, "Fail to register panic notifier\n");
goto err_destroy_workqueue;
}
snprintf(name, sizeof(name), KBUILD_MODNAME ".%d_ttyVK", id);
err = bcm_vk_tty_init(vk, name);
if (err)
goto err_unregister_panic_notifier;
/*
	 * Let's trigger an auto download. We don't want to do it serially here
	 * because probe is not supposed to block for a long time.
*/
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
if (auto_load) {
if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) {
err = bcm_vk_trigger_autoload(vk);
if (err)
goto err_bcm_vk_tty_exit;
} else {
dev_err(dev,
"Auto-load skipped - BROM not in proper state (0x%x)\n",
boot_status);
}
}
/* enable hb */
bcm_vk_hb_init(vk);
dev_dbg(dev, "BCM-VK:%u created\n", id);
return 0;
err_bcm_vk_tty_exit:
bcm_vk_tty_exit(vk);
err_unregister_panic_notifier:
atomic_notifier_chain_unregister(&panic_notifier_list,
&vk->panic_nb);
err_destroy_workqueue:
destroy_workqueue(vk->wq_thread);
err_misc_deregister:
misc_deregister(misc_device);
err_kfree_name:
kfree(misc_device->name);
misc_device->name = NULL;
err_ida_remove:
ida_free(&bcm_vk_ida, id);
err_irq:
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(dev, pci_irq_vector(pdev, i), vk);
pci_disable_msix(pdev);
pci_disable_msi(pdev);
err_iounmap:
for (i = 0; i < MAX_BAR; i++) {
if (vk->bar[i])
pci_iounmap(pdev, vk->bar[i]);
}
pci_release_regions(pdev);
err_disable_pdev:
if (vk->tdma_vaddr)
dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
vk->tdma_vaddr, vk->tdma_addr);
pci_free_irq_vectors(pdev);
pci_disable_device(pdev);
pci_dev_put(pdev);
err_free_exit:
kfree(vk);
return err;
}
void bcm_vk_release_data(struct kref *kref)
{
struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
struct pci_dev *pdev = vk->pdev;
dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk);
pci_dev_put(pdev);
kfree(vk);
}
static void bcm_vk_remove(struct pci_dev *pdev)
{
int i;
struct bcm_vk *vk = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &vk->miscdev;
bcm_vk_hb_deinit(vk);
/*
	 * Trigger a reset to the card and wait enough time for the UCODE to
	 * rerun, which re-initializes the card into its default state.
	 * This ensures that when the driver is re-enumerated it will start
	 * from a completely clean state.
*/
bcm_vk_trigger_reset(vk);
usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US);
/* unregister panic notifier */
atomic_notifier_chain_unregister(&panic_notifier_list,
&vk->panic_nb);
bcm_vk_msg_remove(vk);
bcm_vk_tty_exit(vk);
if (vk->tdma_vaddr)
dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
vk->tdma_vaddr, vk->tdma_addr);
/* remove if name is set which means misc dev registered */
if (misc_device->name) {
misc_deregister(misc_device);
kfree(misc_device->name);
ida_free(&bcm_vk_ida, vk->devid);
}
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
pci_disable_msix(pdev);
pci_disable_msi(pdev);
cancel_work_sync(&vk->wq_work);
destroy_workqueue(vk->wq_thread);
bcm_vk_tty_wq_exit(vk);
for (i = 0; i < MAX_BAR; i++) {
if (vk->bar[i])
pci_iounmap(pdev, vk->bar[i]);
}
dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid);
pci_release_regions(pdev);
pci_free_irq_vectors(pdev);
pci_disable_device(pdev);
kref_put(&vk->kref, bcm_vk_release_data);
}
static void bcm_vk_shutdown(struct pci_dev *pdev)
{
struct bcm_vk *vk = pci_get_drvdata(pdev);
u32 reg, boot_stat;
reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
boot_stat = reg & BOOT_STATE_MASK;
if (boot_stat == BOOT1_RUNNING) {
/* simply trigger a reset interrupt to park it */
bcm_vk_trigger_reset(vk);
} else if (boot_stat == BROM_NOT_RUN) {
int err;
u16 lnksta;
/*
		 * The boot status only reflects the boot condition since the
		 * last reset. As the ucode runs only once to configure PCIe,
		 * if multiple resets happen we lose track of whether the
		 * ucode has run or not. Here, read the current link speed and
		 * use that to sync up the boot status properly, so that on
		 * reboot-back-up it has the proper state to start autoload.
*/
err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
if (!err &&
(lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) {
reg |= BROM_STATUS_COMPLETE;
vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);
}
}
}
static const struct pci_device_id bcm_vk_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), },
{ }
};
MODULE_DEVICE_TABLE(pci, bcm_vk_ids);
static struct pci_driver pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bcm_vk_ids,
.probe = bcm_vk_probe,
.remove = bcm_vk_remove,
.shutdown = bcm_vk_shutdown,
};
module_pci_driver(pci_driver);
MODULE_DESCRIPTION("Broadcom VK Host Driver");
MODULE_AUTHOR("Scott Branden <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
| linux-master | drivers/misc/bcm-vk/bcm_vk_dev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Francis Laniel <[email protected]>
*
* Add tests related to fortified functions in this file.
*/
#include "lkdtm.h"
#include <linux/string.h>
#include <linux/slab.h>
static volatile int fortify_scratch_space;
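/*
 * Each test below is registered as an lkdtm crashtype and is typically
 * triggered from userspace through the lkdtm debugfs interface, e.g.:
 *
 *	echo FORTIFY_STR_OBJECT > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * A passing run traps in the fortified string/memory helper before the
 * "FAIL:" message is printed.
 */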
static void lkdtm_FORTIFY_STR_OBJECT(void)
{
struct target {
char a[10];
int foo;
} target[3] = {};
/*
* Using volatile prevents the compiler from determining the value of
* 'size' at compile time. Without that, we would get a compile error
* rather than a runtime error.
*/
volatile int size = 20;
pr_info("trying to strcmp() past the end of a struct\n");
strncpy(target[0].a, target[1].a, size);
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target[0].a[3];
pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
static void lkdtm_FORTIFY_STR_MEMBER(void)
{
struct target {
char a[10];
char b[10];
} target;
volatile int size = 20;
char *src;
src = kmalloc(size, GFP_KERNEL);
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
pr_info("trying to strncpy() past the end of a struct member...\n");
/*
* strncpy(target.a, src, 20); will hit a compile error because the
* compiler knows at build time that target.a < 20 bytes. Use a
* volatile to force a runtime error.
*/
strncpy(target.a, src, size);
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
}
static void lkdtm_FORTIFY_MEM_OBJECT(void)
{
int before[10];
struct target {
char a[10];
int foo;
} target = {};
int after[10];
/*
* Using volatile prevents the compiler from determining the value of
* 'size' at compile time. Without that, we would get a compile error
* rather than a runtime error.
*/
volatile int size = 20;
memset(before, 0, sizeof(before));
memset(after, 0, sizeof(after));
fortify_scratch_space = before[5];
fortify_scratch_space = after[5];
pr_info("trying to memcpy() past the end of a struct\n");
pr_info("0: %zu\n", __builtin_object_size(&target, 0));
pr_info("1: %zu\n", __builtin_object_size(&target, 1));
pr_info("s: %d\n", size);
memcpy(&target, &before, size);
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
static void lkdtm_FORTIFY_MEM_MEMBER(void)
{
struct target {
char a[10];
char b[10];
} target;
volatile int size = 20;
char *src;
src = kmalloc(size, GFP_KERNEL);
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
pr_info("trying to memcpy() past the end of a struct member...\n");
/*
	 * memcpy(target.a, src, 20); will hit a compile error because the
* compiler knows at build time that target.a < 20 bytes. Use a
* volatile to force a runtime error.
*/
memcpy(target.a, src, size);
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
}
/*
* Calls fortified strscpy to test that it returns the same result as vanilla
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
static void lkdtm_FORTIFY_STRSCPY(void)
{
char *src;
char dst[5];
struct {
union {
char big[10];
char src[5];
};
} weird = { .big = "hello!" };
char weird_dst[sizeof(weird.src) + 1];
src = kstrdup("foobar", GFP_KERNEL);
if (src == NULL)
return;
/* Vanilla strscpy returns -E2BIG if size is 0. */
if (strscpy(dst, src, 0) != -E2BIG)
pr_warn("FAIL: strscpy() of 0 length did not return -E2BIG\n");
/* Vanilla strscpy returns -E2BIG if src is truncated. */
if (strscpy(dst, src, sizeof(dst)) != -E2BIG)
pr_warn("FAIL: strscpy() did not return -E2BIG while src is truncated\n");
/* After above call, dst must contain "foob" because src was truncated. */
if (strncmp(dst, "foob", sizeof(dst)) != 0)
pr_warn("FAIL: after strscpy() dst does not contain \"foob\" but \"%s\"\n",
dst);
/* Shrink src so the strscpy() below succeeds. */
src[3] = '\0';
/*
* Vanilla strscpy returns number of character copied if everything goes
* well.
*/
if (strscpy(dst, src, sizeof(dst)) != 3)
pr_warn("FAIL: strscpy() did not return 3 while src was copied entirely truncated\n");
/* After above call, dst must contain "foo" because src was copied. */
if (strncmp(dst, "foo", sizeof(dst)) != 0)
pr_warn("FAIL: after strscpy() dst does not contain \"foo\" but \"%s\"\n",
dst);
/* Test when src is embedded inside a union. */
strscpy(weird_dst, weird.src, sizeof(weird_dst));
if (strcmp(weird_dst, "hello") != 0)
pr_warn("FAIL: after strscpy() weird_dst does not contain \"hello\" but \"%s\"\n",
weird_dst);
/* Restore src to its initial value. */
src[3] = 'b';
/*
* Use strlen here so size cannot be known at compile time and there is
* a runtime write overflow.
*/
strscpy(dst, src, strlen(src));
pr_err("FAIL: strscpy() overflow not detected!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
}
static struct crashtype crashtypes[] = {
CRASHTYPE(FORTIFY_STR_OBJECT),
CRASHTYPE(FORTIFY_STR_MEMBER),
CRASHTYPE(FORTIFY_MEM_OBJECT),
CRASHTYPE(FORTIFY_MEM_MEMBER),
CRASHTYPE(FORTIFY_STRSCPY),
};
struct crashtype_category fortify_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/fortify.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests relating directly to heap memory, including
* page allocation and slab allocations.
*/
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;
/*
* Using volatile here means the compiler cannot ever make assumptions
* about this value. This means compile-time length checks involving
* this variable cannot be performed; only run-time checks.
*/
static volatile int __offset = 1;
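/*
 * Most of the tests below only print a "FAIL:" line when the corresponding
 * hardening (KASAN, slub_debug, init_on_alloc/init_on_free, vmalloc guard
 * pages, ...) is absent; with the hardening enabled the bad access is
 * expected to be detected before the test completes.
 */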
/*
* If there aren't guard pages, it's likely that a consecutive allocation will
* let us overflow into the second allocation without overwriting something real.
*
* This should always be caught because there is an unconditional unmapped
* page after vmap allocations.
*/
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
char *one, *two;
one = vzalloc(PAGE_SIZE);
OPTIMIZER_HIDE_VAR(one);
two = vzalloc(PAGE_SIZE);
pr_info("Attempting vmalloc linear overflow ...\n");
memset(one, 0xAA, PAGE_SIZE + __offset);
vfree(two);
vfree(one);
}
/*
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
*
* This should get caught by either memory tagging, KASan, or by using
* CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
if (!data)
return;
pr_info("Attempting slab linear overflow ...\n");
OPTIMIZER_HIDE_VAR(data);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
static void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
if (!base)
return;
pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
pr_info("Attempting bad write to freed memory at %p\n",
&base[offset]);
kfree(base);
base[offset] = 0x0abcdef0;
/* Attempt to notice the overwrite. */
again = kmalloc(len, GFP_KERNEL);
kfree(again);
if (again != base)
pr_info("Hmm, didn't get the same memory range.\n");
}
static void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
/*
	 * The slub allocator will use either the first word or
* the middle of the allocation to store the free pointer,
* depending on configurations. Store in the second word to
* avoid running into the freelist.
*/
size_t offset = sizeof(*base);
base = kmalloc(len, GFP_KERNEL);
if (!base) {
pr_info("Unable to allocate base memory.\n");
return;
}
val = kmalloc(len, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
kfree(base);
return;
}
*val = 0x12345678;
base[offset] = *val;
pr_info("Value in memory before free: %x\n", base[offset]);
kfree(base);
pr_info("Attempting bad read from freed memory\n");
saw = base[offset];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
} else {
pr_err("FAIL: Memory was not poisoned!\n");
pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
}
kfree(val);
}
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
pr_info("Writing to the buddy page before free\n");
memset((void *)p, 0x3, PAGE_SIZE);
free_page(p);
schedule();
pr_info("Attempting bad write to the buddy page after free\n");
memset((void *)p, 0x78, PAGE_SIZE);
/* Attempt to notice the overwrite. */
p = __get_free_page(GFP_KERNEL);
free_page(p);
schedule();
}
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
int *base;
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
val = kmalloc(1024, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
free_page(p);
return;
}
base = (int *)p;
*val = 0x12345678;
base[0] = *val;
pr_info("Value in memory before free: %x\n", base[0]);
free_page(p);
pr_info("Attempting to read from freed memory\n");
saw = base[0];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
} else {
pr_err("FAIL: Buddy page was not poisoned!\n");
pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
}
kfree(val);
}
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
first = kmalloc(512, GFP_KERNEL);
if (!first) {
pr_info("Unable to allocate 512 bytes the first time.\n");
return;
}
memset(first, 0xAB, 512);
kfree(first);
val = kmalloc(512, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate 512 bytes the second time.\n");
return;
}
if (val != first) {
pr_warn("Reallocation missed clobbered memory.\n");
}
if (memchr(val, 0xAB, 512) == NULL) {
pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
} else {
pr_err("FAIL: Slab was not initialized\n");
pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
}
kfree(val);
}
static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
first = (u8 *)__get_free_page(GFP_KERNEL);
if (!first) {
pr_info("Unable to allocate first free page\n");
return;
}
memset(first, 0xAB, PAGE_SIZE);
free_page((unsigned long)first);
val = (u8 *)__get_free_page(GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate second free page\n");
return;
}
if (val != first) {
pr_warn("Reallocation missed clobbered memory.\n");
}
if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
} else {
pr_err("FAIL: Slab was not initialized\n");
pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
}
free_page((unsigned long)val);
}
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
int *val;
val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate double_free_cache memory.\n");
return;
}
/* Just make sure we got real memory. */
*val = 0x12345678;
pr_info("Attempting double slab free ...\n");
kmem_cache_free(double_free_cache, val);
kmem_cache_free(double_free_cache, val);
}
static void lkdtm_SLAB_FREE_CROSS(void)
{
int *val;
val = kmem_cache_alloc(a_cache, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate a_cache memory.\n");
return;
}
/* Just make sure we got real memory. */
*val = 0x12345679;
pr_info("Attempting cross-cache slab free ...\n");
kmem_cache_free(b_cache, val);
}
static void lkdtm_SLAB_FREE_PAGE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
pr_info("Attempting non-Slab slab free ...\n");
kmem_cache_free(NULL, (void *)p);
free_page(p);
}
/*
* We have constructors to keep the caches distinctly separated without
* needing to boot with "slab_nomerge".
*/
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }
void __init lkdtm_heap_init(void)
{
double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
64, 0, 0, ctor_double_free);
a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
void __exit lkdtm_heap_exit(void)
{
kmem_cache_destroy(double_free_cache);
kmem_cache_destroy(a_cache);
kmem_cache_destroy(b_cache);
}
static struct crashtype crashtypes[] = {
CRASHTYPE(SLAB_LINEAR_OVERFLOW),
CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
CRASHTYPE(SLAB_INIT_ON_ALLOC),
CRASHTYPE(BUDDY_INIT_ON_ALLOC),
CRASHTYPE(SLAB_FREE_DOUBLE),
CRASHTYPE(SLAB_FREE_CROSS),
CRASHTYPE(SLAB_FREE_PAGE),
};
struct crashtype_category heap_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/heap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to refcount bugs (e.g. overflow,
* underflow, reaching zero untested, etc).
*/
#include "lkdtm.h"
#include <linux/refcount.h>
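/*
 * Unlike a plain atomic_t, refcount_t does not wrap on overflow or underflow:
 * the implementation typically pins the counter at REFCOUNT_SATURATED (and
 * may WARN), which is exactly what the checks below look for.
 */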
static void overflow_check(refcount_t *ref)
{
switch (refcount_read(ref)) {
case REFCOUNT_SATURATED:
pr_info("Overflow detected: saturated\n");
break;
case REFCOUNT_MAX:
pr_warn("Overflow detected: unsafely reset to max\n");
break;
default:
pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
}
}
/*
* A refcount_inc() above the maximum value of the refcount implementation,
* should at least saturate, and at most also WARN.
*/
static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
pr_info("attempting good refcount_inc() without overflow\n");
refcount_dec(&over);
refcount_inc(&over);
pr_info("attempting bad refcount_inc() overflow\n");
refcount_inc(&over);
refcount_inc(&over);
overflow_check(&over);
}
/* refcount_add() should behave just like refcount_inc() above. */
static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
pr_info("attempting good refcount_add() without overflow\n");
refcount_dec(&over);
refcount_dec(&over);
refcount_dec(&over);
refcount_dec(&over);
refcount_add(4, &over);
pr_info("attempting bad refcount_add() overflow\n");
refcount_add(4, &over);
overflow_check(&over);
}
/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
pr_info("attempting bad refcount_inc_not_zero() overflow\n");
if (!refcount_inc_not_zero(&over))
pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
overflow_check(&over);
}
/* refcount_add_not_zero() should behave just like refcount_inc() above. */
static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
pr_info("attempting bad refcount_add_not_zero() overflow\n");
if (!refcount_add_not_zero(6, &over))
pr_warn("Weird: refcount_add_not_zero() reported zero\n");
overflow_check(&over);
}
static void check_zero(refcount_t *ref)
{
switch (refcount_read(ref)) {
case REFCOUNT_SATURATED:
pr_info("Zero detected: saturated\n");
break;
case REFCOUNT_MAX:
pr_warn("Zero detected: unsafely reset to max\n");
break;
case 0:
pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
break;
default:
pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
}
}
/*
 * A refcount_dec(), as opposed to a refcount_dec_and_test(), should, when
 * it hits zero, either saturate (when inc-from-zero isn't protected) or
 * stay at zero (when inc-from-zero is protected), and should WARN in both
 * cases.
*/
static void lkdtm_REFCOUNT_DEC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(2);
pr_info("attempting good refcount_dec()\n");
refcount_dec(&zero);
pr_info("attempting bad refcount_dec() to zero\n");
refcount_dec(&zero);
check_zero(&zero);
}
static void check_negative(refcount_t *ref, int start)
{
/*
* refcount_t refuses to move a refcount at all on an
* over-sub, so we have to track our starting position instead of
* looking only at zero-pinning.
*/
if (refcount_read(ref) == start) {
pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
start);
return;
}
switch (refcount_read(ref)) {
case REFCOUNT_SATURATED:
pr_info("Negative detected: saturated\n");
break;
case REFCOUNT_MAX:
pr_warn("Negative detected: unsafely reset to max\n");
break;
default:
pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
}
}
/* A refcount_dec() going negative should saturate and may WARN. */
static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
pr_info("attempting bad refcount_dec() below zero\n");
refcount_dec(&neg);
check_negative(&neg, 0);
}
/*
* A refcount_dec_and_test() should act like refcount_dec() above when
* going negative.
*/
static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
pr_info("attempting bad refcount_dec_and_test() below zero\n");
if (refcount_dec_and_test(&neg))
pr_warn("Weird: refcount_dec_and_test() reported zero\n");
check_negative(&neg, 0);
}
/*
* A refcount_sub_and_test() should act like refcount_dec_and_test()
* above when going negative.
*/
static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(3);
pr_info("attempting bad refcount_sub_and_test() below zero\n");
if (refcount_sub_and_test(5, &neg))
pr_warn("Weird: refcount_sub_and_test() reported zero\n");
check_negative(&neg, 3);
}
static void check_from_zero(refcount_t *ref)
{
switch (refcount_read(ref)) {
case 0:
pr_info("Zero detected: stayed at zero\n");
break;
case REFCOUNT_SATURATED:
pr_info("Zero detected: saturated\n");
break;
case REFCOUNT_MAX:
pr_warn("Zero detected: unsafely reset to max\n");
break;
default:
pr_info("Fail: zero not detected, incremented to %d\n",
refcount_read(ref));
}
}
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
*/
static void lkdtm_REFCOUNT_INC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
pr_info("attempting safe refcount_inc_not_zero() from zero\n");
if (!refcount_inc_not_zero(&zero)) {
pr_info("Good: zero detected\n");
if (refcount_read(&zero) == 0)
pr_info("Correctly stayed at zero\n");
else
pr_err("Fail: refcount went past zero!\n");
} else {
pr_err("Fail: Zero not detected!?\n");
}
pr_info("attempting bad refcount_inc() from zero\n");
refcount_inc(&zero);
check_from_zero(&zero);
}
/*
* A refcount_add() should act like refcount_inc() above when starting
* at zero.
*/
static void lkdtm_REFCOUNT_ADD_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
pr_info("attempting safe refcount_add_not_zero() from zero\n");
if (!refcount_add_not_zero(3, &zero)) {
pr_info("Good: zero detected\n");
if (refcount_read(&zero) == 0)
pr_info("Correctly stayed at zero\n");
else
pr_err("Fail: refcount went past zero\n");
} else {
pr_err("Fail: Zero not detected!?\n");
}
pr_info("attempting bad refcount_add() from zero\n");
refcount_add(3, &zero);
check_from_zero(&zero);
}
static void check_saturated(refcount_t *ref)
{
switch (refcount_read(ref)) {
case REFCOUNT_SATURATED:
pr_info("Saturation detected: still saturated\n");
break;
case REFCOUNT_MAX:
pr_warn("Saturation detected: unsafely reset to max\n");
break;
default:
pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
}
}
/*
* A refcount_inc() from a saturated value should at most warn about
* being saturated already.
*/
static void lkdtm_REFCOUNT_INC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_inc() from saturated\n");
refcount_inc(&sat);
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_dec() from saturated\n");
refcount_dec(&sat);
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_dec() from saturated\n");
refcount_add(8, &sat);
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
if (!refcount_inc_not_zero(&sat))
pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_add_not_zero() from saturated\n");
if (!refcount_add_not_zero(7, &sat))
pr_warn("Weird: refcount_add_not_zero() reported zero\n");
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_dec_and_test() from saturated\n");
if (refcount_dec_and_test(&sat))
pr_warn("Weird: refcount_dec_and_test() reported zero\n");
check_saturated(&sat);
}
/* Should act like refcount_inc() above from saturated. */
static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
pr_info("attempting bad refcount_sub_and_test() from saturated\n");
if (refcount_sub_and_test(8, &sat))
pr_warn("Weird: refcount_sub_and_test() reported zero\n");
check_saturated(&sat);
}
/* Used to time the existing atomic_t when used for reference counting */
static void lkdtm_ATOMIC_TIMING(void)
{
unsigned int i;
atomic_t count = ATOMIC_INIT(1);
for (i = 0; i < INT_MAX - 1; i++)
atomic_inc(&count);
for (i = INT_MAX; i > 0; i--)
if (atomic_dec_and_test(&count))
break;
if (i != 1)
pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
else
pr_info("atomic timing: done\n");
}
/*
* This can be compared to ATOMIC_TIMING when implementing fast refcount
* protections. Looking at the number of CPU cycles tells the real story
* about performance. For example:
* cd /sys/kernel/debug/provoke-crash
* perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
*/
static void lkdtm_REFCOUNT_TIMING(void)
{
unsigned int i;
refcount_t count = REFCOUNT_INIT(1);
for (i = 0; i < INT_MAX - 1; i++)
refcount_inc(&count);
for (i = INT_MAX; i > 0; i--)
if (refcount_dec_and_test(&count))
break;
if (i != 1)
pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
else
pr_info("refcount timing: done\n");
}
static struct crashtype crashtypes[] = {
CRASHTYPE(REFCOUNT_INC_OVERFLOW),
CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
CRASHTYPE(REFCOUNT_DEC_ZERO),
CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
CRASHTYPE(REFCOUNT_INC_ZERO),
CRASHTYPE(REFCOUNT_ADD_ZERO),
CRASHTYPE(REFCOUNT_INC_SATURATED),
CRASHTYPE(REFCOUNT_DEC_SATURATED),
CRASHTYPE(REFCOUNT_ADD_SATURATED),
CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
CRASHTYPE(ATOMIC_TIMING),
CRASHTYPE(REFCOUNT_TIMING),
};
struct crashtype_category refcount_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/refcount.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests relating directly to Control Flow Integrity.
*/
#include "lkdtm.h"
#include <asm/page.h>
static int called_count;
/* Function taking one argument, without a return value. */
static noinline void lkdtm_increment_void(int *counter)
{
(*counter)++;
}
/* Function taking one argument, returning int. */
static noinline int lkdtm_increment_int(int *counter)
{
(*counter)++;
return *counter;
}
/* Don't allow the compiler to inline the calls. */
static noinline void lkdtm_indirect_call(void (*func)(int *))
{
func(&called_count);
}
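/*
 * With CONFIG_CFI_CLANG, each indirect call site checks the type of its
 * target, so calling lkdtm_increment_int() through a void (*)(int *)
 * pointer below is expected to trap rather than return.
 */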
/*
* This tries to call an indirect function with a mismatched prototype.
*/
static void lkdtm_CFI_FORWARD_PROTO(void)
{
/*
* Matches lkdtm_increment_void()'s prototype, but not
* lkdtm_increment_int()'s prototype.
*/
pr_info("Calling matched prototype ...\n");
lkdtm_indirect_call(lkdtm_increment_void);
pr_info("Calling mismatched prototype ...\n");
lkdtm_indirect_call((void *)lkdtm_increment_int);
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
}
/*
* This can stay local to LKDTM, as there should not be a production reason
* to disable PAC && SCS.
*/
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
# ifdef CONFIG_ARM64_BTI_KERNEL
# define __no_pac "branch-protection=bti"
# else
# ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET
# define __no_pac "branch-protection=none"
# else
# define __no_pac "sign-return-address=none"
# endif
# endif
# define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
#else
# define __no_ret_protection __noscs
#endif
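/*
 * OR the kernel's high address bits (PAGE_OFFSET) back into an address so
 * that any pointer-authentication bits in a signed return address are
 * masked out before comparing against a plain label address.
 */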
#define no_pac_addr(addr) \
((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
*ret_addr = (addr);
else
/* Check architecture, stack layout, or compiler behavior... */
pr_warn("Eek: return address mismatch! %px != %px\n",
*ret_addr, addr);
}
static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
*ret_addr = (addr);
else
/* Check architecture, stack layout, or compiler behavior... */
pr_warn("Eek: return address mismatch! %px != %px\n",
*ret_addr, addr);
}
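/*
 * set_return_addr() above is intentionally identical to
 * set_return_addr_unchecked(), except that it is built *with* the normal
 * return-address protections (PAC and/or shadow call stack), so the
 * redirected return should be caught when those are enabled.
 */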
static volatile int force_check;
static void lkdtm_CFI_BACKWARD(void)
{
/* Use calculated gotos to keep labels addressable. */
void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };
pr_info("Attempting unchecked stack return address redirection ...\n");
/* Always false */
if (force_check) {
/*
* Prepare to call with NULLs to avoid parameters being treated as
		 * constants in -O2.
*/
set_return_addr_unchecked(NULL, NULL);
set_return_addr(NULL, NULL);
if (force_check)
goto *labels[1];
if (force_check)
goto *labels[2];
if (force_check)
goto *labels[3];
if (force_check)
goto *labels[4];
return;
}
/*
* Use fallthrough switch case to keep basic block ordering between
* set_return_addr*() and the label after it.
*/
switch (force_check) {
case 0:
set_return_addr_unchecked(&&normal, &&redirected);
fallthrough;
case 1:
normal:
/* Always true */
if (!force_check) {
pr_err("FAIL: stack return address manipulation failed!\n");
/* If we can't redirect "normally", we can't test mitigations. */
return;
}
break;
default:
redirected:
pr_info("ok: redirected stack return address.\n");
break;
}
pr_info("Attempting checked stack return address redirection ...\n");
switch (force_check) {
case 0:
set_return_addr(&&check_normal, &&check_redirected);
fallthrough;
case 1:
check_normal:
/* Always true */
if (!force_check) {
pr_info("ok: control flow unchanged.\n");
return;
}
check_redirected:
pr_err("FAIL: stack return address was redirected!\n");
break;
}
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
return;
}
if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
pr_expected_config(CONFIG_SHADOW_CALL_STACK);
return;
}
pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
lkdtm_kernel_info,
"CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
}
static struct crashtype crashtypes[] = {
CRASHTYPE(CFI_FORWARD_PROTO),
CRASHTYPE(CFI_BACKWARD),
};
struct crashtype_category cfi_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/cfi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to copy_to_user() and copy_from_user()
* hardening.
*/
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
/*
* Many of the tests here end up using const sizes, but those would
* normally be ignored by hardened usercopy, so force the compiler
* into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
* and making sure "cache_size" isn't optimized into a const.
*/
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
static const unsigned char test_text[] = "This is a test.\n";
/*
* Instead of adding -Wno-return-local-addr, just pass the stack address
* through a function to obfuscate it from the compiler.
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(buf); i++) {
buf[i] = value & 0xff;
}
/*
* Put the target buffer in the middle of stack allocation
* so that we don't step on future stack users regardless
* of stack growth direction.
*/
return trick_compiler(&buf[(128/2)-32]);
}
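/*
 * The pointer returned above refers to a frame that is dead once
 * do_usercopy_stack_callee() returns, so copies through it land outside
 * the caller's stack frame -- exactly what the STACK_FRAME tests need.
 */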
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
unsigned long user_addr;
unsigned char good_stack[32];
unsigned char *bad_stack;
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(good_stack); i++)
good_stack[i] = test_text[i % sizeof(test_text)];
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
bad_stack -= sizeof(unsigned long);
}
#ifdef ARCH_HAS_CURRENT_STACK_POINTER
pr_info("stack : %px\n", (void *)current_stack_pointer);
#endif
pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
/*
* There isn't a safe way to not be protected by usercopy
* if we're going to write to another thread's stack.
*/
if (!bad_frame)
goto free_user;
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
/*
* This checks for whole-object size validation with hardened usercopy,
* with or without usercopy whitelisting.
*/
static void do_usercopy_slab_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
void __user *test_user_addr;
void *test_kern_addr;
size_t size = unconst + 1024;
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
if (!one || !two) {
pr_warn("Failed to allocate kernel memory\n");
goto free_kernel;
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_kernel;
}
memset(one, 'A', size);
memset(two, 'B', size);
test_user_addr = (void __user *)(user_addr + 16);
test_kern_addr = one + 16;
if (to_user) {
pr_info("attempting good copy_to_user of correct size\n");
if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of too large size\n");
if (copy_to_user(test_user_addr, test_kern_addr, size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user of correct size\n");
if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of too large size\n");
if (copy_from_user(test_kern_addr, test_user_addr, size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
pr_err("FAIL: bad usercopy not detected!\n");
pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
kfree(one);
kfree(two);
}
/*
* This checks for the specific whitelist window within an object. If this
* test passes, then do_usercopy_slab_size() tests will pass too.
*/
static void do_usercopy_slab_whitelist(bool to_user)
{
unsigned long user_alloc;
unsigned char *buf = NULL;
unsigned char __user *user_addr;
size_t offset, size;
/* Make sure cache was prepared. */
if (!whitelist_cache) {
pr_warn("Failed to allocate kernel cache\n");
return;
}
/*
* Allocate a buffer with a whitelisted window in the buffer.
*/
buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
if (!buf) {
pr_warn("Failed to allocate buffer from whitelist cache\n");
goto free_alloc;
}
/* Allocate user memory we'll poke at. */
user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_alloc >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_alloc;
}
user_addr = (void __user *)user_alloc;
memset(buf, 'B', cache_size);
/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
offset = (cache_size / 4) + unconst;
size = (cache_size / 16) + unconst;
if (to_user) {
pr_info("attempting good copy_to_user inside whitelist\n");
if (copy_to_user(user_addr, buf + offset, size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user outside whitelist\n");
if (copy_to_user(user_addr, buf + offset - 1, size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user inside whitelist\n");
if (copy_from_user(buf + offset, user_addr, size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user outside whitelist\n");
if (copy_from_user(buf + offset - 1, user_addr, size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
pr_err("FAIL: bad usercopy not detected!\n");
pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
if (buf)
kmem_cache_free(whitelist_cache, buf);
}
/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
do_usercopy_slab_size(true);
}
static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
do_usercopy_slab_size(false);
}
static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
do_usercopy_slab_whitelist(true);
}
static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
do_usercopy_slab_whitelist(false);
}
static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
static void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
pr_info("attempting good copy_to_user from kernel rodata: %px\n",
test_text);
if (copy_to_user((void __user *)user_addr, test_text,
unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user from kernel text: %px\n",
vm_mmap);
if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
pr_err("FAIL: bad copy_to_user() not detected!\n");
pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
/*
* This expects "kaddr" to point to a PAGE_SIZE allocation, which means
* a more complete test that would include copy_from_user() would risk
* memory corruption. Just test copy_to_user() here, as that exercises
* almost exactly the same code paths.
*/
static void do_usercopy_page_span(const char *name, void *kaddr)
{
unsigned long uaddr;
uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (uaddr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
/* Initialize contents. */
memset(kaddr, 0xAA, PAGE_SIZE);
/* Bump the kaddr forward to detect a page-spanning overflow. */
kaddr += PAGE_SIZE / 2;
pr_info("attempting good copy_to_user() from kernel %s: %px\n",
name, kaddr);
if (copy_to_user((void __user *)uaddr, kaddr,
unconst + (PAGE_SIZE / 2))) {
pr_err("copy_to_user() failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
name, kaddr);
if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
goto free_user;
}
pr_err("FAIL: bad copy_to_user() not detected!\n");
pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(uaddr, PAGE_SIZE);
}
static void lkdtm_USERCOPY_VMALLOC(void)
{
void *addr;
addr = vmalloc(PAGE_SIZE);
if (!addr) {
pr_err("vmalloc() failed!?\n");
return;
}
do_usercopy_page_span("vmalloc", addr);
vfree(addr);
}
static void lkdtm_USERCOPY_FOLIO(void)
{
struct folio *folio;
void *addr;
/*
* FIXME: Folio checking currently misses 0-order allocations, so
* allocate and bump forward to the last page.
*/
folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
if (!folio) {
pr_err("folio_alloc() failed!?\n");
return;
}
addr = folio_address(folio);
if (addr)
do_usercopy_page_span("folio", addr + PAGE_SIZE);
else
pr_err("folio_address() failed?!\n");
folio_put(folio);
}
void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache with a usercopy whitelist window inside the object. */
whitelist_cache =
kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
0, 0,
cache_size / 4,
cache_size / 16,
NULL);
}
void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(whitelist_cache);
}
static struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
CRASHTYPE(USERCOPY_STACK_FRAME_TO),
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_VMALLOC),
CRASHTYPE(USERCOPY_FOLIO),
CRASHTYPE(USERCOPY_KERNEL),
};
struct crashtype_category usercopy_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/usercopy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This includes functions that are meant to live entirely in .rodata
* (via objcopy tricks), to validate the non-executability of .rodata.
*/
#include "lkdtm.h"
void noinstr lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}
| linux-master | drivers/misc/lkdtm/rodata.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux Kernel Dump Test Module for testing kernel crashes conditions:
* induces system failures at predefined crashpoints and under predefined
* operational conditions in order to evaluate the reliability of kernel
* sanity checking and crash dumps obtained using different dumping
* solutions.
*
* Copyright (C) IBM Corporation, 2006
*
* Author: Ankita Garg <[email protected]>
*
* It is adapted from the Linux Kernel Dump Test Tool by
* Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
*
* Debugfs support added by Simon Kagstrom <[email protected]>
*
* See Documentation/fault-injection/provoke-crashes.rst for instructions
*/
#include "lkdtm.h"
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/utsname.h>
#define DEFAULT_COUNT 10
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off);
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
#ifdef CONFIG_KPROBES
static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
# define CRASHPOINT_KPROBE(_symbol) \
.kprobe = { \
.symbol_name = (_symbol), \
.pre_handler = lkdtm_kprobe_handler, \
},
# define CRASHPOINT_WRITE(_symbol) \
(_symbol) ? lkdtm_debugfs_entry : direct_entry
#else
# define CRASHPOINT_KPROBE(_symbol)
# define CRASHPOINT_WRITE(_symbol) direct_entry
#endif
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
struct kprobe kprobe;
};
#define CRASHPOINT(_name, _symbol) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
.write = CRASHPOINT_WRITE(_symbol) \
}, \
CRASHPOINT_KPROBE(_symbol) \
}
/* Define the possible places where we can trigger a crash point. */
static struct crashpoint crashpoints[] = {
CRASHPOINT("DIRECT", NULL),
#ifdef CONFIG_KPROBES
CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
CRASHPOINT("FS_SUBMIT_BH", "submit_bh"),
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
CRASHPOINT("TIMERADD", "hrtimer_start"),
CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
#endif
};
/* List of possible types for crashes that can be triggered. */
static const struct crashtype_category *crashtype_categories[] = {
&bugs_crashtypes,
&heap_crashtypes,
&perms_crashtypes,
&refcount_crashtypes,
&usercopy_crashtypes,
&stackleak_crashtypes,
&cfi_crashtypes,
&fortify_crashtypes,
#ifdef CONFIG_PPC_64S_HASH_MMU
&powerpc_crashtypes,
#endif
};
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
static struct crashpoint *lkdtm_crashpoint;
static const struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
static char* cpoint_name;
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
static char* cpoint_type;
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
static int cpoint_count = DEFAULT_COUNT;
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
/*
* For test debug reporting when CI systems provide terse summaries.
* TODO: Remove this once reasonable reporting exists in most CI systems:
* https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com
*/
char *lkdtm_kernel_info;
/* Return the matching crashtype or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
{
int cat, idx;
for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
struct crashtype *crashtype;
crashtype = &crashtype_categories[cat]->crashtypes[idx];
if (!strcmp(name, crashtype->name))
return crashtype;
}
}
return NULL;
}
/*
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
static noinline void lkdtm_do_action(const struct crashtype *crashtype)
{
if (WARN_ON(!crashtype || !crashtype->func))
return;
crashtype->func();
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
const struct crashtype *crashtype)
{
int ret;
/* If this doesn't have a symbol, just call immediately. */
if (!crashpoint->kprobe.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
}
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
lkdtm_kprobe = &crashpoint->kprobe;
ret = register_kprobe(lkdtm_kprobe);
if (ret < 0) {
pr_info("Couldn't register kprobe %s\n",
crashpoint->kprobe.symbol_name);
lkdtm_kprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
return ret;
}
#ifdef CONFIG_KPROBES
/* Global crash counter and spinlock. */
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
/* Called by kprobe entry points. */
static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
{
unsigned long flags;
bool do_it = false;
if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
return 0;
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
if (crash_count == 0) {
do_it = true;
crash_count = cpoint_count;
}
spin_unlock_irqrestore(&crash_count_lock, flags);
if (do_it)
lkdtm_do_action(lkdtm_crashtype);
return 0;
}
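/*
 * With the default cpoint_count of 10, the selected crash action fires on
 * every 10th hit of the probed symbol, after which the counter is
 * re-armed.
 */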
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
const struct crashtype *crashtype = NULL;
char *buf;
int err;
if (count >= PAGE_SIZE)
return -EINVAL;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, user_buf, count)) {
free_page((unsigned long) buf);
return -EFAULT;
}
	/* NULL-terminate and strip the trailing newline */
buf[count] = '\0';
strim(buf);
crashtype = find_crashtype(buf);
free_page((unsigned long)buf);
if (!crashtype)
return -EINVAL;
err = lkdtm_register_cpoint(crashpoint, crashtype);
if (err < 0)
return err;
*off += count;
return count;
}
#endif
/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off)
{
int n, cat, idx;
ssize_t out;
char *buf;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
struct crashtype *crashtype;
crashtype = &crashtype_categories[cat]->crashtypes[idx];
n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtype->name);
}
}
buf[n] = '\0';
out = simple_read_from_buffer(user_buf, count, off,
buf, n);
free_page((unsigned long) buf);
return out;
}
static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
return 0;
}
/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
const struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
return -EINVAL;
if (count < 1)
return -EINVAL;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, user_buf, count)) {
free_page((unsigned long) buf);
return -EFAULT;
}
	/* NULL-terminate and strip the trailing newline */
buf[count] = '\0';
strim(buf);
crashtype = find_crashtype(buf);
free_page((unsigned long) buf);
if (!crashtype)
return -EINVAL;
pr_info("Performing direct entry %s\n", crashtype->name);
lkdtm_do_action(crashtype);
*off += count;
return count;
}
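/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo WRITE_RO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * triggers the named crashtype immediately, without arming a kprobe.
 */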
#ifndef MODULE
/*
* To avoid needing to export parse_args(), just don't use this code
* when LKDTM is built as a module.
*/
struct check_cmdline_args {
const char *param;
int value;
};
static int lkdtm_parse_one(char *param, char *val,
const char *unused, void *arg)
{
struct check_cmdline_args *args = arg;
/* short circuit if we already found a value. */
if (args->value != -ESRCH)
return 0;
if (strncmp(param, args->param, strlen(args->param)) == 0) {
bool bool_result;
int ret;
ret = kstrtobool(val, &bool_result);
if (ret == 0)
args->value = bool_result;
}
return 0;
}
int lkdtm_check_bool_cmdline(const char *param)
{
char *command_line;
struct check_cmdline_args args = {
.param = param,
.value = -ESRCH,
};
command_line = kstrdup(saved_command_line, GFP_KERNEL);
if (!command_line)
return -ENOMEM;
parse_args("Setting sysctl args", command_line,
NULL, 0, -1, -1, &args, lkdtm_parse_one);
kfree(command_line);
return args.value;
}
#endif
static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
const struct crashtype *crashtype = NULL;
int ret;
int i;
/* Neither or both of these need to be set */
if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
pr_err("Need both cpoint_type and cpoint_name or neither\n");
return -EINVAL;
}
if (cpoint_type) {
crashtype = find_crashtype(cpoint_type);
if (!crashtype) {
pr_err("Unknown crashtype '%s'\n", cpoint_type);
return -EINVAL;
}
}
if (cpoint_name) {
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
if (!strcmp(cpoint_name, crashpoints[i].name))
crashpoint = &crashpoints[i];
}
/* Refuse unknown crashpoints. */
if (!crashpoint) {
pr_err("Invalid crashpoint %s\n", cpoint_name);
return -EINVAL;
}
}
#ifdef CONFIG_KPROBES
/* Set crash count. */
crash_count = cpoint_count;
#endif
/* Common initialization. */
lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)",
init_uts_ns.name.release,
init_uts_ns.name.machine);
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
lkdtm_usercopy_init();
lkdtm_heap_init();
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
/* Install debugfs trigger files. */
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
struct crashpoint *cur = &crashpoints[i];
debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, cur,
&cur->fops);
}
/* Install crashpoint if one was selected. */
if (crashpoint) {
ret = lkdtm_register_cpoint(crashpoint, crashtype);
if (ret < 0) {
pr_info("Invalid crashpoint %s\n", crashpoint->name);
goto out_err;
}
pr_info("Crash point %s of type %s registered\n",
crashpoint->name, cpoint_type);
} else {
pr_info("No crash points registered, enable through debugfs\n");
}
return 0;
out_err:
debugfs_remove_recursive(lkdtm_debugfs_root);
return ret;
}
static void __exit lkdtm_module_exit(void)
{
debugfs_remove_recursive(lkdtm_debugfs_root);
/* Handle test-specific clean-up. */
lkdtm_heap_exit();
lkdtm_usercopy_exit();
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
kfree(lkdtm_kernel_info);
pr_info("Crash point unregistered\n");
}
module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kernel crash testing module");
| linux-master | drivers/misc/lkdtm/core.c |
// SPDX-License-Identifier: GPL-2.0
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/mmu.h>
/* Inserts new slb entries */
static void insert_slb_entry(unsigned long p, int ssize, int page_size)
{
unsigned long flags;
flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
preempt_disable();
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(p, ssize, flags)),
"r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
: "memory");
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(p, ssize, flags)),
"r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
: "memory");
preempt_enable();
}
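/*
 * The two slbmte instructions above install two SLB entries that both
 * translate the same effective address, which is the "multihit" error
 * condition the tests below expect the machine check handler to recover
 * from.
 */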
/* Inject slb multihit on vmalloc-ed address i.e 0xD00... */
static int inject_vmalloc_slb_multihit(void)
{
char *p;
p = vmalloc(PAGE_SIZE);
if (!p)
return -ENOMEM;
insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
/*
	 * This triggers an exception. If handled correctly, we must recover
	 * from this error.
*/
p[0] = '!';
vfree(p);
return 0;
}
/* Inject slb multihit on kmalloc-ed address i.e 0xC00... */
static int inject_kmalloc_slb_multihit(void)
{
char *p;
p = kmalloc(2048, GFP_KERNEL);
if (!p)
return -ENOMEM;
insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
/*
	 * This triggers an exception. If handled correctly, we must recover
	 * from this error.
*/
p[0] = '!';
kfree(p);
return 0;
}
/*
 * A few initial SLB entries are bolted. Add a test to inject a
 * multihit on bolted entry 0.
*/
static void insert_dup_slb_entry_0(void)
{
unsigned long test_address = PAGE_OFFSET, *test_ptr;
unsigned long esid, vsid;
unsigned long i = 0;
test_ptr = (unsigned long *)test_address;
preempt_disable();
asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
	/* for i != 0 we would need to mask out the old entry number */
asm volatile("slbmte %0,%1" :
: "r" (vsid),
"r" (esid | SLB_NUM_BOLTED)
: "memory");
asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
	/* for i != 0 we would need to mask out the old entry number */
asm volatile("slbmte %0,%1" :
: "r" (vsid),
"r" (esid | (SLB_NUM_BOLTED + 1))
: "memory");
pr_info("%s accessing test address 0x%lx: 0x%lx\n",
__func__, test_address, *test_ptr);
preempt_enable();
}
static void lkdtm_PPC_SLB_MULTIHIT(void)
{
if (!radix_enabled()) {
pr_info("Injecting SLB multihit errors\n");
/*
		 * These need not be separate tests; they do pretty much the
		 * same thing. In any case we must recover from the errors
		 * introduced by these functions; the machine would not
		 * survive these tests if the errors were not handled.
*/
inject_vmalloc_slb_multihit();
inject_kmalloc_slb_multihit();
insert_dup_slb_entry_0();
pr_info("Recovered from SLB multihit errors\n");
} else {
pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
}
}
static struct crashtype crashtypes[] = {
CRASHTYPE(PPC_SLB_MULTIHIT),
};
struct crashtype_category powerpc_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/powerpc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This code tests that the current task stack is properly erased (filled
* with STACKLEAK_POISON).
*
* Authors:
* Alexander Popov <[email protected]>
* Tycho Andersen <[email protected]>
*/
#include "lkdtm.h"
#include <linux/stackleak.h>
#if defined(CONFIG_GCC_PLUGIN_STACKLEAK)
/*
* Check that stackleak tracks the lowest stack pointer and erases the stack
* below this as expected.
*
* To prevent the lowest stack pointer changing during the test, IRQs are
* masked and instrumentation of this function is disabled. We assume that the
* compiler will create a fixed-size stack frame for this function.
*
* Any non-inlined function may make further use of the stack, altering the
* lowest stack pointer and/or clobbering poison values. To avoid spurious
* failures we must avoid printing until the end of the test or have already
* encountered a failure condition.
*/
static void noinstr check_stackleak_irqoff(void)
{
const unsigned long task_stack_base = (unsigned long)task_stack_page(current);
const unsigned long task_stack_low = stackleak_task_low_bound(current);
const unsigned long task_stack_high = stackleak_task_high_bound(current);
const unsigned long current_sp = current_stack_pointer;
const unsigned long lowest_sp = current->lowest_stack;
unsigned long untracked_high;
unsigned long poison_high, poison_low;
bool test_failed = false;
/*
* Check that the current and lowest recorded stack pointer values fall
* within the expected task stack boundaries. These tests should never
* fail unless the boundaries are incorrect or we're clobbering the
	 * STACK_END_MAGIC, and in either case something is seriously wrong.
*/
if (current_sp < task_stack_low || current_sp >= task_stack_high) {
instrumentation_begin();
pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
current_sp, task_stack_low, task_stack_high - 1);
test_failed = true;
goto out;
}
if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
instrumentation_begin();
pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
lowest_sp, task_stack_low, task_stack_high - 1);
test_failed = true;
goto out;
}
/*
* Depending on what has run prior to this test, the lowest recorded
* stack pointer could be above or below the current stack pointer.
* Start from the lowest of the two.
*
* Poison values are naturally-aligned unsigned longs. As the current
* stack pointer might not be sufficiently aligned, we must align
* downwards to find the lowest known stack pointer value. This is the
* high boundary for a portion of the stack which may have been used
* without being tracked, and has to be scanned for poison.
*/
untracked_high = min(current_sp, lowest_sp);
untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long));
/*
* Find the top of the poison in the same way as the erasing code.
*/
poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high);
/*
* Check whether the poisoned portion of the stack (if any) consists
* entirely of poison. This verifies the entries that
* stackleak_find_top_of_poison() should have checked.
*/
poison_low = poison_high;
while (poison_low > task_stack_low) {
poison_low -= sizeof(unsigned long);
if (*(unsigned long *)poison_low == STACKLEAK_POISON)
continue;
instrumentation_begin();
pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
poison_high - poison_low, *(unsigned long *)poison_low);
test_failed = true;
goto out;
}
instrumentation_begin();
pr_info("stackleak stack usage:\n"
" high offset: %lu bytes\n"
" current: %lu bytes\n"
" lowest: %lu bytes\n"
" tracked: %lu bytes\n"
" untracked: %lu bytes\n"
" poisoned: %lu bytes\n"
" low offset: %lu bytes\n",
task_stack_base + THREAD_SIZE - task_stack_high,
task_stack_high - current_sp,
task_stack_high - lowest_sp,
task_stack_high - untracked_high,
untracked_high - poison_high,
poison_high - task_stack_low,
task_stack_low - task_stack_base);
out:
if (test_failed) {
pr_err("FAIL: the thread stack is NOT properly erased!\n");
} else {
pr_info("OK: the rest of the thread stack is properly erased\n");
}
instrumentation_end();
}
static void lkdtm_STACKLEAK_ERASING(void)
{
unsigned long flags;
local_irq_save(flags);
check_stackleak_irqoff();
local_irq_restore(flags);
}
#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
static void lkdtm_STACKLEAK_ERASING(void)
{
if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
} else {
pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n");
}
}
#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
static struct crashtype crashtypes[] = {
CRASHTYPE(STACKLEAK_ERASING),
};
struct crashtype_category stackleak_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/stackleak.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to validating kernel memory
* permissions: non-executable regions, non-writable regions, and
* even non-readable regions.
*/
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE true
#define CODE_AS_IS false
/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64
/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];
/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;
/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
static noinline void do_nothing(void)
{
return;
}
/* Must immediately follow do_nothing for size calculations to work out. */
static noinline void do_overwritten(void)
{
pr_info("do_overwritten wasn't overwritten!\n");
return;
}
static noinline void do_almost_nothing(void)
{
pr_info("do_nothing was hijacked!\n");
}
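/*
 * On ABIs that use function descriptors (e.g. ppc64 ELFv1), a function
 * pointer points at a descriptor rather than at code, so to "execute" an
 * arbitrary buffer we build a descriptor whose entry address points at
 * that buffer.
 */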
static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
{
if (!have_function_descriptors())
return dst;
memcpy(fdesc, do_nothing, sizeof(*fdesc));
fdesc->addr = (unsigned long)dst;
barrier();
return fdesc;
}
static noinline void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
void *do_nothing_text = dereference_function_descriptor(do_nothing);
pr_info("attempting ok execution at %px\n", do_nothing_text);
do_nothing();
if (write == CODE_WRITE) {
memcpy(dst, do_nothing_text, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
pr_info("attempting bad execution at %px\n", dst);
func = setup_function_descriptor(&fdesc, dst);
func();
pr_err("FAIL: func returned\n");
}
static void execute_user_location(void *dst)
{
int copied;
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void);
func_desc_t fdesc;
void *do_nothing_text = dereference_function_descriptor(do_nothing);
pr_info("attempting ok execution at %px\n", do_nothing_text);
do_nothing();
copied = access_process_vm(current, (unsigned long)dst, do_nothing_text,
EXEC_SIZE, FOLL_WRITE);
if (copied < EXEC_SIZE)
return;
pr_info("attempting bad execution at %px\n", dst);
func = setup_function_descriptor(&fdesc, dst);
func();
pr_err("FAIL: func returned\n");
}
static void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test and make volatile. */
volatile unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
pr_err("FAIL: survived bad write\n");
}
static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
volatile unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
* is considered a "success", a failure is to just skip the
* real test.
*/
if ((*ptr & 0xAA) != 0xAA) {
pr_info("%p was NOT written during init!?\n", ptr);
return;
}
pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
pr_err("FAIL: survived bad write\n");
}
static void lkdtm_WRITE_KERN(void)
{
size_t size;
volatile unsigned char *ptr;
size = (unsigned long)dereference_function_descriptor(do_overwritten) -
(unsigned long)dereference_function_descriptor(do_nothing);
ptr = dereference_function_descriptor(do_overwritten);
pr_info("attempting bad %zu byte write at %px\n", size, ptr);
memcpy((void *)ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
pr_err("FAIL: survived bad write\n");
do_overwritten();
}
static void lkdtm_WRITE_OPD(void)
{
size_t size = sizeof(func_desc_t);
void (*func)(void) = do_nothing;
if (!have_function_descriptors()) {
pr_info("XFAIL: Platform doesn't use function descriptors.\n");
return;
}
pr_info("attempting bad %zu bytes write at %px\n", size, do_nothing);
memcpy(do_nothing, do_almost_nothing, size);
pr_err("FAIL: survived bad write\n");
asm("" : "=m"(func));
func();
}
static void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
static void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
static void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
static void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
static void lkdtm_EXEC_RODATA(void)
{
execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
CODE_AS_IS);
}
static void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
execute_user_location((void *)user_addr);
vm_munmap(user_addr, PAGE_SIZE);
}
static void lkdtm_EXEC_NULL(void)
{
execute_location(NULL, CODE_AS_IS);
}
static void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
pr_warn("copy_to_user failed\n");
vm_munmap(user_addr, PAGE_SIZE);
return;
}
ptr = (unsigned long *)user_addr;
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
pr_err("FAIL: survived bad write\n");
vm_munmap(user_addr, PAGE_SIZE);
}
static void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
volatile unsigned long *ptr = (unsigned long *)NULL;
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
pr_err("FAIL: survived bad write\n");
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
static struct crashtype crashtypes[] = {
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
CRASHTYPE(WRITE_OPD),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
CRASHTYPE(EXEC_NULL),
CRASHTYPE(ACCESS_USERSPACE),
CRASHTYPE(ACCESS_NULL),
};
struct crashtype_category perms_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/perms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to logic bugs (e.g. bad dereferences,
* bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
* lockups) along with other things that don't fit well into existing LKDTM
* test source files.
*/
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
struct lkdtm_list {
struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
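/*
 * Two times THREAD_SIZE worth of frames: enough recursion to run off the
 * end of the stack regardless of how much of it is already in use.
 */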
static int recur_count = REC_NUM_DEFAULT;
static DEFINE_SPINLOCK(lock_me_up);
/*
* Make sure compiler does not optimize this function or stack frame away:
* - function marked noinline
* - stack variables are marked volatile
* - stack variables are written (memset()) and read (buf[..] passed as arg)
* - function may have external effects (memzero_explicit())
* - no tail recursion possible
*/
static int noinline recursive_loop(int remaining)
{
volatile char buf[REC_STACK_SIZE];
volatile int ret;
memset((void *)buf, remaining & 0xFF, sizeof(buf));
if (!remaining)
ret = 0;
else
ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
memzero_explicit((void *)buf, sizeof(buf));
return ret;
}
/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
if (*recur_param < 0)
*recur_param = recur_count;
else
recur_count = *recur_param;
}
static void lkdtm_PANIC(void)
{
panic("dumptest");
}
static void lkdtm_BUG(void)
{
BUG();
}
static int warn_counter;
static void lkdtm_WARNING(void)
{
WARN_ON(++warn_counter);
}
static void lkdtm_WARNING_MESSAGE(void)
{
WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
static void lkdtm_EXCEPTION(void)
{
*((volatile int *) 0) = 0;
}
static void lkdtm_LOOP(void)
{
for (;;)
;
}
static void lkdtm_EXHAUST_STACK(void)
{
pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
recursive_loop(recur_count);
pr_info("FAIL: survived without exhausting stack?!\n");
}
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
memset(stack, '\xff', 64);
}
/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
pr_info("Corrupting stack containing char array ...\n");
__lkdtm_CORRUPT_STACK((void *)&data);
}
/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
union {
unsigned short shorts[4];
unsigned long *ptr;
} data __aligned(sizeof(void *));
pr_info("Corrupting stack containing union ...\n");
__lkdtm_CORRUPT_STACK((void *)&data);
}
static pid_t stack_pid;
static unsigned long stack_addr;
static void lkdtm_REPORT_STACK(void)
{
volatile uintptr_t magic;
pid_t pid = task_pid_nr(current);
if (pid != stack_pid) {
pr_info("Starting stack offset tracking for pid %d\n", pid);
stack_pid = pid;
stack_addr = (uintptr_t)&magic;
}
pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;
static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
int i = 0;
pid_t pid = task_pid_nr(current);
unsigned long *canary = (unsigned long *)stack;
unsigned long current_offset = 0, init_offset = 0;
/* Do our best to find the canary in a 16 word window ... */
for (i = 1; i < 16; i++) {
canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
if (*canary == current->stack_canary)
current_offset = i;
if (*canary == init_task.stack_canary)
init_offset = i;
#endif
}
if (current_offset == 0) {
/*
* If the canary doesn't match what's in the task_struct,
* we're either using a global canary or the stack frame
* layout changed.
*/
if (init_offset != 0) {
pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
init_offset, pid);
} else {
pr_warn("FAIL: did not correctly locate stack canary :(\n");
pr_expected_config(CONFIG_STACKPROTECTOR);
}
return;
} else if (init_offset != 0) {
pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
}
canary = (unsigned long *)stack + current_offset;
if (stack_canary_pid == 0) {
stack_canary = *canary;
stack_canary_pid = pid;
stack_canary_offset = current_offset;
pr_info("Recorded stack canary for pid %d at offset %ld\n",
stack_canary_pid, stack_canary_offset);
} else if (pid == stack_canary_pid) {
pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
} else {
if (current_offset != stack_canary_offset) {
pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
stack_canary_offset, current_offset);
return;
}
if (*canary == stack_canary) {
pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
stack_canary_pid, pid, current_offset);
} else {
pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
stack_canary_pid, pid, current_offset);
/* Reset the test. */
stack_canary_pid = 0;
}
}
}
static void lkdtm_REPORT_STACK_CANARY(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *)) = { };
__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
u32 val = 0x12345678;
p = (u32 *)(data + 1);
if (*p == 0)
val = 0x87654321;
*p = val;
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
static void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
static void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
static void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
/* Let sparse know we intended to exit holding the lock. */
__release(&lock_me_up);
}
static void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;
static void lkdtm_OVERFLOW_SIGNED(void)
{
int value;
value = huge;
pr_info("Normal signed addition ...\n");
value += 1;
ignored = value;
pr_info("Overflowing signed addition ...\n");
value += 4;
ignored = value;
}
static void lkdtm_OVERFLOW_UNSIGNED(void)
{
unsigned int value;
value = huge;
pr_info("Normal unsigned addition ...\n");
value += 1;
ignored = value;
pr_info("Overflowing unsigned addition ...\n");
value += 4;
ignored = value;
}
/* Intentionally using unannotated flex array definition. */
struct array_bounds_flex_array {
int one;
int two;
char data[];
};
struct array_bounds {
int one;
int two;
char data[8];
int three;
};
static void lkdtm_ARRAY_BOUNDS(void)
{
struct array_bounds_flex_array *not_checked;
struct array_bounds *checked;
volatile int i;
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
if (!not_checked || !checked) {
kfree(not_checked);
kfree(checked);
return;
}
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
for (i = 0; i < sizeof(checked->data); i++)
checked->data[i] = 'A';
/*
* For the uninstrumented flex array member, also touch 1 byte
* beyond to verify it is correctly uninstrumented.
*/
for (i = 0; i < 2; i++)
not_checked->data[i] = 'A';
pr_info("Array access beyond bounds ...\n");
for (i = 0; i < sizeof(checked->data) + 1; i++)
checked->data[i] = 'B';
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
else
pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
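/* Struct with a flexible array annotated via __counted_by() so the compiler/UBSAN can bounds-check accesses against "count". */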
struct lkdtm_annotated {
unsigned long flags;
int count;
int array[] __counted_by(count);
};
static volatile int fam_count = 4;
static void lkdtm_FAM_BOUNDS(void)
{
struct lkdtm_annotated *inst;
inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
if (!inst) {
pr_err("FAIL: could not allocate test struct!\n");
return;
}
inst->count = fam_count;
pr_info("Array access within bounds ...\n");
inst->array[1] = fam_count;
ignored = inst->array[1];
pr_info("Array access beyond bounds ...\n");
inst->array[fam_count] = fam_count;
ignored = inst->array[fam_count];
kfree(inst);
pr_err("FAIL: survived access of invalid flexible array member index!\n");
if (!__has_attribute(__counted_by__))
pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
lkdtm_kernel_info);
else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
else
pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
static void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
* Initially, an empty list via LIST_HEAD:
* test_head.next = &test_head
* test_head.prev = &test_head
*/
LIST_HEAD(test_head);
struct lkdtm_list good, bad;
void *target[2] = { };
void *redirection = ⌖
pr_info("attempting good list addition\n");
/*
* Adding to the list performs these actions:
* test_head.next->prev = &good.node
* good.node.next = test_head.next
* good.node.prev = test_head
* test_head.next = good.node
*/
list_add(&good.node, &test_head);
pr_info("attempting corrupted list addition\n");
/*
* In simulating this "write what where" primitive, the "what" is
* the address of &bad.node, and the "where" is the address held
* by "redirection".
*/
test_head.next = redirection;
list_add(&bad.node, &test_head);
if (target[0] == NULL && target[1] == NULL)
pr_err("Overwrite did not happen, but no BUG?!\n");
else {
pr_err("list_add() corruption not detected!\n");
pr_expected_config(CONFIG_LIST_HARDENED);
}
}
static void lkdtm_CORRUPT_LIST_DEL(void)
{
LIST_HEAD(test_head);
struct lkdtm_list item;
void *target[2] = { };
void *redirection = ⌖
list_add(&item.node, &test_head);
pr_info("attempting good list removal\n");
list_del(&item.node);
pr_info("attempting corrupted list removal\n");
list_add(&item.node, &test_head);
/* As with the list_add() test above, this corrupts "next". */
item.node.next = redirection;
list_del(&item.node);
if (target[0] == NULL && target[1] == NULL)
pr_err("Overwrite did not happen, but no BUG?!\n");
else {
pr_err("list_del() corruption not detected!\n");
pr_expected_config(CONFIG_LIST_HARDENED);
}
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack - 1;
volatile unsigned char byte;
pr_info("attempting bad read from page below current stack\n");
byte = *ptr;
pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack + THREAD_SIZE;
volatile unsigned char byte;
pr_info("attempting bad read from page above current stack\n");
byte = *ptr;
pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;
unsigned long cr4;
int i;
cr4 = native_read_cr4();
if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
pr_err("FAIL: SMEP not in use\n");
return;
}
cr4 &= ~(X86_CR4_SMEP);
pr_info("trying to clear SMEP normally\n");
native_write_cr4(cr4);
if (cr4 == native_read_cr4()) {
pr_err("FAIL: pinning SMEP failed!\n");
cr4 |= X86_CR4_SMEP;
pr_info("restoring SMEP\n");
native_write_cr4(cr4);
return;
}
pr_info("ok: SMEP did not get cleared\n");
/*
* To test the post-write pinning verification we need to call
* directly into the middle of native_write_cr4() where the
* cr4 write happens, skipping any pinning. This searches for
* the cr4 writing instruction.
*/
insn = (unsigned char *)native_write_cr4;
OPTIMIZER_HIDE_VAR(insn);
for (i = 0; i < MOV_CR4_DEPTH; i++) {
/* mov %rdi, %cr4 */
if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
break;
/* mov %rdi,%rax; mov %rax, %cr4 */
if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
insn[i+4] == 0x22 && insn[i+5] == 0xe0)
break;
}
if (i >= MOV_CR4_DEPTH) {
pr_info("ok: cannot locate cr4 writing call gadget\n");
return;
}
direct_write_cr4 = (void *)(insn + i);
pr_info("trying to clear SMEP with call gadget\n");
direct_write_cr4(cr4);
if (native_read_cr4() & X86_CR4_SMEP) {
pr_info("ok: SMEP removal was reverted\n");
} else {
pr_err("FAIL: cleared SMEP not detected!\n");
cr4 |= X86_CR4_SMEP;
pr_info("restoring SMEP\n");
native_write_cr4(cr4);
}
#else
pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
* anyway due to the double fault.
*/
struct desc_struct d = {
.type = 3, /* expand-up, writable, accessed data */
.p = 1, /* present */
.d = 1, /* 32-bit */
.g = 0, /* limit in bytes */
.s = 1, /* not system */
};
local_irq_disable();
write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);
/*
* Put our zero-limit segment in SS and then trigger a fault. The
* 4-byte access to (%esp) will fault with #SS, and the attempt to
* deliver the fault will recursively cause #SS and result in #DF.
* This whole process happens while NMIs and MCEs are blocked by the
* MOV SS window. This is nice because an NMI with an invalid SS
* would also double-fault, resulting in the NMI or MCE being lost.
*/
asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
"r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
pr_err("FAIL: tried to double fault but didn't die\n");
#else
pr_err("XFAIL: this test is ia32-only\n");
#endif
}
#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
/* Reset the keys of current task */
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
}
}
#endif
static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
int i;
if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");
if (!system_supports_address_auth()) {
pr_err("FAIL: CPU lacks pointer authentication feature\n");
return;
}
pr_info("changing PAC parameters to force function return failure...\n");
/*
* PAC is a hash value computed from input keys, return address and
* stack pointer. Since the PAC has only a few bits, collisions are
* possible, so iterate a few times to reduce the collision probability.
*/
for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
change_pac_parameters();
pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
pr_err("XFAIL: this test is arm64-only\n");
#endif
}
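/* Crash tests provided by this file, registered with the lkdtm core via the "bugs" category below. */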
static struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
CRASHTYPE(WARNING_MESSAGE),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(EXHAUST_STACK),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(REPORT_STACK),
CRASHTYPE(REPORT_STACK_CANARY),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(SOFTLOCKUP),
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
CRASHTYPE(OVERFLOW_SIGNED),
CRASHTYPE(OVERFLOW_UNSIGNED),
CRASHTYPE(ARRAY_BOUNDS),
CRASHTYPE(FAM_BOUNDS),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNSET_SMEP),
CRASHTYPE(DOUBLE_FAULT),
CRASHTYPE(CORRUPT_PAC),
};
struct crashtype_category bugs_crashtypes = {
.crashtypes = crashtypes,
.len = ARRAY_SIZE(crashtypes),
};
| linux-master | drivers/misc/lkdtm/bugs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* altera.c
*
* altera FPGA driver
*
* Copyright (C) Altera Corporation 1998-2001
* Copyright (C) 2010,2011 NetUP Inc.
* Copyright (C) 2010,2011 Igor M. Liplianin <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <misc/altera.h>
#include "altera-exprt.h"
#include "altera-jtag.h"
static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debugging information");
MODULE_DESCRIPTION("altera FPGA kernel module");
MODULE_AUTHOR("Igor M. Liplianin <[email protected]>");
MODULE_LICENSE("GPL");
#define dprintk(args...) \
if (debug) { \
printk(KERN_DEBUG args); \
}
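/* Opcodes of the Jam STAPL Byte Code (JBC) virtual machine interpreted below. */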
enum altera_fpga_opcode {
OP_NOP = 0,
OP_DUP,
OP_SWP,
OP_ADD,
OP_SUB,
OP_MULT,
OP_DIV,
OP_MOD,
OP_SHL,
OP_SHR,
OP_NOT,
OP_AND,
OP_OR,
OP_XOR,
OP_INV,
OP_GT,
OP_LT,
OP_RET,
OP_CMPS,
OP_PINT,
OP_PRNT,
OP_DSS,
OP_DSSC,
OP_ISS,
OP_ISSC,
OP_DPR = 0x1c,
OP_DPRL,
OP_DPO,
OP_DPOL,
OP_IPR,
OP_IPRL,
OP_IPO,
OP_IPOL,
OP_PCHR,
OP_EXIT,
OP_EQU,
OP_POPT,
OP_ABS = 0x2c,
OP_BCH0,
OP_PSH0 = 0x2f,
OP_PSHL = 0x40,
OP_PSHV,
OP_JMP,
OP_CALL,
OP_NEXT,
OP_PSTR,
OP_SINT = 0x47,
OP_ST,
OP_ISTP,
OP_DSTP,
OP_SWPN,
OP_DUPN,
OP_POPV,
OP_POPE,
OP_POPA,
OP_JMPZ,
OP_DS,
OP_IS,
OP_DPRA,
OP_DPOA,
OP_IPRA,
OP_IPOA,
OP_EXPT,
OP_PSHE,
OP_PSHA,
OP_DYNA,
OP_EXPV = 0x5c,
OP_COPY = 0x80,
OP_REVA,
OP_DSC,
OP_ISC,
OP_WAIT,
OP_VS,
OP_CMPA = 0xc0,
OP_VSC,
};
struct altera_procinfo {
char *name;
u8 attrs;
struct altera_procinfo *next;
};
/* This function checks if enough parameters are available on the stack. */
static int altera_check_stack(int stack_ptr, int count, int *status)
{
if (stack_ptr < count) {
*status = -EOVERFLOW;
return 0;
}
return 1;
}
static void altera_export_int(char *key, s32 value)
{
dprintk("Export: key = \"%s\", value = %d\n", key, value);
}
#define HEX_LINE_CHARS 72
#define HEX_LINE_BITS (HEX_LINE_CHARS * 4)
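/* Dump a Boolean (bit) array as hex, splitting long arrays across lines of at most HEX_LINE_CHARS digits. */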
static void altera_export_bool_array(char *key, u8 *data, s32 count)
{
char string[HEX_LINE_CHARS + 1];
s32 i, offset;
u32 size, line, lines, linebits, value, j, k;
if (count > HEX_LINE_BITS) {
dprintk("Export: key = \"%s\", %d bits, value = HEX\n",
key, count);
lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS;
for (line = 0; line < lines; ++line) {
if (line < (lines - 1)) {
linebits = HEX_LINE_BITS;
size = HEX_LINE_CHARS;
offset = count - ((line + 1) * HEX_LINE_BITS);
} else {
linebits =
count - ((lines - 1) * HEX_LINE_BITS);
size = (linebits + 3) / 4;
offset = 0L;
}
string[size] = '\0';
j = size - 1;
value = 0;
for (k = 0; k < linebits; ++k) {
i = k + offset;
if (data[i >> 3] & (1 << (i & 7)))
value |= (1 << (i & 3));
if ((i & 3) == 3) {
sprintf(&string[j], "%1x", value);
value = 0;
--j;
}
}
if ((k & 3) > 0)
sprintf(&string[j], "%1x", value);
dprintk("%s\n", string);
}
} else {
size = (count + 3) / 4;
string[size] = '\0';
j = size - 1;
value = 0;
for (i = 0; i < count; ++i) {
if (data[i >> 3] & (1 << (i & 7)))
value |= (1 << (i & 3));
if ((i & 3) == 3) {
sprintf(&string[j], "%1x", value);
value = 0;
--j;
}
}
if ((i & 3) > 0)
sprintf(&string[j], "%1x", value);
dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n",
key, count, string);
}
}
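/*
* Interpret a JBC program: parse the file header, initialize variables
* from the symbol table, then run the stack-based virtual machine until
* an EXIT opcode or an error terminates execution.
*/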
static int altera_execute(struct altera_state *astate,
u8 *p,
s32 program_size,
s32 *error_address,
int *exit_code,
int *format_version)
{
struct altera_config *aconf = astate->config;
char *msg_buff = astate->msg_buff;
long *stack = astate->stack;
int status = 0;
u32 first_word = 0L;
u32 action_table = 0L;
u32 proc_table = 0L;
u32 str_table = 0L;
u32 sym_table = 0L;
u32 data_sect = 0L;
u32 code_sect = 0L;
u32 debug_sect = 0L;
u32 action_count = 0L;
u32 proc_count = 0L;
u32 sym_count = 0L;
long *vars = NULL;
s32 *var_size = NULL;
char *attrs = NULL;
u8 *proc_attributes = NULL;
u32 pc;
u32 opcode_address;
u32 args[3];
u32 opcode;
u32 name_id;
u8 charbuf[4];
long long_tmp;
u32 variable_id;
u8 *charptr_tmp;
u8 *charptr_tmp2;
long *longptr_tmp;
int version = 0;
int delta = 0;
int stack_ptr = 0;
u32 arg_count;
int done = 0;
int bad_opcode = 0;
u32 count;
u32 index;
u32 index2;
s32 long_count;
s32 long_idx;
s32 long_idx2;
u32 i;
u32 j;
u32 uncomp_size;
u32 offset;
u32 value;
int current_proc = 0;
int reverse;
char *name;
dprintk("%s\n", __func__);
/* Read header information */
if (program_size > 52L) {
first_word = get_unaligned_be32(&p[0]);
version = (first_word & 1L);
*format_version = version + 1;
delta = version * 8;
action_table = get_unaligned_be32(&p[4]);
proc_table = get_unaligned_be32(&p[8]);
str_table = get_unaligned_be32(&p[4 + delta]);
sym_table = get_unaligned_be32(&p[16 + delta]);
data_sect = get_unaligned_be32(&p[20 + delta]);
code_sect = get_unaligned_be32(&p[24 + delta]);
debug_sect = get_unaligned_be32(&p[28 + delta]);
action_count = get_unaligned_be32(&p[40 + delta]);
proc_count = get_unaligned_be32(&p[44 + delta]);
sym_count = get_unaligned_be32(&p[48 + (2 * delta)]);
}
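/* The first word must be the "JAM" signature (0x4A414D00 or 0x4A414D01); its low bit selects the file format version. */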
if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) {
done = 1;
status = -EIO;
goto exit_done;
}
if (sym_count <= 0)
goto exit_done;
vars = kcalloc(sym_count, sizeof(long), GFP_KERNEL);
if (vars == NULL)
status = -ENOMEM;
if (status == 0) {
var_size = kcalloc(sym_count, sizeof(s32), GFP_KERNEL);
if (var_size == NULL)
status = -ENOMEM;
}
if (status == 0) {
attrs = kzalloc(sym_count, GFP_KERNEL);
if (attrs == NULL)
status = -ENOMEM;
}
if ((status == 0) && (version > 0)) {
proc_attributes = kzalloc(proc_count, GFP_KERNEL);
if (proc_attributes == NULL)
status = -ENOMEM;
}
if (status != 0)
goto exit_done;
delta = version * 2;
for (i = 0; i < sym_count; ++i) {
offset = (sym_table + ((11 + delta) * i));
value = get_unaligned_be32(&p[offset + 3 + delta]);
attrs[i] = p[offset];
/*
* use bit 7 of attribute byte to indicate that
* this buffer was dynamically allocated
* and should be freed later
*/
attrs[i] &= 0x7f;
var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]);
/*
* Attribute bits:
* bit 0: 0 = read-only, 1 = read-write
* bit 1: 0 = not compressed, 1 = compressed
* bit 2: 0 = not initialized, 1 = initialized
* bit 3: 0 = scalar, 1 = array
* bit 4: 0 = Boolean, 1 = integer
* bit 5: 0 = declared variable,
* 1 = compiler created temporary variable
*/
if ((attrs[i] & 0x0c) == 0x04)
/* initialized scalar variable */
vars[i] = value;
else if ((attrs[i] & 0x1e) == 0x0e) {
/* initialized compressed Boolean array */
uncomp_size = get_unaligned_le32(&p[data_sect + value]);
/* allocate a buffer for the uncompressed data */
vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL);
if (vars[i] == 0L)
status = -ENOMEM;
else {
/* set flag so buffer will be freed later */
attrs[i] |= 0x80;
/* uncompress the data */
if (altera_shrink(&p[data_sect + value],
var_size[i],
(u8 *)vars[i],
uncomp_size,
version) != uncomp_size)
/* decompression failed */
status = -EIO;
else
var_size[i] = uncomp_size * 8L;
}
} else if ((attrs[i] & 0x1e) == 0x0c) {
/* initialized Boolean array */
vars[i] = value + data_sect + (long)p;
} else if ((attrs[i] & 0x1c) == 0x1c) {
/* initialized integer array */
vars[i] = value + data_sect;
} else if ((attrs[i] & 0x0c) == 0x08) {
/* uninitialized array */
/* flag attrs so that memory is freed */
attrs[i] |= 0x80;
if (var_size[i] > 0) {
u32 size;
if (attrs[i] & 0x10)
/* integer array */
size = (var_size[i] * sizeof(s32));
else
/* Boolean array */
size = ((var_size[i] + 7L) / 8L);
vars[i] = (long)kzalloc(size, GFP_KERNEL);
if (vars[i] == 0) {
status = -ENOMEM;
} else {
/* zero out memory */
for (j = 0; j < size; ++j)
((u8 *)(vars[i]))[j] = 0;
}
} else
vars[i] = 0;
} else
vars[i] = 0;
}
exit_done:
if (status != 0)
done = 1;
altera_jinit(astate);
pc = code_sect;
msg_buff[0] = '\0';
/*
* For JBC version 2, we will execute the procedures corresponding to
* the selected ACTION
*/
if (version > 0) {
if (aconf->action == NULL) {
status = -EINVAL;
done = 1;
} else {
int action_found = 0;
for (i = 0; (i < action_count) && !action_found; ++i) {
name_id = get_unaligned_be32(&p[action_table +
(12 * i)]);
name = &p[str_table + name_id];
if (strncasecmp(aconf->action, name, strlen(name)) == 0) {
action_found = 1;
current_proc =
get_unaligned_be32(&p[action_table +
(12 * i) + 8]);
}
}
if (!action_found) {
status = -EINVAL;
done = 1;
}
}
if (status == 0) {
int first_time = 1;
i = current_proc;
while ((i != 0) || first_time) {
first_time = 0;
/* check procedure attribute byte */
proc_attributes[i] =
(p[proc_table +
(13 * i) + 8] &
0x03);
/*
* BIT0 - OPTIONAL
* BIT1 - RECOMMENDED
* BIT6 - FORCED OFF
* BIT7 - FORCED ON
*/
i = get_unaligned_be32(&p[proc_table +
(13 * i) + 4]);
}
/*
* Set current_proc to the first procedure
* to be executed
*/
i = current_proc;
while ((i != 0) &&
((proc_attributes[i] == 1) ||
((proc_attributes[i] & 0xc0) == 0x40))) {
i = get_unaligned_be32(&p[proc_table +
(13 * i) + 4]);
}
if ((i != 0) || ((i == 0) && (current_proc == 0) &&
((proc_attributes[0] != 1) &&
((proc_attributes[0] & 0xc0) != 0x40)))) {
current_proc = i;
pc = code_sect +
get_unaligned_be32(&p[proc_table +
(13 * i) + 9]);
if ((pc < code_sect) || (pc >= debug_sect))
status = -ERANGE;
} else
/* there are no procedures to execute! */
done = 1;
}
}
msg_buff[0] = '\0';
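/* Main interpreter loop: fetch an opcode, read its 32-bit big-endian arguments (the top two opcode bits give the argument count), then dispatch. */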
while (!done) {
opcode = (p[pc] & 0xff);
opcode_address = pc;
++pc;
if (debug > 1)
printk("opcode: %02x\n", opcode);
arg_count = (opcode >> 6) & 3;
for (i = 0; i < arg_count; ++i) {
args[i] = get_unaligned_be32(&p[pc]);
pc += 4;
}
switch (opcode) {
case OP_NOP:
break;
case OP_DUP:
if (altera_check_stack(stack_ptr, 1, &status)) {
stack[stack_ptr] = stack[stack_ptr - 1];
++stack_ptr;
}
break;
case OP_SWP:
if (altera_check_stack(stack_ptr, 2, &status))
swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
break;
case OP_ADD:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] += stack[stack_ptr];
}
break;
case OP_SUB:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] -= stack[stack_ptr];
}
break;
case OP_MULT:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] *= stack[stack_ptr];
}
break;
case OP_DIV:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] /= stack[stack_ptr];
}
break;
case OP_MOD:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] %= stack[stack_ptr];
}
break;
case OP_SHL:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] <<= stack[stack_ptr];
}
break;
case OP_SHR:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] >>= stack[stack_ptr];
}
break;
case OP_NOT:
if (altera_check_stack(stack_ptr, 1, &status))
stack[stack_ptr - 1] ^= (-1L);
break;
case OP_AND:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] &= stack[stack_ptr];
}
break;
case OP_OR:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] |= stack[stack_ptr];
}
break;
case OP_XOR:
if (altera_check_stack(stack_ptr, 2, &status)) {
--stack_ptr;
stack[stack_ptr - 1] ^= stack[stack_ptr];
}
break;
case OP_INV:
if (!altera_check_stack(stack_ptr, 1, &status))
break;
stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 0L : 1L;
break;
case OP_GT:
if (!altera_check_stack(stack_ptr, 2, &status))
break;
--stack_ptr;
stack[stack_ptr - 1] =
(stack[stack_ptr - 1] > stack[stack_ptr]) ?
1L : 0L;
break;
case OP_LT:
if (!altera_check_stack(stack_ptr, 2, &status))
break;
--stack_ptr;
stack[stack_ptr - 1] =
(stack[stack_ptr - 1] < stack[stack_ptr]) ?
1L : 0L;
break;
case OP_RET:
if ((version > 0) && (stack_ptr == 0)) {
/*
* We completed one of the main procedures
* of an ACTION.
* Find the next procedure
* to be executed and jump to it.
* If there are no more procedures, then EXIT.
*/
i = get_unaligned_be32(&p[proc_table +
(13 * current_proc) + 4]);
while ((i != 0) &&
((proc_attributes[i] == 1) ||
((proc_attributes[i] & 0xc0) == 0x40)))
i = get_unaligned_be32(&p[proc_table +
(13 * i) + 4]);
if (i == 0) {
/* no procedures to execute! */
done = 1;
*exit_code = 0; /* success */
} else {
current_proc = i;
pc = code_sect + get_unaligned_be32(
&p[proc_table +
(13 * i) + 9]);
if ((pc < code_sect) ||
(pc >= debug_sect))
status = -ERANGE;
}
} else
if (altera_check_stack(stack_ptr, 1, &status)) {
pc = stack[--stack_ptr] + code_sect;
if ((pc <= code_sect) ||
(pc >= debug_sect))
status = -ERANGE;
}
break;
case OP_CMPS:
/*
* Array short compare
* ...stack 0 is source 1 value
* ...stack 1 is source 2 value
* ...stack 2 is mask value
* ...stack 3 is count
*/
if (altera_check_stack(stack_ptr, 4, &status)) {
s32 a = stack[--stack_ptr];
s32 b = stack[--stack_ptr];
long_tmp = stack[--stack_ptr];
count = stack[stack_ptr - 1];
if ((count < 1) || (count > 32))
status = -ERANGE;
else {
long_tmp &= ((-1L) >> (32 - count));
stack[stack_ptr - 1] =
((a & long_tmp) == (b & long_tmp))
? 1L : 0L;
}
}
break;
case OP_PINT:
/*
* PRINT add integer
* ...stack 0 is integer value
*/
if (!altera_check_stack(stack_ptr, 1, &status))
break;
sprintf(&msg_buff[strlen(msg_buff)],
"%ld", stack[--stack_ptr]);
break;
case OP_PRNT:
/* PRINT finish */
if (debug)
printk(msg_buff, "\n");
msg_buff[0] = '\0';
break;
case OP_DSS:
/*
* DRSCAN short
* ...stack 0 is scan data
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_tmp = stack[--stack_ptr];
count = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_drscan(astate, count, charbuf, 0);
break;
case OP_DSSC:
/*
* DRSCAN short with capture
* ...stack 0 is scan data
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_tmp = stack[--stack_ptr];
count = stack[stack_ptr - 1];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_swap_dr(astate, count, charbuf,
0, charbuf, 0);
stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]);
break;
case OP_ISS:
/*
* IRSCAN short
* ...stack 0 is scan data
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_tmp = stack[--stack_ptr];
count = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_irscan(astate, count, charbuf, 0);
break;
case OP_ISSC:
/*
* IRSCAN short with capture
* ...stack 0 is scan data
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_tmp = stack[--stack_ptr];
count = stack[stack_ptr - 1];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_swap_ir(astate, count, charbuf,
0, charbuf, 0);
stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]);
break;
case OP_DPR:
if (!altera_check_stack(stack_ptr, 1, &status))
break;
count = stack[--stack_ptr];
status = altera_set_dr_pre(&astate->js, count, 0, NULL);
break;
case OP_DPRL:
/*
* DRPRE with literal data
* ...stack 0 is count
* ...stack 1 is literal data
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
count = stack[--stack_ptr];
long_tmp = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_set_dr_pre(&astate->js, count, 0,
charbuf);
break;
case OP_DPO:
/*
* DRPOST
* ...stack 0 is count
*/
if (altera_check_stack(stack_ptr, 1, &status)) {
count = stack[--stack_ptr];
status = altera_set_dr_post(&astate->js, count,
0, NULL);
}
break;
case OP_DPOL:
/*
* DRPOST with literal data
* ...stack 0 is count
* ...stack 1 is literal data
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
count = stack[--stack_ptr];
long_tmp = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_set_dr_post(&astate->js, count, 0,
charbuf);
break;
case OP_IPR:
if (altera_check_stack(stack_ptr, 1, &status)) {
count = stack[--stack_ptr];
status = altera_set_ir_pre(&astate->js, count,
0, NULL);
}
break;
case OP_IPRL:
/*
* IRPRE with literal data
* ...stack 0 is count
* ...stack 1 is literal data
*/
if (altera_check_stack(stack_ptr, 2, &status)) {
count = stack[--stack_ptr];
long_tmp = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_set_ir_pre(&astate->js, count,
0, charbuf);
}
break;
case OP_IPO:
/*
* IRPOST
* ...stack 0 is count
*/
if (altera_check_stack(stack_ptr, 1, &status)) {
count = stack[--stack_ptr];
status = altera_set_ir_post(&astate->js, count,
0, NULL);
}
break;
case OP_IPOL:
/*
* IRPOST with literal data
* ...stack 0 is count
* ...stack 1 is literal data
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
count = stack[--stack_ptr];
long_tmp = stack[--stack_ptr];
put_unaligned_le32(long_tmp, &charbuf[0]);
status = altera_set_ir_post(&astate->js, count, 0,
charbuf);
break;
case OP_PCHR:
if (altera_check_stack(stack_ptr, 1, &status)) {
u8 ch;
count = strlen(msg_buff);
ch = (char) stack[--stack_ptr];
if ((ch < 1) || (ch > 127)) {
/*
* character code out of range
* instead of flagging an error,
* force the value to 127
*/
ch = 127;
}
msg_buff[count] = ch;
msg_buff[count + 1] = '\0';
}
break;
case OP_EXIT:
if (altera_check_stack(stack_ptr, 1, &status))
*exit_code = stack[--stack_ptr];
done = 1;
break;
case OP_EQU:
if (!altera_check_stack(stack_ptr, 2, &status))
break;
--stack_ptr;
stack[stack_ptr - 1] =
(stack[stack_ptr - 1] == stack[stack_ptr]) ?
1L : 0L;
break;
case OP_POPT:
if (altera_check_stack(stack_ptr, 1, &status))
--stack_ptr;
break;
case OP_ABS:
if (!altera_check_stack(stack_ptr, 1, &status))
break;
if (stack[stack_ptr - 1] < 0)
stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1];
break;
case OP_BCH0:
/*
* Batch operation 0
* SWP
* SWPN 7
* SWP
* SWPN 6
* DUPN 8
* SWPN 2
* SWP
* DUPN 6
* DUPN 6
*/
/* SWP */
if (altera_check_stack(stack_ptr, 2, &status))
swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 7 */
index = 7 + 1;
if (altera_check_stack(stack_ptr, index, &status))
swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
if (altera_check_stack(stack_ptr, 2, &status))
swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* SWPN 6 */
index = 6 + 1;
if (altera_check_stack(stack_ptr, index, &status))
swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* DUPN 8 */
index = 8 + 1;
if (altera_check_stack(stack_ptr, index, &status)) {
stack[stack_ptr] = stack[stack_ptr - index];
++stack_ptr;
}
/* SWPN 2 */
index = 2 + 1;
if (altera_check_stack(stack_ptr, index, &status))
swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
/* SWP */
if (altera_check_stack(stack_ptr, 2, &status))
swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
/* DUPN 6 */
index = 6 + 1;
if (altera_check_stack(stack_ptr, index, &status)) {
stack[stack_ptr] = stack[stack_ptr - index];
++stack_ptr;
}
/* DUPN 6 */
index = 6 + 1;
if (altera_check_stack(stack_ptr, index, &status)) {
stack[stack_ptr] = stack[stack_ptr - index];
++stack_ptr;
}
break;
case OP_PSH0:
stack[stack_ptr++] = 0;
break;
case OP_PSHL:
stack[stack_ptr++] = (s32) args[0];
break;
case OP_PSHV:
stack[stack_ptr++] = vars[args[0]];
break;
case OP_JMP:
pc = args[0] + code_sect;
if ((pc < code_sect) || (pc >= debug_sect))
status = -ERANGE;
break;
case OP_CALL:
stack[stack_ptr++] = pc;
pc = args[0] + code_sect;
if ((pc < code_sect) || (pc >= debug_sect))
status = -ERANGE;
break;
case OP_NEXT:
/*
* Process FOR / NEXT loop
* ...argument 0 is variable ID
* ...stack 0 is step value
* ...stack 1 is end value
* ...stack 2 is top address
*/
if (altera_check_stack(stack_ptr, 3, &status)) {
s32 step = stack[stack_ptr - 1];
s32 end = stack[stack_ptr - 2];
s32 top = stack[stack_ptr - 3];
s32 iterator = vars[args[0]];
int break_out = 0;
if (step < 0) {
if (iterator <= end)
break_out = 1;
} else if (iterator >= end)
break_out = 1;
if (break_out) {
stack_ptr -= 3;
} else {
vars[args[0]] = iterator + step;
pc = top + code_sect;
if ((pc < code_sect) ||
(pc >= debug_sect))
status = -ERANGE;
}
}
break;
case OP_PSTR:
/*
* PRINT add string
* ...argument 0 is string ID
*/
count = strlen(msg_buff);
strscpy(&msg_buff[count],
&p[str_table + args[0]],
ALTERA_MESSAGE_LENGTH - count);
break;
case OP_SINT:
/*
* STATE intermediate state
* ...argument 0 is state code
*/
status = altera_goto_jstate(astate, args[0]);
break;
case OP_ST:
/*
* STATE final state
* ...argument 0 is state code
*/
status = altera_goto_jstate(astate, args[0]);
break;
case OP_ISTP:
/*
* IRSTOP state
* ...argument 0 is state code
*/
status = altera_set_irstop(&astate->js, args[0]);
break;
case OP_DSTP:
/*
* DRSTOP state
* ...argument 0 is state code
*/
status = altera_set_drstop(&astate->js, args[0]);
break;
case OP_SWPN:
/*
* Exchange top with Nth stack value
* ...argument 0 is 0-based stack entry
* to swap with top element
*/
index = (args[0]) + 1;
if (altera_check_stack(stack_ptr, index, &status))
swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
break;
case OP_DUPN:
/*
* Duplicate Nth stack value
* ...argument 0 is 0-based stack entry to duplicate
*/
index = (args[0]) + 1;
if (altera_check_stack(stack_ptr, index, &status)) {
stack[stack_ptr] = stack[stack_ptr - index];
++stack_ptr;
}
break;
case OP_POPV:
/*
* Pop stack into scalar variable
* ...argument 0 is variable ID
* ...stack 0 is value
*/
if (altera_check_stack(stack_ptr, 1, &status))
vars[args[0]] = stack[--stack_ptr];
break;
case OP_POPE:
/*
* Pop stack into integer array element
* ...argument 0 is variable ID
* ...stack 0 is array index
* ...stack 1 is value
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
variable_id = args[0];
/*
* If variable is read-only,
* convert to writable array
*/
if ((version > 0) &&
((attrs[variable_id] & 0x9c) == 0x1c)) {
/* Allocate a writable buffer for this array */
count = var_size[variable_id];
long_tmp = vars[variable_id];
longptr_tmp = kcalloc(count, sizeof(long),
GFP_KERNEL);
vars[variable_id] = (long)longptr_tmp;
if (vars[variable_id] == 0) {
status = -ENOMEM;
break;
}
/* copy previous contents into buffer */
for (i = 0; i < count; ++i) {
longptr_tmp[i] =
get_unaligned_be32(&p[long_tmp]);
long_tmp += sizeof(long);
}
/*
* set bit 7 - buffer was
* dynamically allocated
*/
attrs[variable_id] |= 0x80;
/* clear bit 2 - variable is writable */
attrs[variable_id] &= ~0x04;
attrs[variable_id] |= 0x01;
}
/* check that variable is a writable integer array */
if ((attrs[variable_id] & 0x1c) != 0x18)
status = -ERANGE;
else {
longptr_tmp = (long *)vars[variable_id];
/* pop the array index */
index = stack[--stack_ptr];
/* pop the value and store it into the array */
longptr_tmp[index] = stack[--stack_ptr];
}
break;
case OP_POPA:
/*
* Pop stack into Boolean array
* ...argument 0 is variable ID
* ...stack 0 is count
* ...stack 1 is array index
* ...stack 2 is value
*/
if (!altera_check_stack(stack_ptr, 3, &status))
break;
variable_id = args[0];
/*
* If variable is read-only,
* convert to writable array
*/
if ((version > 0) &&
((attrs[variable_id] & 0x9c) == 0x0c)) {
/* Allocate a writable buffer for this array */
long_tmp =
(var_size[variable_id] + 7L) >> 3L;
charptr_tmp2 = (u8 *)vars[variable_id];
charptr_tmp =
kzalloc(long_tmp, GFP_KERNEL);
vars[variable_id] = (long)charptr_tmp;
if (vars[variable_id] == 0) {
status = -ENOMEM;
break;
}
/* zero the buffer */
for (long_idx = 0L;
long_idx < long_tmp;
++long_idx) {
charptr_tmp[long_idx] = 0;
}
/* copy previous contents into buffer */
for (long_idx = 0L;
long_idx < var_size[variable_id];
++long_idx) {
long_idx2 = long_idx;
if (charptr_tmp2[long_idx2 >> 3] &
(1 << (long_idx2 & 7))) {
charptr_tmp[long_idx >> 3] |=
(1 << (long_idx & 7));
}
}
/*
* set bit 7 - buffer was
* dynamically allocated
*/
attrs[variable_id] |= 0x80;
/* clear bit 2 - variable is writable */
attrs[variable_id] &= ~0x04;
attrs[variable_id] |= 0x01;
}
/*
* check that variable is
* a writable Boolean array
*/
if ((attrs[variable_id] & 0x1c) != 0x08) {
status = -ERANGE;
break;
}
charptr_tmp = (u8 *)vars[variable_id];
/* pop the count (number of bits to copy) */
long_count = stack[--stack_ptr];
/* pop the array index */
long_idx = stack[--stack_ptr];
reverse = 0;
if (version > 0) {
/*
* stack 0 = array right index
* stack 1 = array left index
*/
if (long_idx > long_count) {
reverse = 1;
long_tmp = long_count;
long_count = 1 + long_idx -
long_count;
long_idx = long_tmp;
/* reverse POPA is not supported */
status = -ERANGE;
break;
} else
long_count = 1 + long_count -
long_idx;
}
/* pop the data */
long_tmp = stack[--stack_ptr];
if (long_count < 1) {
status = -ERANGE;
break;
}
for (i = 0; i < long_count; ++i) {
if (long_tmp & (1L << (s32) i))
charptr_tmp[long_idx >> 3L] |=
(1L << (long_idx & 7L));
else
charptr_tmp[long_idx >> 3L] &=
~(1L << (long_idx & 7L));
++long_idx;
}
break;
case OP_JMPZ:
/*
* Pop stack and branch if zero
* ...argument 0 is address
* ...stack 0 is condition value
*/
if (altera_check_stack(stack_ptr, 1, &status)) {
if (stack[--stack_ptr] == 0) {
pc = args[0] + code_sect;
if ((pc < code_sect) ||
(pc >= debug_sect))
status = -ERANGE;
}
}
break;
case OP_DS:
case OP_IS:
/*
* DRSCAN
* IRSCAN
* ...argument 0 is scan data variable ID
* ...stack 0 is array index
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_idx = stack[--stack_ptr];
long_count = stack[--stack_ptr];
reverse = 0;
if (version > 0) {
/*
* stack 0 = array right index
* stack 1 = array left index
* stack 2 = count
*/
long_tmp = long_count;
long_count = stack[--stack_ptr];
if (long_idx > long_tmp) {
reverse = 1;
long_idx = long_tmp;
}
}
charptr_tmp = (u8 *)vars[args[0]];
if (reverse) {
/*
* allocate a buffer
* and reverse the data order
*/
charptr_tmp2 = charptr_tmp;
charptr_tmp = kzalloc((long_count >> 3) + 1,
GFP_KERNEL);
if (charptr_tmp == NULL) {
status = -ENOMEM;
break;
}
long_tmp = long_idx + long_count - 1;
long_idx2 = 0;
while (long_idx2 < long_count) {
if (charptr_tmp2[long_tmp >> 3] &
(1 << (long_tmp & 7)))
charptr_tmp[long_idx2 >> 3] |=
(1 << (long_idx2 & 7));
else
charptr_tmp[long_idx2 >> 3] &=
~(1 << (long_idx2 & 7));
--long_tmp;
++long_idx2;
}
}
if (opcode == 0x51) /* DS */
status = altera_drscan(astate, long_count,
charptr_tmp, long_idx);
else /* IS */
status = altera_irscan(astate, long_count,
charptr_tmp, long_idx);
if (reverse)
kfree(charptr_tmp);
break;
case OP_DPRA:
/*
* DRPRE with array data
* ...argument 0 is variable ID
* ...stack 0 is array index
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
index = stack[--stack_ptr];
count = stack[--stack_ptr];
if (version > 0)
/*
* stack 0 = array right index
* stack 1 = array left index
*/
count = 1 + count - index;
charptr_tmp = (u8 *)vars[args[0]];
status = altera_set_dr_pre(&astate->js, count, index,
charptr_tmp);
break;
case OP_DPOA:
/*
* DRPOST with array data
* ...argument 0 is variable ID
* ...stack 0 is array index
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
index = stack[--stack_ptr];
count = stack[--stack_ptr];
if (version > 0)
/*
* stack 0 = array right index
* stack 1 = array left index
*/
count = 1 + count - index;
charptr_tmp = (u8 *)vars[args[0]];
status = altera_set_dr_post(&astate->js, count, index,
charptr_tmp);
break;
case OP_IPRA:
/*
* IRPRE with array data
* ...argument 0 is variable ID
* ...stack 0 is array index
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
index = stack[--stack_ptr];
count = stack[--stack_ptr];
if (version > 0)
/*
* stack 0 = array right index
* stack 1 = array left index
*/
count = 1 + count - index;
charptr_tmp = (u8 *)vars[args[0]];
status = altera_set_ir_pre(&astate->js, count, index,
charptr_tmp);
break;
case OP_IPOA:
/*
* IRPOST with array data
* ...argument 0 is variable ID
* ...stack 0 is array index
* ...stack 1 is count
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
index = stack[--stack_ptr];
count = stack[--stack_ptr];
if (version > 0)
/*
* stack 0 = array right index
* stack 1 = array left index
*/
count = 1 + count - index;
charptr_tmp = (u8 *)vars[args[0]];
status = altera_set_ir_post(&astate->js, count, index,
charptr_tmp);
break;
case OP_EXPT:
/*
* EXPORT
* ...argument 0 is string ID
* ...stack 0 is integer expression
*/
if (altera_check_stack(stack_ptr, 1, &status)) {
name = &p[str_table + args[0]];
long_tmp = stack[--stack_ptr];
altera_export_int(name, long_tmp);
}
break;
case OP_PSHE:
/*
* Push integer array element
* ...argument 0 is variable ID
* ...stack 0 is array index
*/
if (!altera_check_stack(stack_ptr, 1, &status))
break;
variable_id = args[0];
index = stack[stack_ptr - 1];
/* check variable type */
if ((attrs[variable_id] & 0x1f) == 0x19) {
/* writable integer array */
longptr_tmp = (long *)vars[variable_id];
stack[stack_ptr - 1] = longptr_tmp[index];
} else if ((attrs[variable_id] & 0x1f) == 0x1c) {
/* read-only integer array */
long_tmp = vars[variable_id] +
(index * sizeof(long));
stack[stack_ptr - 1] =
get_unaligned_be32(&p[long_tmp]);
} else
status = -ERANGE;
break;
case OP_PSHA:
/*
* Push Boolean array
* ...argument 0 is variable ID
* ...stack 0 is count
* ...stack 1 is array index
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
variable_id = args[0];
/* check that variable is a Boolean array */
if ((attrs[variable_id] & 0x18) != 0x08) {
status = -ERANGE;
break;
}
charptr_tmp = (u8 *)vars[variable_id];
/* pop the count (number of bits to copy) */
count = stack[--stack_ptr];
/* pop the array index */
index = stack[stack_ptr - 1];
if (version > 0)
/*
* stack 0 = array right index
* stack 1 = array left index
*/
count = 1 + count - index;
if ((count < 1) || (count > 32)) {
status = -ERANGE;
break;
}
long_tmp = 0L;
for (i = 0; i < count; ++i)
if (charptr_tmp[(i + index) >> 3] &
(1 << ((i + index) & 7)))
long_tmp |= (1L << i);
stack[stack_ptr - 1] = long_tmp;
break;
case OP_DYNA:
/*
* Dynamically change size of array
* ...argument 0 is variable ID
* ...stack 0 is new size
*/
if (!altera_check_stack(stack_ptr, 1, &status))
break;
variable_id = args[0];
long_tmp = stack[--stack_ptr];
if (long_tmp > var_size[variable_id]) {
var_size[variable_id] = long_tmp;
if (attrs[variable_id] & 0x10)
/* allocate integer array */
long_tmp *= sizeof(long);
else
/* allocate Boolean array */
long_tmp = (long_tmp + 7) >> 3;
/*
* If the buffer was previously allocated,
* free it
*/
if (attrs[variable_id] & 0x80) {
kfree((void *)vars[variable_id]);
vars[variable_id] = 0;
}
/*
* Allocate a new buffer
* of the requested size
*/
vars[variable_id] = (long)
kzalloc(long_tmp, GFP_KERNEL);
if (vars[variable_id] == 0) {
status = -ENOMEM;
break;
}
/*
* Set the attribute bit to indicate that
* this buffer was dynamically allocated and
* should be freed later
*/
attrs[variable_id] |= 0x80;
/* zero out memory */
count = ((var_size[variable_id] + 7L) /
8L);
charptr_tmp = (u8 *)(vars[variable_id]);
for (index = 0; index < count; ++index)
charptr_tmp[index] = 0;
}
break;
case OP_EXPV:
/*
* Export Boolean array
* ...argument 0 is string ID
* ...stack 0 is variable ID
* ...stack 1 is array right index
* ...stack 2 is array left index
*/
if (!altera_check_stack(stack_ptr, 3, &status))
break;
if (version == 0) {
/* EXPV is not supported in JBC 1.0 */
bad_opcode = 1;
break;
}
name = &p[str_table + args[0]];
variable_id = stack[--stack_ptr];
long_idx = stack[--stack_ptr];/* right indx */
long_idx2 = stack[--stack_ptr];/* left indx */
if (long_idx > long_idx2) {
/* reverse indices not supported */
status = -ERANGE;
break;
}
long_count = 1 + long_idx2 - long_idx;
charptr_tmp = (u8 *)vars[variable_id];
charptr_tmp2 = NULL;
if ((long_idx & 7L) != 0) {
s32 k = long_idx;
charptr_tmp2 =
kzalloc(((long_count + 7L) / 8L),
GFP_KERNEL);
if (charptr_tmp2 == NULL) {
status = -ENOMEM;
break;
}
for (i = 0; i < long_count; ++i) {
if (charptr_tmp[k >> 3] &
(1 << (k & 7)))
charptr_tmp2[i >> 3] |=
(1 << (i & 7));
else
charptr_tmp2[i >> 3] &=
~(1 << (i & 7));
++k;
}
charptr_tmp = charptr_tmp2;
} else if (long_idx != 0)
charptr_tmp = &charptr_tmp[long_idx >> 3];
altera_export_bool_array(name, charptr_tmp,
long_count);
/* free allocated buffer */
if ((long_idx & 7L) != 0)
kfree(charptr_tmp2);
break;
case OP_COPY: {
/*
* Array copy
* ...argument 0 is dest ID
* ...argument 1 is source ID
* ...stack 0 is count
* ...stack 1 is dest index
* ...stack 2 is source index
*/
s32 copy_count;
s32 copy_index;
s32 copy_index2;
s32 destleft;
s32 src_count;
s32 dest_count;
int src_reverse = 0;
int dest_reverse = 0;
if (!altera_check_stack(stack_ptr, 3, &status))
break;
copy_count = stack[--stack_ptr];
copy_index = stack[--stack_ptr];
copy_index2 = stack[--stack_ptr];
reverse = 0;
if (version > 0) {
/*
* stack 0 = source right index
* stack 1 = source left index
* stack 2 = destination right index
* stack 3 = destination left index
*/
destleft = stack[--stack_ptr];
if (copy_count > copy_index) {
src_reverse = 1;
reverse = 1;
src_count = 1 + copy_count - copy_index;
/* copy_index = source start index */
} else {
src_count = 1 + copy_index - copy_count;
/* source start index */
copy_index = copy_count;
}
if (copy_index2 > destleft) {
dest_reverse = 1;
reverse = !reverse;
dest_count = 1 + copy_index2 - destleft;
/* destination start index */
copy_index2 = destleft;
} else
dest_count = 1 + destleft - copy_index2;
copy_count = (src_count < dest_count) ?
src_count : dest_count;
if ((src_reverse || dest_reverse) &&
(src_count != dest_count))
/*
* If either the source or destination
* is reversed, we can't tolerate
* a length mismatch, because we
* "left justify" arrays when copying.
* This won't work correctly
* with reversed arrays.
*/
status = -ERANGE;
}
count = copy_count;
index = copy_index;
index2 = copy_index2;
/*
* If destination is a read-only array,
* allocate a buffer and convert it to a writable array
*/
variable_id = args[1];
if ((version > 0) &&
((attrs[variable_id] & 0x9c) == 0x0c)) {
/* Allocate a writable buffer for this array */
long_tmp =
(var_size[variable_id] + 7L) >> 3L;
charptr_tmp2 = (u8 *)vars[variable_id];
charptr_tmp =
kzalloc(long_tmp, GFP_KERNEL);
vars[variable_id] = (long)charptr_tmp;
if (vars[variable_id] == 0) {
status = -ENOMEM;
break;
}
/* zero the buffer */
for (long_idx = 0L; long_idx < long_tmp;
++long_idx)
charptr_tmp[long_idx] = 0;
/* copy previous contents into buffer */
for (long_idx = 0L;
long_idx < var_size[variable_id];
++long_idx) {
long_idx2 = long_idx;
if (charptr_tmp2[long_idx2 >> 3] &
(1 << (long_idx2 & 7)))
charptr_tmp[long_idx >> 3] |=
(1 << (long_idx & 7));
}
/* set bit 7 - buffer was dynamically allocated */
attrs[variable_id] |= 0x80;
/* clear bit 2 - variable is writable */
attrs[variable_id] &= ~0x04;
attrs[variable_id] |= 0x01;
}
charptr_tmp = (u8 *)vars[args[1]];
charptr_tmp2 = (u8 *)vars[args[0]];
/* check if destination is a writable Boolean array */
if ((attrs[args[1]] & 0x1c) != 0x08) {
status = -ERANGE;
break;
}
if (count < 1) {
status = -ERANGE;
break;
}
if (reverse)
index2 += (count - 1);
for (i = 0; i < count; ++i) {
if (charptr_tmp2[index >> 3] &
(1 << (index & 7)))
charptr_tmp[index2 >> 3] |=
(1 << (index2 & 7));
else
charptr_tmp[index2 >> 3] &=
~(1 << (index2 & 7));
++index;
if (reverse)
--index2;
else
++index2;
}
break;
}
case OP_DSC:
case OP_ISC: {
/*
* DRSCAN with capture
* IRSCAN with capture
* ...argument 0 is scan data variable ID
* ...argument 1 is capture variable ID
* ...stack 0 is capture index
* ...stack 1 is scan data index
* ...stack 2 is count
*/
s32 scan_right, scan_left;
s32 capture_count = 0;
s32 scan_count = 0;
s32 capture_index;
s32 scan_index;
if (!altera_check_stack(stack_ptr, 3, &status))
break;
capture_index = stack[--stack_ptr];
scan_index = stack[--stack_ptr];
if (version > 0) {
/*
* stack 0 = capture right index
* stack 1 = capture left index
* stack 2 = scan right index
* stack 3 = scan left index
* stack 4 = count
*/
scan_right = stack[--stack_ptr];
scan_left = stack[--stack_ptr];
capture_count = 1 + scan_index - capture_index;
scan_count = 1 + scan_left - scan_right;
scan_index = scan_right;
}
long_count = stack[--stack_ptr];
/*
* If capture array is read-only, allocate a buffer
* and convert it to a writable array
*/
variable_id = args[1];
if ((version > 0) &&
((attrs[variable_id] & 0x9c) == 0x0c)) {
/* Allocate a writable buffer for this array */
long_tmp =
(var_size[variable_id] + 7L) >> 3L;
charptr_tmp2 = (u8 *)vars[variable_id];
charptr_tmp =
kzalloc(long_tmp, GFP_KERNEL);
vars[variable_id] = (long)charptr_tmp;
if (vars[variable_id] == 0) {
status = -ENOMEM;
break;
}
/* zero the buffer */
for (long_idx = 0L; long_idx < long_tmp;
++long_idx)
charptr_tmp[long_idx] = 0;
/* copy previous contents into buffer */
for (long_idx = 0L;
long_idx < var_size[variable_id];
++long_idx) {
long_idx2 = long_idx;
if (charptr_tmp2[long_idx2 >> 3] &
(1 << (long_idx2 & 7)))
charptr_tmp[long_idx >> 3] |=
(1 << (long_idx & 7));
}
/*
* set bit 7 - buffer was
* dynamically allocated
*/
attrs[variable_id] |= 0x80;
/* clear bit 2 - variable is writable */
attrs[variable_id] &= ~0x04;
attrs[variable_id] |= 0x01;
}
charptr_tmp = (u8 *)vars[args[0]];
charptr_tmp2 = (u8 *)vars[args[1]];
if ((version > 0) &&
((long_count > capture_count) ||
(long_count > scan_count))) {
status = -ERANGE;
break;
}
/*
* check that capture array
* is a writable Boolean array
*/
if ((attrs[args[1]] & 0x1c) != 0x08) {
status = -ERANGE;
break;
}
if (status == 0) {
if (opcode == 0x82) /* DSC */
status = altera_swap_dr(astate,
long_count,
charptr_tmp,
scan_index,
charptr_tmp2,
capture_index);
else /* ISC */
status = altera_swap_ir(astate,
long_count,
charptr_tmp,
scan_index,
charptr_tmp2,
capture_index);
}
break;
}
case OP_WAIT:
/*
* WAIT
* ...argument 0 is wait state
* ...argument 1 is end state
* ...stack 0 is cycles
* ...stack 1 is microseconds
*/
if (!altera_check_stack(stack_ptr, 2, &status))
break;
long_tmp = stack[--stack_ptr];
if (long_tmp != 0L)
status = altera_wait_cycles(astate, long_tmp,
args[0]);
long_tmp = stack[--stack_ptr];
if ((status == 0) && (long_tmp != 0L))
status = altera_wait_msecs(astate,
long_tmp,
args[0]);
if ((status == 0) && (args[1] != args[0]))
status = altera_goto_jstate(astate,
args[1]);
if (version > 0) {
--stack_ptr; /* throw away MAX cycles */
--stack_ptr; /* throw away MAX microseconds */
}
break;
case OP_CMPA: {
/*
* Array compare
* ...argument 0 is source 1 ID
* ...argument 1 is source 2 ID
* ...argument 2 is mask ID
* ...stack 0 is source 1 index
* ...stack 1 is source 2 index
* ...stack 2 is mask index
* ...stack 3 is count
*/
s32 a, b;
u8 *source1 = (u8 *)vars[args[0]];
u8 *source2 = (u8 *)vars[args[1]];
u8 *mask = (u8 *)vars[args[2]];
u32 index1;
u32 index2;
u32 mask_index;
if (!altera_check_stack(stack_ptr, 4, &status))
break;
index1 = stack[--stack_ptr];
index2 = stack[--stack_ptr];
mask_index = stack[--stack_ptr];
long_count = stack[--stack_ptr];
if (version > 0) {
/*
* stack 0 = source 1 right index
* stack 1 = source 1 left index
* stack 2 = source 2 right index
* stack 3 = source 2 left index
* stack 4 = mask right index
* stack 5 = mask left index
*/
s32 mask_right = stack[--stack_ptr];
s32 mask_left = stack[--stack_ptr];
/* source 1 count */
a = 1 + index2 - index1;
/* source 2 count */
b = 1 + long_count - mask_index;
a = (a < b) ? a : b;
/* mask count */
b = 1 + mask_left - mask_right;
a = (a < b) ? a : b;
/* source 2 start index */
index2 = mask_index;
/* mask start index */
mask_index = mask_right;
long_count = a;
}
long_tmp = 1L;
if (long_count < 1)
status = -ERANGE;
else {
count = long_count;
for (i = 0; i < count; ++i) {
if (mask[mask_index >> 3] &
(1 << (mask_index & 7))) {
a = source1[index1 >> 3] &
(1 << (index1 & 7))
? 1 : 0;
b = source2[index2 >> 3] &
(1 << (index2 & 7))
? 1 : 0;
if (a != b) /* failure */
long_tmp = 0L;
}
++index1;
++index2;
++mask_index;
}
}
stack[stack_ptr++] = long_tmp;
break;
}
default:
/* Unrecognized opcode -- ERROR! */
bad_opcode = 1;
break;
}
if (bad_opcode)
status = -ENOSYS;
if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE))
status = -EOVERFLOW;
if (status != 0) {
done = 1;
*error_address = (s32)(opcode_address - code_sect);
}
}
altera_free_buffers(astate);
/* Free all dynamically allocated arrays */
if ((attrs != NULL) && (vars != NULL))
for (i = 0; i < sym_count; ++i)
if (attrs[i] & 0x80)
kfree((void *)vars[i]);
kfree(vars);
kfree(var_size);
kfree(attrs);
kfree(proc_attributes);
return status;
}
static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
char *key, char *value, int keylen, int vallen)
/*
* Gets key and value of NOTE fields in the JBC file.
* Can be called in two modes: if offset pointer is NULL,
* then the function searches for note fields which match
* the key string provided. If offset is not NULL, then
* the function finds the next note field of any key,
* starting at the offset specified by the offset pointer.
* Returns 0 for success, else appropriate error code
*/
{
int status = -ENODATA;
u32 note_strings = 0L;
u32 note_table = 0L;
u32 note_count = 0L;
u32 first_word = 0L;
int version = 0;
int delta = 0;
char *key_ptr;
char *value_ptr;
int i;
/* Read header information */
if (program_size > 52L) {
first_word = get_unaligned_be32(&p[0]);
version = (first_word & 1L);
delta = version * 8;
note_strings = get_unaligned_be32(&p[8 + delta]);
note_table = get_unaligned_be32(&p[12 + delta]);
note_count = get_unaligned_be32(&p[44 + (2 * delta)]);
}
if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
return -EIO;
if (note_count <= 0L)
return status;
if (offset == NULL) {
/*
* We will search for the first note with a specific key,
* and return only the value
*/
for (i = 0; (i < note_count) &&
(status != 0); ++i) {
key_ptr = &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])];
if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) {
status = 0;
value_ptr = &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])];
if (value != NULL)
strscpy(value, value_ptr, vallen);
}
}
} else {
/*
* We will search for the next note, regardless of the key,
* and return both the value and the key
*/
i = *offset;
if ((i >= 0) && (i < note_count)) {
status = 0;
if (key != NULL)
strscpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
keylen);
if (value != NULL)
strscpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
vallen);
*offset = i + 1;
}
}
return status;
}
static int altera_check_crc(u8 *p, s32 program_size)
{
int status = 0;
u16 local_expected = 0,
local_actual = 0,
shift_reg = 0xffff;
int bit, feedback;
u8 databyte;
u32 i;
u32 crc_section = 0L;
u32 first_word = 0L;
int version = 0;
int delta = 0;
if (program_size > 52L) {
first_word = get_unaligned_be32(&p[0]);
version = (first_word & 1L);
delta = version * 8;
crc_section = get_unaligned_be32(&p[32 + delta]);
}
if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
status = -EIO;
if (crc_section >= program_size)
status = -EIO;
if (status == 0) {
local_expected = (u16)get_unaligned_be16(&p[crc_section]);
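/* CRC-16 over everything before the CRC section, using the bit-reversed CCITT polynomial 0x8408. */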
for (i = 0; i < crc_section; ++i) {
databyte = p[i];
for (bit = 0; bit < 8; bit++) {
feedback = (databyte ^ shift_reg) & 0x01;
shift_reg >>= 1;
if (feedback)
shift_reg ^= 0x8408;
databyte >>= 1;
}
}
local_actual = (u16)~shift_reg;
if (local_expected != local_actual)
status = -EILSEQ;
}
if (debug || status) {
switch (status) {
case 0:
printk(KERN_INFO "%s: CRC matched: %04x\n", __func__,
local_actual);
break;
case -EILSEQ:
printk(KERN_ERR "%s: CRC mismatch: expected %04x, "
"actual %04x\n", __func__, local_expected,
local_actual);
break;
case -EIO:
printk(KERN_ERR "%s: error: format isn't "
"recognized.\n", __func__);
break;
default:
printk(KERN_ERR "%s: CRC function returned error "
"code %d\n", __func__, status);
break;
}
}
return status;
}
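/* Report the file format version and, for Jam STAPL (v2) files, the number of actions and procedures in the header. */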
static int altera_get_file_info(u8 *p,
s32 program_size,
int *format_version,
int *action_count,
int *procedure_count)
{
int status = -EIO;
u32 first_word = 0;
int version = 0;
if (program_size <= 52L)
return status;
first_word = get_unaligned_be32(&p[0]);
if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) {
status = 0;
version = (first_word & 1L);
*format_version = version + 1;
if (version > 0) {
*action_count = get_unaligned_be32(&p[48]);
*procedure_count = get_unaligned_be32(&p[52]);
}
}
return status;
}
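/* Look up the name, description and procedure list of the action at 'index' in a Jam STAPL (v2) file; the caller frees the returned list. */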
static int altera_get_act_info(u8 *p,
s32 program_size,
int index,
char **name,
char **description,
struct altera_procinfo **proc_list)
{
int status = -EIO;
struct altera_procinfo *procptr = NULL;
struct altera_procinfo *tmpptr = NULL;
u32 first_word = 0L;
u32 action_table = 0L;
u32 proc_table = 0L;
u32 str_table = 0L;
u32 note_strings = 0L;
u32 action_count = 0L;
u32 proc_count = 0L;
u32 act_name_id = 0L;
u32 act_desc_id = 0L;
u32 act_proc_id = 0L;
u32 act_proc_name = 0L;
u8 act_proc_attribute = 0;
if (program_size <= 52L)
return status;
/* Read header information */
first_word = get_unaligned_be32(&p[0]);
if (first_word != 0x4A414D01L)
return status;
action_table = get_unaligned_be32(&p[4]);
proc_table = get_unaligned_be32(&p[8]);
str_table = get_unaligned_be32(&p[12]);
note_strings = get_unaligned_be32(&p[16]);
action_count = get_unaligned_be32(&p[48]);
proc_count = get_unaligned_be32(&p[52]);
if (index >= action_count)
return status;
act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]);
act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]);
act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]);
*name = &p[str_table + act_name_id];
if (act_desc_id < (note_strings - str_table))
*description = &p[str_table + act_desc_id];
do {
act_proc_name = get_unaligned_be32(
&p[proc_table + (13 * act_proc_id)]);
act_proc_attribute =
(p[proc_table + (13 * act_proc_id) + 8] & 0x03);
procptr =
kzalloc(sizeof(struct altera_procinfo),
GFP_KERNEL);
if (procptr == NULL)
status = -ENOMEM;
else {
procptr->name = &p[str_table + act_proc_name];
procptr->attrs = act_proc_attribute;
procptr->next = NULL;
/* add record to end of linked list */
if (*proc_list == NULL)
*proc_list = procptr;
else {
tmpptr = *proc_list;
while (tmpptr->next != NULL)
tmpptr = tmpptr->next;
tmpptr->next = procptr;
}
}
act_proc_id = get_unaligned_be32(
&p[proc_table + (13 * act_proc_id) + 4]);
} while ((act_proc_id != 0) && (act_proc_id < proc_count));
return status;
}
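/*
 * altera_init() - parse and execute a Jam STAPL bytecode image.
 * A caller typically obtains the image with request_firmware() and
 * supplies an altera_config whose jtag_io callback drives the JTAG
 * lines; when jtag_io is NULL the driver falls back to the parallel
 * port ByteBlaster helper if CONFIG_HAS_IOPORT is enabled.
 * Illustrative sketch only ("myboard.stapl" and my_cfg are made-up
 * names):
 *
 *	err = request_firmware(&fw, "myboard.stapl", dev);
 *	if (!err) {
 *		altera_init(&my_cfg, fw);
 *		release_firmware(fw);
 *	}
 */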
int altera_init(struct altera_config *config, const struct firmware *fw)
{
struct altera_state *astate = NULL;
struct altera_procinfo *proc_list = NULL;
struct altera_procinfo *procptr = NULL;
char *key = NULL;
char *value = NULL;
char *action_name = NULL;
char *description = NULL;
int exec_result = 0;
int exit_code = 0;
int format_version = 0;
int action_count = 0;
int procedure_count = 0;
int index = 0;
s32 offset = 0L;
s32 error_address = 0L;
int retval = 0;
key = kzalloc(33, GFP_KERNEL);
if (!key) {
retval = -ENOMEM;
goto out;
}
value = kzalloc(257, GFP_KERNEL);
if (!value) {
retval = -ENOMEM;
goto free_key;
}
astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL);
if (!astate) {
retval = -ENOMEM;
goto free_value;
}
astate->config = config;
if (!astate->config->jtag_io) {
if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
retval = -ENODEV;
goto free_state;
}
dprintk("%s: using byteblaster!\n", __func__);
astate->config->jtag_io = netup_jtag_io_lpt;
}
altera_check_crc((u8 *)fw->data, fw->size);
if (debug) {
altera_get_file_info((u8 *)fw->data, fw->size, &format_version,
&action_count, &procedure_count);
printk(KERN_INFO "%s: File format is %s ByteCode format\n",
__func__, (format_version == 2) ? "Jam STAPL" :
"pre-standardized Jam 1.1");
while (altera_get_note((u8 *)fw->data, fw->size,
&offset, key, value, 32, 256) == 0)
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
__func__, key, value);
}
if (debug && (format_version == 2) && (action_count > 0)) {
printk(KERN_INFO "%s: Actions available:\n", __func__);
for (index = 0; index < action_count; ++index) {
altera_get_act_info((u8 *)fw->data, fw->size,
index, &action_name,
&description,
&proc_list);
if (description == NULL)
printk(KERN_INFO "%s: %s\n",
__func__,
action_name);
else
printk(KERN_INFO "%s: %s \"%s\"\n",
__func__,
action_name,
description);
procptr = proc_list;
while (procptr != NULL) {
if (procptr->attrs != 0)
printk(KERN_INFO "%s: %s (%s)\n",
__func__,
procptr->name,
(procptr->attrs == 1) ?
"optional" : "recommended");
proc_list = procptr->next;
kfree(procptr);
procptr = proc_list;
}
}
printk(KERN_INFO "\n");
}
exec_result = altera_execute(astate, (u8 *)fw->data, fw->size,
&error_address, &exit_code, &format_version);
if (exit_code)
exec_result = -EREMOTEIO;
if ((format_version == 2) && (exec_result == -EINVAL)) {
if (astate->config->action == NULL)
printk(KERN_ERR "%s: error: no action specified for "
"Jam STAPL file.\nprogram terminated.\n",
__func__);
else
printk(KERN_ERR "%s: error: action \"%s\""
" is not supported "
"for this Jam STAPL file.\n"
"Program terminated.\n", __func__,
astate->config->action);
} else if (exec_result)
printk(KERN_ERR "%s: error %d\n", __func__, exec_result);
free_state:
kfree(astate);
free_value:
kfree(value);
free_key:
kfree(key);
out:
return retval;
}
EXPORT_SYMBOL(altera_init);
| linux-master | drivers/misc/altera-stapl/altera.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* altera-jtag.c
*
* altera FPGA driver
*
* Copyright (C) Altera Corporation 1998-2001
* Copyright (C) 2010 NetUP Inc.
* Copyright (C) 2010 Igor M. Liplianin <[email protected]>
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <misc/altera.h>
#include "altera-exprt.h"
#include "altera-jtag.h"
#define alt_jtag_io(a, b, c)\
astate->config->jtag_io(astate->config->dev, a, b, c);
#define alt_malloc(a) kzalloc(a, GFP_KERNEL);
/*
* This structure shows, for each JTAG state, which state is reached after
* a single TCK clock cycle with TMS high or TMS low, respectively. This
* describes all possible state transitions in the JTAG state machine.
*/
struct altera_jtag_machine {
enum altera_jtag_state tms_high;
enum altera_jtag_state tms_low;
};
static const struct altera_jtag_machine altera_transitions[] = {
/* RESET */ { RESET, IDLE },
/* IDLE */ { DRSELECT, IDLE },
/* DRSELECT */ { IRSELECT, DRCAPTURE },
/* DRCAPTURE */ { DREXIT1, DRSHIFT },
/* DRSHIFT */ { DREXIT1, DRSHIFT },
/* DREXIT1 */ { DRUPDATE, DRPAUSE },
/* DRPAUSE */ { DREXIT2, DRPAUSE },
/* DREXIT2 */ { DRUPDATE, DRSHIFT },
/* DRUPDATE */ { DRSELECT, IDLE },
/* IRSELECT */ { RESET, IRCAPTURE },
/* IRCAPTURE */ { IREXIT1, IRSHIFT },
/* IRSHIFT */ { IREXIT1, IRSHIFT },
/* IREXIT1 */ { IRUPDATE, IRPAUSE },
/* IRPAUSE */ { IREXIT2, IRPAUSE },
/* IREXIT2 */ { IRUPDATE, IRSHIFT },
/* IRUPDATE */ { DRSELECT, IDLE }
};
/*
* This table contains the TMS value to be used to take the NEXT STEP on
* the path to the desired state. The array index is the current state,
* and the bit position is the desired endstate. To find out which state
* is used as the intermediate state, look up the TMS value in the
* altera_transitions[] table.
*/
static const u16 altera_jtag_path_map[16] = {
/* RST RTI SDRS CDR SDR E1DR PDR E2DR */
0x0001, 0xFFFD, 0xFE01, 0xFFE7, 0xFFEF, 0xFF0F, 0xFFBF, 0xFFFF,
/* UDR SIRS CIR SIR E1IR PIR E2IR UIR */
0xFEFD, 0x0001, 0xF3FF, 0xF7FF, 0x87FF, 0xDFFF, 0xFFFF, 0x7FFD
};
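/*
 * Example: to move from IDLE to IRSHIFT the successive lookups give
 * TMS = 1, 1, 0, 0, which walks the TAP through
 * IDLE -> DRSELECT -> IRSELECT -> IRCAPTURE -> IRSHIFT.
 */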
/* Flag bits for alt_jtag_io() function */
#define TMS_HIGH 1
#define TMS_LOW 0
#define TDI_HIGH 1
#define TDI_LOW 0
#define READ_TDO 1
#define IGNORE_TDO 0
int altera_jinit(struct altera_state *astate)
{
struct altera_jtag *js = &astate->js;
/* initial JTAG state is unknown */
js->jtag_state = ILLEGAL_JTAG_STATE;
/* initialize to default state */
js->drstop_state = IDLE;
js->irstop_state = IDLE;
js->dr_pre = 0;
js->dr_post = 0;
js->ir_pre = 0;
js->ir_post = 0;
js->dr_length = 0;
js->ir_length = 0;
js->dr_pre_data = NULL;
js->dr_post_data = NULL;
js->ir_pre_data = NULL;
js->ir_post_data = NULL;
js->dr_buffer = NULL;
js->ir_buffer = NULL;
return 0;
}
int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state)
{
js->drstop_state = state;
return 0;
}
int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state)
{
js->irstop_state = state;
return 0;
}
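/*
 * The altera_set_{dr,ir}_{pre,post}() helpers below record the padding
 * bits shifted before (preamble) and after (postamble) the target data
 * on each DR/IR scan, growing the backing bit buffers on demand. When
 * no explicit padding data is supplied, the padding bits default to
 * ones.
 */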
int altera_set_dr_pre(struct altera_jtag *js,
u32 count, u32 start_index,
u8 *preamble_data)
{
int status = 0;
u32 i;
u32 j;
if (count > js->dr_pre) {
kfree(js->dr_pre_data);
js->dr_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
if (js->dr_pre_data == NULL)
status = -ENOMEM;
else
js->dr_pre = count;
} else
js->dr_pre = count;
if (status == 0) {
for (i = 0; i < count; ++i) {
j = i + start_index;
if (preamble_data == NULL)
js->dr_pre_data[i >> 3] |= (1 << (i & 7));
else {
if (preamble_data[j >> 3] & (1 << (j & 7)))
js->dr_pre_data[i >> 3] |=
(1 << (i & 7));
else
js->dr_pre_data[i >> 3] &=
~(u32)(1 << (i & 7));
}
}
}
return status;
}
int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index,
u8 *preamble_data)
{
int status = 0;
u32 i;
u32 j;
if (count > js->ir_pre) {
kfree(js->ir_pre_data);
js->ir_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
if (js->ir_pre_data == NULL)
status = -ENOMEM;
else
js->ir_pre = count;
} else
js->ir_pre = count;
if (status == 0) {
for (i = 0; i < count; ++i) {
j = i + start_index;
if (preamble_data == NULL)
js->ir_pre_data[i >> 3] |= (1 << (i & 7));
else {
if (preamble_data[j >> 3] & (1 << (j & 7)))
js->ir_pre_data[i >> 3] |=
(1 << (i & 7));
else
js->ir_pre_data[i >> 3] &=
~(u32)(1 << (i & 7));
}
}
}
return status;
}
int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index,
u8 *postamble_data)
{
int status = 0;
u32 i;
u32 j;
if (count > js->dr_post) {
kfree(js->dr_post_data);
js->dr_post_data = (u8 *)alt_malloc((count + 7) >> 3);
if (js->dr_post_data == NULL)
status = -ENOMEM;
else
js->dr_post = count;
} else
js->dr_post = count;
if (status == 0) {
for (i = 0; i < count; ++i) {
j = i + start_index;
if (postamble_data == NULL)
js->dr_post_data[i >> 3] |= (1 << (i & 7));
else {
if (postamble_data[j >> 3] & (1 << (j & 7)))
js->dr_post_data[i >> 3] |=
(1 << (i & 7));
else
js->dr_post_data[i >> 3] &=
~(u32)(1 << (i & 7));
}
}
}
return status;
}
int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index,
u8 *postamble_data)
{
int status = 0;
u32 i;
u32 j;
if (count > js->ir_post) {
kfree(js->ir_post_data);
js->ir_post_data = (u8 *)alt_malloc((count + 7) >> 3);
if (js->ir_post_data == NULL)
status = -ENOMEM;
else
js->ir_post = count;
} else
js->ir_post = count;
if (status != 0)
return status;
for (i = 0; i < count; ++i) {
j = i + start_index;
if (postamble_data == NULL)
js->ir_post_data[i >> 3] |= (1 << (i & 7));
else {
if (postamble_data[j >> 3] & (1 << (j & 7)))
js->ir_post_data[i >> 3] |= (1 << (i & 7));
else
js->ir_post_data[i >> 3] &=
~(u32)(1 << (i & 7));
}
}
return status;
}
static void altera_jreset_idle(struct altera_state *astate)
{
struct altera_jtag *js = &astate->js;
int i;
/* Go to Test Logic Reset (no matter what the starting state may be) */
for (i = 0; i < 5; ++i)
alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);
/* Now step to Run Test / Idle */
alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);
js->jtag_state = IDLE;
}
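/*
 * altera_goto_jstate() - walk the TAP controller to the requested
 * state. If the current state is unknown, the chain is first reset to
 * Run-Test/Idle. Each step looks up the TMS value in
 * altera_jtag_path_map[] and the next state in altera_transitions[];
 * the walk is bounded and returns -EREMOTEIO if the target state is
 * not reached.
 */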
int altera_goto_jstate(struct altera_state *astate,
enum altera_jtag_state state)
{
struct altera_jtag *js = &astate->js;
int tms;
int count = 0;
int status = 0;
if (js->jtag_state == ILLEGAL_JTAG_STATE)
/* initialize JTAG chain to known state */
altera_jreset_idle(astate);
if (js->jtag_state == state) {
/*
* We are already in the desired state.
* If it is a stable state, loop here.
* Otherwise do nothing (no clock cycles).
*/
if ((state == IDLE) || (state == DRSHIFT) ||
(state == DRPAUSE) || (state == IRSHIFT) ||
(state == IRPAUSE)) {
alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);
} else if (state == RESET)
alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);
} else {
while ((js->jtag_state != state) && (count < 9)) {
/* Get TMS value to take a step toward desired state */
tms = (altera_jtag_path_map[js->jtag_state] &
(1 << state))
? TMS_HIGH : TMS_LOW;
/* Take a step */
alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);
if (tms)
js->jtag_state =
altera_transitions[js->jtag_state].tms_high;
else
js->jtag_state =
altera_transitions[js->jtag_state].tms_low;
++count;
}
}
if (js->jtag_state != state)
status = -EREMOTEIO;
return status;
}
int altera_wait_cycles(struct altera_state *astate,
s32 cycles,
enum altera_jtag_state wait_state)
{
struct altera_jtag *js = &astate->js;
int tms;
s32 count;
int status = 0;
if (js->jtag_state != wait_state)
status = altera_goto_jstate(astate, wait_state);
if (status == 0) {
/*
* Set TMS high to loop in RESET state
* Set TMS low to loop in any other stable state
*/
tms = (wait_state == RESET) ? TMS_HIGH : TMS_LOW;
for (count = 0L; count < cycles; count++)
alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);
}
return status;
}
int altera_wait_msecs(struct altera_state *astate,
s32 microseconds, enum altera_jtag_state wait_state)
/*
 * Causes JTAG hardware to sit in the specified stable
 * state for the specified duration of real time. If
 * no JTAG operations have been performed yet, then only
 * a delay is performed. This permits the WAIT USECS
 * statement to be used in VECTOR programs without causing
 * any JTAG operations.
 * Note that despite the _msecs name, the argument is a count of
 * microseconds and is passed directly to udelay().
 * Returns 0 for success, else appropriate error code.
 */
{
struct altera_jtag *js = &astate->js;
int status = 0;
if ((js->jtag_state != ILLEGAL_JTAG_STATE) &&
(js->jtag_state != wait_state))
status = altera_goto_jstate(astate, wait_state);
if (status == 0)
/* Wait for specified time interval */
udelay(microseconds);
return status;
}
static void altera_concatenate_data(u8 *buffer,
u8 *preamble_data,
u32 preamble_count,
u8 *target_data,
u32 start_index,
u32 target_count,
u8 *postamble_data,
u32 postamble_count)
/*
* Copies preamble data, target data, and postamble data
* into one buffer for IR or DR scans.
*/
{
u32 i, j, k;
for (i = 0L; i < preamble_count; ++i) {
if (preamble_data[i >> 3L] & (1L << (i & 7L)))
buffer[i >> 3L] |= (1L << (i & 7L));
else
buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
}
j = start_index;
k = preamble_count + target_count;
for (; i < k; ++i, ++j) {
if (target_data[j >> 3L] & (1L << (j & 7L)))
buffer[i >> 3L] |= (1L << (i & 7L));
else
buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
}
j = 0L;
k = preamble_count + target_count + postamble_count;
for (; i < k; ++i, ++j) {
if (postamble_data[j >> 3L] & (1L << (j & 7L)))
buffer[i >> 3L] |= (1L << (i & 7L));
else
buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
}
}
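/*
 * alt_jtag_drscan() - shift "count" bits through the data register.
 * start_state selects how DRSHIFT is reached (0 = from IDLE, 1 = from
 * DRPAUSE, 2 = from IRPAUSE). TMS is raised on the final bit so the
 * scan exits to DREXIT1, and a trailing clock leaves the TAP in
 * DRPAUSE. TDO is captured into "tdo" when it is non-NULL. Returns 1
 * on success, 0 for an unknown start_state.
 */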
static int alt_jtag_drscan(struct altera_state *astate,
int start_state,
int count,
u8 *tdi,
u8 *tdo)
{
int i = 0;
int tdo_bit = 0;
int status = 1;
/* First go to DRSHIFT state */
switch (start_state) {
case 0: /* IDLE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(0, 0, 0); /* DRCAPTURE */
alt_jtag_io(0, 0, 0); /* DRSHIFT */
break;
case 1: /* DRPAUSE */
alt_jtag_io(1, 0, 0); /* DREXIT2 */
alt_jtag_io(1, 0, 0); /* DRUPDATE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(0, 0, 0); /* DRCAPTURE */
alt_jtag_io(0, 0, 0); /* DRSHIFT */
break;
case 2: /* IRPAUSE */
alt_jtag_io(1, 0, 0); /* IREXIT2 */
alt_jtag_io(1, 0, 0); /* IRUPDATE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(0, 0, 0); /* DRCAPTURE */
alt_jtag_io(0, 0, 0); /* DRSHIFT */
break;
default:
status = 0;
}
if (status) {
/* loop in the SHIFT-DR state */
for (i = 0; i < count; i++) {
tdo_bit = alt_jtag_io(
(i == count - 1),
tdi[i >> 3] & (1 << (i & 7)),
(tdo != NULL));
if (tdo != NULL) {
if (tdo_bit)
tdo[i >> 3] |= (1 << (i & 7));
else
tdo[i >> 3] &= ~(u32)(1 << (i & 7));
}
}
alt_jtag_io(0, 0, 0); /* DRPAUSE */
}
return status;
}
static int alt_jtag_irscan(struct altera_state *astate,
int start_state,
int count,
u8 *tdi,
u8 *tdo)
{
int i = 0;
int tdo_bit = 0;
int status = 1;
/* First go to IRSHIFT state */
switch (start_state) {
case 0: /* IDLE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(1, 0, 0); /* IRSELECT */
alt_jtag_io(0, 0, 0); /* IRCAPTURE */
alt_jtag_io(0, 0, 0); /* IRSHIFT */
break;
case 1: /* DRPAUSE */
alt_jtag_io(1, 0, 0); /* DREXIT2 */
alt_jtag_io(1, 0, 0); /* DRUPDATE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(1, 0, 0); /* IRSELECT */
alt_jtag_io(0, 0, 0); /* IRCAPTURE */
alt_jtag_io(0, 0, 0); /* IRSHIFT */
break;
case 2: /* IRPAUSE */
alt_jtag_io(1, 0, 0); /* IREXIT2 */
alt_jtag_io(1, 0, 0); /* IRUPDATE */
alt_jtag_io(1, 0, 0); /* DRSELECT */
alt_jtag_io(1, 0, 0); /* IRSELECT */
alt_jtag_io(0, 0, 0); /* IRCAPTURE */
alt_jtag_io(0, 0, 0); /* IRSHIFT */
break;
default:
status = 0;
}
if (status) {
/* loop in the SHIFT-IR state */
for (i = 0; i < count; i++) {
tdo_bit = alt_jtag_io(
(i == count - 1),
tdi[i >> 3] & (1 << (i & 7)),
(tdo != NULL));
if (tdo != NULL) {
if (tdo_bit)
tdo[i >> 3] |= (1 << (i & 7));
else
tdo[i >> 3] &= ~(u32)(1 << (i & 7));
}
}
alt_jtag_io(0, 0, 0); /* IRPAUSE */
}
return status;
}
static void altera_extract_target_data(u8 *buffer,
u8 *target_data,
u32 start_index,
u32 preamble_count,
u32 target_count)
/*
* Copies target data from scan buffer, filtering out
* preamble and postamble data.
*/
{
u32 i;
u32 j;
u32 k;
j = preamble_count;
k = start_index + target_count;
for (i = start_index; i < k; ++i, ++j) {
if (buffer[j >> 3] & (1 << (j & 7)))
target_data[i >> 3] |= (1 << (i & 7));
else
target_data[i >> 3] &= ~(u32)(1 << (i & 7));
}
}
int altera_irscan(struct altera_state *astate,
u32 count,
u8 *tdi_data,
u32 start_index)
/* Shifts data into instruction register */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->ir_pre + count + js->ir_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->ir_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->ir_buffer);
js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->ir_buffer == NULL)
status = -ENOMEM;
else
js->ir_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, IR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->ir_buffer,
js->ir_pre_data,
js->ir_pre,
tdi_data,
start_index,
count,
js->ir_post_data,
js->ir_post);
/* Do the IRSCAN */
alt_jtag_irscan(astate,
start_code,
shift_count,
js->ir_buffer,
NULL);
/* alt_jtag_irscan() always ends in IRPAUSE state */
js->jtag_state = IRPAUSE;
}
if (status == 0)
if (js->irstop_state != IRPAUSE)
status = altera_goto_jstate(astate, js->irstop_state);
return status;
}
int altera_swap_ir(struct altera_state *astate,
u32 count,
u8 *in_data,
u32 in_index,
u8 *out_data,
u32 out_index)
/* Shifts data into instruction register, capturing output data */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->ir_pre + count + js->ir_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->ir_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->ir_buffer);
js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->ir_buffer == NULL)
status = -ENOMEM;
else
js->ir_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, IR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->ir_buffer,
js->ir_pre_data,
js->ir_pre,
in_data,
in_index,
count,
js->ir_post_data,
js->ir_post);
/* Do the IRSCAN */
alt_jtag_irscan(astate,
start_code,
shift_count,
js->ir_buffer,
js->ir_buffer);
/* alt_jtag_irscan() always ends in IRPAUSE state */
js->jtag_state = IRPAUSE;
}
if (status == 0)
if (js->irstop_state != IRPAUSE)
status = altera_goto_jstate(astate, js->irstop_state);
if (status == 0)
/* Now extract the returned data from the buffer */
altera_extract_target_data(js->ir_buffer,
out_data, out_index,
js->ir_pre, count);
return status;
}
int altera_drscan(struct altera_state *astate,
u32 count,
u8 *tdi_data,
u32 start_index)
/* Shifts data into data register (ignoring output data) */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->dr_pre + count + js->dr_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->dr_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->dr_buffer);
js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->dr_buffer == NULL)
status = -ENOMEM;
else
js->dr_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, DR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->dr_buffer,
js->dr_pre_data,
js->dr_pre,
tdi_data,
start_index,
count,
js->dr_post_data,
js->dr_post);
/* Do the DRSCAN */
alt_jtag_drscan(astate, start_code, shift_count,
js->dr_buffer, NULL);
/* alt_jtag_drscan() always ends in DRPAUSE state */
js->jtag_state = DRPAUSE;
}
if (status == 0)
if (js->drstop_state != DRPAUSE)
status = altera_goto_jstate(astate, js->drstop_state);
return status;
}
int altera_swap_dr(struct altera_state *astate, u32 count,
u8 *in_data, u32 in_index,
u8 *out_data, u32 out_index)
/* Shifts data into data register, capturing output data */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->dr_pre + count + js->dr_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->dr_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->dr_buffer);
js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->dr_buffer == NULL)
status = -ENOMEM;
else
js->dr_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, DR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->dr_buffer,
js->dr_pre_data,
js->dr_pre,
in_data,
in_index,
count,
js->dr_post_data,
js->dr_post);
/* Do the DRSCAN */
alt_jtag_drscan(astate,
start_code,
shift_count,
js->dr_buffer,
js->dr_buffer);
/* alt_jtag_drscan() always ends in DRPAUSE state */
js->jtag_state = DRPAUSE;
}
if (status == 0)
if (js->drstop_state != DRPAUSE)
status = altera_goto_jstate(astate, js->drstop_state);
if (status == 0)
/* Now extract the returned data from the buffer */
altera_extract_target_data(js->dr_buffer,
out_data,
out_index,
js->dr_pre,
count);
return status;
}
void altera_free_buffers(struct altera_state *astate)
{
struct altera_jtag *js = &astate->js;
/* If the JTAG interface was used, reset it to TLR */
if (js->jtag_state != ILLEGAL_JTAG_STATE)
altera_jreset_idle(astate);
kfree(js->dr_pre_data);
js->dr_pre_data = NULL;
kfree(js->dr_post_data);
js->dr_post_data = NULL;
kfree(js->dr_buffer);
js->dr_buffer = NULL;
kfree(js->ir_pre_data);
js->ir_pre_data = NULL;
kfree(js->ir_post_data);
js->ir_post_data = NULL;
kfree(js->ir_buffer);
js->ir_buffer = NULL;
}
| linux-master | drivers/misc/altera-stapl/altera-jtag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* altera-comp.c
*
* altera FPGA driver
*
* Copyright (C) Altera Corporation 1998-2001
* Copyright (C) 2010 NetUP Inc.
* Copyright (C) 2010 Igor M. Liplianin <[email protected]>
*/
#include <linux/kernel.h>
#include "altera-exprt.h"
#define SHORT_BITS 16
#define CHAR_BITS 8
#define DATA_BLOB_LENGTH 3
#define MATCH_DATA_LENGTH 8192
#define ALTERA_REQUEST_SIZE 1024
#define ALTERA_BUFFER_SIZE (MATCH_DATA_LENGTH + ALTERA_REQUEST_SIZE)
static u32 altera_bits_req(u32 n)
{
u32 result = SHORT_BITS;
if (n == 0)
result = 1;
else {
/* Look for the highest non-zero bit position */
while ((n & (1 << (SHORT_BITS - 1))) == 0) {
n <<= 1;
--result;
}
}
return result;
}
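/*
 * altera_read_packed() - fetch the next "bits" bits from the packed
 * input stream. Bits are consumed LSB-first within each byte;
 * *bits_avail tracks how many bits remain in the current byte and
 * *in_index advances to the next byte once it is exhausted.
 */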
static u32 altera_read_packed(u8 *buffer, u32 bits, u32 *bits_avail,
u32 *in_index)
{
u32 result = 0;
u32 shift = 0;
u32 databyte = 0;
while (bits > 0) {
databyte = buffer[*in_index];
result |= (((databyte >> (CHAR_BITS - *bits_avail))
& (0xff >> (CHAR_BITS - *bits_avail))) << shift);
if (bits <= *bits_avail) {
result &= (0xffff >> (SHORT_BITS - (bits + shift)));
*bits_avail -= bits;
bits = 0;
} else {
++(*in_index);
shift += *bits_avail;
bits -= *bits_avail;
*bits_avail = CHAR_BITS;
}
}
return result;
}
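/*
 * altera_shrink() - expand the LZ-style compressed data section used
 * by Jam STAPL images. The stream begins with a 32-bit little-endian
 * uncompressed length, followed by flagged records: a 0 flag bit
 * introduces DATA_BLOB_LENGTH literal bytes, a 1 flag bit introduces
 * an offset/length pair copied from already produced output. Returns
 * the number of bytes written to "out", or 0 if the declared length
 * does not fit in out_length.
 */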
u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version)
{
u32 i, j, data_length = 0L;
u32 offset, length;
u32 match_data_length = MATCH_DATA_LENGTH;
u32 bits_avail = CHAR_BITS;
u32 in_index = 0L;
if (version > 0)
--match_data_length;
for (i = 0; i < out_length; ++i)
out[i] = 0;
/* Read number of bytes in data. */
for (i = 0; i < sizeof(in_length); ++i) {
data_length = data_length | (
altera_read_packed(in,
CHAR_BITS,
&bits_avail,
&in_index) << (i * CHAR_BITS));
}
if (data_length > out_length) {
data_length = 0L;
return data_length;
}
i = 0;
while (i < data_length) {
/* A 0 bit indicates literal data. */
if (altera_read_packed(in, 1, &bits_avail,
&in_index) == 0) {
for (j = 0; j < DATA_BLOB_LENGTH; ++j) {
if (i < data_length) {
out[i] = (u8)altera_read_packed(in,
CHAR_BITS,
&bits_avail,
&in_index);
i++;
}
}
} else {
/* A 1 bit indicates offset/length to follow. */
offset = altera_read_packed(in, altera_bits_req((s16)
(i > match_data_length ?
match_data_length : i)),
&bits_avail,
&in_index);
length = altera_read_packed(in, CHAR_BITS,
&bits_avail,
&in_index);
for (j = 0; j < length; ++j) {
if (i < data_length) {
out[i] = out[i - offset];
i++;
}
}
}
}
return data_length;
}
| linux-master | drivers/misc/altera-stapl/altera-comp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* altera-lpt.c
*
* altera FPGA driver
*
* Copyright (C) Altera Corporation 1998-2001
* Copyright (C) 2010 NetUP Inc.
* Copyright (C) 2010 Abylay Ospan <[email protected]>
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include "altera-exprt.h"
static int lpt_hardware_initialized;
static void byteblaster_write(int port, int data)
{
outb((u8)data, (u16)(port + 0x378));
};
static int byteblaster_read(int port)
{
int data = 0;
data = inb((u16)(port + 0x378));
return data & 0xff;
};
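/*
 * netup_jtag_io_lpt() - clock one JTAG cycle through a ByteBlaster
 * cable on the legacy parallel port at I/O base 0x378. TDI is driven
 * on data bit 6 and TMS on data bit 1, TCK is pulsed via data bit 0,
 * and TDO is read back (inverted) from bit 7 of the status port. The
 * control port is set up once before the first cycle.
 */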
int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo)
{
int data = 0;
int tdo = 0;
int initial_lpt_ctrl = 0;
if (!lpt_hardware_initialized) {
initial_lpt_ctrl = byteblaster_read(2);
byteblaster_write(2, (initial_lpt_ctrl | 0x02) & 0xdf);
lpt_hardware_initialized = 1;
}
data = ((tdi ? 0x40 : 0) | (tms ? 0x02 : 0));
byteblaster_write(0, data);
if (read_tdo) {
tdo = byteblaster_read(1);
tdo = ((tdo & 0x80) ? 0 : 1);
}
byteblaster_write(0, data | 0x01);
byteblaster_write(0, data);
return tdo;
}
| linux-master | drivers/misc/altera-stapl/altera-lpt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
/* Use a wide upper bound for the maximum contexts. */
#define VMCI_MAX_CONTEXTS 2000
/*
* List of current VMCI contexts. Contexts can be added by
* vmci_ctx_create() and removed via vmci_ctx_destroy().
* These, along with context lookup, are protected by the
* list structure's lock.
*/
static struct {
struct list_head head;
spinlock_t lock; /* Spinlock for context list operations */
} ctx_list = {
.head = LIST_HEAD_INIT(ctx_list.head),
.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
};
/* Used by contexts that did not set up notify flag pointers */
static bool ctx_dummy_notify;
static void ctx_signal_notify(struct vmci_ctx *context)
{
*context->notify = true;
}
static void ctx_clear_notify(struct vmci_ctx *context)
{
*context->notify = false;
}
/*
* If nothing requires the attention of the guest, clears both
* notify flag and call.
*/
static void ctx_clear_notify_call(struct vmci_ctx *context)
{
if (context->pending_datagrams == 0 &&
vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
ctx_clear_notify(context);
}
/*
* Sets the context's notify flag iff datagrams are pending for this
* context. Called from vmci_setup_notify().
*/
void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
{
spin_lock(&context->lock);
if (context->pending_datagrams)
ctx_signal_notify(context);
spin_unlock(&context->lock);
}
/*
* Allocates and initializes a VMCI context.
*/
struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
uintptr_t event_hnd,
int user_version,
const struct cred *cred)
{
struct vmci_ctx *context;
int error;
if (cid == VMCI_INVALID_ID) {
pr_devel("Invalid context ID for VMCI context\n");
error = -EINVAL;
goto err_out;
}
if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
priv_flags);
error = -EINVAL;
goto err_out;
}
if (user_version == 0) {
pr_devel("Invalid suer_version %d\n", user_version);
error = -EINVAL;
goto err_out;
}
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
pr_warn("Failed to allocate memory for VMCI context\n");
error = -ENOMEM;
goto err_out;
}
kref_init(&context->kref);
spin_lock_init(&context->lock);
INIT_LIST_HEAD(&context->list_item);
INIT_LIST_HEAD(&context->datagram_queue);
INIT_LIST_HEAD(&context->notifier_list);
/* Initialize host-specific VMCI context. */
init_waitqueue_head(&context->host_context.wait_queue);
context->queue_pair_array =
vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
if (!context->queue_pair_array) {
error = -ENOMEM;
goto err_free_ctx;
}
context->doorbell_array =
vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->doorbell_array) {
error = -ENOMEM;
goto err_free_qp_array;
}
context->pending_doorbell_array =
vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
error = -ENOMEM;
goto err_free_db_array;
}
context->user_version = user_version;
context->priv_flags = priv_flags;
if (cred)
context->cred = get_cred(cred);
context->notify = &ctx_dummy_notify;
context->notify_page = NULL;
	/*
	 * If we collide with an existing context we generate a new
	 * context ID and use it instead. The VMX will determine if
	 * regeneration is okay. Since there aren't 2^32 - 16 VMs
	 * running on a given host, the loop below will terminate.
	 */
spin_lock(&ctx_list.lock);
while (vmci_ctx_exists(cid)) {
/* We reserve the lowest 16 ids for fixed contexts. */
cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
if (cid == VMCI_INVALID_ID)
cid = VMCI_RESERVED_CID_LIMIT;
}
context->cid = cid;
list_add_tail_rcu(&context->list_item, &ctx_list.head);
spin_unlock(&ctx_list.lock);
return context;
err_free_db_array:
vmci_handle_arr_destroy(context->doorbell_array);
err_free_qp_array:
vmci_handle_arr_destroy(context->queue_pair_array);
err_free_ctx:
kfree(context);
err_out:
return ERR_PTR(error);
}
/*
* Destroy VMCI context.
*/
void vmci_ctx_destroy(struct vmci_ctx *context)
{
spin_lock(&ctx_list.lock);
list_del_rcu(&context->list_item);
spin_unlock(&ctx_list.lock);
synchronize_rcu();
vmci_ctx_put(context);
}
/*
* Fire notification for all contexts interested in given cid.
*/
static int ctx_fire_notification(u32 context_id, u32 priv_flags)
{
u32 i, array_size;
struct vmci_ctx *sub_ctx;
struct vmci_handle_arr *subscriber_array;
struct vmci_handle context_handle =
vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
/*
* We create an array to hold the subscribers we find when
* scanning through all contexts.
*/
subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
if (subscriber_array == NULL)
return VMCI_ERROR_NO_MEM;
/*
* Scan all contexts to find who is interested in being
* notified about given contextID.
*/
rcu_read_lock();
list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
struct vmci_handle_list *node;
/*
* We only deliver notifications of the removal of
* contexts, if the two contexts are allowed to
* interact.
*/
if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
continue;
list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
if (!vmci_handle_is_equal(node->handle, context_handle))
continue;
vmci_handle_arr_append_entry(&subscriber_array,
vmci_make_handle(sub_ctx->cid,
VMCI_EVENT_HANDLER));
}
}
rcu_read_unlock();
/* Fire event to all subscribers. */
array_size = vmci_handle_arr_get_size(subscriber_array);
for (i = 0; i < array_size; i++) {
int result;
struct vmci_event_ctx ev;
ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
ev.payload.context_id = context_id;
result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
&ev.msg.hdr, false);
if (result < VMCI_SUCCESS) {
pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
ev.msg.event_data.event,
ev.msg.hdr.dst.context);
/* We continue to enqueue on next subscriber. */
}
}
vmci_handle_arr_destroy(subscriber_array);
return VMCI_SUCCESS;
}
/*
* Returns the current number of pending datagrams. The call may
* also serve as a synchronization point for the datagram queue,
* as no enqueue operations can occur concurrently.
*/
int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
{
struct vmci_ctx *context;
context = vmci_ctx_get(cid);
if (context == NULL)
return VMCI_ERROR_INVALID_ARGS;
spin_lock(&context->lock);
if (pending)
*pending = context->pending_datagrams;
spin_unlock(&context->lock);
vmci_ctx_put(context);
return VMCI_SUCCESS;
}
/*
* Queues a VMCI datagram for the appropriate target VM context.
*/
int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
{
struct vmci_datagram_queue_entry *dq_entry;
struct vmci_ctx *context;
struct vmci_handle dg_src;
size_t vmci_dg_size;
vmci_dg_size = VMCI_DG_SIZE(dg);
if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size);
return VMCI_ERROR_INVALID_ARGS;
}
/* Get the target VM's VMCI context. */
context = vmci_ctx_get(cid);
if (!context) {
pr_devel("Invalid context (ID=0x%x)\n", cid);
return VMCI_ERROR_INVALID_ARGS;
}
/* Allocate guest call entry and add it to the target VM's queue. */
dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
if (dq_entry == NULL) {
pr_warn("Failed to allocate memory for datagram\n");
vmci_ctx_put(context);
return VMCI_ERROR_NO_MEM;
}
dq_entry->dg = dg;
dq_entry->dg_size = vmci_dg_size;
dg_src = dg->src;
INIT_LIST_HEAD(&dq_entry->list_item);
spin_lock(&context->lock);
/*
* We put a higher limit on datagrams from the hypervisor. If
* the pending datagram is not from hypervisor, then we check
* if enqueueing it would exceed the
* VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
* the pending datagram is from hypervisor, we allow it to be
* queued at the destination side provided we don't reach the
* VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
*/
if (context->datagram_queue_size + vmci_dg_size >=
VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
(!vmci_handle_is_equal(dg_src,
vmci_make_handle
(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID)) ||
context->datagram_queue_size + vmci_dg_size >=
VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
spin_unlock(&context->lock);
vmci_ctx_put(context);
kfree(dq_entry);
pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
return VMCI_ERROR_NO_RESOURCES;
}
list_add(&dq_entry->list_item, &context->datagram_queue);
context->pending_datagrams++;
context->datagram_queue_size += vmci_dg_size;
ctx_signal_notify(context);
wake_up(&context->host_context.wait_queue);
spin_unlock(&context->lock);
vmci_ctx_put(context);
return vmci_dg_size;
}
/*
* Verifies whether a context with the specified context ID exists.
* FIXME: utility is dubious as no decisions can be reliably made
* using this data as context can appear and disappear at any time.
*/
bool vmci_ctx_exists(u32 cid)
{
struct vmci_ctx *context;
bool exists = false;
rcu_read_lock();
list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
if (context->cid == cid) {
exists = true;
break;
}
}
rcu_read_unlock();
return exists;
}
/*
* Retrieves VMCI context corresponding to the given cid.
*/
struct vmci_ctx *vmci_ctx_get(u32 cid)
{
struct vmci_ctx *c, *context = NULL;
if (cid == VMCI_INVALID_ID)
return NULL;
rcu_read_lock();
list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
if (c->cid == cid) {
/*
* The context owner drops its own reference to the
* context only after removing it from the list and
* waiting for RCU grace period to expire. This
* means that we are not about to increase the
* reference count of something that is in the
* process of being destroyed.
*/
context = c;
kref_get(&context->kref);
break;
}
}
rcu_read_unlock();
return context;
}
/*
* Deallocates all parts of a context data structure. This
* function doesn't lock the context, because it assumes that
* the caller was holding the last reference to context.
*/
static void ctx_free_ctx(struct kref *kref)
{
struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
struct vmci_handle temp_handle;
struct vmci_handle_list *notifier, *tmp;
/*
* Fire event to all contexts interested in knowing this
* context is dying.
*/
ctx_fire_notification(context->cid, context->priv_flags);
/*
* Cleanup all queue pair resources attached to context. If
* the VM dies without cleaning up, this code will make sure
* that no resources are leaked.
*/
temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
if (vmci_qp_broker_detach(temp_handle,
context) < VMCI_SUCCESS) {
/*
* When vmci_qp_broker_detach() succeeds it
* removes the handle from the array. If
* detach fails, we must remove the handle
* ourselves.
*/
vmci_handle_arr_remove_entry(context->queue_pair_array,
temp_handle);
}
temp_handle =
vmci_handle_arr_get_entry(context->queue_pair_array, 0);
}
/*
* It is fine to destroy this without locking the callQueue, as
* this is the only thread having a reference to the context.
*/
list_for_each_entry_safe(dq_entry, dq_entry_tmp,
&context->datagram_queue, list_item) {
WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
list_del(&dq_entry->list_item);
kfree(dq_entry->dg);
kfree(dq_entry);
}
list_for_each_entry_safe(notifier, tmp,
&context->notifier_list, node) {
		list_del(&notifier->node);
kfree(notifier);
}
vmci_handle_arr_destroy(context->queue_pair_array);
vmci_handle_arr_destroy(context->doorbell_array);
vmci_handle_arr_destroy(context->pending_doorbell_array);
vmci_ctx_unset_notify(context);
if (context->cred)
put_cred(context->cred);
kfree(context);
}
/*
* Drops reference to VMCI context. If this is the last reference to
* the context it will be deallocated. A context is created with
* a reference count of one, and on destroy, it is removed from
* the context list before its reference count is decremented. Thus,
* if we reach zero, we are sure that nobody else are about to increment
* it (they need the entry in the context list for that), and so there
* is no need for locking.
*/
void vmci_ctx_put(struct vmci_ctx *context)
{
kref_put(&context->kref, ctx_free_ctx);
}
/*
 * Dequeues the next datagram and returns it to the caller.
 * The caller passes in a pointer to the maximum datagram size it
 * can handle, and the datagram is only unqueued if its size does
 * not exceed *max_size. If it is larger, *max_size is set to the
 * size of the datagram so the caller gets a chance to set up a
 * larger buffer for the guestcall.
 */
int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
size_t *max_size,
struct vmci_datagram **dg)
{
struct vmci_datagram_queue_entry *dq_entry;
struct list_head *list_item;
int rv;
/* Dequeue the next datagram entry. */
spin_lock(&context->lock);
if (context->pending_datagrams == 0) {
ctx_clear_notify_call(context);
spin_unlock(&context->lock);
pr_devel("No datagrams pending\n");
return VMCI_ERROR_NO_MORE_DATAGRAMS;
}
list_item = context->datagram_queue.next;
dq_entry =
list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
/* Check size of caller's buffer. */
if (*max_size < dq_entry->dg_size) {
*max_size = dq_entry->dg_size;
spin_unlock(&context->lock);
pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
(u32) *max_size);
return VMCI_ERROR_NO_MEM;
}
list_del(list_item);
context->pending_datagrams--;
context->datagram_queue_size -= dq_entry->dg_size;
if (context->pending_datagrams == 0) {
ctx_clear_notify_call(context);
rv = VMCI_SUCCESS;
} else {
/*
* Return the size of the next datagram.
*/
struct vmci_datagram_queue_entry *next_entry;
list_item = context->datagram_queue.next;
next_entry =
list_entry(list_item, struct vmci_datagram_queue_entry,
list_item);
/*
* The following size_t -> int truncation is fine as
* the maximum size of a (routable) datagram is 68KB.
*/
rv = (int)next_entry->dg_size;
}
spin_unlock(&context->lock);
/* Caller must free datagram. */
*dg = dq_entry->dg;
dq_entry->dg = NULL;
kfree(dq_entry);
return rv;
}
/*
* Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
* page mapped/locked by vmci_setup_notify().
*/
void vmci_ctx_unset_notify(struct vmci_ctx *context)
{
struct page *notify_page;
spin_lock(&context->lock);
notify_page = context->notify_page;
context->notify = &ctx_dummy_notify;
context->notify_page = NULL;
spin_unlock(&context->lock);
if (notify_page) {
kunmap(notify_page);
put_page(notify_page);
}
}
/*
* Add remote_cid to list of contexts current contexts wants
* notifications from/about.
*/
int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
{
struct vmci_ctx *context;
struct vmci_handle_list *notifier, *n;
int result;
bool exists = false;
context = vmci_ctx_get(context_id);
if (!context)
return VMCI_ERROR_NOT_FOUND;
if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
context_id, remote_cid);
result = VMCI_ERROR_DST_UNREACHABLE;
goto out;
}
if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
result = VMCI_ERROR_NO_ACCESS;
goto out;
}
notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
if (!notifier) {
result = VMCI_ERROR_NO_MEM;
goto out;
}
	INIT_LIST_HEAD(&notifier->node);
notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
spin_lock(&context->lock);
if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
list_for_each_entry(n, &context->notifier_list, node) {
if (vmci_handle_is_equal(n->handle, notifier->handle)) {
exists = true;
break;
}
}
if (exists) {
kfree(notifier);
result = VMCI_ERROR_ALREADY_EXISTS;
} else {
			list_add_tail_rcu(&notifier->node,
&context->notifier_list);
context->n_notifiers++;
result = VMCI_SUCCESS;
}
} else {
kfree(notifier);
result = VMCI_ERROR_NO_MEM;
}
spin_unlock(&context->lock);
out:
vmci_ctx_put(context);
return result;
}
/*
* Remove remote_cid from current context's list of contexts it is
* interested in getting notifications from/about.
*/
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
struct vmci_ctx *context;
struct vmci_handle_list *notifier = NULL, *iter, *tmp;
struct vmci_handle handle;
context = vmci_ctx_get(context_id);
if (!context)
return VMCI_ERROR_NOT_FOUND;
handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
spin_lock(&context->lock);
list_for_each_entry_safe(iter, tmp,
&context->notifier_list, node) {
if (vmci_handle_is_equal(iter->handle, handle)) {
list_del_rcu(&iter->node);
context->n_notifiers--;
notifier = iter;
break;
}
}
spin_unlock(&context->lock);
if (notifier)
kvfree_rcu_mightsleep(notifier);
vmci_ctx_put(context);
return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}
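/*
 * Copies the context's notifier list into a newly allocated array of
 * context IDs for checkpointing. Called with the context lock held,
 * hence the GFP_ATOMIC allocation; if the caller's buffer is too
 * small, *buf_size is updated and VMCI_ERROR_MORE_DATA is returned.
 */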
static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
u32 *buf_size, void **pbuf)
{
u32 *notifiers;
size_t data_size;
struct vmci_handle_list *entry;
int i = 0;
if (context->n_notifiers == 0) {
*buf_size = 0;
*pbuf = NULL;
return VMCI_SUCCESS;
}
data_size = context->n_notifiers * sizeof(*notifiers);
if (*buf_size < data_size) {
*buf_size = data_size;
return VMCI_ERROR_MORE_DATA;
}
notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
if (!notifiers)
return VMCI_ERROR_NO_MEM;
list_for_each_entry(entry, &context->notifier_list, node)
notifiers[i++] = entry->handle.context;
*buf_size = data_size;
*pbuf = notifiers;
return VMCI_SUCCESS;
}
static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
u32 *buf_size, void **pbuf)
{
struct dbell_cpt_state *dbells;
u32 i, n_doorbells;
n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
if (n_doorbells > 0) {
size_t data_size = n_doorbells * sizeof(*dbells);
if (*buf_size < data_size) {
*buf_size = data_size;
return VMCI_ERROR_MORE_DATA;
}
dbells = kzalloc(data_size, GFP_ATOMIC);
if (!dbells)
return VMCI_ERROR_NO_MEM;
for (i = 0; i < n_doorbells; i++)
dbells[i].handle = vmci_handle_arr_get_entry(
context->doorbell_array, i);
*buf_size = data_size;
*pbuf = dbells;
} else {
*buf_size = 0;
*pbuf = NULL;
}
return VMCI_SUCCESS;
}
/*
* Get current context's checkpoint state of given type.
*/
int vmci_ctx_get_chkpt_state(u32 context_id,
u32 cpt_type,
u32 *buf_size,
void **pbuf)
{
struct vmci_ctx *context;
int result;
context = vmci_ctx_get(context_id);
if (!context)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
switch (cpt_type) {
case VMCI_NOTIFICATION_CPT_STATE:
result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
break;
case VMCI_WELLKNOWN_CPT_STATE:
/*
* For compatibility with VMX'en with VM to VM communication, we
* always return zero wellknown handles.
*/
*buf_size = 0;
*pbuf = NULL;
result = VMCI_SUCCESS;
break;
case VMCI_DOORBELL_CPT_STATE:
result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
break;
default:
pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
result = VMCI_ERROR_INVALID_ARGS;
break;
}
spin_unlock(&context->lock);
vmci_ctx_put(context);
return result;
}
/*
* Set current context's checkpoint state of given type.
*/
int vmci_ctx_set_chkpt_state(u32 context_id,
u32 cpt_type,
u32 buf_size,
void *cpt_buf)
{
u32 i;
u32 current_id;
int result = VMCI_SUCCESS;
u32 num_ids = buf_size / sizeof(u32);
if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
/*
* We would end up here if VMX with VM to VM communication
* attempts to restore a checkpoint with wellknown handles.
*/
pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
return VMCI_ERROR_OBSOLETE;
}
if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
return VMCI_ERROR_INVALID_ARGS;
}
for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
current_id = ((u32 *)cpt_buf)[i];
result = vmci_ctx_add_notification(context_id, current_id);
if (result != VMCI_SUCCESS)
break;
}
if (result != VMCI_SUCCESS)
pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
cpt_type, result);
return result;
}
/*
* Retrieves the specified context's pending notifications in the
* form of a handle array. The handle arrays returned are the
* actual data - not a copy and should not be modified by the
* caller. They must be released using
* vmci_ctx_rcv_notifications_release.
*/
int vmci_ctx_rcv_notifications_get(u32 context_id,
struct vmci_handle_arr **db_handle_array,
struct vmci_handle_arr **qp_handle_array)
{
struct vmci_ctx *context;
int result = VMCI_SUCCESS;
context = vmci_ctx_get(context_id);
if (context == NULL)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
*db_handle_array = context->pending_doorbell_array;
context->pending_doorbell_array =
vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
context->pending_doorbell_array = *db_handle_array;
*db_handle_array = NULL;
result = VMCI_ERROR_NO_MEM;
}
*qp_handle_array = NULL;
spin_unlock(&context->lock);
vmci_ctx_put(context);
return result;
}
/*
* Releases handle arrays with pending notifications previously
* retrieved using vmci_ctx_rcv_notifications_get. If the
* notifications were not successfully handed over to the guest,
* success must be false.
*/
void vmci_ctx_rcv_notifications_release(u32 context_id,
struct vmci_handle_arr *db_handle_array,
struct vmci_handle_arr *qp_handle_array,
bool success)
{
struct vmci_ctx *context = vmci_ctx_get(context_id);
spin_lock(&context->lock);
if (!success) {
struct vmci_handle handle;
/*
* New notifications may have been added while we were not
* holding the context lock, so we transfer any new pending
* doorbell notifications to the old array, and reinstate the
* old array.
*/
handle = vmci_handle_arr_remove_tail(
context->pending_doorbell_array);
while (!vmci_handle_is_invalid(handle)) {
if (!vmci_handle_arr_has_entry(db_handle_array,
handle)) {
vmci_handle_arr_append_entry(
&db_handle_array, handle);
}
handle = vmci_handle_arr_remove_tail(
context->pending_doorbell_array);
}
vmci_handle_arr_destroy(context->pending_doorbell_array);
context->pending_doorbell_array = db_handle_array;
db_handle_array = NULL;
} else {
ctx_clear_notify_call(context);
}
spin_unlock(&context->lock);
vmci_ctx_put(context);
if (db_handle_array)
vmci_handle_arr_destroy(db_handle_array);
if (qp_handle_array)
vmci_handle_arr_destroy(qp_handle_array);
}
/*
* Registers that a new doorbell handle has been allocated by the
* context. Only doorbell handles registered can be notified.
*/
int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
{
struct vmci_ctx *context;
int result;
if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
context = vmci_ctx_get(context_id);
if (context == NULL)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
result = vmci_handle_arr_append_entry(&context->doorbell_array,
handle);
else
result = VMCI_ERROR_DUPLICATE_ENTRY;
spin_unlock(&context->lock);
vmci_ctx_put(context);
return result;
}
/*
* Unregisters a doorbell handle that was previously registered
* with vmci_ctx_dbell_create.
*/
int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
{
struct vmci_ctx *context;
struct vmci_handle removed_handle;
if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
context = vmci_ctx_get(context_id);
if (context == NULL)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
removed_handle =
vmci_handle_arr_remove_entry(context->doorbell_array, handle);
vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
spin_unlock(&context->lock);
vmci_ctx_put(context);
return vmci_handle_is_invalid(removed_handle) ?
VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
}
/*
* Unregisters all doorbell handles that were previously
* registered with vmci_ctx_dbell_create.
*/
int vmci_ctx_dbell_destroy_all(u32 context_id)
{
struct vmci_ctx *context;
struct vmci_handle handle;
if (context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
context = vmci_ctx_get(context_id);
if (context == NULL)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
do {
struct vmci_handle_arr *arr = context->doorbell_array;
handle = vmci_handle_arr_remove_tail(arr);
} while (!vmci_handle_is_invalid(handle));
do {
struct vmci_handle_arr *arr = context->pending_doorbell_array;
handle = vmci_handle_arr_remove_tail(arr);
} while (!vmci_handle_is_invalid(handle));
spin_unlock(&context->lock);
vmci_ctx_put(context);
return VMCI_SUCCESS;
}
/*
* Registers a notification of a doorbell handle initiated by the
* specified source context. The notification of doorbells are
* subject to the same isolation rules as datagram delivery. To
* allow host side senders of notifications a finer granularity
* of sender rights than those assigned to the sending context
* itself, the host context is required to specify a different
* set of privilege flags that will override the privileges of
* the source context.
*/
int vmci_ctx_notify_dbell(u32 src_cid,
struct vmci_handle handle,
u32 src_priv_flags)
{
struct vmci_ctx *dst_context;
int result;
if (vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
/* Get the target VM's VMCI context. */
dst_context = vmci_ctx_get(handle.context);
if (!dst_context) {
pr_devel("Invalid context (ID=0x%x)\n", handle.context);
return VMCI_ERROR_NOT_FOUND;
}
if (src_cid != handle.context) {
u32 dst_priv_flags;
if (VMCI_CONTEXT_IS_VM(src_cid) &&
VMCI_CONTEXT_IS_VM(handle.context)) {
pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
src_cid, handle.context);
result = VMCI_ERROR_DST_UNREACHABLE;
goto out;
}
result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
if (result < VMCI_SUCCESS) {
pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
goto out;
}
if (src_cid != VMCI_HOST_CONTEXT_ID ||
src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
src_priv_flags = vmci_context_get_priv_flags(src_cid);
}
if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
result = VMCI_ERROR_NO_ACCESS;
goto out;
}
}
if (handle.context == VMCI_HOST_CONTEXT_ID) {
result = vmci_dbell_host_context_notify(src_cid, handle);
} else {
spin_lock(&dst_context->lock);
if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
handle)) {
result = VMCI_ERROR_NOT_FOUND;
} else {
if (!vmci_handle_arr_has_entry(
dst_context->pending_doorbell_array,
handle)) {
result = vmci_handle_arr_append_entry(
&dst_context->pending_doorbell_array,
handle);
if (result == VMCI_SUCCESS) {
ctx_signal_notify(dst_context);
wake_up(&dst_context->host_context.wait_queue);
}
} else {
result = VMCI_SUCCESS;
}
}
spin_unlock(&dst_context->lock);
}
out:
vmci_ctx_put(dst_context);
return result;
}
bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
{
return context && context->user_version >= VMCI_VERSION_HOSTQP;
}
/*
* Registers that a new queue pair handle has been allocated by
* the context.
*/
int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
{
int result;
if (context == NULL || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
result = vmci_handle_arr_append_entry(
&context->queue_pair_array, handle);
else
result = VMCI_ERROR_DUPLICATE_ENTRY;
return result;
}
/*
* Unregisters a queue pair handle that was previously registered
* with vmci_ctx_qp_create.
*/
int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
{
struct vmci_handle hndl;
if (context == NULL || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
return vmci_handle_is_invalid(hndl) ?
VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
}
/*
* Determines whether a given queue pair handle is registered
* with the given context.
*/
bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
{
if (context == NULL || vmci_handle_is_invalid(handle))
return false;
return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
}
/*
* vmci_context_get_priv_flags() - Retrieve privilege flags.
* @context_id: The context ID of the VMCI context.
*
* Retrieves privilege flags of the given VMCI context ID.
*/
u32 vmci_context_get_priv_flags(u32 context_id)
{
if (vmci_host_code_active()) {
u32 flags;
struct vmci_ctx *context;
context = vmci_ctx_get(context_id);
if (!context)
return VMCI_LEAST_PRIVILEGE_FLAGS;
flags = context->priv_flags;
vmci_ctx_put(context);
return flags;
}
return VMCI_NO_PRIVILEGE_FLAGS;
}
EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
/*
 * vmci_is_context_owner() - Determines if user is the context owner
* @context_id: The context ID of the VMCI context.
* @uid: The host user id (real kernel value).
*
* Determines whether a given UID is the owner of given VMCI context.
*/
bool vmci_is_context_owner(u32 context_id, kuid_t uid)
{
bool is_owner = false;
if (vmci_host_code_active()) {
struct vmci_ctx *context = vmci_ctx_get(context_id);
if (context) {
if (context->cred)
is_owner = uid_eq(context->cred->uid, uid);
vmci_ctx_put(context);
}
}
return is_owner;
}
EXPORT_SYMBOL_GPL(vmci_is_context_owner);
| linux-master | drivers/misc/vmw_vmci/vmci_context.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/processor.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
#define VMCI_UTIL_NUM_RESOURCES 1
/*
* Datagram buffers for DMA send/receive must accommodate at least
* a maximum sized datagram and the header.
*/
#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)
static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;
struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;
void __iomem *mmio_base;
bool exclusive_vectors;
struct wait_queue_head inout_wq;
void *data_buffer;
dma_addr_t data_buffer_base;
void *tx_buffer;
dma_addr_t tx_buffer_base;
void *notification_bitmap;
dma_addr_t notification_base;
};
static bool use_ppn64;
bool vmci_use_ppn64(void)
{
return use_ppn64;
}
/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);
static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
bool vmci_guest_code_active(void)
{
return atomic_read(&vmci_num_guest_devices) != 0;
}
u32 vmci_get_vm_context_id(void)
{
if (vm_context_id == VMCI_INVALID_ID) {
struct vmci_datagram get_cid_msg;
get_cid_msg.dst =
vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_GET_CONTEXT_ID);
get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
get_cid_msg.payload_size = 0;
vm_context_id = vmci_send_datagram(&get_cid_msg);
}
return vm_context_id;
}
static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
{
if (dev->mmio_base != NULL)
return readl(dev->mmio_base + reg);
return ioread32(dev->iobase + reg);
}
static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
{
if (dev->mmio_base != NULL)
writel(val, dev->mmio_base + reg);
else
iowrite32(val, dev->iobase + reg);
}
static void vmci_read_data(struct vmci_guest_device *vmci_dev,
void *dest, size_t size)
{
if (vmci_dev->mmio_base == NULL)
ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
dest, size);
else {
/*
* For DMA datagrams, the data_buffer will contain the header on the
* first page, followed by the incoming datagram(s) on the following
* pages. The header uses an S/G element immediately following the
* header on the first page to point to the data area.
*/
struct vmci_data_in_out_header *buffer_header = vmci_dev->data_buffer;
struct vmci_sg_elem *sg_array = (struct vmci_sg_elem *)(buffer_header + 1);
size_t buffer_offset = dest - vmci_dev->data_buffer;
buffer_header->opcode = 1;
buffer_header->size = 1;
buffer_header->busy = 0;
sg_array[0].addr = vmci_dev->data_buffer_base + buffer_offset;
sg_array[0].size = size;
vmci_write_reg(vmci_dev, lower_32_bits(vmci_dev->data_buffer_base),
VMCI_DATA_IN_LOW_ADDR);
wait_event(vmci_dev->inout_wq, buffer_header->busy == 1);
}
}
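/*
* Illustrative sketch (not compiled) of the DMA receive setup performed by
* vmci_read_data() above: the header and a single S/G element sit at the
* start of data_buffer, and the S/G element points at the destination
* offset within the same coherent buffer where the device deposits the
* incoming datagram(s).
*
*	data_buffer (data_buffer_base)
*	+---------------------------------------+
*	| struct vmci_data_in_out_header        |
*	| struct vmci_sg_elem[0]:               |
*	|   addr = base + (dest - data_buffer)  |
*	|   size = requested size               |
*	+---------------------------------------+
*	| ... 'dest' area written by device ... |
*	+---------------------------------------+
*/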
static int vmci_write_data(struct vmci_guest_device *dev,
struct vmci_datagram *dg)
{
int result;
if (dev->mmio_base != NULL) {
struct vmci_data_in_out_header *buffer_header = dev->tx_buffer;
u8 *dg_out_buffer = (u8 *)(buffer_header + 1);
if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE)
return VMCI_ERROR_INVALID_ARGS;
/*
* Initialize send buffer with outgoing datagram
* and set up header for inline data. Device will
* not access buffer asynchronously - only after
* the write to VMCI_DATA_OUT_LOW_ADDR.
*/
memcpy(dg_out_buffer, dg, VMCI_DG_SIZE(dg));
buffer_header->opcode = 0;
buffer_header->size = VMCI_DG_SIZE(dg);
buffer_header->busy = 1;
vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base),
VMCI_DATA_OUT_LOW_ADDR);
/* Caller holds a spinlock, so cannot block. */
spin_until_cond(buffer_header->busy == 0);
result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
if (result == VMCI_SUCCESS)
result = (int)buffer_header->result;
} else {
iowrite8_rep(dev->iobase + VMCI_DATA_OUT_ADDR,
dg, VMCI_DG_SIZE(dg));
result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
}
return result;
}
/*
* VM to hypervisor call mechanism. We use the standard VMware naming
* convention since shared code is calling this function as well.
*/
int vmci_send_datagram(struct vmci_datagram *dg)
{
unsigned long flags;
int result;
/* Check args. */
if (dg == NULL)
return VMCI_ERROR_INVALID_ARGS;
/*
* Need to acquire spinlock on the device because the datagram
* data may be spread over multiple pages and the monitor may
* interleave device user rpc calls from multiple
* VCPUs. Acquiring the spinlock precludes that
* possibility. Interrupts are disabled to avoid incoming
* datagrams during a "rep out" and to avoid re-entering
* this function.
*/
spin_lock_irqsave(&vmci_dev_spinlock, flags);
if (vmci_dev_g) {
/* vmci_write_data() already reports the device result. */
result = vmci_write_data(vmci_dev_g, dg);
} else {
result = VMCI_ERROR_UNAVAILABLE;
}
spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);
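/*
* Illustrative sketch (not part of the driver): how a caller typically
* builds a minimal datagram for vmci_send_datagram(). EXAMPLE_RESOURCE_ID
* is a hypothetical resource ID; see vmci_get_vm_context_id() above for an
* in-tree caller that uses VMCI_GET_CONTEXT_ID.
*
*	struct vmci_datagram dg;
*	int result;
*
*	dg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
*				  EXAMPLE_RESOURCE_ID);
*	dg.src = VMCI_ANON_SRC_HANDLE;
*	dg.payload_size = 0;
*	result = vmci_send_datagram(&dg);
*	if (result < VMCI_SUCCESS)
*		pr_devel("send failed: %d\n", result);
*/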
/*
* Gets called with the new context id when the VM's context is updated or resumed.
*/
static void vmci_guest_cid_update(u32 sub_id,
const struct vmci_event_data *event_data,
void *client_data)
{
const struct vmci_event_payld_ctx *ev_payload =
vmci_event_data_const_payload(event_data);
if (sub_id != ctx_update_sub_id) {
pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
return;
}
if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
pr_devel("Invalid event data\n");
return;
}
pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
vm_context_id, ev_payload->context_id, event_data->event);
vm_context_id = ev_payload->context_id;
}
/*
* Verify that the host supports the hypercalls we need. If it does not,
* try to find fallback hypercalls and use those instead. Returns 0 if
* required hypercalls (or fallback hypercalls) are supported by the host,
* an error code otherwise.
*/
static int vmci_check_host_caps(struct pci_dev *pdev)
{
bool result;
struct vmci_resource_query_msg *msg;
u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
struct vmci_datagram *check_msg;
check_msg = kzalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
return -ENOMEM;
}
check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_RESOURCES_QUERY);
check_msg->src = VMCI_ANON_SRC_HANDLE;
check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
msg->resources[0] = VMCI_GET_CONTEXT_ID;
/* Checks that hypercalls are supported */
result = vmci_send_datagram(check_msg) == 0x01;
kfree(check_msg);
dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
__func__, result ? "PASSED" : "FAILED");
/* We need the vector. There are no fallbacks. */
return result ? 0 : -ENXIO;
}
/*
* Reads datagrams from the device and dispatches them. For IO port
* based access to the device, we always start reading datagrams into
* only the first page of the datagram buffer. If the datagrams don't
* fit into one page, we use the maximum datagram buffer size for the
* remainder of the invocation. This is a simple heuristic for not
* penalizing small datagrams. For DMA-based datagrams, we always
* use the maximum datagram buffer size, since there is no performance
* penalty for doing so.
*
* This function assumes that it has exclusive access to the data
* in register(s) for the duration of the call.
*/
static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
{
u8 *dg_in_buffer = vmci_dev->data_buffer;
struct vmci_datagram *dg;
size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
size_t current_dg_in_buffer_size;
size_t remaining_bytes;
bool is_io_port = vmci_dev->mmio_base == NULL;
BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
if (!is_io_port) {
/* For mmio, the first page is used for the header. */
dg_in_buffer += PAGE_SIZE;
/*
* For DMA-based datagram operations, there is no performance
* penalty for reading the maximum buffer size.
*/
current_dg_in_buffer_size = VMCI_MAX_DG_SIZE;
} else {
current_dg_in_buffer_size = PAGE_SIZE;
}
vmci_read_data(vmci_dev, dg_in_buffer, current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
/*
* Read through the buffer until an invalid datagram header is
* encountered. The exit condition for datagrams read through
* VMCI_DATA_IN_ADDR is a bit more complicated, since a datagram
* can start on any page boundary in the buffer.
*/
while (dg->dst.resource != VMCI_INVALID_ID ||
(is_io_port && remaining_bytes > PAGE_SIZE)) {
unsigned dg_in_size;
/*
* If using VMCI_DATA_IN_ADDR, skip to the next page
* as a datagram can start on any page boundary.
*/
if (dg->dst.resource == VMCI_INVALID_ID) {
dg = (struct vmci_datagram *)roundup(
(uintptr_t)dg + 1, PAGE_SIZE);
remaining_bytes =
(size_t)(dg_in_buffer +
current_dg_in_buffer_size -
(u8 *)dg);
continue;
}
dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
if (dg_in_size <= dg_in_buffer_size) {
int result;
/*
* If the remaining bytes in the datagram
* buffer don't contain the complete
* datagram, we first make sure we have enough
* room for it and then we read the remainder
* of the datagram and possibly any following
* datagrams.
*/
if (dg_in_size > remaining_bytes) {
if (remaining_bytes !=
current_dg_in_buffer_size) {
/*
* We move the partial
* datagram to the front and
* read the remainder of the
* datagram and possibly any
* following datagrams into the
* following bytes.
*/
memmove(dg_in_buffer, dg_in_buffer +
current_dg_in_buffer_size -
remaining_bytes,
remaining_bytes);
dg = (struct vmci_datagram *)
dg_in_buffer;
}
if (current_dg_in_buffer_size !=
dg_in_buffer_size)
current_dg_in_buffer_size =
dg_in_buffer_size;
vmci_read_data(vmci_dev,
dg_in_buffer +
remaining_bytes,
current_dg_in_buffer_size -
remaining_bytes);
}
/*
* We special case event datagrams from the
* hypervisor.
*/
if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
dg->dst.resource == VMCI_EVENT_HANDLER) {
result = vmci_event_dispatch(dg);
} else {
result = vmci_datagram_invoke_guest_handler(dg);
}
if (result < VMCI_SUCCESS)
dev_dbg(vmci_dev->dev,
"Datagram with resource (ID=0x%x) failed (err=%d)\n",
dg->dst.resource, result);
/* On to the next datagram. */
dg = (struct vmci_datagram *)((u8 *)dg +
dg_in_size);
} else {
size_t bytes_to_skip;
/*
* Datagram doesn't fit in datagram buffer of maximal
* size. We drop it.
*/
dev_dbg(vmci_dev->dev,
"Failed to receive datagram (size=%u bytes)\n",
dg_in_size);
bytes_to_skip = dg_in_size - remaining_bytes;
if (current_dg_in_buffer_size != dg_in_buffer_size)
current_dg_in_buffer_size = dg_in_buffer_size;
for (;;) {
vmci_read_data(vmci_dev, dg_in_buffer,
current_dg_in_buffer_size);
if (bytes_to_skip <= current_dg_in_buffer_size)
break;
bytes_to_skip -= current_dg_in_buffer_size;
}
dg = (struct vmci_datagram *)(dg_in_buffer +
bytes_to_skip);
}
remaining_bytes =
(size_t) (dg_in_buffer + current_dg_in_buffer_size -
(u8 *)dg);
if (remaining_bytes < VMCI_DG_HEADERSIZE) {
/* Get the next batch of datagrams. */
vmci_read_data(vmci_dev, dg_in_buffer,
current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
}
}
}
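/*
* Worked example (illustrative) of the sizing heuristic above for IO port
* access: the first vmci_read_data() call pulls in one PAGE_SIZE worth of
* datagrams. Only if a datagram's aligned size exceeds the bytes still
* remaining in the buffer does the loop move the partial datagram to the
* front, grow current_dg_in_buffer_size to VMCI_MAX_DG_SIZE and read the
* rest in one go, so small datagrams never pay for a maximum sized read.
*/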
/*
* Scans the notification bitmap for raised flags, clears them
* and handles the notifications.
*/
static void vmci_process_bitmap(struct vmci_guest_device *dev)
{
if (!dev->notification_bitmap) {
dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
return;
}
vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}
/*
* Interrupt handler for legacy or MSI interrupt, or for first MSI-X
* interrupt (vector VMCI_INTR_DATAGRAM).
*/
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
struct vmci_guest_device *dev = _dev;
/*
* If we are using MSI-X with exclusive vectors then we simply call
* vmci_dispatch_dgs(), since we know the interrupt was meant for us.
* Otherwise we must read the ICR to determine what to do.
*/
if (dev->exclusive_vectors) {
vmci_dispatch_dgs(dev);
} else {
unsigned int icr;
/* Acknowledge interrupt and determine what needs doing. */
icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
if (icr == 0 || icr == ~0)
return IRQ_NONE;
if (icr & VMCI_ICR_DATAGRAM) {
vmci_dispatch_dgs(dev);
icr &= ~VMCI_ICR_DATAGRAM;
}
if (icr & VMCI_ICR_NOTIFICATION) {
vmci_process_bitmap(dev);
icr &= ~VMCI_ICR_NOTIFICATION;
}
if (icr & VMCI_ICR_DMA_DATAGRAM) {
wake_up_all(&dev->inout_wq);
icr &= ~VMCI_ICR_DMA_DATAGRAM;
}
if (icr != 0)
dev_warn(dev->dev,
"Ignoring unknown interrupt cause (%d)\n",
icr);
}
return IRQ_HANDLED;
}
/*
* Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
* which is for the notification bitmap. Will only get called if we are
* using MSI-X with exclusive vectors.
*/
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
struct vmci_guest_device *dev = _dev;
/* For MSI-X we can just assume it was meant for us. */
vmci_process_bitmap(dev);
return IRQ_HANDLED;
}
/*
* Interrupt handler for MSI-X interrupt vector VMCI_INTR_DMA_DATAGRAM,
* which is for the completion of a DMA datagram send or receive operation.
* Will only get called if we are using MSI-X with exclusive vectors.
*/
static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
{
struct vmci_guest_device *dev = _dev;
wake_up_all(&dev->inout_wq);
return IRQ_HANDLED;
}
static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
{
if (vmci_dev->mmio_base != NULL) {
if (vmci_dev->tx_buffer != NULL)
dma_free_coherent(vmci_dev->dev,
VMCI_DMA_DG_BUFFER_SIZE,
vmci_dev->tx_buffer,
vmci_dev->tx_buffer_base);
if (vmci_dev->data_buffer != NULL)
dma_free_coherent(vmci_dev->dev,
VMCI_DMA_DG_BUFFER_SIZE,
vmci_dev->data_buffer,
vmci_dev->data_buffer_base);
} else {
vfree(vmci_dev->data_buffer);
}
}
/*
* Most of the initialization at module load time is done here.
*/
static int vmci_guest_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct vmci_guest_device *vmci_dev;
void __iomem *iobase = NULL;
void __iomem *mmio_base = NULL;
unsigned int num_irq_vectors;
unsigned int capabilities;
unsigned int caps_in_use;
unsigned long cmd;
int vmci_err;
int error;
dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
error = pcim_enable_device(pdev);
if (error) {
dev_err(&pdev->dev,
"Failed to enable VMCI device: %d\n", error);
return error;
}
/*
* A VMCI device with MMIO access to its registers requests 256KB
* for BAR1. If present, the driver will use the new VMCI device
* functionality for register access and datagram send/recv.
*/
if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
dev_info(&pdev->dev, "MMIO register access is available\n");
mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
VMCI_MMIO_ACCESS_SIZE);
/* If the map fails, we fall back to IOIO access. */
if (!mmio_base)
dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
}
if (!mmio_base) {
if (IS_ENABLED(CONFIG_ARM64)) {
dev_err(&pdev->dev, "MMIO base is invalid\n");
return -ENXIO;
}
error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (error) {
dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
return error;
}
iobase = pcim_iomap_table(pdev)[0];
}
vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
if (!vmci_dev) {
dev_err(&pdev->dev,
"Can't allocate memory for VMCI device\n");
return -ENOMEM;
}
vmci_dev->dev = &pdev->dev;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;
vmci_dev->mmio_base = mmio_base;
init_waitqueue_head(&vmci_dev->inout_wq);
if (mmio_base != NULL) {
vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
&vmci_dev->tx_buffer_base,
GFP_KERNEL);
if (!vmci_dev->tx_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram tx buffer\n");
return -ENOMEM;
}
vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
&vmci_dev->data_buffer_base,
GFP_KERNEL);
} else {
vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
}
if (!vmci_dev->data_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram buffer\n");
error = -ENOMEM;
goto err_free_data_buffers;
}
pci_set_master(pdev); /* To enable queue_pair functionality. */
/*
* Verify that the VMCI Device supports the capabilities that
* we need. If the device is missing capabilities that we would
* like to use, check for fallback capabilities and use those
* instead (so we can run a new VM on old hosts). Fail the load if
* a required capability is missing and there is no fallback.
*
* Right now, we need datagrams. There are no fallbacks.
*/
capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
dev_err(&pdev->dev, "Device does not support datagrams\n");
error = -ENXIO;
goto err_free_data_buffers;
}
caps_in_use = VMCI_CAPS_DATAGRAM;
/*
* Use 64-bit PPNs if the device supports them.
*
* There is no check for the return value of dma_set_mask_and_coherent
* since this driver can handle the default mask values if
* dma_set_mask_and_coherent fails.
*/
if (capabilities & VMCI_CAPS_PPN64) {
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
use_ppn64 = true;
caps_in_use |= VMCI_CAPS_PPN64;
} else {
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
use_ppn64 = false;
}
/*
* If the hardware supports notifications, we will use that as
* well.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
vmci_dev->notification_bitmap = dma_alloc_coherent(
&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
GFP_KERNEL);
if (!vmci_dev->notification_bitmap)
dev_warn(&pdev->dev,
"Unable to allocate notification bitmap\n");
else
caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
}
if (mmio_base != NULL) {
if (capabilities & VMCI_CAPS_DMA_DATAGRAM) {
caps_in_use |= VMCI_CAPS_DMA_DATAGRAM;
} else {
dev_err(&pdev->dev,
"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
error = -ENXIO;
goto err_free_notification_bitmap;
}
}
dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
/* Let the host know which capabilities we intend to use. */
vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
/* Let the device know the size for pages passed down. */
vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);
/* Configure the high order parts of the data in/out buffers. */
vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
VMCI_DATA_IN_HIGH_ADDR);
vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
VMCI_DATA_OUT_HIGH_ADDR);
}
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = vmci_dev;
vmci_pdev = pdev;
spin_unlock_irq(&vmci_dev_spinlock);
/*
* Register notification bitmap with device if that capability is
* used.
*/
if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
unsigned long bitmap_ppn =
vmci_dev->notification_base >> PAGE_SHIFT;
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%lx\n",
bitmap_ppn);
error = -ENXIO;
goto err_remove_vmci_dev_g;
}
}
/* Check host capabilities. */
error = vmci_check_host_caps(pdev);
if (error)
goto err_remove_vmci_dev_g;
/* Enable device. */
/*
* We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
* update the internal context id when needed.
*/
vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
vmci_guest_cid_update, NULL,
&ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to subscribe to event (type=%d): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
/*
* Enable interrupts. Try MSI-X first, then MSI, and then fallback on
* legacy interrupts.
*/
if (vmci_dev->mmio_base != NULL)
num_irq_vectors = VMCI_MAX_INTRS;
else
num_irq_vectors = VMCI_MAX_INTRS_NOTIFICATION;
error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors,
PCI_IRQ_MSIX);
if (error < 0) {
error = pci_alloc_irq_vectors(pdev, 1, 1,
PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (error < 0)
goto err_unsubscribe_event;
} else {
vmci_dev->exclusive_vectors = true;
}
/*
* Request IRQ for legacy or MSI interrupts, or for first
* MSI-X vector.
*/
error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
vmci_interrupt, IRQF_SHARED,
KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
pci_irq_vector(pdev, 0), error);
goto err_disable_msi;
}
/*
* For MSI-X with exclusive vectors we need to request an
* interrupt for each vector so that we get a separate
* interrupt handler routine. This allows us to distinguish
* between the vectors.
*/
if (vmci_dev->exclusive_vectors) {
error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
vmci_interrupt_bm, 0,
KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
pci_irq_vector(pdev, 1), error);
goto err_free_irq;
}
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
error = request_threaded_irq(pci_irq_vector(pdev, 2),
NULL,
vmci_interrupt_dma_datagram,
0, KBUILD_MODNAME,
vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
pci_irq_vector(pdev, 2), error);
goto err_free_bm_irq;
}
}
}
dev_dbg(&pdev->dev, "Registered device\n");
atomic_inc(&vmci_num_guest_devices);
/* Enable specific interrupt bits. */
cmd = VMCI_IMR_DATAGRAM;
if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM)
cmd |= VMCI_IMR_DMA_DATAGRAM;
vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);
/* Enable interrupts. */
vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
vmci_call_vsock_callback(false);
return 0;
err_free_bm_irq:
if (vmci_dev->exclusive_vectors)
free_irq(pci_irq_vector(pdev, 1), vmci_dev);
err_free_irq:
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
err_disable_msi:
pci_free_irq_vectors(pdev);
err_unsubscribe_event:
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
err_remove_vmci_dev_g:
spin_lock_irq(&vmci_dev_spinlock);
vmci_pdev = NULL;
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
err_free_notification_bitmap:
if (vmci_dev->notification_bitmap) {
vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
dma_free_coherent(&pdev->dev, PAGE_SIZE,
vmci_dev->notification_bitmap,
vmci_dev->notification_base);
}
err_free_data_buffers:
vmci_free_dg_buffers(vmci_dev);
/* The rest are managed resources and will be freed by PCI core */
return error;
}
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
int vmci_err;
dev_dbg(&pdev->dev, "Removing device\n");
atomic_dec(&vmci_num_guest_devices);
vmci_qp_guest_endpoints_exit();
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = NULL;
vmci_pdev = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
dev_dbg(&pdev->dev, "Resetting vmci device\n");
vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
/*
* Free IRQ and then disable MSI/MSI-X as appropriate. For
* MSI-X, we might have multiple vectors, each with their own
* IRQ, which we must free too.
*/
if (vmci_dev->exclusive_vectors) {
free_irq(pci_irq_vector(pdev, 1), vmci_dev);
if (vmci_dev->mmio_base != NULL)
free_irq(pci_irq_vector(pdev, 2), vmci_dev);
}
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
pci_free_irq_vectors(pdev);
if (vmci_dev->notification_bitmap) {
/*
* The device reset above cleared the bitmap state of the
* device, so we can safely free it here.
*/
dma_free_coherent(&pdev->dev, PAGE_SIZE,
vmci_dev->notification_bitmap,
vmci_dev->notification_base);
}
vmci_free_dg_buffers(vmci_dev);
if (vmci_dev->mmio_base != NULL)
pci_iounmap(pdev, vmci_dev->mmio_base);
/* The rest are managed resources and will be freed by PCI core */
}
static const struct pci_device_id vmci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);
static struct pci_driver vmci_guest_driver = {
.name = KBUILD_MODNAME,
.id_table = vmci_ids,
.probe = vmci_guest_probe_device,
.remove = vmci_guest_remove_device,
};
int __init vmci_guest_init(void)
{
return pci_register_driver(&vmci_guest_driver);
}
void __exit vmci_guest_exit(void)
{
pci_unregister_driver(&vmci_guest_driver);
}
| linux-master | drivers/misc/vmw_vmci/vmci_guest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_route.h"
/*
* Make a routing decision for the given source and destination handles.
* This will try to determine the route using the handles and the available
* devices. Will set the source context if it is invalid.
*/
int vmci_route(struct vmci_handle *src,
const struct vmci_handle *dst,
bool from_guest,
enum vmci_route *route)
{
bool has_host_device = vmci_host_code_active();
bool has_guest_device = vmci_guest_code_active();
*route = VMCI_ROUTE_NONE;
/*
* "from_guest" is only ever set to true by
* IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
* which comes from the VMX, so we know it is coming from a
* guest.
*
* To avoid inconsistencies, test these once. We will test
* them again when we do the actual send to ensure that we do
* not touch a non-existent device.
*/
/* Must have a valid destination context. */
if (VMCI_INVALID_ID == dst->context)
return VMCI_ERROR_INVALID_ARGS;
/* Anywhere to hypervisor. */
if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
/*
* If this message already came from a guest then we
* cannot send it to the hypervisor. It must come
* from a local client.
*/
if (from_guest)
return VMCI_ERROR_DST_UNREACHABLE;
/*
* We must be acting as a guest in order to send to
* the hypervisor.
*/
if (!has_guest_device)
return VMCI_ERROR_DEVICE_NOT_FOUND;
/* And we cannot send if the source is the host context. */
if (VMCI_HOST_CONTEXT_ID == src->context)
return VMCI_ERROR_INVALID_ARGS;
/*
* If the client passed the ANON source handle then
* respect it (both context and resource are invalid).
* However, if they passed only an invalid context,
* then they probably mean ANY, in which case we
* should set the real context here before passing it
* down.
*/
if (VMCI_INVALID_ID == src->context &&
VMCI_INVALID_ID != src->resource)
src->context = vmci_get_context_id();
/* Send from local client down to the hypervisor. */
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
/* Anywhere to local client on host. */
if (VMCI_HOST_CONTEXT_ID == dst->context) {
/*
* If it is not from a guest but we are acting as a
* guest, then we need to send it down to the host.
* Note that if we are also acting as a host then this
* will prevent us from sending from local client to
* local client, but we accept that restriction as a
* way to remove any ambiguity from the host context.
*/
if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
/*
* If the hypervisor is the source, this is
* host local communication. The hypervisor
* may send vmci event datagrams to the host
* itself, but it will never send datagrams to
* an "outer host" through the guest device.
*/
if (has_host_device) {
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
} else {
return VMCI_ERROR_DEVICE_NOT_FOUND;
}
}
if (!from_guest && has_guest_device) {
/* If no source context then use the current. */
if (VMCI_INVALID_ID == src->context)
src->context = vmci_get_context_id();
/* Send it from local client down to the host. */
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
/*
* Otherwise we already received it from a guest and
* it is destined for a local client on this host, or
* it is from another local client on this host. We
* must be acting as a host to service it.
*/
if (!has_host_device)
return VMCI_ERROR_DEVICE_NOT_FOUND;
if (VMCI_INVALID_ID == src->context) {
/*
* If it came from a guest then it must have a
* valid context. Otherwise we can use the
* host context.
*/
if (from_guest)
return VMCI_ERROR_INVALID_ARGS;
src->context = VMCI_HOST_CONTEXT_ID;
}
/* Route to local client. */
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
}
/*
* If we are acting as a host then this might be destined for
* a guest.
*/
if (has_host_device) {
/* It will have a context if it is meant for a guest. */
if (vmci_ctx_exists(dst->context)) {
if (VMCI_INVALID_ID == src->context) {
/*
* If it came from a guest then it
* must have a valid context.
* Otherwise we can use the host
* context.
*/
if (from_guest)
return VMCI_ERROR_INVALID_ARGS;
src->context = VMCI_HOST_CONTEXT_ID;
} else if (VMCI_CONTEXT_IS_VM(src->context) &&
src->context != dst->context) {
/*
* VM to VM communication is not
* allowed. Since we catch all
* communication destined for the host
* above, this must be destined for a
* VM since there is a valid context.
*/
return VMCI_ERROR_DST_UNREACHABLE;
}
/* Pass it up to the guest. */
*route = VMCI_ROUTE_AS_HOST;
return VMCI_SUCCESS;
} else if (!has_guest_device) {
/*
* The host is attempting to reach a CID
* without an active context, and we can't
* send it down, since we have no guest
* device.
*/
return VMCI_ERROR_DST_UNREACHABLE;
}
}
/*
* We must be a guest trying to send to another guest, which means
* we need to send it down to the host. We do not filter out VM to
* VM communication here, since we want to be able to use the guest
* driver on older versions that do support VM to VM communication.
*/
if (!has_guest_device) {
/*
* Ending up here means we have neither guest nor host
* device.
*/
return VMCI_ERROR_DEVICE_NOT_FOUND;
}
/* If no source context then use the current context. */
if (VMCI_INVALID_ID == src->context)
src->context = vmci_get_context_id();
/*
* Send it from local client down to the host, which will
* route it to the other guest for us.
*/
*route = VMCI_ROUTE_AS_GUEST;
return VMCI_SUCCESS;
}
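/*
* Illustrative sketch (not part of the driver): typical use of vmci_route()
* by a sender that lets the router fill in the source context; dst is the
* destination handle supplied by the caller. See vmci_doorbell_notify() in
* vmci_doorbell.c for an in-tree caller.
*
*	struct vmci_handle src = VMCI_INVALID_HANDLE;
*	enum vmci_route route;
*	int retval;
*
*	retval = vmci_route(&src, &dst, false, &route);
*	if (retval < VMCI_SUCCESS)
*		return retval;
*	if (route == VMCI_ROUTE_AS_GUEST)
*		... send through the guest device ...
*	else if (route == VMCI_ROUTE_AS_HOST)
*		... deliver to a local context on the host ...
*/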
| linux-master | drivers/misc/vmw_vmci/vmci_route.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/completion.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_driver.h"
#include "vmci_route.h"
#define VMCI_DOORBELL_INDEX_BITS 6
#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
/*
* A dbell_entry describes a doorbell notification handle allocated by the
* host.
*/
struct dbell_entry {
struct vmci_resource resource;
struct hlist_node node;
struct work_struct work;
vmci_callback notify_cb;
void *client_data;
u32 idx;
u32 priv_flags;
bool run_delayed;
atomic_t active; /* Only used by guest personality */
};
/* The VMCI index table keeps track of currently registered doorbells. */
struct dbell_index_table {
spinlock_t lock; /* Index table lock */
struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
};
static struct dbell_index_table vmci_doorbell_it = {
.lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
};
/*
* The max_notify_idx is one larger than the currently known bitmap index in
* use, and is used to determine how much of the bitmap needs to be scanned.
*/
static u32 max_notify_idx;
/*
* The notify_idx_count is used for determining whether there are free entries
* within the bitmap (if notify_idx_count + 1 < max_notify_idx).
*/
static u32 notify_idx_count;
/*
* The last_notify_idx_reserved is used to track the last index handed out - in
* the case where multiple handles share a notification index, we hand out
* indexes round robin based on last_notify_idx_reserved.
*/
static u32 last_notify_idx_reserved;
/* This is a one-entry cache used by the index allocation. */
static u32 last_notify_idx_released = PAGE_SIZE;
/*
* Utility function that retrieves the privilege flags associated
* with a given doorbell handle. For guest endpoints, the
* privileges are determined by the context ID, but for host
* endpoints privileges are associated with the complete
* handle. Hypervisor endpoints are not yet supported.
*/
int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
{
if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
if (handle.context == VMCI_HOST_CONTEXT_ID) {
struct dbell_entry *entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource)
return VMCI_ERROR_NOT_FOUND;
entry = container_of(resource, struct dbell_entry, resource);
*priv_flags = entry->priv_flags;
vmci_resource_put(resource);
} else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
/*
* Hypervisor endpoints for notifications are not
* supported (yet).
*/
return VMCI_ERROR_INVALID_ARGS;
} else {
*priv_flags = vmci_context_get_priv_flags(handle.context);
}
return VMCI_SUCCESS;
}
/*
* Find doorbell entry by bitmap index.
*/
static struct dbell_entry *dbell_index_table_find(u32 idx)
{
u32 bucket = VMCI_DOORBELL_HASH(idx);
struct dbell_entry *dbell;
hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
node) {
if (idx == dbell->idx)
return dbell;
}
return NULL;
}
/*
* Add the given entry to the index table. This will take a reference to the
* entry's resource so that the entry is not deleted before it is removed from
* the table.
*/
static void dbell_index_table_add(struct dbell_entry *entry)
{
u32 bucket;
u32 new_notify_idx;
vmci_resource_get(&entry->resource);
spin_lock_bh(&vmci_doorbell_it.lock);
/*
* Below we try to allocate an index in the notification
* bitmap with "not too much" sharing between resources. If we
* use less that the full bitmap, we either add to the end if
* there are no unused flags within the currently used area,
* or we search for unused ones. If we use the full bitmap, we
* allocate the index round robin.
*/
if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
if (last_notify_idx_released < max_notify_idx &&
!dbell_index_table_find(last_notify_idx_released)) {
new_notify_idx = last_notify_idx_released;
last_notify_idx_released = PAGE_SIZE;
} else {
bool reused = false;
new_notify_idx = last_notify_idx_reserved;
if (notify_idx_count + 1 < max_notify_idx) {
do {
if (!dbell_index_table_find
(new_notify_idx)) {
reused = true;
break;
}
new_notify_idx = (new_notify_idx + 1) %
max_notify_idx;
} while (new_notify_idx !=
last_notify_idx_released);
}
if (!reused) {
new_notify_idx = max_notify_idx;
max_notify_idx++;
}
}
} else {
new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
}
last_notify_idx_reserved = new_notify_idx;
notify_idx_count++;
entry->idx = new_notify_idx;
bucket = VMCI_DOORBELL_HASH(entry->idx);
hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
spin_unlock_bh(&vmci_doorbell_it.lock);
}
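/*
* Worked example (illustrative) of the allocation policy above: starting
* from an empty table, three dbell_index_table_add() calls hand out
* indices 0, 1 and 2 by growing max_notify_idx. If index 1 is then
* released, last_notify_idx_released caches it and the next allocation
* reuses 1 instead of growing the bitmap further. Only when all PAGE_SIZE
* indices are in use does allocation fall back to round-robin sharing.
*/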
/*
* Remove the given entry from the index table. This will release the
* reference taken on the entry's resource.
*/
static void dbell_index_table_remove(struct dbell_entry *entry)
{
spin_lock_bh(&vmci_doorbell_it.lock);
hlist_del_init(&entry->node);
notify_idx_count--;
if (entry->idx == max_notify_idx - 1) {
/*
* If we delete an entry with the maximum known
* notification index, we take the opportunity to
* prune the current max. As there might be other
* unused indices immediately below, we lower the
* maximum until we hit an index in use.
*/
while (max_notify_idx > 0 &&
!dbell_index_table_find(max_notify_idx - 1))
max_notify_idx--;
}
last_notify_idx_released = entry->idx;
spin_unlock_bh(&vmci_doorbell_it.lock);
vmci_resource_put(&entry->resource);
}
/*
* Creates a link between the given doorbell handle and the given
* index in the bitmap in the device backend. A notification state
* is created in the hypervisor.
*/
static int dbell_link(struct vmci_handle handle, u32 notify_idx)
{
struct vmci_doorbell_link_msg link_msg;
link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_LINK);
link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
link_msg.handle = handle;
link_msg.notify_idx = notify_idx;
return vmci_send_datagram(&link_msg.hdr);
}
/*
* Unlinks the given doorbell handle from an index in the bitmap in
* the device backend. The notification state is destroyed in the hypervisor.
*/
static int dbell_unlink(struct vmci_handle handle)
{
struct vmci_doorbell_unlink_msg unlink_msg;
unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_UNLINK);
unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
unlink_msg.handle = handle;
return vmci_send_datagram(&unlink_msg.hdr);
}
/*
* Notify another guest or the host. We send a datagram down to the
* host via the hypervisor with the notification info.
*/
static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
{
struct vmci_doorbell_notify_msg notify_msg;
notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_DOORBELL_NOTIFY);
notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
notify_msg.handle = handle;
return vmci_send_datagram(¬ify_msg.hdr);
}
/*
* Calls the specified callback in a delayed context.
*/
static void dbell_delayed_dispatch(struct work_struct *work)
{
struct dbell_entry *entry = container_of(work,
struct dbell_entry, work);
entry->notify_cb(entry->client_data);
vmci_resource_put(&entry->resource);
}
/*
* Dispatches a doorbell notification to the host context.
*/
int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
{
struct dbell_entry *entry;
struct vmci_resource *resource;
if (vmci_handle_is_invalid(handle)) {
pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_INVALID_ARGS;
}
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource) {
pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct dbell_entry, resource);
if (entry->run_delayed) {
if (!schedule_work(&entry->work))
vmci_resource_put(resource);
} else {
entry->notify_cb(entry->client_data);
vmci_resource_put(resource);
}
return VMCI_SUCCESS;
}
/*
* Register the notification bitmap with the host.
*/
bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
{
int result;
struct vmci_notify_bm_set_msg bitmap_set_msg = { };
bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_SET_NOTIFY_BITMAP);
bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
VMCI_DG_HEADERSIZE;
if (vmci_use_ppn64())
bitmap_set_msg.bitmap_ppn64 = bitmap_ppn;
else
bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn;
result = vmci_send_datagram(&bitmap_set_msg.hdr);
if (result != VMCI_SUCCESS) {
pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n",
bitmap_ppn, result);
return false;
}
return true;
}
/*
* Executes or schedules the handlers for a given notify index.
*/
static void dbell_fire_entries(u32 notify_idx)
{
u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
struct dbell_entry *dbell;
spin_lock_bh(&vmci_doorbell_it.lock);
hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
if (dbell->idx == notify_idx &&
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
vmci_resource_get(&dbell->resource);
if (!schedule_work(&dbell->work))
vmci_resource_put(&dbell->resource);
} else {
dbell->notify_cb(dbell->client_data);
}
}
}
spin_unlock_bh(&vmci_doorbell_it.lock);
}
/*
* Scans the notification bitmap, collects pending notifications,
* resets the bitmap and invokes appropriate callbacks.
*/
void vmci_dbell_scan_notification_entries(u8 *bitmap)
{
u32 idx;
for (idx = 0; idx < max_notify_idx; idx++) {
if (bitmap[idx] & 0x1) {
bitmap[idx] &= ~1;
dbell_fire_entries(idx);
}
}
}
/*
* vmci_doorbell_create() - Creates a doorbell
* @handle: A handle used to track the resource. Can be invalid.
* @flags: Flag that determines context of callback.
* @priv_flags: Privileges flags.
* @notify_cb: The callback to be invoked when the doorbell fires.
* @client_data: A parameter to be passed to the callback.
*
* Creates a doorbell with the given callback. If the handle is
* VMCI_INVALID_HANDLE, a free handle will be assigned, if
* possible. The callback can be run immediately (potentially with
* locks held - the default) or delayed (in a kernel thread) by
* specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
* is selected, a given callback may not be run if the kernel is
* unable to allocate memory for the delayed execution (highly
* unlikely).
*/
int vmci_doorbell_create(struct vmci_handle *handle,
u32 flags,
u32 priv_flags,
vmci_callback notify_cb, void *client_data)
{
struct dbell_entry *entry;
struct vmci_handle new_handle;
int result;
if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
return VMCI_ERROR_INVALID_ARGS;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry == NULL) {
pr_warn("Failed allocating memory for datagram entry\n");
return VMCI_ERROR_NO_MEM;
}
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
if (context_id == VMCI_INVALID_ID) {
pr_warn("Failed to get context ID\n");
result = VMCI_ERROR_NO_RESOURCES;
goto free_mem;
}
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
bool valid_context = false;
/*
* Validate the handle. We must do both of the checks below
* because we can be acting as both a host and a guest at the
* same time. We always allow the host context ID, since the
* host functionality is in practice always there with the
* unified driver.
*/
if (handle->context == VMCI_HOST_CONTEXT_ID ||
(vmci_guest_code_active() &&
vmci_get_context_id() == handle->context)) {
valid_context = true;
}
if (!valid_context || handle->resource == VMCI_INVALID_ID) {
pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
handle->context, handle->resource);
result = VMCI_ERROR_INVALID_ARGS;
goto free_mem;
}
new_handle = *handle;
}
entry->idx = 0;
INIT_HLIST_NODE(&entry->node);
entry->priv_flags = priv_flags;
INIT_WORK(&entry->work, dbell_delayed_dispatch);
entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
entry->notify_cb = notify_cb;
entry->client_data = client_data;
atomic_set(&entry->active, 0);
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_DOORBELL,
new_handle);
if (result != VMCI_SUCCESS) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
new_handle.context, new_handle.resource, result);
goto free_mem;
}
new_handle = vmci_resource_handle(&entry->resource);
if (vmci_guest_code_active()) {
dbell_index_table_add(entry);
result = dbell_link(new_handle, entry->idx);
if (VMCI_SUCCESS != result)
goto destroy_resource;
atomic_set(&entry->active, 1);
}
*handle = new_handle;
return result;
destroy_resource:
dbell_index_table_remove(entry);
vmci_resource_remove(&entry->resource);
free_mem:
kfree(entry);
return result;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_create);
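/*
* Illustrative usage sketch (not part of the driver): a client creating a
* doorbell with a delayed callback, ringing it and tearing it down again.
* The callback name is hypothetical.
*
*	static void example_db_fired(void *client_data)
*	{
*		... runs from a work item because of VMCI_FLAG_DELAYED_CB ...
*	}
*
*	struct vmci_handle handle = VMCI_INVALID_HANDLE;
*	int result;
*
*	result = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
*				      VMCI_NO_PRIVILEGE_FLAGS,
*				      example_db_fired, NULL);
*	if (result == VMCI_SUCCESS) {
*		vmci_doorbell_notify(handle, VMCI_NO_PRIVILEGE_FLAGS);
*		vmci_doorbell_destroy(handle);
*	}
*/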
/*
* vmci_doorbell_destroy() - Destroy a doorbell.
* @handle: The handle tracking the resource.
*
* Destroys a doorbell previously created with vmci_doorbell_create(). This
* operation may block waiting for a callback to finish.
*/
int vmci_doorbell_destroy(struct vmci_handle handle)
{
struct dbell_entry *entry;
struct vmci_resource *resource;
if (vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DOORBELL);
if (!resource) {
pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct dbell_entry, resource);
if (!hlist_unhashed(&entry->node)) {
int result;
dbell_index_table_remove(entry);
result = dbell_unlink(handle);
if (VMCI_SUCCESS != result) {
/*
* The only reason this should fail would be
* an inconsistency between guest and
* hypervisor state, where the guest believes
* it has an active registration whereas the
* hypervisor doesn't. One case where this may
* happen is if a doorbell is unregistered
* following a hibernation at a time where the
* doorbell state hasn't been restored on the
* hypervisor side yet. Since the handle has
* now been removed in the guest, we just
* print a warning and return success.
*/
pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
handle.context, handle.resource, result);
}
}
/*
* Now remove the resource from the table. It might still be in use
* after this, in a callback or still on the delayed work queue.
*/
vmci_resource_put(&entry->resource);
vmci_resource_remove(&entry->resource);
kfree(entry);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
/*
* vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
* @dst: The handle identifying the doorbell resource
* @priv_flags: Privilege flags.
*
* Generates a notification on the doorbell identified by the
* handle. For host side generation of notifications, the caller
* can specify what the privilege of the calling side is.
*/
int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
{
int retval;
enum vmci_route route;
struct vmci_handle src;
if (vmci_handle_is_invalid(dst) ||
(priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
return VMCI_ERROR_INVALID_ARGS;
src = VMCI_INVALID_HANDLE;
retval = vmci_route(&src, &dst, false, &route);
if (retval < VMCI_SUCCESS)
return retval;
if (VMCI_ROUTE_AS_HOST == route)
return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
dst, priv_flags);
if (VMCI_ROUTE_AS_GUEST == route)
return dbell_notify_as_guest(dst, priv_flags);
pr_warn("Unknown route (%d) for doorbell\n", route);
return VMCI_ERROR_DST_UNREACHABLE;
}
EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
| linux-master | drivers/misc/vmw_vmci/vmci_doorbell.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
* In the following, we will distinguish between two kinds of VMX processes -
* the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
* VMCI page files in the VMX and supporting VM to VM communication and the
* newer ones that use the guest memory directly. We will in the following
* refer to the older VMX versions as old-style VMX'en, and the newer ones as
* new-style VMX'en.
*
* The state transition diagram is as follows (the VMCIQPB_ prefix has been
* removed for readability) - see below for more details on the transitions:
*
* -------------- NEW -------------
* | |
* \_/ \_/
* CREATED_NO_MEM <-----------------> CREATED_MEM
* | | |
* | o-----------------------o |
* | | |
* \_/ \_/ \_/
* ATTACHED_NO_MEM <----------------> ATTACHED_MEM
* | | |
* | o----------------------o |
* | | |
* \_/ \_/ \_/
* SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
* | |
* | |
* -------------> gone <-------------
*
* In more detail. When a VMCI queue pair is first created, it will be in the
* VMCIQPB_NEW state. It will then move into one of the following states:
*
* - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
*
*   - the create was performed by a host endpoint, in which case there is
* no backing memory yet.
*
* - the create was initiated by an old-style VMX, that uses
* vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
* a later point in time. This state can be distinguished from the one
* above by the context ID of the creator. A host side is not allowed to
* attach until the page store has been set.
*
* - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
* is created by a VMX using the queue pair device backend that
* sets the UVAs of the queue pair immediately and stores the
* information for later attachers. At this point, it is ready for
* the host side to attach to it.
*
* Once the queue pair is in one of the created states (with the exception of
* the case mentioned for older VMX'en above), it is possible to attach to the
* queue pair. Again we have two new states possible:
*
* - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
* paths:
*
* - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
* pair, and attaches to a queue pair previously created by the host side.
*
* - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
* already created by a guest.
*
* - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
* vmci_qp_broker_set_page_store (see below).
*
* - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
* VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
* bring the queue pair into this state. Once vmci_qp_broker_set_page_store
* is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
* will be entered.
*
* From the attached queue pair, the queue pair can enter the shutdown states
* when either side of the queue pair detaches. If the guest side detaches
* first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
* the content of the queue pair will no longer be available. If the host
* side detaches first, the queue pair will either enter the
* VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
* VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
* (e.g., the host detaches while a guest is stunned).
*
* New-style VMX'en will also unmap guest memory, if the guest is
* quiesced, e.g., during a snapshot operation. In that case, the guest
* memory will no longer be available, and the queue pair will transition from
* *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
* in which case the queue pair will transition from the *_NO_MEM state at that
* point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
* since the peer may have either attached or detached in the meantime. The
* values are laid out such that ++ on a state will move from a *_NO_MEM to a
* *_MEM state, and vice versa.
*/
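/*
* Example trace (illustrative): a new-style VMX creates a queue pair
* (VMCIQPB_CREATED_MEM), the host attaches (VMCIQPB_ATTACHED_MEM), the
* guest is quiesced for a snapshot so its memory is unmapped
* (VMCIQPB_ATTACHED_NO_MEM), the memory is mapped again on resume
* (VMCIQPB_ATTACHED_MEM), and the guest finally detaches
* (VMCIQPB_SHUTDOWN_NO_MEM) before the host detaches and the pair is gone.
*/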
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
struct mutex __mutex; /* Protects the queue. */
struct mutex *mutex; /* Shared by producer and consumer queues. */
size_t num_pages; /* Number of pages incl. header. */
bool host; /* Host or guest? */
union {
struct {
dma_addr_t *pas;
void **vas;
} g; /* Used by the guest. */
struct {
struct page **page;
struct page **header_page;
} h; /* Used by the host. */
} u;
};
/*
* This structure is opaque to the clients.
*/
struct vmci_qp {
struct vmci_handle handle;
struct vmci_queue *produce_q;
struct vmci_queue *consume_q;
u64 produce_q_size;
u64 consume_q_size;
u32 peer;
u32 flags;
u32 priv_flags;
bool guest_endpoint;
unsigned int blocked;
unsigned int generation;
wait_queue_head_t event;
};
enum qp_broker_state {
VMCIQPB_NEW,
VMCIQPB_CREATED_NO_MEM,
VMCIQPB_CREATED_MEM,
VMCIQPB_ATTACHED_NO_MEM,
VMCIQPB_ATTACHED_MEM,
VMCIQPB_SHUTDOWN_NO_MEM,
VMCIQPB_SHUTDOWN_MEM,
VMCIQPB_GONE
};
#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
_qpb->state == VMCIQPB_ATTACHED_MEM || \
_qpb->state == VMCIQPB_SHUTDOWN_MEM)
/*
* In the queue pair broker, we always use the guest point of view for
* the produce and consume queue values and references, e.g., the
* produce queue size stored is the guest's produce queue size. The
* host endpoint will need to swap these around. The only exception is
* the local queue pairs on the host, in which case the host endpoint
* that creates the queue pair will have the right orientation, and
* the attaching host endpoint will need to swap.
*/
struct qp_entry {
struct list_head list_item;
struct vmci_handle handle;
u32 peer;
u32 flags;
u64 produce_size;
u64 consume_size;
u32 ref_count;
};
struct qp_broker_entry {
struct vmci_resource resource;
struct qp_entry qp;
u32 create_id;
u32 attach_id;
enum qp_broker_state state;
bool require_trusted_attach;
bool created_by_trusted;
bool vmci_page_files; /* Created by VMX using VMCI page files */
struct vmci_queue *produce_q;
struct vmci_queue *consume_q;
struct vmci_queue_header saved_produce_q;
struct vmci_queue_header saved_consume_q;
vmci_event_release_cb wakeup_cb;
void *client_data;
void *local_mem; /* Kernel memory for local queue pair */
};
struct qp_guest_endpoint {
struct vmci_resource resource;
struct qp_entry qp;
u64 num_ppns;
void *produce_q;
void *consume_q;
struct ppn_set ppn_set;
};
struct qp_list {
struct list_head head;
struct mutex mutex; /* Protect queue list. */
};
static struct qp_list qp_broker_list = {
.head = LIST_HEAD_INIT(qp_broker_list.head),
.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};
static struct qp_list qp_guest_endpoints = {
.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};
#define INVALID_VMCI_GUEST_MEM_ID 0
#define QPE_NUM_PAGES(_QPE) ((u32) \
(DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
(_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)
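/*
* Note on QP_SIZES_ARE_VALID (illustrative): the first clause is an
* unsigned overflow guard. If _prod_qsize + _cons_qsize wraps around, the
* sum becomes smaller than the larger operand, e.g. with 64-bit sizes
* 0xfffffffffffff000 + 0x2000 wraps to 0x1000, which is rejected even
* though each operand on its own looks plausible.
*/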
/*
* Frees kernel VA space for a given queue and its queue header, and
* frees physical data pages.
*/
static void qp_free_queue(void *q, u64 size)
{
struct vmci_queue *queue = q;
if (queue) {
u64 i;
/* Given size does not include header, so add in a page here. */
for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
queue->kernel_if->u.g.vas[i],
queue->kernel_if->u.g.pas[i]);
}
vfree(queue);
}
}
/*
* Allocates kernel queue pages of specified size with IOMMU mappings,
* plus space for the queue structure/kernel interface and the queue
* header.
*/
static void *qp_alloc_queue(u64 size, u32 flags)
{
u64 i;
struct vmci_queue *queue;
size_t pas_size;
size_t vas_size;
size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
u64 num_pages;
if (size > SIZE_MAX - PAGE_SIZE)
return NULL;
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages >
(SIZE_MAX - queue_size) /
(sizeof(*queue->kernel_if->u.g.pas) +
sizeof(*queue->kernel_if->u.g.vas)))
return NULL;
pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
queue_size += pas_size + vas_size;
queue = vmalloc(queue_size);
if (!queue)
return NULL;
queue->q_header = NULL;
queue->saved_header = NULL;
queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue->kernel_if->mutex = NULL;
queue->kernel_if->num_pages = num_pages;
queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
queue->kernel_if->u.g.vas =
(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
queue->kernel_if->host = false;
for (i = 0; i < num_pages; i++) {
queue->kernel_if->u.g.vas[i] =
dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
&queue->kernel_if->u.g.pas[i],
GFP_KERNEL);
if (!queue->kernel_if->u.g.vas[i]) {
/* Size excl. the header. */
qp_free_queue(queue, i * PAGE_SIZE);
return NULL;
}
}
/* Queue header is the first page. */
queue->q_header = queue->kernel_if->u.g.vas[0];
return queue;
}
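/*
* Illustrative sketch (not compiled) of the allocation produced by
* qp_alloc_queue(): one vmalloc'ed region holds the bookkeeping and the
* PA/VA arrays, while each queue page is a separate coherent DMA
* allocation.
*
*	vmalloc(queue_size):
*	+----------------------------+
*	| struct vmci_queue          |
*	| struct vmci_queue_kern_if  |
*	| dma_addr_t pas[num_pages]  |
*	| void *vas[num_pages]       |
*	+----------------------------+
*	vas[0]              -> queue header page (dma_alloc_coherent)
*	vas[1..num_pages-1] -> queue data pages  (dma_alloc_coherent)
*/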
/*
* Copies from a given buffer or iovector to a VMCI Queue. Uses
* kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
u64 queue_offset,
struct iov_iter *from,
size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
while (bytes_copied < size) {
const u64 page_index =
(queue_offset + bytes_copied) / PAGE_SIZE;
const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
if (kernel_if->host)
va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up from this page. */
to_copy = PAGE_SIZE - page_offset;
else
to_copy = size - bytes_copied;
if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
from)) {
if (kernel_if->host)
kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
kunmap_local(va);
}
return VMCI_SUCCESS;
}
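/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, queue_offset ==
 * 5000 and size == 2000, the loop above maps page_index 1 at page_offset
 * 904 and copies all 2000 bytes in one pass, since 2000 <= 4096 - 904.
 * For a guest queue the data lives at vas[page_index + 1], because index
 * 0 always holds the queue header page.
 */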
/*
* Copies to a given buffer or iovector from a VMCI Queue. Uses
* kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
const struct vmci_queue *queue,
u64 queue_offset, size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
while (bytes_copied < size) {
const u64 page_index =
(queue_offset + bytes_copied) / PAGE_SIZE;
const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
int err;
if (kernel_if->host)
va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up this page. */
to_copy = PAGE_SIZE - page_offset;
else
to_copy = size - bytes_copied;
err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
if (err != to_copy) {
if (kernel_if->host)
kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
kunmap_local(va);
}
return VMCI_SUCCESS;
}
/*
 * Allocates two lists of PPNs: one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the
 * lists of PPNs with the page frame numbers of the KVA for the two
 * queues (and the queue headers).
*/
static int qp_alloc_ppn_set(void *prod_q,
u64 num_produce_pages,
void *cons_q,
u64 num_consume_pages, struct ppn_set *ppn_set)
{
u64 *produce_ppns;
u64 *consume_ppns;
struct vmci_queue *produce_q = prod_q;
struct vmci_queue *consume_q = cons_q;
u64 i;
if (!produce_q || !num_produce_pages || !consume_q ||
!num_consume_pages || !ppn_set)
return VMCI_ERROR_INVALID_ARGS;
if (ppn_set->initialized)
return VMCI_ERROR_ALREADY_EXISTS;
produce_ppns =
kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
GFP_KERNEL);
if (!produce_ppns)
return VMCI_ERROR_NO_MEM;
consume_ppns =
kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
GFP_KERNEL);
if (!consume_ppns) {
kfree(produce_ppns);
return VMCI_ERROR_NO_MEM;
}
for (i = 0; i < num_produce_pages; i++)
produce_ppns[i] =
produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
for (i = 0; i < num_consume_pages; i++)
consume_ppns[i] =
consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
ppn_set->num_produce_pages = num_produce_pages;
ppn_set->num_consume_pages = num_consume_pages;
ppn_set->produce_ppns = produce_ppns;
ppn_set->consume_ppns = consume_ppns;
ppn_set->initialized = true;
return VMCI_SUCCESS;
}
/*
 * Frees the two lists of PPNs for a queue pair.
*/
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
if (ppn_set->initialized) {
/* Do not call these functions on NULL inputs. */
kfree(ppn_set->produce_ppns);
kfree(ppn_set->consume_ppns);
}
memset(ppn_set, 0, sizeof(*ppn_set));
}
/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
* of the produce queue and the consume queue.
*/
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
if (vmci_use_ppn64()) {
memcpy(call_buf, ppn_set->produce_ppns,
ppn_set->num_produce_pages *
sizeof(*ppn_set->produce_ppns));
memcpy(call_buf +
ppn_set->num_produce_pages *
sizeof(*ppn_set->produce_ppns),
ppn_set->consume_ppns,
ppn_set->num_consume_pages *
sizeof(*ppn_set->consume_ppns));
} else {
int i;
u32 *ppns = (u32 *) call_buf;
for (i = 0; i < ppn_set->num_produce_pages; i++)
ppns[i] = (u32) ppn_set->produce_ppns[i];
ppns = &ppns[ppn_set->num_produce_pages];
for (i = 0; i < ppn_set->num_consume_pages; i++)
ppns[i] = (u32) ppn_set->consume_ppns[i];
}
return VMCI_SUCCESS;
}
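/*
 * Illustrative note: the PPN area that follows the hypercall header is
 * laid out as all produce-queue PPNs followed by all consume-queue PPNs.
 * Each entry is 8 bytes when vmci_use_ppn64() is true and 4 bytes
 * otherwise, so e.g. 3 produce pages and 2 consume pages occupy either
 * 40 or 20 bytes of PPN space.
 */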
/*
* Allocates kernel VA space of specified size plus space for the queue
* and kernel interface. This is different from the guest queue allocator,
* because we do not allocate our own queue header/data pages here but
* share those of the guest.
*/
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
size_t queue_page_size;
u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
return NULL;
num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
sizeof(*queue->kernel_if->u.h.page))
return NULL;
queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
return NULL;
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
queue->saved_header = NULL;
queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue->kernel_if->host = true;
queue->kernel_if->mutex = NULL;
queue->kernel_if->num_pages = num_pages;
queue->kernel_if->u.h.header_page =
(struct page **)((u8 *)queue + queue_size);
queue->kernel_if->u.h.page =
&queue->kernel_if->u.h.header_page[1];
}
return queue;
}
/*
* Frees kernel memory for a given queue (header plus translation
* structure).
*/
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
kfree(queue);
}
/*
* Initialize the mutex for the pair of queues. This mutex is used to
* protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it is only any good if the mutexes
 * are actually acquired. The queue structure must lie in non-paged
 * memory, or we cannot guarantee access to the mutex.
*/
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
/*
* Only the host queue has shared state - the guest queues do not
* need to synchronize access using a queue mutex.
*/
if (produce_q->kernel_if->host) {
produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
mutex_init(produce_q->kernel_if->mutex);
}
}
/*
* Cleans up the mutex for the pair of queues.
*/
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
if (produce_q->kernel_if->host) {
produce_q->kernel_if->mutex = NULL;
consume_q->kernel_if->mutex = NULL;
}
}
/*
* Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed into this routine; either will work just fine.
*/
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
if (queue->kernel_if->host)
mutex_lock(queue->kernel_if->mutex);
}
/*
* Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed into this routine; either will work just fine.
*/
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
if (queue->kernel_if->host)
mutex_unlock(queue->kernel_if->mutex);
}
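/*
 * Illustrative sketch (hypothetical helper): the produce queue's mutex
 * guards both queues of a host-side pair, so callers bracket header
 * accesses with a single acquire/release on either queue.
 */
static void __maybe_unused qp_locked_access_example(struct vmci_queue *produce_q)
{
	qp_acquire_queue_mutex(produce_q);
	/* ... read or update the shared queue headers here ... */
	qp_release_queue_mutex(produce_q);
}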
/*
* Helper function to release pages in the PageStoreAttachInfo
* previously obtained using get_user_pages.
*/
static void qp_release_pages(struct page **pages,
u64 num_pages, bool dirty)
{
int i;
for (i = 0; i < num_pages; i++) {
if (dirty)
set_page_dirty_lock(pages[i]);
put_page(pages[i]);
pages[i] = NULL;
}
}
/*
* Lock the user pages referenced by the {produce,consume}Buffer
* struct into memory and populate the {produce,consume}Pages
* arrays in the attach structure with them.
*/
static int qp_host_get_user_memory(u64 produce_uva,
u64 consume_uva,
struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
int retval;
int err = VMCI_SUCCESS;
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages,
FOLL_WRITE,
produce_q->kernel_if->u.h.header_page);
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
if (retval > 0)
qp_release_pages(produce_q->kernel_if->u.h.header_page,
retval, false);
err = VMCI_ERROR_NO_MEM;
goto out;
}
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages,
FOLL_WRITE,
consume_q->kernel_if->u.h.header_page);
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
if (retval > 0)
qp_release_pages(consume_q->kernel_if->u.h.header_page,
retval, false);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM;
}
out:
return err;
}
/*
* Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS-specific
 * part of struct vmci_queue.
*/
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
u64 produce_uva;
u64 consume_uva;
/*
* The new style and the old style mapping only differs in
* that we either get a single or two UVAs, so we split the
* single UVA range at the appropriate spot.
*/
produce_uva = page_store->pages;
consume_uva = page_store->pages +
produce_q->kernel_if->num_pages * PAGE_SIZE;
return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
consume_q);
}
/*
* Releases and removes the references to user pages stored in the attach
* struct. Pages are released from the page cache and may become
* swappable again.
*/
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, true);
memset(produce_q->kernel_if->u.h.header_page, 0,
sizeof(*produce_q->kernel_if->u.h.header_page) *
produce_q->kernel_if->num_pages);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
consume_q->kernel_if->num_pages, true);
memset(consume_q->kernel_if->u.h.header_page, 0,
sizeof(*consume_q->kernel_if->u.h.header_page) *
consume_q->kernel_if->num_pages);
}
/*
* Once qp_host_register_user_memory has been performed on a
* queue, the queue pair headers can be mapped into the
* kernel. Once mapped, they must be unmapped with
* qp_host_unmap_queues prior to calling
* qp_host_unregister_user_memory.
* Pages are pinned.
*/
static int qp_host_map_queues(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
int result;
if (!produce_q->q_header || !consume_q->q_header) {
struct page *headers[2];
if (produce_q->q_header != consume_q->q_header)
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
if (produce_q->kernel_if->u.h.header_page == NULL ||
*produce_q->kernel_if->u.h.header_page == NULL)
return VMCI_ERROR_UNAVAILABLE;
headers[0] = *produce_q->kernel_if->u.h.header_page;
headers[1] = *consume_q->kernel_if->u.h.header_page;
produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
if (produce_q->q_header != NULL) {
consume_q->q_header =
(struct vmci_queue_header *)((u8 *)
produce_q->q_header +
PAGE_SIZE);
result = VMCI_SUCCESS;
} else {
pr_warn("vmap failed\n");
result = VMCI_ERROR_NO_MEM;
}
} else {
result = VMCI_SUCCESS;
}
return result;
}
/*
* Unmaps previously mapped queue pair headers from the kernel.
* Pages are unpinned.
*/
static int qp_host_unmap_queues(u32 gid,
struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
if (produce_q->q_header) {
if (produce_q->q_header < consume_q->q_header)
vunmap(produce_q->q_header);
else
vunmap(consume_q->q_header);
produce_q->q_header = NULL;
consume_q->q_header = NULL;
}
return VMCI_SUCCESS;
}
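/*
 * Illustrative note: qp_host_map_queues() backs both headers with a
 * single two-page vmap(), so the unmap above only needs one vunmap() of
 * whichever header sits at the lower address to tear down the mapping.
 */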
/*
* Finds the entry in the list corresponding to a given handle. Assumes
* that the list is locked.
*/
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
struct vmci_handle handle)
{
struct qp_entry *entry;
if (vmci_handle_is_invalid(handle))
return NULL;
list_for_each_entry(entry, &qp_list->head, list_item) {
if (vmci_handle_is_equal(entry->handle, handle))
return entry;
}
return NULL;
}
/*
* Finds the entry in the list corresponding to a given handle.
*/
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
struct qp_guest_endpoint *entry;
struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
entry = qp ? container_of(
qp, struct qp_guest_endpoint, qp) : NULL;
return entry;
}
/*
* Finds the entry in the list corresponding to a given handle.
*/
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
struct qp_broker_entry *entry;
struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
entry = qp ? container_of(
qp, struct qp_broker_entry, qp) : NULL;
return entry;
}
/*
* Dispatches a queue pair event message directly into the local event
* queue.
*/
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
u32 context_id = vmci_get_context_id();
struct vmci_event_qp ev;
memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
ev.msg.event_data.event =
attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
ev.payload.peer_id = context_id;
ev.payload.handle = handle;
return vmci_event_dispatch(&ev.msg.hdr);
}
/*
* Allocates and initializes a qp_guest_endpoint structure.
* Allocates a queue_pair rid (and handle) iff the given entry has
* an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
* are reserved handles. Assumes that the QP list mutex is held
* by the caller.
*/
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
u32 peer,
u32 flags,
u64 produce_size,
u64 consume_size,
void *produce_q,
void *consume_q)
{
int result;
struct qp_guest_endpoint *entry;
/* One page each for the queue headers. */
const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
if (vmci_handle_is_invalid(handle)) {
u32 context_id = vmci_get_context_id();
handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
}
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
entry->qp.peer = peer;
entry->qp.flags = flags;
entry->qp.produce_size = produce_size;
entry->qp.consume_size = consume_size;
entry->qp.ref_count = 0;
entry->num_ppns = num_ppns;
entry->produce_q = produce_q;
entry->consume_q = consume_q;
INIT_LIST_HEAD(&entry->qp.list_item);
/* Add resource obj */
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_QPAIR_GUEST,
handle);
entry->qp.handle = vmci_resource_handle(&entry->resource);
if ((result != VMCI_SUCCESS) ||
qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
handle.context, handle.resource, result);
kfree(entry);
entry = NULL;
}
}
return entry;
}
/*
* Frees a qp_guest_endpoint structure.
*/
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
qp_free_ppn_set(&entry->ppn_set);
qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
qp_free_queue(entry->produce_q, entry->qp.produce_size);
qp_free_queue(entry->consume_q, entry->qp.consume_size);
/* Unlink from resource hash table and free callback */
vmci_resource_remove(&entry->resource);
kfree(entry);
}
/*
* Helper to make a queue_pairAlloc hypercall when the driver is
* supporting a guest device.
*/
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
struct vmci_qp_alloc_msg *alloc_msg;
size_t msg_size;
size_t ppn_size;
int result;
if (!entry || entry->num_ppns <= 2)
return VMCI_ERROR_INVALID_ARGS;
ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
msg_size = sizeof(*alloc_msg) +
(size_t) entry->num_ppns * ppn_size;
alloc_msg = kmalloc(msg_size, GFP_KERNEL);
if (!alloc_msg)
return VMCI_ERROR_NO_MEM;
alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_QUEUEPAIR_ALLOC);
alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
alloc_msg->handle = entry->qp.handle;
alloc_msg->peer = entry->qp.peer;
alloc_msg->flags = entry->qp.flags;
alloc_msg->produce_size = entry->qp.produce_size;
alloc_msg->consume_size = entry->qp.consume_size;
alloc_msg->num_ppns = entry->num_ppns;
result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
&entry->ppn_set);
if (result == VMCI_SUCCESS)
result = vmci_send_datagram(&alloc_msg->hdr);
kfree(alloc_msg);
return result;
}
/*
* Helper to make a queue_pairDetach hypercall when the driver is
* supporting a guest device.
*/
static int qp_detatch_hypercall(struct vmci_handle handle)
{
struct vmci_qp_detach_msg detach_msg;
detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_QUEUEPAIR_DETACH);
detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
detach_msg.hdr.payload_size = sizeof(handle);
detach_msg.handle = handle;
return vmci_send_datagram(&detach_msg.hdr);
}
/*
* Adds the given entry to the list. Assumes that the list is locked.
*/
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
if (entry)
list_add(&entry->list_item, &qp_list->head);
}
/*
* Removes the given entry from the list. Assumes that the list is locked.
*/
static void qp_list_remove_entry(struct qp_list *qp_list,
struct qp_entry *entry)
{
if (entry)
list_del(&entry->list_item);
}
/*
* Helper for VMCI queue_pair detach interface. Frees the physical
* pages for the queue pair.
*/
static int qp_detatch_guest_work(struct vmci_handle handle)
{
int result;
struct qp_guest_endpoint *entry;
u32 ref_count = ~0; /* To avoid compiler warning below */
mutex_lock(&qp_guest_endpoints.mutex);
entry = qp_guest_handle_to_entry(handle);
if (!entry) {
mutex_unlock(&qp_guest_endpoints.mutex);
return VMCI_ERROR_NOT_FOUND;
}
if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
result = VMCI_SUCCESS;
if (entry->qp.ref_count > 1) {
result = qp_notify_peer_local(false, handle);
/*
* We can fail to notify a local queuepair
* because we can't allocate. We still want
* to release the entry if that happens, so
* don't bail out yet.
*/
}
} else {
result = qp_detatch_hypercall(handle);
if (result < VMCI_SUCCESS) {
/*
* We failed to notify a non-local queuepair.
* That other queuepair might still be
* accessing the shared memory, so don't
* release the entry yet. It will get cleaned
* up by VMCIqueue_pair_Exit() if necessary
* (assuming we are going away, otherwise why
* did this fail?).
*/
mutex_unlock(&qp_guest_endpoints.mutex);
return result;
}
}
/*
* If we get here then we either failed to notify a local queuepair, or
* we succeeded in all cases. Release the entry if required.
*/
entry->qp.ref_count--;
if (entry->qp.ref_count == 0)
qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
/* If we didn't remove the entry, this could change once we unlock. */
if (entry)
ref_count = entry->qp.ref_count;
mutex_unlock(&qp_guest_endpoints.mutex);
if (ref_count == 0)
qp_guest_endpoint_destroy(entry);
return result;
}
/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS-dependent calls through generic wrappers.
*/
static int qp_alloc_guest_work(struct vmci_handle *handle,
struct vmci_queue **produce_q,
u64 produce_size,
struct vmci_queue **consume_q,
u64 consume_size,
u32 peer,
u32 flags,
u32 priv_flags)
{
const u64 num_produce_pages =
DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
const u64 num_consume_pages =
DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
void *my_produce_q = NULL;
void *my_consume_q = NULL;
int result;
struct qp_guest_endpoint *queue_pair_entry = NULL;
if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
return VMCI_ERROR_NO_ACCESS;
mutex_lock(&qp_guest_endpoints.mutex);
queue_pair_entry = qp_guest_handle_to_entry(*handle);
if (queue_pair_entry) {
if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
/* Local attach case. */
if (queue_pair_entry->qp.ref_count > 1) {
pr_devel("Error attempting to attach more than once\n");
result = VMCI_ERROR_UNAVAILABLE;
goto error_keep_entry;
}
if (queue_pair_entry->qp.produce_size != consume_size ||
queue_pair_entry->qp.consume_size !=
produce_size ||
queue_pair_entry->qp.flags !=
(flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
pr_devel("Error mismatched queue pair in local attach\n");
result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
goto error_keep_entry;
}
/*
* Do a local attach. We swap the consume and
* produce queues for the attacher and deliver
* an attach event.
*/
result = qp_notify_peer_local(true, *handle);
if (result < VMCI_SUCCESS)
goto error_keep_entry;
my_produce_q = queue_pair_entry->consume_q;
my_consume_q = queue_pair_entry->produce_q;
goto out;
}
result = VMCI_ERROR_ALREADY_EXISTS;
goto error_keep_entry;
}
my_produce_q = qp_alloc_queue(produce_size, flags);
if (!my_produce_q) {
pr_warn("Error allocating pages for produce queue\n");
result = VMCI_ERROR_NO_MEM;
goto error;
}
my_consume_q = qp_alloc_queue(consume_size, flags);
if (!my_consume_q) {
pr_warn("Error allocating pages for consume queue\n");
result = VMCI_ERROR_NO_MEM;
goto error;
}
queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
produce_size, consume_size,
my_produce_q, my_consume_q);
if (!queue_pair_entry) {
pr_warn("Error allocating memory in %s\n", __func__);
result = VMCI_ERROR_NO_MEM;
goto error;
}
result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
num_consume_pages,
&queue_pair_entry->ppn_set);
if (result < VMCI_SUCCESS) {
pr_warn("qp_alloc_ppn_set failed\n");
goto error;
}
/*
* It's only necessary to notify the host if this queue pair will be
* attached to from another context.
*/
if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
/* Local create case. */
u32 context_id = vmci_get_context_id();
/*
* Enforce similar checks on local queue pairs as we
* do for regular ones. The handle's context must
* match the creator or attacher context id (here they
* are both the current context id) and the
* attach-only flag cannot exist during create. We
* also ensure specified peer is this context or an
* invalid one.
*/
if (queue_pair_entry->qp.handle.context != context_id ||
(queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
queue_pair_entry->qp.peer != context_id)) {
result = VMCI_ERROR_NO_ACCESS;
goto error;
}
if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
result = VMCI_ERROR_NOT_FOUND;
goto error;
}
} else {
result = qp_alloc_hypercall(queue_pair_entry);
if (result < VMCI_SUCCESS) {
pr_devel("qp_alloc_hypercall result = %d\n", result);
goto error;
}
}
qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
(struct vmci_queue *)my_consume_q);
qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
out:
queue_pair_entry->qp.ref_count++;
*handle = queue_pair_entry->qp.handle;
*produce_q = (struct vmci_queue *)my_produce_q;
*consume_q = (struct vmci_queue *)my_consume_q;
/*
* We should initialize the queue pair header pages on a local
* queue pair create. For non-local queue pairs, the
* hypervisor initializes the header pages in the create step.
*/
if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
queue_pair_entry->qp.ref_count == 1) {
vmci_q_header_init((*produce_q)->q_header, *handle);
vmci_q_header_init((*consume_q)->q_header, *handle);
}
mutex_unlock(&qp_guest_endpoints.mutex);
return VMCI_SUCCESS;
error:
mutex_unlock(&qp_guest_endpoints.mutex);
if (queue_pair_entry) {
/* The queues will be freed inside the destroy routine. */
qp_guest_endpoint_destroy(queue_pair_entry);
} else {
qp_free_queue(my_produce_q, produce_size);
qp_free_queue(my_consume_q, consume_size);
}
return result;
error_keep_entry:
/* This path should only be used when an existing entry was found. */
mutex_unlock(&qp_guest_endpoints.mutex);
return result;
}
/*
* The first endpoint issuing a queue pair allocation will create the state
* of the queue pair in the queue pair broker.
*
* If the creator is a guest, it will associate a VMX virtual address range
* with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, which used a separate step to set the VMX virtual
* address range, the virtual address range can be registered later using
* vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
* used.
*
* If the creator is the host, a page_store of NULL should be used as well,
* since the host is not able to supply a page store for the queue pair.
*
* For older VMX and host callers, the queue pair will be created in the
* VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
*/
static int qp_broker_create(struct vmci_handle handle,
u32 peer,
u32 flags,
u32 priv_flags,
u64 produce_size,
u64 consume_size,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context,
vmci_event_release_cb wakeup_cb,
void *client_data, struct qp_broker_entry **ent)
{
struct qp_broker_entry *entry = NULL;
const u32 context_id = vmci_ctx_get_id(context);
bool is_local = flags & VMCI_QPFLAG_LOCAL;
int result;
u64 guest_produce_size;
u64 guest_consume_size;
/* Do not create if the caller asked not to. */
if (flags & VMCI_QPFLAG_ATTACH_ONLY)
return VMCI_ERROR_NOT_FOUND;
/*
* Creator's context ID should match handle's context ID or the creator
* must allow the context in handle's context ID as the "peer".
*/
if (handle.context != context_id && handle.context != peer)
return VMCI_ERROR_NO_ACCESS;
if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
return VMCI_ERROR_DST_UNREACHABLE;
/*
* Creator's context ID for local queue pairs should match the
* peer, if a peer is specified.
*/
if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
return VMCI_ERROR_NO_ACCESS;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return VMCI_ERROR_NO_MEM;
if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
/*
* The queue pair broker entry stores values from the guest
* point of view, so a creating host side endpoint should swap
* produce and consume values -- unless it is a local queue
* pair, in which case no swapping is necessary, since the local
* attacher will swap queues.
*/
guest_produce_size = consume_size;
guest_consume_size = produce_size;
} else {
guest_produce_size = produce_size;
guest_consume_size = consume_size;
}
entry->qp.handle = handle;
entry->qp.peer = peer;
entry->qp.flags = flags;
entry->qp.produce_size = guest_produce_size;
entry->qp.consume_size = guest_consume_size;
entry->qp.ref_count = 1;
entry->create_id = context_id;
entry->attach_id = VMCI_INVALID_ID;
entry->state = VMCIQPB_NEW;
entry->require_trusted_attach =
!!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
entry->created_by_trusted =
!!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
entry->vmci_page_files = false;
entry->wakeup_cb = wakeup_cb;
entry->client_data = client_data;
entry->produce_q = qp_host_alloc_queue(guest_produce_size);
if (entry->produce_q == NULL) {
result = VMCI_ERROR_NO_MEM;
goto error;
}
entry->consume_q = qp_host_alloc_queue(guest_consume_size);
if (entry->consume_q == NULL) {
result = VMCI_ERROR_NO_MEM;
goto error;
}
qp_init_queue_mutex(entry->produce_q, entry->consume_q);
INIT_LIST_HEAD(&entry->qp.list_item);
if (is_local) {
u8 *tmp;
entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
PAGE_SIZE, GFP_KERNEL);
if (entry->local_mem == NULL) {
result = VMCI_ERROR_NO_MEM;
goto error;
}
entry->state = VMCIQPB_CREATED_MEM;
entry->produce_q->q_header = entry->local_mem;
tmp = (u8 *)entry->local_mem + PAGE_SIZE *
(DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
} else if (page_store) {
/*
* The VMX already initialized the queue pair headers, so no
* need for the kernel side to do that.
*/
result = qp_host_register_user_memory(page_store,
entry->produce_q,
entry->consume_q);
if (result < VMCI_SUCCESS)
goto error;
entry->state = VMCIQPB_CREATED_MEM;
} else {
/*
* A create without a page_store may be either a host
* side create (in which case we are waiting for the
* guest side to supply the memory) or an old style
* queue pair create (in which case we will expect a
* set page store call as the next step).
*/
entry->state = VMCIQPB_CREATED_NO_MEM;
}
qp_list_add_entry(&qp_broker_list, &entry->qp);
if (ent != NULL)
*ent = entry;
/* Add to resource obj */
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_QPAIR_HOST,
handle);
if (result != VMCI_SUCCESS) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
handle.context, handle.resource, result);
goto error;
}
entry->qp.handle = vmci_resource_handle(&entry->resource);
if (is_local) {
vmci_q_header_init(entry->produce_q->q_header,
entry->qp.handle);
vmci_q_header_init(entry->consume_q->q_header,
entry->qp.handle);
}
vmci_ctx_qp_create(context, entry->qp.handle);
return VMCI_SUCCESS;
error:
if (entry != NULL) {
qp_host_free_queue(entry->produce_q, guest_produce_size);
qp_host_free_queue(entry->consume_q, guest_consume_size);
kfree(entry);
}
return result;
}
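/*
 * Illustrative note: because the broker entry is stored from the guest's
 * point of view, a host-side create with produce_size 8192 and
 * consume_size 4096 is recorded as produce_size 4096 and consume_size
 * 8192, and a guest attaching later must pass exactly those swapped
 * sizes to satisfy the mismatch checks in qp_broker_attach().
 */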
/*
* Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM. Returns the payload size of the datagram enqueued on
* success, error code otherwise.
*/
static int qp_notify_peer(bool attach,
struct vmci_handle handle,
u32 my_id,
u32 peer_id)
{
int rv;
struct vmci_event_qp ev;
if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
peer_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
/*
* In vmci_ctx_enqueue_datagram() we enforce the upper limit on
 * the number of pending events from the hypervisor to a given VM;
 * otherwise a rogue VM could do an arbitrary number of attach
 * and detach operations, causing memory pressure in the host
* kernel.
*/
memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
ev.msg.event_data.event = attach ?
VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
ev.payload.handle = handle;
ev.payload.peer_id = my_id;
rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
&ev.msg.hdr, false);
if (rv < VMCI_SUCCESS)
pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
attach ? "ATTACH" : "DETACH", peer_id);
return rv;
}
/*
* The second endpoint issuing a queue pair allocation will attach to
* the queue pair registered with the queue pair broker.
*
* If the attacher is a guest, it will associate a VMX virtual address
* range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
* pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, which used a separate step to set the VMX virtual
* address range, the virtual address range can be registered later
* using vmci_qp_broker_set_page_store. In that case, a page_store of
* NULL should be used, and the attach event will be generated once
* the actual page store has been set.
*
* If the attacher is the host, a page_store of NULL should be used as
* well, since the page store information is already set by the guest.
*
* For new VMX and host callers, the queue pair will be moved to the
* VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
*/
static int qp_broker_attach(struct qp_broker_entry *entry,
u32 peer,
u32 flags,
u32 priv_flags,
u64 produce_size,
u64 consume_size,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context,
vmci_event_release_cb wakeup_cb,
void *client_data,
struct qp_broker_entry **ent)
{
const u32 context_id = vmci_ctx_get_id(context);
bool is_local = flags & VMCI_QPFLAG_LOCAL;
int result;
if (entry->state != VMCIQPB_CREATED_NO_MEM &&
entry->state != VMCIQPB_CREATED_MEM)
return VMCI_ERROR_UNAVAILABLE;
if (is_local) {
if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
context_id != entry->create_id) {
return VMCI_ERROR_INVALID_ARGS;
}
} else if (context_id == entry->create_id ||
context_id == entry->attach_id) {
return VMCI_ERROR_ALREADY_EXISTS;
}
if (VMCI_CONTEXT_IS_VM(context_id) &&
VMCI_CONTEXT_IS_VM(entry->create_id))
return VMCI_ERROR_DST_UNREACHABLE;
/*
* If we are attaching from a restricted context then the queuepair
* must have been created by a trusted endpoint.
*/
if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
!entry->created_by_trusted)
return VMCI_ERROR_NO_ACCESS;
/*
* If we are attaching to a queuepair that was created by a restricted
* context then we must be trusted.
*/
if (entry->require_trusted_attach &&
(!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
return VMCI_ERROR_NO_ACCESS;
/*
* If the creator specifies VMCI_INVALID_ID in "peer" field, access
* control check is not performed.
*/
if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
return VMCI_ERROR_NO_ACCESS;
if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
/*
* Do not attach if the caller doesn't support Host Queue Pairs
* and a host created this queue pair.
*/
if (!vmci_ctx_supports_host_qp(context))
return VMCI_ERROR_INVALID_RESOURCE;
} else if (context_id == VMCI_HOST_CONTEXT_ID) {
struct vmci_ctx *create_context;
bool supports_host_qp;
/*
* Do not attach a host to a user created queue pair if that
* user doesn't support host queue pair end points.
*/
create_context = vmci_ctx_get(entry->create_id);
supports_host_qp = vmci_ctx_supports_host_qp(create_context);
vmci_ctx_put(create_context);
if (!supports_host_qp)
return VMCI_ERROR_INVALID_RESOURCE;
}
if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
if (context_id != VMCI_HOST_CONTEXT_ID) {
/*
* The queue pair broker entry stores values from the guest
* point of view, so an attaching guest should match the values
* stored in the entry.
*/
if (entry->qp.produce_size != produce_size ||
entry->qp.consume_size != consume_size) {
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
}
} else if (entry->qp.produce_size != consume_size ||
entry->qp.consume_size != produce_size) {
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
}
if (context_id != VMCI_HOST_CONTEXT_ID) {
/*
* If a guest attached to a queue pair, it will supply
* the backing memory. If this is a pre NOVMVM vmx,
* the backing memory will be supplied by calling
* vmci_qp_broker_set_page_store() following the
* return of the vmci_qp_broker_alloc() call. If it is
* a vmx of version NOVMVM or later, the page store
* must be supplied as part of the
 * vmci_qp_broker_alloc call. In all cases, the
 * initially created queue pair must not already
 * have any memory associated with it.
*/
if (entry->state != VMCIQPB_CREATED_NO_MEM)
return VMCI_ERROR_INVALID_ARGS;
if (page_store != NULL) {
/*
* Patch up host state to point to guest
* supplied memory. The VMX already
* initialized the queue pair headers, so no
* need for the kernel side to do that.
*/
result = qp_host_register_user_memory(page_store,
entry->produce_q,
entry->consume_q);
if (result < VMCI_SUCCESS)
return result;
entry->state = VMCIQPB_ATTACHED_MEM;
} else {
entry->state = VMCIQPB_ATTACHED_NO_MEM;
}
} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
/*
* The host side is attempting to attach to a queue
* pair that doesn't have any memory associated with
* it. This must be a pre NOVMVM vmx that hasn't set
* the page store information yet, or a quiesced VM.
*/
return VMCI_ERROR_UNAVAILABLE;
} else {
/* The host side has successfully attached to a queue pair. */
entry->state = VMCIQPB_ATTACHED_MEM;
}
if (entry->state == VMCIQPB_ATTACHED_MEM) {
result =
qp_notify_peer(true, entry->qp.handle, context_id,
entry->create_id);
if (result < VMCI_SUCCESS)
pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
entry->create_id, entry->qp.handle.context,
entry->qp.handle.resource);
}
entry->attach_id = context_id;
entry->qp.ref_count++;
if (wakeup_cb) {
entry->wakeup_cb = wakeup_cb;
entry->client_data = client_data;
}
/*
* When attaching to local queue pairs, the context already has
* an entry tracking the queue pair, so don't add another one.
*/
if (!is_local)
vmci_ctx_qp_create(context, entry->qp.handle);
if (ent != NULL)
*ent = entry;
return VMCI_SUCCESS;
}
/*
* queue_pair_Alloc for use when setting up queue pair endpoints
* on the host.
*/
static int qp_broker_alloc(struct vmci_handle handle,
u32 peer,
u32 flags,
u32 priv_flags,
u64 produce_size,
u64 consume_size,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context,
vmci_event_release_cb wakeup_cb,
void *client_data,
struct qp_broker_entry **ent,
bool *swap)
{
const u32 context_id = vmci_ctx_get_id(context);
bool create;
struct qp_broker_entry *entry = NULL;
bool is_local = flags & VMCI_QPFLAG_LOCAL;
int result;
if (vmci_handle_is_invalid(handle) ||
(flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
!(produce_size || consume_size) ||
!context || context_id == VMCI_INVALID_ID ||
handle.context == VMCI_INVALID_ID) {
return VMCI_ERROR_INVALID_ARGS;
}
if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
return VMCI_ERROR_INVALID_ARGS;
/*
* In the initial argument check, we ensure that non-vmkernel hosts
* are not allowed to create local queue pairs.
*/
mutex_lock(&qp_broker_list.mutex);
if (!is_local && vmci_ctx_qp_exists(context, handle)) {
pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
context_id, handle.context, handle.resource);
mutex_unlock(&qp_broker_list.mutex);
return VMCI_ERROR_ALREADY_EXISTS;
}
if (handle.resource != VMCI_INVALID_ID)
entry = qp_broker_handle_to_entry(handle);
if (!entry) {
create = true;
result =
qp_broker_create(handle, peer, flags, priv_flags,
produce_size, consume_size, page_store,
context, wakeup_cb, client_data, ent);
} else {
create = false;
result =
qp_broker_attach(entry, peer, flags, priv_flags,
produce_size, consume_size, page_store,
context, wakeup_cb, client_data, ent);
}
mutex_unlock(&qp_broker_list.mutex);
if (swap)
*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
!(create && is_local);
return result;
}
/*
* This function implements the kernel API for allocating a queue
* pair.
*/
static int qp_alloc_host_work(struct vmci_handle *handle,
struct vmci_queue **produce_q,
u64 produce_size,
struct vmci_queue **consume_q,
u64 consume_size,
u32 peer,
u32 flags,
u32 priv_flags,
vmci_event_release_cb wakeup_cb,
void *client_data)
{
struct vmci_handle new_handle;
struct vmci_ctx *context;
struct qp_broker_entry *entry;
int result;
bool swap;
if (vmci_handle_is_invalid(*handle)) {
new_handle = vmci_make_handle(
VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
} else
new_handle = *handle;
context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
entry = NULL;
result =
qp_broker_alloc(new_handle, peer, flags, priv_flags,
produce_size, consume_size, NULL, context,
wakeup_cb, client_data, &entry, &swap);
if (result == VMCI_SUCCESS) {
if (swap) {
/*
* If this is a local queue pair, the attacher
* will swap around produce and consume
* queues.
*/
*produce_q = entry->consume_q;
*consume_q = entry->produce_q;
} else {
*produce_q = entry->produce_q;
*consume_q = entry->consume_q;
}
*handle = vmci_resource_handle(&entry->resource);
} else {
*handle = VMCI_INVALID_HANDLE;
pr_devel("queue pair broker failed to alloc (result=%d)\n",
result);
}
vmci_ctx_put(context);
return result;
}
/*
* Allocates a VMCI queue_pair. Only checks validity of input
* arguments. The real work is done in the host or guest
* specific function.
*/
int vmci_qp_alloc(struct vmci_handle *handle,
struct vmci_queue **produce_q,
u64 produce_size,
struct vmci_queue **consume_q,
u64 consume_size,
u32 peer,
u32 flags,
u32 priv_flags,
bool guest_endpoint,
vmci_event_release_cb wakeup_cb,
void *client_data)
{
if (!handle || !produce_q || !consume_q ||
(!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
return VMCI_ERROR_INVALID_ARGS;
if (guest_endpoint) {
return qp_alloc_guest_work(handle, produce_q,
produce_size, consume_q,
consume_size, peer,
flags, priv_flags);
} else {
return qp_alloc_host_work(handle, produce_q,
produce_size, consume_q,
consume_size, peer, flags,
priv_flags, wakeup_cb, client_data);
}
}
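/*
 * Illustrative sketch (hypothetical caller, not part of the driver): a
 * guest-side user of vmci_qp_alloc() typically passes an invalid handle
 * so that the broker assigns a resource id, and would normally keep the
 * handle returned through *handle for the later detach.
 */
static int __maybe_unused vmci_qp_alloc_example(struct vmci_queue **produce_q,
						struct vmci_queue **consume_q)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;

	return vmci_qp_alloc(&handle, produce_q, PAGE_SIZE,
			     consume_q, PAGE_SIZE, VMCI_INVALID_ID,
			     0 /* flags */, VMCI_NO_PRIVILEGE_FLAGS,
			     true /* guest_endpoint */, NULL, NULL);
}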
/*
* This function implements the host kernel API for detaching from
* a queue pair.
*/
static int qp_detatch_host_work(struct vmci_handle handle)
{
int result;
struct vmci_ctx *context;
context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
result = vmci_qp_broker_detach(handle, context);
vmci_ctx_put(context);
return result;
}
/*
* Detaches from a VMCI queue_pair. Only checks validity of input argument.
* Real work is done in the host or guest specific function.
*/
static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
{
if (vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
if (guest_endpoint)
return qp_detatch_guest_work(handle);
else
return qp_detatch_host_work(handle);
}
/*
* Returns the entry from the head of the list. Assumes that the list is
* locked.
*/
static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
{
if (!list_empty(&qp_list->head)) {
struct qp_entry *entry =
list_first_entry(&qp_list->head, struct qp_entry,
list_item);
return entry;
}
return NULL;
}
void vmci_qp_broker_exit(void)
{
struct qp_entry *entry;
struct qp_broker_entry *be;
mutex_lock(&qp_broker_list.mutex);
while ((entry = qp_list_get_head(&qp_broker_list))) {
be = (struct qp_broker_entry *)entry;
qp_list_remove_entry(&qp_broker_list, entry);
kfree(be);
}
mutex_unlock(&qp_broker_list.mutex);
}
/*
* Requests that a queue pair be allocated with the VMCI queue
* pair broker. Allocates a queue pair entry if one does not
* exist. Attaches to one if it exists, and retrieves the page
* files backing that queue_pair. Assumes that the queue pair
* broker lock is held.
*/
int vmci_qp_broker_alloc(struct vmci_handle handle,
u32 peer,
u32 flags,
u32 priv_flags,
u64 produce_size,
u64 consume_size,
struct vmci_qp_page_store *page_store,
struct vmci_ctx *context)
{
if (!QP_SIZES_ARE_VALID(produce_size, consume_size))
return VMCI_ERROR_NO_RESOURCES;
return qp_broker_alloc(handle, peer, flags, priv_flags,
produce_size, consume_size,
page_store, context, NULL, NULL, NULL, NULL);
}
/*
* VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
* step to add the UVAs of the VMX mapping of the queue pair. This function
* provides backwards compatibility with such VMX'en, and takes care of
* registering the page store for a queue pair previously allocated by the
* VMX during create or attach. This function will move the queue pair state
 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
* attached state with memory, the queue pair is ready to be used by the
* host peer, and an attached event will be generated.
*
* Assumes that the queue pair broker lock is held.
*
* This function is only used by the hosted platform, since there is no
* issue with backwards compatibility for vmkernel.
*/
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
u64 produce_uva,
u64 consume_uva,
struct vmci_ctx *context)
{
struct qp_broker_entry *entry;
int result;
const u32 context_id = vmci_ctx_get_id(context);
if (vmci_handle_is_invalid(handle) || !context ||
context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
/*
* We only support guest to host queue pairs, so the VMX must
* supply UVAs for the mapped page files.
*/
if (produce_uva == 0 || consume_uva == 0)
return VMCI_ERROR_INVALID_ARGS;
mutex_lock(&qp_broker_list.mutex);
if (!vmci_ctx_qp_exists(context, handle)) {
pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
entry = qp_broker_handle_to_entry(handle);
if (!entry) {
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
/*
* If I'm the owner then I can set the page store.
*
* Or, if a host created the queue_pair and I'm the attached peer
* then I can set the page store.
*/
if (entry->create_id != context_id &&
(entry->create_id != VMCI_HOST_CONTEXT_ID ||
entry->attach_id != context_id)) {
result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
goto out;
}
if (entry->state != VMCIQPB_CREATED_NO_MEM &&
entry->state != VMCIQPB_ATTACHED_NO_MEM) {
result = VMCI_ERROR_UNAVAILABLE;
goto out;
}
result = qp_host_get_user_memory(produce_uva, consume_uva,
entry->produce_q, entry->consume_q);
if (result < VMCI_SUCCESS)
goto out;
result = qp_host_map_queues(entry->produce_q, entry->consume_q);
if (result < VMCI_SUCCESS) {
qp_host_unregister_user_memory(entry->produce_q,
entry->consume_q);
goto out;
}
if (entry->state == VMCIQPB_CREATED_NO_MEM)
entry->state = VMCIQPB_CREATED_MEM;
else
entry->state = VMCIQPB_ATTACHED_MEM;
entry->vmci_page_files = true;
if (entry->state == VMCIQPB_ATTACHED_MEM) {
result =
qp_notify_peer(true, handle, context_id, entry->create_id);
if (result < VMCI_SUCCESS) {
pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
entry->create_id, entry->qp.handle.context,
entry->qp.handle.resource);
}
}
result = VMCI_SUCCESS;
out:
mutex_unlock(&qp_broker_list.mutex);
return result;
}
/*
* Resets saved queue headers for the given QP broker
* entry. Should be used when guest memory becomes available
* again, or the guest detaches.
*/
static void qp_reset_saved_headers(struct qp_broker_entry *entry)
{
entry->produce_q->saved_header = NULL;
entry->consume_q->saved_header = NULL;
}
/*
* The main entry point for detaching from a queue pair registered with the
* queue pair broker. If more than one endpoint is attached to the queue
* pair, the first endpoint will mainly decrement a reference count and
* generate a notification to its peer. The last endpoint will clean up
* the queue pair state registered with the broker.
*
* When a guest endpoint detaches, it will unmap and unregister the guest
* memory backing the queue pair. If the host is still attached, it will
* no longer be able to access the queue pair content.
*
* If the queue pair is already in a state where there is no memory
* registered for the queue pair (any *_NO_MEM state), it will transition to
 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
* endpoint is the first of two endpoints to detach. If the host endpoint is
* the first out of two to detach, the queue pair will move to the
* VMCIQPB_SHUTDOWN_MEM state.
*/
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
u32 peer_id;
bool is_local = false;
int result;
if (vmci_handle_is_invalid(handle) || !context ||
context_id == VMCI_INVALID_ID) {
return VMCI_ERROR_INVALID_ARGS;
}
mutex_lock(&qp_broker_list.mutex);
if (!vmci_ctx_qp_exists(context, handle)) {
pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
entry = qp_broker_handle_to_entry(handle);
if (!entry) {
pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
if (context_id != entry->create_id && context_id != entry->attach_id) {
result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
goto out;
}
if (context_id == entry->create_id) {
peer_id = entry->attach_id;
entry->create_id = VMCI_INVALID_ID;
} else {
peer_id = entry->create_id;
entry->attach_id = VMCI_INVALID_ID;
}
entry->qp.ref_count--;
is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
if (context_id != VMCI_HOST_CONTEXT_ID) {
bool headers_mapped;
/*
* Pre NOVMVM vmx'en may detach from a queue pair
* before setting the page store, and in that case
* there is no user memory to detach from. Also, more
* recent VMX'en may detach from a queue pair in the
* quiesced state.
*/
qp_acquire_queue_mutex(entry->produce_q);
headers_mapped = entry->produce_q->q_header ||
entry->consume_q->q_header;
if (QPBROKERSTATE_HAS_MEM(entry)) {
result =
qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
entry->produce_q,
entry->consume_q);
if (result < VMCI_SUCCESS)
pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
handle.context, handle.resource,
result);
qp_host_unregister_user_memory(entry->produce_q,
entry->consume_q);
}
if (!headers_mapped)
qp_reset_saved_headers(entry);
qp_release_queue_mutex(entry->produce_q);
if (!headers_mapped && entry->wakeup_cb)
entry->wakeup_cb(entry->client_data);
} else {
if (entry->wakeup_cb) {
entry->wakeup_cb = NULL;
entry->client_data = NULL;
}
}
if (entry->qp.ref_count == 0) {
qp_list_remove_entry(&qp_broker_list, &entry->qp);
if (is_local)
kfree(entry->local_mem);
qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
/* Unlink from resource hash table and free callback */
vmci_resource_remove(&entry->resource);
kfree(entry);
vmci_ctx_qp_destroy(context, handle);
} else {
qp_notify_peer(false, handle, context_id, peer_id);
if (context_id == VMCI_HOST_CONTEXT_ID &&
QPBROKERSTATE_HAS_MEM(entry)) {
entry->state = VMCIQPB_SHUTDOWN_MEM;
} else {
entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
}
if (!is_local)
vmci_ctx_qp_destroy(context, handle);
}
result = VMCI_SUCCESS;
out:
mutex_unlock(&qp_broker_list.mutex);
return result;
}
/*
* Establishes the necessary mappings for a queue pair given a
* reference to the queue pair guest memory. This is usually
* called when a guest is unquiesced and the VMX is allowed to
* map guest memory once again.
*/
int vmci_qp_broker_map(struct vmci_handle handle,
struct vmci_ctx *context,
u64 guest_mem)
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
int result;
if (vmci_handle_is_invalid(handle) || !context ||
context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
mutex_lock(&qp_broker_list.mutex);
if (!vmci_ctx_qp_exists(context, handle)) {
pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
entry = qp_broker_handle_to_entry(handle);
if (!entry) {
pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
if (context_id != entry->create_id && context_id != entry->attach_id) {
result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
goto out;
}
result = VMCI_SUCCESS;
if (context_id != VMCI_HOST_CONTEXT_ID &&
!QPBROKERSTATE_HAS_MEM(entry)) {
struct vmci_qp_page_store page_store;
page_store.pages = guest_mem;
page_store.len = QPE_NUM_PAGES(entry->qp);
qp_acquire_queue_mutex(entry->produce_q);
qp_reset_saved_headers(entry);
result =
qp_host_register_user_memory(&page_store,
entry->produce_q,
entry->consume_q);
qp_release_queue_mutex(entry->produce_q);
if (result == VMCI_SUCCESS) {
/* Move state from *_NO_MEM to *_MEM */
entry->state++;
if (entry->wakeup_cb)
entry->wakeup_cb(entry->client_data);
}
}
out:
mutex_unlock(&qp_broker_list.mutex);
return result;
}
/*
* Saves a snapshot of the queue headers for the given QP broker
* entry. Should be used when guest memory is unmapped.
* Results:
* VMCI_SUCCESS on success, appropriate error code if guest memory
 * can't be accessed.
*/
static int qp_save_headers(struct qp_broker_entry *entry)
{
int result;
if (entry->produce_q->saved_header != NULL &&
entry->consume_q->saved_header != NULL) {
/*
* If the headers have already been saved, we don't need to do
* it again, and we don't want to map in the headers
* unnecessarily.
*/
return VMCI_SUCCESS;
}
if (NULL == entry->produce_q->q_header ||
NULL == entry->consume_q->q_header) {
result = qp_host_map_queues(entry->produce_q, entry->consume_q);
if (result < VMCI_SUCCESS)
return result;
}
memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
sizeof(entry->saved_produce_q));
entry->produce_q->saved_header = &entry->saved_produce_q;
memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
sizeof(entry->saved_consume_q));
entry->consume_q->saved_header = &entry->saved_consume_q;
return VMCI_SUCCESS;
}
/*
* Removes all references to the guest memory of a given queue pair, and
* will move the queue pair from state *_MEM to *_NO_MEM. It is usually
 * called when a VM is being quiesced and access to guest memory should
 * be avoided.
*/
int vmci_qp_broker_unmap(struct vmci_handle handle,
struct vmci_ctx *context,
u32 gid)
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
int result;
if (vmci_handle_is_invalid(handle) || !context ||
context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
mutex_lock(&qp_broker_list.mutex);
if (!vmci_ctx_qp_exists(context, handle)) {
pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
entry = qp_broker_handle_to_entry(handle);
if (!entry) {
pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
context_id, handle.context, handle.resource);
result = VMCI_ERROR_NOT_FOUND;
goto out;
}
if (context_id != entry->create_id && context_id != entry->attach_id) {
result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
goto out;
}
if (context_id != VMCI_HOST_CONTEXT_ID &&
QPBROKERSTATE_HAS_MEM(entry)) {
qp_acquire_queue_mutex(entry->produce_q);
result = qp_save_headers(entry);
if (result < VMCI_SUCCESS)
pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
handle.context, handle.resource, result);
qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
/*
* On hosted, when we unmap queue pairs, the VMX will also
* unmap the guest memory, so we invalidate the previously
* registered memory. If the queue pair is mapped again at a
* later point in time, we will need to reregister the user
* memory with a possibly new user VA.
*/
qp_host_unregister_user_memory(entry->produce_q,
entry->consume_q);
/*
* Move state from *_MEM to *_NO_MEM.
*/
entry->state--;
qp_release_queue_mutex(entry->produce_q);
}
result = VMCI_SUCCESS;
out:
mutex_unlock(&qp_broker_list.mutex);
return result;
}
/*
* Destroys all guest queue pair endpoints. If active guest queue
* pairs still exist, hypercalls to attempt detach from these
* queue pairs will be made. Any failure to detach is silently
* ignored.
*/
void vmci_qp_guest_endpoints_exit(void)
{
struct qp_entry *entry;
struct qp_guest_endpoint *ep;
mutex_lock(&qp_guest_endpoints.mutex);
while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
ep = (struct qp_guest_endpoint *)entry;
/* Don't make a hypercall for local queue_pairs. */
if (!(entry->flags & VMCI_QPFLAG_LOCAL))
qp_detatch_hypercall(entry->handle);
/* We cannot fail the exit, so let's reset ref_count. */
entry->ref_count = 0;
qp_list_remove_entry(&qp_guest_endpoints, entry);
qp_guest_endpoint_destroy(ep);
}
mutex_unlock(&qp_guest_endpoints.mutex);
}
/*
* Helper routine that will lock the queue pair before subsequent
* operations.
* Note: Non-blocking on the host side is currently only implemented in ESX.
 * Since non-blocking isn't yet implemented on the host personality, we
 * have no reason to acquire a spin lock. So, to avoid an unnecessary
 * lock, only acquire the mutex if we can block.
*/
static void qp_lock(const struct vmci_qp *qpair)
{
qp_acquire_queue_mutex(qpair->produce_q);
}
/*
* Helper routine that unlocks the queue pair after calling
* qp_lock.
*/
static void qp_unlock(const struct vmci_qp *qpair)
{
qp_release_queue_mutex(qpair->produce_q);
}
/*
* The queue headers may not be mapped at all times. If a queue is
 * currently not mapped, an attempt is made to map it here.
*/
static int qp_map_queue_headers(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
int result;
if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
result = qp_host_map_queues(produce_q, consume_q);
if (result < VMCI_SUCCESS)
return (produce_q->saved_header &&
consume_q->saved_header) ?
VMCI_ERROR_QUEUEPAIR_NOT_READY :
VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
}
return VMCI_SUCCESS;
}
/*
* Helper routine that will retrieve the produce and consume
* headers of a given queue pair. If the guest memory of the
* queue pair is currently not available, the saved queue headers
* will be returned, if these are available.
*/
static int qp_get_queue_headers(const struct vmci_qp *qpair,
struct vmci_queue_header **produce_q_header,
struct vmci_queue_header **consume_q_header)
{
int result;
result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
if (result == VMCI_SUCCESS) {
*produce_q_header = qpair->produce_q->q_header;
*consume_q_header = qpair->consume_q->q_header;
} else if (qpair->produce_q->saved_header &&
qpair->consume_q->saved_header) {
*produce_q_header = qpair->produce_q->saved_header;
*consume_q_header = qpair->consume_q->saved_header;
result = VMCI_SUCCESS;
}
return result;
}
/*
* Callback from VMCI queue pair broker indicating that a queue
* pair that was previously not ready is now either ready or
* gone forever.
*/
static int qp_wakeup_cb(void *client_data)
{
struct vmci_qp *qpair = (struct vmci_qp *)client_data;
qp_lock(qpair);
while (qpair->blocked > 0) {
qpair->blocked--;
qpair->generation++;
wake_up(&qpair->event);
}
qp_unlock(qpair);
return VMCI_SUCCESS;
}
/*
* Makes the calling thread wait for the queue pair to become
* ready for host side access. Returns true when thread is
* woken up after queue pair state change, false otherwise.
*/
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
unsigned int generation;
qpair->blocked++;
generation = qpair->generation;
qp_unlock(qpair);
wait_event(qpair->event, generation != qpair->generation);
qp_lock(qpair);
return true;
}
/*
* Enqueues a given buffer to the produce queue using the provided
* function. As many bytes as possible (space available in the queue)
* are enqueued. Assumes the queue->mutex has been acquired. Returns
* VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
* data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
* queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
* an error occurred when accessing the buffer,
* VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
* available. Otherwise, the number of bytes written to the queue is
* returned. Updates the tail pointer of the produce queue.
*/
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 produce_q_size,
struct iov_iter *from)
{
s64 free_space;
u64 tail;
size_t buf_size = iov_iter_count(from);
size_t written;
ssize_t result;
result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
free_space = vmci_q_header_free_space(produce_q->q_header,
consume_q->q_header,
produce_q_size);
if (free_space == 0)
return VMCI_ERROR_QUEUEPAIR_NOSPACE;
if (free_space < VMCI_SUCCESS)
return (ssize_t) free_space;
written = (size_t) (free_space > buf_size ? buf_size : free_space);
tail = vmci_q_header_producer_tail(produce_q->q_header);
if (likely(tail + written < produce_q_size)) {
result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
} else {
/* Tail pointer wraps around. */
const size_t tmp = (size_t) (produce_q_size - tail);
result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
if (result >= VMCI_SUCCESS)
result = qp_memcpy_to_queue_iter(produce_q, 0, from,
written - tmp);
}
if (result < VMCI_SUCCESS)
return result;
/*
* This virt_wmb() ensures that data written to the queue
* is observable before the new producer_tail is.
*/
virt_wmb();
vmci_q_header_add_producer_tail(produce_q->q_header, written,
produce_q_size);
return written;
}
/*
* Dequeues data (if available) from the given consume queue. Writes data
* to the user provided buffer using the provided function.
* Assumes the queue->mutex has been acquired.
* Results:
* VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
* VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
* (as defined by the queue size).
* VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
* Otherwise the number of bytes dequeued is returned.
* Side effects:
* Updates the head pointer of the consume queue.
*/
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 consume_q_size,
struct iov_iter *to,
bool update_consumer)
{
size_t buf_size = iov_iter_count(to);
s64 buf_ready;
u64 head;
size_t read;
ssize_t result;
result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
produce_q->q_header,
consume_q_size);
if (buf_ready == 0)
return VMCI_ERROR_QUEUEPAIR_NODATA;
if (buf_ready < VMCI_SUCCESS)
return (ssize_t) buf_ready;
/*
* This virt_rmb() ensures that data from the queue will be read
* after we have determined how much is ready to be consumed.
*/
virt_rmb();
read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
} else {
/* Head pointer wraps around. */
const size_t tmp = (size_t) (consume_q_size - head);
result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
if (result >= VMCI_SUCCESS)
result = qp_memcpy_from_queue_iter(to, consume_q, 0,
read - tmp);
}
if (result < VMCI_SUCCESS)
return result;
if (update_consumer)
vmci_q_header_add_consumer_head(produce_q->q_header,
read, consume_q_size);
return read;
}
/*
* vmci_qpair_alloc() - Allocates a queue pair.
* @qpair: Pointer for the new vmci_qp struct.
* @handle: Handle to track the resource.
* @produce_qsize: Desired size of the producer queue.
* @consume_qsize: Desired size of the consumer queue.
* @peer: ContextID of the peer.
* @flags: VMCI flags.
* @priv_flags: VMCI privilege flags.
*
* This is the client interface for allocating the memory for a
* vmci_qp structure and then attaching to the underlying
* queue. If an error occurs allocating the memory for the
* vmci_qp structure, no attempt is made to attach. If an
* error occurs attaching, then the structure is freed.
*/
int vmci_qpair_alloc(struct vmci_qp **qpair,
struct vmci_handle *handle,
u64 produce_qsize,
u64 consume_qsize,
u32 peer,
u32 flags,
u32 priv_flags)
{
struct vmci_qp *my_qpair;
int retval;
struct vmci_handle src = VMCI_INVALID_HANDLE;
struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
enum vmci_route route;
vmci_event_release_cb wakeup_cb;
void *client_data;
/*
* Restrict the size of a queuepair. The device already
* enforces a limit on the total amount of memory that can be
* allocated to queuepairs for a guest. However, we try to
* allocate this memory before we make the queuepair
* allocation hypercall. On Linux, we allocate each page
* separately, which means rather than fail, the guest will
* thrash while it tries to allocate, and will become
* increasingly unresponsive to the point where it appears to
* be hung. So we place a limit on the size of an individual
* queuepair here, and leave the device to enforce the
* restriction on total queuepair memory. (Note that this
* doesn't prevent all cases; a user with only this much
* physical memory could still get into trouble.) The error
* used by the device is NO_RESOURCES, so use that here too.
*/
if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize))
return VMCI_ERROR_NO_RESOURCES;
retval = vmci_route(&src, &dst, false, &route);
if (retval < VMCI_SUCCESS)
route = vmci_guest_code_active() ?
VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
pr_devel("NONBLOCK OR PINNED set");
return VMCI_ERROR_INVALID_ARGS;
}
my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
if (!my_qpair)
return VMCI_ERROR_NO_MEM;
my_qpair->produce_q_size = produce_qsize;
my_qpair->consume_q_size = consume_qsize;
my_qpair->peer = peer;
my_qpair->flags = flags;
my_qpair->priv_flags = priv_flags;
wakeup_cb = NULL;
client_data = NULL;
if (VMCI_ROUTE_AS_HOST == route) {
my_qpair->guest_endpoint = false;
if (!(flags & VMCI_QPFLAG_LOCAL)) {
my_qpair->blocked = 0;
my_qpair->generation = 0;
init_waitqueue_head(&my_qpair->event);
wakeup_cb = qp_wakeup_cb;
client_data = (void *)my_qpair;
}
} else {
my_qpair->guest_endpoint = true;
}
retval = vmci_qp_alloc(handle,
&my_qpair->produce_q,
my_qpair->produce_q_size,
&my_qpair->consume_q,
my_qpair->consume_q_size,
my_qpair->peer,
my_qpair->flags,
my_qpair->priv_flags,
my_qpair->guest_endpoint,
wakeup_cb, client_data);
if (retval < VMCI_SUCCESS) {
kfree(my_qpair);
return retval;
}
*qpair = my_qpair;
my_qpair->handle = *handle;
return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
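/*
 * Illustrative sketch only, not part of this driver: a hypothetical
 * in-kernel client allocating a queue pair to a peer context and
 * releasing it again. The 4 KiB queue sizes, the zero flags value and
 * VMCI_NO_PRIVILEGE_FLAGS are made-up example choices; the declarations
 * are assumed to come from <linux/vmw_vmci_api.h>. Guarded with #if 0
 * so it does not affect the build.
 */
#if 0
static int example_qpair_setup(u32 peer_cid, struct vmci_qp **qpair,
			       struct vmci_handle *handle)
{
	int retval;

	retval = vmci_qpair_alloc(qpair, handle, 4096, 4096, peer_cid,
				  0, VMCI_NO_PRIVILEGE_FLAGS);
	if (retval < VMCI_SUCCESS)
		return retval;	/* nothing was attached, nothing to undo */

	/* ... exchange data with the vmci_qpair_*() calls below ... */

	return vmci_qpair_detach(qpair);	/* also frees and clears *qpair */
}
#endif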
/*
* vmci_qpair_detach() - Detaches the client from a queue pair.
* @qpair: Reference of a pointer to the qpair struct.
*
* This is the client interface for detaching from a VMCIQPair.
* Note that this routine will free the memory allocated for the
* vmci_qp structure too.
*/
int vmci_qpair_detach(struct vmci_qp **qpair)
{
int result;
struct vmci_qp *old_qpair;
if (!qpair || !(*qpair))
return VMCI_ERROR_INVALID_ARGS;
old_qpair = *qpair;
result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
/*
* The guest can fail to detach for a number of reasons, and
* if it does so, it will clean up the entry (if there is one).
* The host can fail too, but it won't clean up the entry
* immediately, it will do that later when the context is
* freed. Either way, we need to release the qpair struct
* here; there isn't much the caller can do, and we don't want
* to leak.
*/
memset(old_qpair, 0, sizeof(*old_qpair));
old_qpair->handle = VMCI_INVALID_HANDLE;
old_qpair->peer = VMCI_INVALID_ID;
kfree(old_qpair);
*qpair = NULL;
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);
/*
* vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
* @qpair: Pointer to the queue pair struct.
* @producer_tail: Reference used for storing producer tail index.
* @consumer_head: Reference used for storing the consumer head index.
*
* This is the client interface for getting the current indexes of the
* QPair from the point of view of the caller as the producer.
*/
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
u64 *producer_tail,
u64 *consumer_head)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
int result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
vmci_q_header_get_pointers(produce_q_header, consume_q_header,
producer_tail, consumer_head);
qp_unlock(qpair);
if (result == VMCI_SUCCESS &&
((producer_tail && *producer_tail >= qpair->produce_q_size) ||
(consumer_head && *consumer_head >= qpair->produce_q_size)))
return VMCI_ERROR_INVALID_SIZE;
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
/*
* vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
* @qpair: Pointer to the queue pair struct.
* @consumer_tail: Reference used for storing consumer tail index.
* @producer_head: Reference used for storing the producer head index.
*
* This is the client interface for getting the current indexes of the
* QPair from the point of view of the caller as the consumer.
*/
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
u64 *consumer_tail,
u64 *producer_head)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
int result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
vmci_q_header_get_pointers(consume_q_header, produce_q_header,
consumer_tail, producer_head);
qp_unlock(qpair);
if (result == VMCI_SUCCESS &&
((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
(producer_head && *producer_head >= qpair->consume_q_size)))
return VMCI_ERROR_INVALID_SIZE;
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
/*
* vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
* @qpair: Pointer to the queue pair struct.
*
* This is the client interface for getting the amount of free
* space in the QPair from the point of view of the caller as
* the producer, which is the common case. Returns < 0 on error,
* otherwise the number of bytes available for enqueueing.
*/
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
s64 result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
result = vmci_q_header_free_space(produce_q_header,
consume_q_header,
qpair->produce_q_size);
else
result = 0;
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
/*
* vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
* @qpair: Pointer to the queue pair struct.
*
* This is the client interface for getting the amount of free
* space in the QPair from the point of view of the caller as
* the consumer, which is not the common case. Returns < 0 on error,
* otherwise the number of bytes available for enqueueing.
*/
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
s64 result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
result = vmci_q_header_free_space(consume_q_header,
produce_q_header,
qpair->consume_q_size);
else
result = 0;
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
/*
* vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
* producer queue.
* @qpair: Pointer to the queue pair struct.
*
* This is the client interface for getting the amount of
* enqueued data in the QPair from the point of view of the
* caller as the producer, which is not the common case. Returns < 0
* on error, otherwise the number of bytes that may be read.
*/
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
s64 result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
result = vmci_q_header_buf_ready(produce_q_header,
consume_q_header,
qpair->produce_q_size);
else
result = 0;
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
/*
* vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
* consumer queue.
* @qpair: Pointer to the queue pair struct.
*
* This is the client interface for getting the amount of
* enqueued data in the QPair from the point of view of the
* caller as the consumer, which is the normal case. Returns < 0
* on error, otherwise the number of bytes that may be read.
*/
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
struct vmci_queue_header *produce_q_header;
struct vmci_queue_header *consume_q_header;
s64 result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
result =
qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
if (result == VMCI_SUCCESS)
result = vmci_q_header_buf_ready(consume_q_header,
produce_q_header,
qpair->consume_q_size);
else
result = 0;
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
/*
* vmci_qpair_enqueue() - Throw data on the queue.
* @qpair: Pointer to the queue pair struct.
* @buf: Pointer to buffer containing data
* @buf_size: Length of buffer.
* @buf_type: Buffer type (Unused).
*
* This is the client interface for enqueueing data into the queue.
* Returns number of bytes enqueued or < 0 on error.
*/
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
const void *buf,
size_t buf_size,
int buf_type)
{
ssize_t result;
struct iov_iter from;
struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
iov_iter_kvec(&from, ITER_SOURCE, &v, 1, buf_size);
qp_lock(qpair);
do {
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
&from);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
/*
* vmci_qpair_dequeue() - Get data from the queue.
* @qpair: Pointer to the queue pair struct.
* @buf: Pointer to buffer for the data
* @buf_size: Length of buffer.
* @buf_type: Buffer type (Unused).
*
* This is the client interface for dequeueing data from the queue.
* Returns number of bytes dequeued or < 0 on error.
*/
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
void *buf,
size_t buf_size,
int buf_type)
{
ssize_t result;
struct iov_iter to;
struct kvec v = {.iov_base = buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size);
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
&to, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
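/*
 * Illustrative sketch only: enqueueing a buffer for the peer and draining
 * whatever the peer has produced. Data written by vmci_qpair_enqueue()
 * goes to the produce queue and is read by the peer, while
 * vmci_qpair_dequeue() reads the local consume queue, so an endpoint does
 * not read back its own writes unless the pair was created with
 * VMCI_QPFLAG_LOCAL. "qpair" is assumed to come from a successful
 * vmci_qpair_alloc(). Guarded with #if 0 so it does not affect the build.
 */
#if 0
static ssize_t example_qpair_io(struct vmci_qp *qpair)
{
	char out[] = "ping";
	char in[64];
	ssize_t rc;

	rc = vmci_qpair_enqueue(qpair, out, sizeof(out), 0 /* buf_type unused */);
	if (rc < 0)
		return rc;	/* e.g. VMCI_ERROR_QUEUEPAIR_NOSPACE */

	/* Bytes copied from the consume queue, or a negative VMCI error. */
	return vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
}
#endif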
/*
* vmci_qpair_peek() - Peek at the data in the queue.
* @qpair: Pointer to the queue pair struct.
* @buf: Pointer to buffer for the data
* @buf_size: Length of buffer.
* @buf_type: Buffer type (Unused on Linux).
*
* This is the client interface for peeking into a queue. (I.e.,
* copy data from the queue without updating the head pointer.)
* Returns number of bytes dequeued or < 0 on error.
*/
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
void *buf,
size_t buf_size,
int buf_type)
{
struct iov_iter to;
struct kvec v = {.iov_base = buf, .iov_len = buf_size};
ssize_t result;
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size);
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
&to, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);
/*
* vmci_qpair_enquev() - Throw data on the queue using iov.
* @qpair: Pointer to the queue pair struct.
* @iov: Pointer to buffer containing data
* @iov_size: Length of buffer.
* @buf_type: Buffer type (Unused).
*
* This is the client interface for enqueueing data into the queue.
* This function uses IO vectors to handle the work. Returns number
* of bytes enqueued or < 0 on error.
*/
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
do {
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
&msg->msg_iter);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
/*
* vmci_qpair_dequev() - Get data from the queue using iov.
* @qpair: Pointer to the queue pair struct.
* @iov: Pointer to buffer for the data
* @iov_size: Length of buffer.
* @buf_type: Buffer type (Unused).
*
* This is the client interface for dequeueing data from the queue.
* This function uses IO vectors to handle the work. Returns number
* of bytes dequeued or < 0 on error.
*/
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
&msg->msg_iter, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
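/*
 * Illustrative sketch only: feeding the iov-based variant from a plain
 * kernel buffer by building a msghdr with iov_iter_kvec(), mirroring the
 * kvec setup used internally by vmci_qpair_enqueue() above. Assumes
 * <linux/socket.h> and <linux/uio.h> for msghdr and iov_iter. Guarded
 * with #if 0 so it does not affect the build.
 */
#if 0
static ssize_t example_qpair_enquev(struct vmci_qp *qpair, void *buf,
				    size_t len)
{
	struct kvec v = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { };

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &v, 1, len);
	return vmci_qpair_enquev(qpair, &msg, len, 0 /* buf_type unused */);
}
#endif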
/*
* vmci_qpair_peekv() - Peek at the data in the queue using iov.
* @qpair: Pointer to the queue pair struct.
* @iov: Pointer to buffer for the data
* @iov_size: Length of buffer.
* @buf_type: Buffer type (Unused on Linux).
*
* This is the client interface for peeking into a queue. (I.e.,
* copy data from the queue without updating the head pointer.)
* This function uses IO vectors to handle the work. Returns number
* of bytes peeked or < 0 on error.
*/
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
&msg->msg_iter, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
| linux-master | drivers/misc/vmw_vmci/vmci_queue_pair.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
* struct datagram_entry describes the datagram entity. It is used for datagram
* entities created only on the host.
*/
struct datagram_entry {
struct vmci_resource resource;
u32 flags;
bool run_delayed;
vmci_datagram_recv_cb recv_cb;
void *client_data;
u32 priv_flags;
};
struct delayed_datagram_info {
struct datagram_entry *entry;
struct work_struct work;
bool in_dg_host_queue;
/* msg and msg_payload must be together. */
struct vmci_datagram msg;
u8 msg_payload[];
};
/* Number of in-flight host->host datagrams */
static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
/*
* Create a datagram entry given a handle pointer.
*/
static int dg_create_handle(u32 resource_id,
u32 flags,
u32 priv_flags,
vmci_datagram_recv_cb recv_cb,
void *client_data, struct vmci_handle *out_handle)
{
int result;
u32 context_id;
struct vmci_handle handle;
struct datagram_entry *entry;
if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
return VMCI_ERROR_INVALID_ARGS;
if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
context_id = VMCI_INVALID_ID;
} else {
context_id = vmci_get_context_id();
if (context_id == VMCI_INVALID_ID)
return VMCI_ERROR_NO_RESOURCES;
}
handle = vmci_make_handle(context_id, resource_id);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_warn("Failed allocating memory for datagram entry\n");
return VMCI_ERROR_NO_MEM;
}
entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
entry->flags = flags;
entry->recv_cb = recv_cb;
entry->client_data = client_data;
entry->priv_flags = priv_flags;
/* Make datagram resource live. */
result = vmci_resource_add(&entry->resource,
VMCI_RESOURCE_TYPE_DATAGRAM,
handle);
if (result != VMCI_SUCCESS) {
pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
handle.context, handle.resource, result);
kfree(entry);
return result;
}
*out_handle = vmci_resource_handle(&entry->resource);
return VMCI_SUCCESS;
}
/*
* Internal utility function with the same purpose as
* vmci_datagram_get_priv_flags that also takes a context_id.
*/
static int vmci_datagram_get_priv_flags(u32 context_id,
struct vmci_handle handle,
u32 *priv_flags)
{
if (context_id == VMCI_INVALID_ID)
return VMCI_ERROR_INVALID_ARGS;
if (context_id == VMCI_HOST_CONTEXT_ID) {
struct datagram_entry *src_entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource)
return VMCI_ERROR_INVALID_ARGS;
src_entry = container_of(resource, struct datagram_entry,
resource);
*priv_flags = src_entry->priv_flags;
vmci_resource_put(resource);
} else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
*priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
else
*priv_flags = vmci_context_get_priv_flags(context_id);
return VMCI_SUCCESS;
}
/*
* Calls the specified callback in a delayed context.
*/
static void dg_delayed_dispatch(struct work_struct *work)
{
struct delayed_datagram_info *dg_info =
container_of(work, struct delayed_datagram_info, work);
dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
vmci_resource_put(&dg_info->entry->resource);
if (dg_info->in_dg_host_queue)
atomic_dec(&delayed_dg_host_queue_size);
kfree(dg_info);
}
/*
* Dispatch datagram as a host, to the host, or other vm context. This
* function cannot dispatch to hypervisor context handlers. This should
* have been handled before we get here by vmci_datagram_dispatch.
* Returns number of bytes sent on success, error code otherwise.
*/
static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
{
int retval;
size_t dg_size;
u32 src_priv_flags;
dg_size = VMCI_DG_SIZE(dg);
/* Host cannot send to the hypervisor. */
if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
return VMCI_ERROR_DST_UNREACHABLE;
/* Check that source handle matches sending context. */
if (dg->src.context != context_id) {
pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
context_id, dg->src.context, dg->src.resource);
return VMCI_ERROR_NO_ACCESS;
}
/* Get hold of privileges of sending endpoint. */
retval = vmci_datagram_get_priv_flags(context_id, dg->src,
&src_priv_flags);
if (retval != VMCI_SUCCESS) {
pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
dg->src.context, dg->src.resource);
return retval;
}
/* Determine if we should route to host or guest destination. */
if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
/* Route to host datagram entry. */
struct datagram_entry *dst_entry;
struct vmci_resource *resource;
if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
dg->dst.resource == VMCI_EVENT_HANDLER) {
return vmci_event_dispatch(dg);
}
resource = vmci_resource_by_handle(dg->dst,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
dg->dst.context, dg->dst.resource);
return VMCI_ERROR_INVALID_RESOURCE;
}
dst_entry = container_of(resource, struct datagram_entry,
resource);
if (vmci_deny_interaction(src_priv_flags,
dst_entry->priv_flags)) {
vmci_resource_put(resource);
return VMCI_ERROR_NO_ACCESS;
}
/*
* If a VMCI datagram destined for the host is also sent by the
* host, we always run it delayed. This ensures that no locks
* are held when the datagram callback runs.
*/
if (dst_entry->run_delayed ||
dg->src.context == VMCI_HOST_CONTEXT_ID) {
struct delayed_datagram_info *dg_info;
if (atomic_add_return(1, &delayed_dg_host_queue_size)
== VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info = kmalloc(sizeof(*dg_info) +
(size_t) dg->payload_size, GFP_ATOMIC);
if (!dg_info) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, dg_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
retval = VMCI_SUCCESS;
} else {
retval = dst_entry->recv_cb(dst_entry->client_data, dg);
vmci_resource_put(resource);
if (retval < VMCI_SUCCESS)
return retval;
}
} else {
/* Route to destination VM context. */
struct vmci_datagram *new_dg;
if (context_id != dg->dst.context) {
if (vmci_deny_interaction(src_priv_flags,
vmci_context_get_priv_flags
(dg->dst.context))) {
return VMCI_ERROR_NO_ACCESS;
} else if (VMCI_CONTEXT_IS_VM(context_id)) {
/*
* If the sending context is a VM, it
* cannot reach another VM.
*/
pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
context_id, dg->dst.context);
return VMCI_ERROR_DST_UNREACHABLE;
}
}
/* We make a copy to enqueue. */
new_dg = kmemdup(dg, dg_size, GFP_KERNEL);
if (new_dg == NULL)
return VMCI_ERROR_NO_MEM;
retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
if (retval < VMCI_SUCCESS) {
kfree(new_dg);
return retval;
}
}
/*
* We currently truncate the size to signed 32 bits. This doesn't
* matter for this handler as it only supports 4Kb messages.
*/
return (int)dg_size;
}
/*
* Dispatch datagram as a guest, down through the VMX and potentially to
* the host.
* Returns number of bytes sent on success, error code otherwise.
*/
static int dg_dispatch_as_guest(struct vmci_datagram *dg)
{
int retval;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(dg->src,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource)
return VMCI_ERROR_NO_HANDLE;
retval = vmci_send_datagram(dg);
vmci_resource_put(resource);
return retval;
}
/*
* Dispatch datagram. This will determine the routing for the datagram
* and dispatch it accordingly.
* Returns number of bytes sent on success, error code otherwise.
*/
int vmci_datagram_dispatch(u32 context_id,
struct vmci_datagram *dg, bool from_guest)
{
int retval;
enum vmci_route route;
BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
if (dg->payload_size > VMCI_MAX_DG_SIZE ||
VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
pr_devel("Payload (size=%llu bytes) too big to send\n",
(unsigned long long)dg->payload_size);
return VMCI_ERROR_INVALID_ARGS;
}
retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
if (retval < VMCI_SUCCESS) {
pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
dg->src.context, dg->dst.context, retval);
return retval;
}
if (VMCI_ROUTE_AS_HOST == route) {
if (VMCI_INVALID_ID == context_id)
context_id = VMCI_HOST_CONTEXT_ID;
return dg_dispatch_as_host(context_id, dg);
}
if (VMCI_ROUTE_AS_GUEST == route)
return dg_dispatch_as_guest(dg);
pr_warn("Unknown route (%d) for datagram\n", route);
return VMCI_ERROR_DST_UNREACHABLE;
}
/*
* Invoke the handler for the given datagram. This is intended to be
* called only when acting as a guest and receiving a datagram from the
* virtual device.
*/
int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
{
struct vmci_resource *resource;
struct datagram_entry *dst_entry;
resource = vmci_resource_by_handle(dg->dst,
VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
dg->dst.context, dg->dst.resource);
return VMCI_ERROR_NO_HANDLE;
}
dst_entry = container_of(resource, struct datagram_entry, resource);
if (dst_entry->run_delayed) {
struct delayed_datagram_info *dg_info;
dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
GFP_ATOMIC);
if (!dg_info) {
vmci_resource_put(resource);
return VMCI_ERROR_NO_MEM;
}
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
} else {
dst_entry->recv_cb(dst_entry->client_data, dg);
vmci_resource_put(resource);
}
return VMCI_SUCCESS;
}
/*
* vmci_datagram_create_handle_priv() - Create host context datagram endpoint
* @resource_id: The resource ID.
* @flags: Datagram Flags.
* @priv_flags: Privilege Flags.
* @recv_cb: Callback when receiving datagrams.
* @client_data: Pointer for a datagram_entry struct
* @out_handle: vmci_handle that is populated as a result of this function.
*
* Creates a host context datagram endpoint and returns a handle to it.
*/
int vmci_datagram_create_handle_priv(u32 resource_id,
u32 flags,
u32 priv_flags,
vmci_datagram_recv_cb recv_cb,
void *client_data,
struct vmci_handle *out_handle)
{
if (out_handle == NULL)
return VMCI_ERROR_INVALID_ARGS;
if (recv_cb == NULL) {
pr_devel("Client callback needed when creating datagram\n");
return VMCI_ERROR_INVALID_ARGS;
}
if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
return VMCI_ERROR_INVALID_ARGS;
return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
client_data, out_handle);
}
EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
/*
* vmci_datagram_create_handle() - Create host context datagram endpoint
* @resource_id: Resource ID.
* @flags: Datagram Flags.
* @recv_cb: Callback when receiving datagrams.
* @client_data: Pointer for a datagram_entry struct
* @out_handle: vmci_handle that is populated as a result of this function.
*
* Creates a host context datagram endpoint and returns a handle to
* it. Same as vmci_datagram_create_handle_priv without the privilege
* flags argument.
*/
int vmci_datagram_create_handle(u32 resource_id,
u32 flags,
vmci_datagram_recv_cb recv_cb,
void *client_data,
struct vmci_handle *out_handle)
{
return vmci_datagram_create_handle_priv(
resource_id, flags,
VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
recv_cb, client_data,
out_handle);
}
EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
/*
* vmci_datagram_destroy_handle() - Destroys datagram handle
* @handle: vmci_handle to be destroyed and reaped.
*
* Use this function to destroy any datagram handles created by
* vmci_datagram_create_handle{,_priv}() functions.
*/
int vmci_datagram_destroy_handle(struct vmci_handle handle)
{
struct datagram_entry *entry;
struct vmci_resource *resource;
resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
if (!resource) {
pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
handle.context, handle.resource);
return VMCI_ERROR_NOT_FOUND;
}
entry = container_of(resource, struct datagram_entry, resource);
vmci_resource_put(&entry->resource);
vmci_resource_remove(&entry->resource);
kfree(entry);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
/*
* vmci_datagram_send() - Send a datagram
* @msg: The datagram to send.
*
* Sends the provided datagram on its merry way.
*/
int vmci_datagram_send(struct vmci_datagram *msg)
{
if (msg == NULL)
return VMCI_ERROR_INVALID_ARGS;
return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
}
EXPORT_SYMBOL_GPL(vmci_datagram_send);
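/*
 * Illustrative sketch only, not part of this driver: a hypothetical
 * host-side client creating a datagram endpoint, sending a small payload
 * to a peer and destroying the endpoint again. The resource id 42 and the
 * 4-byte payload are made-up example values; the receive callback
 * prototype mirrors the recv_cb usage above. Guarded with #if 0 so it
 * does not affect the build.
 */
#if 0
static int example_dg_recv(void *client_data, struct vmci_datagram *dg)
{
	pr_info("example: got %llu payload bytes\n",
		(unsigned long long)dg->payload_size);
	return 0;
}

static int example_dg_ping(u32 peer_cid, u32 peer_resource)
{
	struct vmci_handle handle;
	struct {
		struct vmci_datagram dg;
		u8 payload[4];
	} msg;
	int result;

	result = vmci_datagram_create_handle(42, 0, example_dg_recv, NULL,
					     &handle);
	if (result != VMCI_SUCCESS)
		return result;

	msg.dg.dst = vmci_make_handle(peer_cid, peer_resource);
	msg.dg.src = handle;
	msg.dg.payload_size = sizeof(msg.payload);
	memcpy(msg.payload, "ping", sizeof(msg.payload));

	/* On success the number of bytes sent is returned. */
	result = vmci_datagram_send(&msg.dg);

	vmci_datagram_destroy_handle(handle);
	return result < VMCI_SUCCESS ? result : 0;
}
#endif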
| linux-master | drivers/misc/vmw_vmci/vmci_datagram.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>
#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#define VMCI_UTIL_NUM_RESOURCES 1
enum {
VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};
enum {
VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};
/*
* VMCI driver initialization. This block can also be used to
* pass initial group membership etc.
*/
struct vmci_init_blk {
u32 cid;
u32 flags;
};
/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
struct vmci_handle handle;
u32 peer;
u32 flags;
u64 produce_size;
u64 consume_size;
u64 produce_page_file; /* User VA. */
u64 consume_page_file; /* User VA. */
u64 produce_page_file_size; /* Size of the file name array. */
u64 consume_page_file_size; /* Size of the file name array. */
s32 result;
u32 _pad;
};
/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
u64 notify_uva;
s32 result;
u32 _pad;
};
/*
* Per-instance host state
*/
struct vmci_host_dev {
struct vmci_ctx *context;
int user_version;
enum vmci_obj_type ct_type;
struct mutex lock; /* Mutex lock for vmci context access */
};
static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
/*
* Determines whether the VMCI host personality is
* available. Since the core functionality of the host driver is
* always present, all guests could possibly use the host
* personality. However, to minimize the deviation from the
* pre-unified driver state of affairs, we only consider the host
* device active if there is no active guest device or if there
* are VMX'en with active VMCI contexts using the host device.
*/
bool vmci_host_code_active(void)
{
return vmci_host_device_initialized &&
(!vmci_guest_code_active() ||
atomic_read(&vmci_host_active_users) > 0);
}
int vmci_host_users(void)
{
return atomic_read(&vmci_host_active_users);
}
/*
* Called on open of /dev/vmci.
*/
static int vmci_host_open(struct inode *inode, struct file *filp)
{
struct vmci_host_dev *vmci_host_dev;
vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
if (vmci_host_dev == NULL)
return -ENOMEM;
vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
mutex_init(&vmci_host_dev->lock);
filp->private_data = vmci_host_dev;
return 0;
}
/*
* Called on close of /dev/vmci, most often when the process
* exits.
*/
static int vmci_host_close(struct inode *inode, struct file *filp)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
vmci_ctx_destroy(vmci_host_dev->context);
vmci_host_dev->context = NULL;
/*
* The number of active contexts is used to track whether any
* VMX'en are using the host personality. It is incremented when
* a context is created through the IOCTL_VMCI_INIT_CONTEXT
* ioctl.
*/
atomic_dec(&vmci_host_active_users);
}
vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
kfree(vmci_host_dev);
filp->private_data = NULL;
return 0;
}
/*
* This is used to wake up the VMX when a VMCI call arrives, or
* to wake up select() or poll() at the next clock tick.
*/
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
struct vmci_ctx *context;
__poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
/*
* Read context only if ct_type == VMCIOBJ_CONTEXT to make
* sure that context is initialized
*/
context = vmci_host_dev->context;
/* Check for VMCI calls to this VM context. */
if (wait)
poll_wait(filp, &context->host_context.wait_queue,
wait);
spin_lock(&context->lock);
if (context->pending_datagrams > 0 ||
vmci_handle_arr_get_size(
context->pending_doorbell_array) > 0) {
mask = EPOLLIN;
}
spin_unlock(&context->lock);
}
return mask;
}
/*
* Copies the handles of a handle array into a user buffer, and
* returns the new length in user_buf_size. If the copy to the
* user buffer fails, the function still returns VMCI_SUCCESS,
* but retval != 0.
*/
static int drv_cp_harray_to_user(void __user *user_buf_uva,
u64 *user_buf_size,
struct vmci_handle_arr *handle_array,
int *retval)
{
u32 array_size = 0;
struct vmci_handle *handles;
if (handle_array)
array_size = vmci_handle_arr_get_size(handle_array);
if (array_size * sizeof(*handles) > *user_buf_size)
return VMCI_ERROR_MORE_DATA;
*user_buf_size = array_size * sizeof(*handles);
if (*user_buf_size)
*retval = copy_to_user(user_buf_uva,
vmci_handle_arr_get_handles
(handle_array), *user_buf_size);
return VMCI_SUCCESS;
}
/*
* Sets up a given context for notify to work. Maps the notify
* boolean in user VA into kernel space.
*/
static int vmci_host_setup_notify(struct vmci_ctx *context,
unsigned long uva)
{
int retval;
if (context->notify_page) {
pr_devel("%s: Notify mechanism is already set up\n", __func__);
return VMCI_ERROR_DUPLICATE_ENTRY;
}
/*
* We are using 'bool' internally, but let's be explicit
* about the size.
*/
BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
/*
* Lock physical page backing a given user VA.
*/
retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
if (retval != 1) {
context->notify_page = NULL;
return VMCI_ERROR_GENERIC;
}
if (context->notify_page == NULL)
return VMCI_ERROR_UNAVAILABLE;
/*
* Map the locked page and set up notify pointer.
*/
context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
vmci_ctx_check_signal_notify(context);
return VMCI_SUCCESS;
}
static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
unsigned int cmd, void __user *uptr)
{
if (cmd == IOCTL_VMCI_VERSION2) {
int __user *vptr = uptr;
if (get_user(vmci_host_dev->user_version, vptr))
return -EFAULT;
}
/*
* The basic logic here is:
*
* If the user sends in a version of 0 tell it our version.
* If the user didn't send in a version, tell it our version.
* If the user sent in an old version, tell it -its- version.
* If the user sent in an newer version, tell it our version.
*
* The rationale behind telling the caller its version is that
* Workstation 6.5 required that the VMX and VMCI kernel modules were
* version sync'd. All new VMX users will be programmed to
* handle the VMCI kernel module version.
*/
if (vmci_host_dev->user_version > 0 &&
vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
return vmci_host_dev->user_version;
}
return VMCI_VERSION;
}
#define vmci_ioctl_err(fmt, ...) \
pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_init_blk init_block;
const struct cred *cred;
int retval;
if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
vmci_ioctl_err("error reading init block\n");
return -EFAULT;
}
mutex_lock(&vmci_host_dev->lock);
if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
vmci_ioctl_err("received VMCI init on initialized handle\n");
retval = -EINVAL;
goto out;
}
if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
vmci_ioctl_err("unsupported VMCI restriction flag\n");
retval = -EINVAL;
goto out;
}
cred = get_current_cred();
vmci_host_dev->context = vmci_ctx_create(init_block.cid,
init_block.flags, 0,
vmci_host_dev->user_version,
cred);
put_cred(cred);
if (IS_ERR(vmci_host_dev->context)) {
retval = PTR_ERR(vmci_host_dev->context);
vmci_ioctl_err("error initializing context\n");
goto out;
}
/*
* Copy cid to userlevel; we do this to allow the VMX
* to enforce its policy on cid generation.
*/
init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
vmci_ctx_destroy(vmci_host_dev->context);
vmci_host_dev->context = NULL;
vmci_ioctl_err("error writing init block\n");
retval = -EFAULT;
goto out;
}
vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
atomic_inc(&vmci_host_active_users);
vmci_call_vsock_callback(true);
retval = 0;
out:
mutex_unlock(&vmci_host_dev->lock);
return retval;
}
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_datagram_snd_rcv_info send_info;
struct vmci_datagram *dg = NULL;
u32 cid;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&send_info, uptr, sizeof(send_info)))
return -EFAULT;
if (send_info.len > VMCI_MAX_DG_SIZE) {
vmci_ioctl_err("datagram is too big (size=%d)\n",
send_info.len);
return -EINVAL;
}
if (send_info.len < sizeof(*dg)) {
vmci_ioctl_err("datagram is too small (size=%d)\n",
send_info.len);
return -EINVAL;
}
dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
send_info.len);
if (IS_ERR(dg)) {
vmci_ioctl_err(
"cannot allocate memory to dispatch datagram\n");
return PTR_ERR(dg);
}
if (VMCI_DG_SIZE(dg) != send_info.len) {
vmci_ioctl_err("datagram size mismatch\n");
kfree(dg);
return -EINVAL;
}
pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
dg->dst.context, dg->dst.resource,
dg->src.context, dg->src.resource,
(unsigned long long)dg->payload_size);
/* Get source context id. */
cid = vmci_ctx_get_id(vmci_host_dev->context);
send_info.result = vmci_datagram_dispatch(cid, dg, true);
kfree(dg);
return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_datagram_snd_rcv_info recv_info;
struct vmci_datagram *dg = NULL;
int retval;
size_t size;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
return -EFAULT;
size = recv_info.len;
recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
&size, &dg);
if (recv_info.result >= VMCI_SUCCESS) {
void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
kfree(dg);
if (retval != 0)
return -EFAULT;
}
return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_handle handle;
int vmci_status;
int __user *retptr;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
struct vmci_qp_alloc_info_vmvm alloc_info;
struct vmci_qp_alloc_info_vmvm __user *info = uptr;
if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
return -EFAULT;
handle = alloc_info.handle;
retptr = &info->result;
vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
alloc_info.peer,
alloc_info.flags,
VMCI_NO_PRIVILEGE_FLAGS,
alloc_info.produce_size,
alloc_info.consume_size,
NULL,
vmci_host_dev->context);
if (vmci_status == VMCI_SUCCESS)
vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
} else {
struct vmci_qp_alloc_info alloc_info;
struct vmci_qp_alloc_info __user *info = uptr;
struct vmci_qp_page_store page_store;
if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
return -EFAULT;
handle = alloc_info.handle;
retptr = &info->result;
page_store.pages = alloc_info.ppn_va;
page_store.len = alloc_info.num_ppns;
vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
alloc_info.peer,
alloc_info.flags,
VMCI_NO_PRIVILEGE_FLAGS,
alloc_info.produce_size,
alloc_info.consume_size,
&page_store,
vmci_host_dev->context);
}
if (put_user(vmci_status, retptr)) {
if (vmci_status >= VMCI_SUCCESS) {
vmci_status = vmci_qp_broker_detach(handle,
vmci_host_dev->context);
}
return -EFAULT;
}
return 0;
}
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_qp_set_va_info set_va_info;
struct vmci_qp_set_va_info __user *info = uptr;
s32 result;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
vmci_ioctl_err("is not allowed\n");
return -EINVAL;
}
if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
return -EFAULT;
if (set_va_info.va) {
/*
* VMX is passing down a new VA for the queue
* pair mapping.
*/
result = vmci_qp_broker_map(set_va_info.handle,
vmci_host_dev->context,
set_va_info.va);
} else {
/*
* The queue pair is about to be unmapped by
* the VMX.
*/
result = vmci_qp_broker_unmap(set_va_info.handle,
vmci_host_dev->context, 0);
}
return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_qp_page_file_info page_file_info;
struct vmci_qp_page_file_info __user *info = uptr;
s32 result;
if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
vmci_ioctl_err("not supported on this VMX (version=%d)\n",
vmci_host_dev->user_version);
return -EINVAL;
}
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
return -EFAULT;
/*
* Communicate success pre-emptively to the caller. Note that the
* basic premise is that it is incumbent upon the caller not to look at
* the info.result field until after the ioctl() returns. And then,
* only if the ioctl() result indicates no error. We send up the
* SUCCESS status before calling SetPageStore() store because failing
* to copy up the result code means unwinding the SetPageStore().
*
* It turns out the logic to unwind a SetPageStore() opens a can of
* worms. For example, if a host had created the queue_pair and a
* guest attaches and SetPageStore() is successful but writing success
* fails, then ... the host has to be stopped from writing (anymore)
* data into the queue_pair. That means an additional test in the
* VMCI_Enqueue() code path. Ugh.
*/
if (put_user(VMCI_SUCCESS, &info->result)) {
/*
* In this case, we can't write a result field of the
* caller's info block. So, we don't even try to
* SetPageStore().
*/
return -EFAULT;
}
result = vmci_qp_broker_set_page_store(page_file_info.handle,
page_file_info.produce_va,
page_file_info.consume_va,
vmci_host_dev->context);
if (result < VMCI_SUCCESS) {
if (put_user(result, &info->result)) {
/*
* Note that in this case the SetPageStore()
* call failed but we were unable to
* communicate that to the caller (because the
* copy_to_user() call failed). So, if we
* simply return an error (in this case
* -EFAULT) then the caller will know that the
* SetPageStore failed even though we couldn't
* put the result code in the result field and
* indicate exactly why it failed.
*
* That says nothing about the issue where we
* were once able to write to the caller's info
* memory and now can't. Something more
* serious is probably going on than the fact
* that SetPageStore() didn't work.
*/
return -EFAULT;
}
}
return 0;
}
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_qp_dtch_info detach_info;
struct vmci_qp_dtch_info __user *info = uptr;
s32 result;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
return -EFAULT;
result = vmci_qp_broker_detach(detach_info.handle,
vmci_host_dev->context);
if (result == VMCI_SUCCESS &&
vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
result = VMCI_SUCCESS_LAST_DETACH;
}
return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_ctx_info ar_info;
struct vmci_ctx_info __user *info = uptr;
s32 result;
u32 cid;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
return -EFAULT;
cid = vmci_ctx_get_id(vmci_host_dev->context);
result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_ctx_info ar_info;
struct vmci_ctx_info __user *info = uptr;
u32 cid;
int result;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
return -EFAULT;
cid = vmci_ctx_get_id(vmci_host_dev->context);
result = vmci_ctx_remove_notification(cid,
ar_info.remote_cid);
return put_user(result, &info->result) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_ctx_chkpt_buf_info get_info;
u32 cid;
void *cpt_buf;
int retval;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&get_info, uptr, sizeof(get_info)))
return -EFAULT;
cid = vmci_ctx_get_id(vmci_host_dev->context);
get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
&get_info.buf_size, &cpt_buf);
if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
kfree(cpt_buf);
if (retval)
return -EFAULT;
}
return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_ctx_chkpt_buf_info set_info;
u32 cid;
void *cpt_buf;
int retval;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&set_info, uptr, sizeof(set_info)))
return -EFAULT;
cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
set_info.buf_size);
if (IS_ERR(cpt_buf))
return PTR_ERR(cpt_buf);
cid = vmci_ctx_get_id(vmci_host_dev->context);
set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
set_info.buf_size, cpt_buf);
retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
kfree(cpt_buf);
return retval;
}
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
u32 __user *u32ptr = uptr;
return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}
static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_set_notify_info notify_info;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(¬ify_info, uptr, sizeof(notify_info)))
return -EFAULT;
if (notify_info.notify_uva) {
notify_info.result =
vmci_host_setup_notify(vmci_host_dev->context,
notify_info.notify_uva);
} else {
vmci_ctx_unset_notify(vmci_host_dev->context);
notify_info.result = VMCI_SUCCESS;
}
return copy_to_user(uptr, ¬ify_info, sizeof(notify_info)) ?
-EFAULT : 0;
}
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_dbell_notify_resource_info info;
u32 cid;
if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
vmci_ioctl_err("invalid for current VMX versions\n");
return -EINVAL;
}
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (copy_from_user(&info, uptr, sizeof(info)))
return -EFAULT;
cid = vmci_ctx_get_id(vmci_host_dev->context);
switch (info.action) {
case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
info.result = vmci_ctx_notify_dbell(cid, info.handle,
flags);
} else {
info.result = VMCI_ERROR_UNAVAILABLE;
}
break;
case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
info.result = vmci_ctx_dbell_create(cid, info.handle);
break;
case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
info.result = vmci_ctx_dbell_destroy(cid, info.handle);
break;
default:
vmci_ioctl_err("got unknown action (action=%d)\n",
info.action);
info.result = VMCI_ERROR_INVALID_ARGS;
}
return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
const char *ioctl_name,
void __user *uptr)
{
struct vmci_ctx_notify_recv_info info;
struct vmci_handle_arr *db_handle_array;
struct vmci_handle_arr *qp_handle_array;
void __user *ubuf;
u32 cid;
int retval = 0;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
vmci_ioctl_err("not supported for the current vmx version\n");
return -EINVAL;
}
if (copy_from_user(&info, uptr, sizeof(info)))
return -EFAULT;
if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
(info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
return -EINVAL;
}
cid = vmci_ctx_get_id(vmci_host_dev->context);
info.result = vmci_ctx_rcv_notifications_get(cid,
&db_handle_array, &qp_handle_array);
if (info.result != VMCI_SUCCESS)
return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
db_handle_array, &retval);
if (info.result == VMCI_SUCCESS && !retval) {
ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
info.result = drv_cp_harray_to_user(ubuf,
&info.qp_handle_buf_size,
qp_handle_array, &retval);
}
if (!retval && copy_to_user(uptr, &info, sizeof(info)))
retval = -EFAULT;
vmci_ctx_rcv_notifications_release(cid,
db_handle_array, qp_handle_array,
info.result == VMCI_SUCCESS && !retval);
return retval;
}
static long vmci_host_unlocked_ioctl(struct file *filp,
unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
char *name = "IOCTL_VMCI_" # ioctl_name; \
return vmci_host_do_ ## ioctl_fn( \
vmci_host_dev, name, uptr); \
} while (0)
struct vmci_host_dev *vmci_host_dev = filp->private_data;
void __user *uptr = (void __user *)ioarg;
switch (iocmd) {
case IOCTL_VMCI_INIT_CONTEXT:
VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
case IOCTL_VMCI_DATAGRAM_SEND:
VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
case IOCTL_VMCI_DATAGRAM_RECEIVE:
VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
case IOCTL_VMCI_QUEUEPAIR_ALLOC:
VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
case IOCTL_VMCI_QUEUEPAIR_SETVA:
VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
case IOCTL_VMCI_QUEUEPAIR_DETACH:
VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
case IOCTL_VMCI_CTX_GET_CPT_STATE:
VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
case IOCTL_VMCI_CTX_SET_CPT_STATE:
VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
case IOCTL_VMCI_GET_CONTEXT_ID:
VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
case IOCTL_VMCI_SET_NOTIFY:
VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
case IOCTL_VMCI_NOTIFY_RESOURCE:
VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
case IOCTL_VMCI_VERSION:
case IOCTL_VMCI_VERSION2:
return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
default:
pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
return -EINVAL;
}
#undef VMCI_DO_IOCTL
}
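/*
 * Illustrative note (not part of the original source): for the
 * IOCTL_VMCI_SET_NOTIFY case above, VMCI_DO_IOCTL(SET_NOTIFY, set_notify)
 * expands roughly to
 *
 *	char *name = "IOCTL_VMCI_SET_NOTIFY";
 *	return vmci_host_do_set_notify(vmci_host_dev, name, uptr);
 *
 * i.e. the macro only builds the ioctl name string used in error messages
 * and tail-calls the matching vmci_host_do_*() handler.
 */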
static const struct file_operations vmuser_fops = {
.owner = THIS_MODULE,
.open = vmci_host_open,
.release = vmci_host_close,
.poll = vmci_host_poll,
.unlocked_ioctl = vmci_host_unlocked_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice vmci_host_miscdev = {
.name = "vmci",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vmuser_fops,
};
int __init vmci_host_init(void)
{
int error;
host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
-1, VMCI_VERSION, NULL);
if (IS_ERR(host_context)) {
error = PTR_ERR(host_context);
pr_warn("Failed to initialize VMCIContext (error%d)\n",
error);
return error;
}
error = misc_register(&vmci_host_miscdev);
if (error) {
pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
vmci_host_miscdev.name,
MISC_MAJOR, vmci_host_miscdev.minor,
error);
pr_warn("Unable to initialize host personality\n");
vmci_ctx_destroy(host_context);
return error;
}
pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
vmci_host_device_initialized = true;
return 0;
}
void __exit vmci_host_exit(void)
{
vmci_host_device_initialized = false;
misc_deregister(&vmci_host_miscdev);
vmci_ctx_destroy(host_context);
vmci_qp_broker_exit();
pr_debug("VMCI host driver module unloaded\n");
}
| linux-master | drivers/misc/vmw_vmci/vmci_host.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include "vmci_handle_array.h"
static size_t handle_arr_calc_size(u32 capacity)
{
return VMCI_HANDLE_ARRAY_HEADER_SIZE +
capacity * sizeof(struct vmci_handle);
}
struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
{
struct vmci_handle_arr *array;
if (max_capacity == 0 || capacity > max_capacity)
return NULL;
if (capacity == 0)
capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
max_capacity);
array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
if (!array)
return NULL;
array->capacity = capacity;
array->max_capacity = max_capacity;
array->size = 0;
return array;
}
void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
{
kfree(array);
}
int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
struct vmci_handle handle)
{
struct vmci_handle_arr *array = *array_ptr;
if (unlikely(array->size >= array->capacity)) {
/* reallocate. */
struct vmci_handle_arr *new_array;
u32 capacity_bump = min(array->max_capacity - array->capacity,
array->capacity);
size_t new_size = handle_arr_calc_size(array->capacity +
capacity_bump);
if (array->size >= array->max_capacity)
return VMCI_ERROR_NO_MEM;
new_array = krealloc(array, new_size, GFP_ATOMIC);
if (!new_array)
return VMCI_ERROR_NO_MEM;
new_array->capacity += capacity_bump;
*array_ptr = array = new_array;
}
array->entries[array->size] = handle;
array->size++;
return VMCI_SUCCESS;
}
/*
* Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
*/
struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
u32 i;
for (i = 0; i < array->size; i++) {
if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
handle = array->entries[i];
array->size--;
array->entries[i] = array->entries[array->size];
array->entries[array->size] = VMCI_INVALID_HANDLE;
break;
}
}
return handle;
}
/*
* Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
*/
struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
if (array->size) {
array->size--;
handle = array->entries[array->size];
array->entries[array->size] = VMCI_INVALID_HANDLE;
}
return handle;
}
/*
* Handle at given index, VMCI_INVALID_HANDLE if invalid index.
*/
struct vmci_handle
vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
{
if (unlikely(index >= array->size))
return VMCI_INVALID_HANDLE;
return array->entries[index];
}
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
u32 i;
for (i = 0; i < array->size; i++)
if (vmci_handle_is_equal(array->entries[i], entry_handle))
return true;
return false;
}
/*
* NULL if the array is empty. Otherwise, a pointer to the array
* of VMCI handles in the handle array.
*/
struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
{
if (array->size)
return array->entries;
return NULL;
}
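/*
 * Illustrative usage sketch (not part of this driver): how a caller might
 * build and tear down a small handle array with the helpers above. It
 * assumes vmci_make_handle() and VMCI_HOST_CONTEXT_ID are reachable through
 * the existing includes; treat the chosen values as examples only.
 */
static int __maybe_unused vmci_handle_arr_example(void)
{
	struct vmci_handle_arr *arr;
	struct vmci_handle h = vmci_make_handle(VMCI_HOST_CONTEXT_ID, 1);

	/* default initial capacity, allow growth up to 8 entries */
	arr = vmci_handle_arr_create(0, 8);
	if (!arr)
		return VMCI_ERROR_NO_MEM;

	/* append may krealloc() and move the array, hence the double pointer */
	if (vmci_handle_arr_append_entry(&arr, h) != VMCI_SUCCESS) {
		vmci_handle_arr_destroy(arr);
		return VMCI_ERROR_NO_MEM;
	}

	if (vmci_handle_arr_has_entry(arr, h))
		vmci_handle_arr_remove_entry(arr, h);

	vmci_handle_arr_destroy(arr);
	return VMCI_SUCCESS;
}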
| linux-master | drivers/misc/vmw_vmci/vmci_handle_array.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "vmci_driver.h"
#include "vmci_event.h"
#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10
struct vmci_subscription {
u32 id;
u32 event;
vmci_event_cb callback;
void *callback_data;
struct list_head node; /* on one of subscriber lists */
};
static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);
int __init vmci_event_init(void)
{
int i;
for (i = 0; i < VMCI_EVENT_MAX; i++)
INIT_LIST_HEAD(&subscriber_array[i]);
return VMCI_SUCCESS;
}
void vmci_event_exit(void)
{
int e;
/* We free all memory at exit. */
for (e = 0; e < VMCI_EVENT_MAX; e++) {
struct vmci_subscription *cur, *p2;
list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
/*
* We should never get here because all events
* should have been unregistered before we try
* to unload the driver module.
*/
pr_warn("Unexpected free events occurring\n");
list_del(&cur->node);
kfree(cur);
}
}
}
/*
* Find entry. Assumes subscriber_mutex is held.
*/
static struct vmci_subscription *event_find(u32 sub_id)
{
int e;
for (e = 0; e < VMCI_EVENT_MAX; e++) {
struct vmci_subscription *cur;
list_for_each_entry(cur, &subscriber_array[e], node) {
if (cur->id == sub_id)
return cur;
}
}
return NULL;
}
/*
* Actually delivers the events to the subscribers.
* The callback function for each subscriber is invoked.
*/
static void event_deliver(struct vmci_event_msg *event_msg)
{
struct vmci_subscription *cur;
struct list_head *subscriber_list;
rcu_read_lock();
subscriber_list = &subscriber_array[event_msg->event_data.event];
list_for_each_entry_rcu(cur, subscriber_list, node) {
cur->callback(cur->id, &event_msg->event_data,
cur->callback_data);
}
rcu_read_unlock();
}
/*
* Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
* subscribers for given event.
*/
int vmci_event_dispatch(struct vmci_datagram *msg)
{
struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
if (msg->payload_size < sizeof(u32) ||
msg->payload_size > sizeof(struct vmci_event_data_max))
return VMCI_ERROR_INVALID_ARGS;
if (!VMCI_EVENT_VALID(event_msg->event_data.event))
return VMCI_ERROR_EVENT_UNKNOWN;
event_deliver(event_msg);
return VMCI_SUCCESS;
}
/*
* vmci_event_subscribe() - Subscribe to a given event.
* @event: The event to subscribe to.
* @callback: The callback to invoke upon the event.
* @callback_data: Data to pass to the callback.
* @subscription_id: ID used to track subscription. Used with
* vmci_event_unsubscribe()
*
* Subscribes to the provided event. The callback specified will be
* fired from RCU critical section and therefore must not sleep.
*/
int vmci_event_subscribe(u32 event,
vmci_event_cb callback,
void *callback_data,
u32 *new_subscription_id)
{
struct vmci_subscription *sub;
int attempts;
int retval;
bool have_new_id = false;
if (!new_subscription_id) {
pr_devel("%s: Invalid subscription (NULL)\n", __func__);
return VMCI_ERROR_INVALID_ARGS;
}
if (!VMCI_EVENT_VALID(event) || !callback) {
pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
__func__, event, callback, callback_data);
return VMCI_ERROR_INVALID_ARGS;
}
sub = kzalloc(sizeof(*sub), GFP_KERNEL);
if (!sub)
return VMCI_ERROR_NO_MEM;
sub->id = VMCI_EVENT_MAX;
sub->event = event;
sub->callback = callback;
sub->callback_data = callback_data;
INIT_LIST_HEAD(&sub->node);
mutex_lock(&subscriber_mutex);
/* Creation of a new event is always allowed. */
for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
static u32 subscription_id;
/*
* We try to get an id a couple of times before
* claiming we are out of resources.
*/
/* Test for duplicate id. */
if (!event_find(++subscription_id)) {
sub->id = subscription_id;
have_new_id = true;
break;
}
}
if (have_new_id) {
list_add_rcu(&sub->node, &subscriber_array[event]);
retval = VMCI_SUCCESS;
} else {
retval = VMCI_ERROR_NO_RESOURCES;
}
mutex_unlock(&subscriber_mutex);
*new_subscription_id = sub->id;
return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
/*
* vmci_event_unsubscribe() - unsubscribe from an event.
* @sub_id: A subscription ID as provided by vmci_event_subscribe()
*
* Unsubscribe from given event. Removes it from list and frees it.
* Will return callback_data if requested by caller.
*/
int vmci_event_unsubscribe(u32 sub_id)
{
struct vmci_subscription *s;
mutex_lock(&subscriber_mutex);
s = event_find(sub_id);
if (s)
list_del_rcu(&s->node);
mutex_unlock(&subscriber_mutex);
if (!s)
return VMCI_ERROR_NOT_FOUND;
kvfree_rcu_mightsleep(s);
return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
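/*
 * Illustrative usage sketch (not part of this driver): a minimal client of
 * vmci_event_subscribe()/vmci_event_unsubscribe(). The callback runs from
 * an RCU read-side section (see event_deliver() above) and therefore must
 * not sleep. The callback prototype mirrors the invocation in
 * event_deliver(); treat it, and the names used, as assumptions.
 */
static void example_event_cb(u32 sub_id, const struct vmci_event_data *ed,
			     void *client_data)
{
	pr_debug("event %u delivered to subscription %u\n", ed->event, sub_id);
}

static int __maybe_unused example_event_client(u32 event)
{
	u32 sub_id;
	int rc;

	rc = vmci_event_subscribe(event, example_event_cb, NULL, &sub_id);
	if (rc < VMCI_SUCCESS)
		return rc;

	/* ... receive events for a while ... */

	return vmci_event_unsubscribe(sub_id);
}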
| linux-master | drivers/misc/vmw_vmci/vmci_event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include "vmci_driver.h"
#include "vmci_event.h"
static bool vmci_disable_host;
module_param_named(disable_host, vmci_disable_host, bool, 0);
MODULE_PARM_DESC(disable_host,
"Disable driver host personality (default=enabled)");
static bool vmci_disable_guest;
module_param_named(disable_guest, vmci_disable_guest, bool, 0);
MODULE_PARM_DESC(disable_guest,
"Disable driver guest personality (default=enabled)");
static bool vmci_guest_personality_initialized;
static bool vmci_host_personality_initialized;
static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */
static vmci_vsock_cb vmci_vsock_transport_cb;
static bool vmci_vsock_cb_host_called;
/*
* vmci_get_context_id() - Gets the current context ID.
*
* Returns the current context ID: the VM context ID when the guest
* personality is active, otherwise the host context ID.
*/
u32 vmci_get_context_id(void)
{
if (vmci_guest_code_active())
return vmci_get_vm_context_id();
else if (vmci_host_code_active())
return VMCI_HOST_CONTEXT_ID;
return VMCI_INVALID_ID;
}
EXPORT_SYMBOL_GPL(vmci_get_context_id);
/*
* vmci_register_vsock_callback() - Register the VSOCK vmci_transport callback.
*
* The callback will be called when the first host or guest becomes active,
* or if they are already active when this function is called.
* To unregister the callback, call this function with NULL parameter.
*
* Returns 0 on success. -EBUSY if a callback is already registered.
*/
int vmci_register_vsock_callback(vmci_vsock_cb callback)
{
int err = 0;
mutex_lock(&vmci_vsock_mutex);
if (vmci_vsock_transport_cb && callback) {
err = -EBUSY;
goto out;
}
vmci_vsock_transport_cb = callback;
if (!vmci_vsock_transport_cb) {
vmci_vsock_cb_host_called = false;
goto out;
}
if (vmci_guest_code_active())
vmci_vsock_transport_cb(false);
if (vmci_host_users() > 0) {
vmci_vsock_cb_host_called = true;
vmci_vsock_transport_cb(true);
}
out:
mutex_unlock(&vmci_vsock_mutex);
return err;
}
EXPORT_SYMBOL_GPL(vmci_register_vsock_callback);
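/*
 * Illustrative usage sketch (not part of this driver): how a transport such
 * as the vsock vmci_transport might hook in. The callback name and body are
 * assumptions for the example; only the bool "is_host" argument is taken
 * from the calls above.
 */
static void example_vsock_cb(bool is_host)
{
	pr_debug("VMCI %s personality is active\n", is_host ? "host" : "guest");
}

static int __maybe_unused example_register_vsock(void)
{
	/* returns -EBUSY if a callback is already registered */
	return vmci_register_vsock_callback(example_vsock_cb);
}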
void vmci_call_vsock_callback(bool is_host)
{
mutex_lock(&vmci_vsock_mutex);
if (!vmci_vsock_transport_cb)
goto out;
/* In the host, this function could be called multiple times,
* but we want to register it only once.
*/
if (is_host) {
if (vmci_vsock_cb_host_called)
goto out;
vmci_vsock_cb_host_called = true;
}
vmci_vsock_transport_cb(is_host);
out:
mutex_unlock(&vmci_vsock_mutex);
}
static int __init vmci_drv_init(void)
{
int vmci_err;
int error;
vmci_err = vmci_event_init();
if (vmci_err < VMCI_SUCCESS) {
pr_err("Failed to initialize VMCIEvent (result=%d)\n",
vmci_err);
return -EINVAL;
}
if (!vmci_disable_guest) {
error = vmci_guest_init();
if (error) {
pr_warn("Failed to initialize guest personality (err=%d)\n",
error);
} else {
vmci_guest_personality_initialized = true;
pr_info("Guest personality initialized and is %s\n",
vmci_guest_code_active() ?
"active" : "inactive");
}
}
if (!vmci_disable_host) {
error = vmci_host_init();
if (error) {
pr_warn("Unable to initialize host personality (err=%d)\n",
error);
} else {
vmci_host_personality_initialized = true;
pr_info("Initialized host personality\n");
}
}
if (!vmci_guest_personality_initialized &&
!vmci_host_personality_initialized) {
vmci_event_exit();
return -ENODEV;
}
return 0;
}
module_init(vmci_drv_init);
static void __exit vmci_drv_exit(void)
{
if (vmci_guest_personality_initialized)
vmci_guest_exit();
if (vmci_host_personality_initialized)
vmci_host_exit();
vmci_event_exit();
}
module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
MODULE_VERSION("1.1.6.0-k");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/vmw_vmci/vmci_driver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMware VMCI Driver
*
* Copyright (C) 2012 VMware, Inc. All rights reserved.
*/
#include <linux/vmw_vmci_defs.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/completion.h>
#include "vmci_resource.h"
#include "vmci_driver.h"
#define VMCI_RESOURCE_HASH_BITS 7
#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
struct vmci_hash_table {
spinlock_t lock;
struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
};
static struct vmci_hash_table vmci_resource_table = {
.lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
};
static unsigned int vmci_resource_hash(struct vmci_handle handle)
{
return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
}
/*
* Gets a resource (if one exists) matching given handle from the hash table.
*/
static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
enum vmci_resource_type type)
{
struct vmci_resource *r, *resource = NULL;
unsigned int idx = vmci_resource_hash(handle);
rcu_read_lock();
hlist_for_each_entry_rcu(r,
&vmci_resource_table.entries[idx], node) {
u32 cid = r->handle.context;
u32 rid = r->handle.resource;
if (r->type == type &&
rid == handle.resource &&
(cid == handle.context || cid == VMCI_INVALID_ID ||
handle.context == VMCI_INVALID_ID)) {
resource = r;
break;
}
}
rcu_read_unlock();
return resource;
}
/*
* Find an unused resource ID and return it. The first
* VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
* its value + 1.
* Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
*/
static u32 vmci_resource_find_id(u32 context_id,
enum vmci_resource_type resource_type)
{
static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
u32 old_rid = resource_id;
u32 current_rid;
/*
* Generate a unique resource ID. Keep on trying until we wrap around
* in the RID space.
*/
do {
struct vmci_handle handle;
current_rid = resource_id;
resource_id++;
if (unlikely(resource_id == VMCI_INVALID_ID)) {
/* Skip the reserved rids. */
resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
}
handle = vmci_make_handle(context_id, current_rid);
if (!vmci_resource_lookup(handle, resource_type))
return current_rid;
} while (resource_id != old_rid);
return VMCI_INVALID_ID;
}
int vmci_resource_add(struct vmci_resource *resource,
enum vmci_resource_type resource_type,
struct vmci_handle handle)
{
unsigned int idx;
int result;
spin_lock(&vmci_resource_table.lock);
if (handle.resource == VMCI_INVALID_ID) {
handle.resource = vmci_resource_find_id(handle.context,
resource_type);
if (handle.resource == VMCI_INVALID_ID) {
result = VMCI_ERROR_NO_HANDLE;
goto out;
}
} else if (vmci_resource_lookup(handle, resource_type)) {
result = VMCI_ERROR_ALREADY_EXISTS;
goto out;
}
resource->handle = handle;
resource->type = resource_type;
INIT_HLIST_NODE(&resource->node);
kref_init(&resource->kref);
init_completion(&resource->done);
idx = vmci_resource_hash(resource->handle);
hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
result = VMCI_SUCCESS;
out:
spin_unlock(&vmci_resource_table.lock);
return result;
}
void vmci_resource_remove(struct vmci_resource *resource)
{
struct vmci_handle handle = resource->handle;
unsigned int idx = vmci_resource_hash(handle);
struct vmci_resource *r;
/* Remove resource from hash table. */
spin_lock(&vmci_resource_table.lock);
hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
if (vmci_handle_is_equal(r->handle, resource->handle)) {
hlist_del_init_rcu(&r->node);
break;
}
}
spin_unlock(&vmci_resource_table.lock);
synchronize_rcu();
vmci_resource_put(resource);
wait_for_completion(&resource->done);
}
struct vmci_resource *
vmci_resource_by_handle(struct vmci_handle resource_handle,
enum vmci_resource_type resource_type)
{
struct vmci_resource *r, *resource = NULL;
rcu_read_lock();
r = vmci_resource_lookup(resource_handle, resource_type);
if (r &&
(resource_type == r->type ||
resource_type == VMCI_RESOURCE_TYPE_ANY)) {
resource = vmci_resource_get(r);
}
rcu_read_unlock();
return resource;
}
/*
* Get a reference to given resource.
*/
struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
{
kref_get(&resource->kref);
return resource;
}
static void vmci_release_resource(struct kref *kref)
{
struct vmci_resource *resource =
container_of(kref, struct vmci_resource, kref);
/* Verify the resource has been unlinked from hash table */
WARN_ON(!hlist_unhashed(&resource->node));
/* Signal that container of this resource can now be destroyed */
complete(&resource->done);
}
/*
* Resource's release function will get called if last reference.
* If it is the last reference, then we are sure that nobody else
* can increment the count again (it's gone from the resource hash
* table), so there's no need for locking here.
*/
int vmci_resource_put(struct vmci_resource *resource)
{
/*
* We propagate the information back to caller in case it wants to know
* whether entry was freed.
*/
return kref_put(&resource->kref, vmci_release_resource) ?
VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
}
struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
{
return resource->handle;
}
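/*
 * Illustrative usage sketch (not part of this driver): the typical
 * add/lookup/put/remove life cycle of a struct vmci_resource. A real caller
 * passes a concrete resource type; VMCI_RESOURCE_TYPE_ANY is used here only
 * because it is the one type name visible in this file.
 */
static int __maybe_unused vmci_resource_example(struct vmci_resource *resource,
						u32 context_id)
{
	struct vmci_handle handle =
		vmci_make_handle(context_id, VMCI_INVALID_ID);
	struct vmci_resource *found;
	int result;

	/* VMCI_INVALID_ID asks vmci_resource_add() to pick a free resource id */
	result = vmci_resource_add(resource, VMCI_RESOURCE_TYPE_ANY, handle);
	if (result != VMCI_SUCCESS)
		return result;

	/* a successful lookup takes a reference; drop it with vmci_resource_put() */
	found = vmci_resource_by_handle(vmci_resource_handle(resource),
					VMCI_RESOURCE_TYPE_ANY);
	if (found)
		vmci_resource_put(found);

	/* unhashes the resource and waits for outstanding references to drain */
	vmci_resource_remove(resource);
	return VMCI_SUCCESS;
}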
| linux-master | drivers/misc/vmw_vmci/vmci_resource.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Shared Transport Line discipline driver Core
* Init Manager module responsible for GPIO control
* and firmware download
* Copyright (C) 2009-2010 Texas Instruments
* Author: Pavan Savoy <[email protected]>
*/
#define pr_fmt(fmt) "(stk) :" fmt
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/gpio.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
/*
* st_get_plat_device -
* function which returns the reference to the platform device
* requested by id. As of now only 1 such device exists (id=0).
* The context requesting the reference can obtain the id from either
* a. the protocol driver which is registering, or
* b. the tty device which is opened.
*/
static struct platform_device *st_get_plat_device(int id)
{
return st_kim_devices[id];
}
/*
* validate_firmware_response -
* function to check whether the firmware response was proper;
* in case of error, don't complete, so that the wait for a proper
* response times out
*/
static void validate_firmware_response(struct kim_data_s *kim_gdata)
{
struct sk_buff *skb = kim_gdata->rx_skb;
if (!skb)
return;
/*
* these magic numbers are the position in the response buffer which
* allows us to distinguish whether the response is for the read
* version info. command
*/
if (skb->data[2] == 0x01 && skb->data[3] == 0x01 &&
skb->data[4] == 0x10 && skb->data[5] == 0x00) {
/* fw version response */
memcpy(kim_gdata->resp_buffer,
kim_gdata->rx_skb->data,
kim_gdata->rx_skb->len);
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
} else if (unlikely(skb->data[5] != 0)) {
pr_err("no proper response during fw download");
pr_err("data6 %x", skb->data[5]);
kfree_skb(skb);
return; /* keep waiting for the proper response */
}
/* because of all the script being downloaded */
complete_all(&kim_gdata->kim_rcvd);
kfree_skb(skb);
}
/*
* check for data len received inside kim_int_recv
* most often hit the last case to update state to waiting for data
*/
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
{
register int room = skb_tailroom(kim_gdata->rx_skb);
pr_debug("len %d room %d", len, room);
if (!len) {
validate_firmware_response(kim_gdata);
} else if (len > room) {
/*
* Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(kim_gdata->rx_skb);
} else {
/*
* Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
kim_gdata->rx_state = ST_W4_DATA;
kim_gdata->rx_count = len;
return len;
}
/*
* Change ST LL state to continue to process next
* packet
*/
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
return 0;
}
/*
* kim_int_recv - receive function called during firmware download
* firmware download responses on different UART drivers
* have been observed to arrive split across multiple
* tty receive calls, hence the stateful parsing logic
*/
static void kim_int_recv(struct kim_data_s *kim_gdata, const u8 *ptr,
size_t count)
{
int len = 0;
unsigned char *plen;
pr_debug("%s", __func__);
/* Decode received bytes here */
while (count) {
if (kim_gdata->rx_count) {
len = min_t(unsigned int, kim_gdata->rx_count, count);
skb_put_data(kim_gdata->rx_skb, ptr, len);
kim_gdata->rx_count -= len;
count -= len;
ptr += len;
if (kim_gdata->rx_count)
continue;
/* Check ST RX state machine , where are we? */
switch (kim_gdata->rx_state) {
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
validate_firmware_response(kim_gdata);
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
continue;
/* Waiting for Bluetooth event header ? */
case ST_W4_HEADER:
plen =
(unsigned char *)&kim_gdata->rx_skb->data[1];
pr_debug("event hdr: plen 0x%02x\n", *plen);
kim_check_data_len(kim_gdata, *plen);
continue;
} /* end of switch */
} /* end of if rx_state */
switch (*ptr) {
/* Bluetooth event packet? */
case 0x04:
kim_gdata->rx_state = ST_W4_HEADER;
kim_gdata->rx_count = 2;
break;
default:
pr_info("unknown packet");
ptr++;
count--;
continue;
}
ptr++;
count--;
kim_gdata->rx_skb =
alloc_skb(1024+8, GFP_ATOMIC);
if (!kim_gdata->rx_skb) {
pr_err("can't allocate mem for new packet");
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_count = 0;
return;
}
skb_reserve(kim_gdata->rx_skb, 8);
kim_gdata->rx_skb->cb[0] = 4;
kim_gdata->rx_skb->cb[1] = 0;
}
return;
}
static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
static const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
long timeout;
pr_debug("%s", __func__);
reinit_completion(&kim_gdata->kim_rcvd);
if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
pr_err("kim: couldn't write 4 bytes");
return -EIO;
}
timeout = wait_for_completion_interruptible_timeout(
&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME));
if (timeout <= 0) {
pr_err(" waiting for ver info- timed out or received signal");
return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
/*
* the positions 12 & 13 in the response buffer provide with the
* chip, major & minor numbers
*/
version =
MAKEWORD(kim_gdata->resp_buffer[12],
kim_gdata->resp_buffer[13]);
chip = (version & 0x7C00) >> 10;
min_ver = (version & 0x007F);
maj_ver = (version & 0x0380) >> 7;
if (version & 0x8000)
maj_ver |= 0x0008;
sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts",
chip, maj_ver, min_ver);
/* to be accessed later via sysfs entry */
kim_gdata->version.full = version;
kim_gdata->version.chip = chip;
kim_gdata->version.maj_ver = maj_ver;
kim_gdata->version.min_ver = min_ver;
pr_info("%s", bts_scr_name);
return 0;
}
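/*
 * Worked example (illustrative, not from the original source): a version
 * word of 0x1C42 read from the response buffer decodes as
 *	chip    = (0x1C42 & 0x7C00) >> 10 = 7
 *	maj_ver = (0x1C42 & 0x0380) >> 7  = 0   (bit 15 clear, so no +8)
 *	min_ver =  0x1C42 & 0x007F        = 66
 * yielding the firmware name "ti-connectivity/TIInit_7.0.66.bts".
 */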
static void skip_change_remote_baud(unsigned char **ptr, long *len)
{
unsigned char *nxt_action, *cur_action;
cur_action = *ptr;
nxt_action = cur_action + sizeof(struct bts_action) +
((struct bts_action *) cur_action)->size;
if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) {
pr_err("invalid action after change remote baud command");
} else {
*ptr = *ptr + sizeof(struct bts_action) +
((struct bts_action *)cur_action)->size;
*len = *len - (sizeof(struct bts_action) +
((struct bts_action *)cur_action)->size);
/* warn the user when these are not commented out in the firmware */
pr_warn("skipping the wait event of change remote baud");
}
}
/*
* download_firmware -
* internal function which parses through the .bts firmware
* script file and interprets SEND, WAIT and DELAY actions as of now
*/
static long download_firmware(struct kim_data_s *kim_gdata)
{
long err = 0;
long len = 0;
unsigned char *ptr = NULL;
unsigned char *action_ptr = NULL;
unsigned char bts_scr_name[40] = { 0 }; /* 40 char long bts scr name? */
int wr_room_space;
int cmd_size;
unsigned long timeout;
err = read_local_version(kim_gdata, bts_scr_name);
if (err != 0) {
pr_err("kim: failed to read local ver");
return err;
}
err =
request_firmware(&kim_gdata->fw_entry, bts_scr_name,
&kim_gdata->kim_pdev->dev);
if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
(kim_gdata->fw_entry->size == 0))) {
pr_err(" request_firmware failed(errno %ld) for %s", err,
bts_scr_name);
return -EINVAL;
}
ptr = (void *)kim_gdata->fw_entry->data;
len = kim_gdata->fw_entry->size;
/*
* bts_header to remove out magic number and
* version
*/
ptr += sizeof(struct bts_header);
len -= sizeof(struct bts_header);
while (len > 0 && ptr) {
pr_debug(" action size %d, type %d ",
((struct bts_action *)ptr)->size,
((struct bts_action *)ptr)->type);
switch (((struct bts_action *)ptr)->type) {
case ACTION_SEND_COMMAND: /* action send */
pr_debug("S");
action_ptr = &(((struct bts_action *)ptr)->data[0]);
if (unlikely
(((struct hci_command *)action_ptr)->opcode ==
0xFF36)) {
/*
* ignore remote change
* baud rate HCI VS command
*/
pr_warn("change remote baud"
" rate command in firmware");
skip_change_remote_baud(&ptr, &len);
break;
}
/*
* Make sure we have enough free space in uart
* tx buffer to write current firmware command
*/
cmd_size = ((struct bts_action *)ptr)->size;
timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME);
do {
wr_room_space =
st_get_uart_wr_room(kim_gdata->core_data);
if (wr_room_space < 0) {
pr_err("Unable to get free "
"space info from uart tx buffer");
release_firmware(kim_gdata->fw_entry);
return wr_room_space;
}
mdelay(1); /* wait 1ms before checking room */
} while ((wr_room_space < cmd_size) &&
time_before(jiffies, timeout));
/* Timeout happened ? */
if (time_after_eq(jiffies, timeout)) {
pr_err("Timeout while waiting for free "
"free space in uart tx buffer");
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
/*
* reinit completion before sending for the
* relevant wait
*/
reinit_completion(&kim_gdata->kim_rcvd);
/*
* Free space found in uart buffer, call st_int_write
* to send current firmware command to the uart tx
* buffer.
*/
err = st_int_write(kim_gdata->core_data,
((struct bts_action_send *)action_ptr)->data,
((struct bts_action *)ptr)->size);
if (unlikely(err < 0)) {
release_firmware(kim_gdata->fw_entry);
return err;
}
/*
* Check number of bytes written to the uart tx buffer
* and requested command write size
*/
if (err != cmd_size) {
pr_err("Number of bytes written to uart "
"tx buffer are not matching with "
"requested cmd write size");
release_firmware(kim_gdata->fw_entry);
return -EIO;
}
break;
case ACTION_WAIT_EVENT: /* wait */
pr_debug("W");
err = wait_for_completion_interruptible_timeout(
&kim_gdata->kim_rcvd,
msecs_to_jiffies(CMD_RESP_TIME));
if (err <= 0) {
pr_err("response timeout/signaled during fw download ");
/* timed out */
release_firmware(kim_gdata->fw_entry);
return err ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
break;
case ACTION_DELAY: /* sleep */
pr_info("sleep command in scr");
action_ptr = &(((struct bts_action *)ptr)->data[0]);
mdelay(((struct bts_action_delay *)action_ptr)->msec);
break;
}
len =
len - (sizeof(struct bts_action) +
((struct bts_action *)ptr)->size);
ptr =
ptr + sizeof(struct bts_action) +
((struct bts_action *)ptr)->size;
}
/* fw download complete */
release_firmware(kim_gdata->fw_entry);
return 0;
}
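/*
 * Illustrative note (not part of the original source): download_firmware()
 * above walks the .bts script as a bts_header followed by a sequence of
 * variable-length records, each a struct bts_action (type + size) followed
 * by "size" bytes of data:
 *
 *	[bts_header][type|size|data...][type|size|data...]...
 *
 * ACTION_SEND_COMMAND records are written to the UART (with the change
 * remote baud command skipped), ACTION_WAIT_EVENT records block on
 * kim_rcvd, and ACTION_DELAY records simply mdelay() for the given time.
 */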
/**********************************************************************/
/* functions called from ST core */
/* called from ST Core, when REG_IN_PROGRESS (registration in progress)
* can be because of
* 1. response to read local version
* 2. during send/recv's of firmware download
*/
void st_kim_recv(void *disc_data, const u8 *data, size_t count)
{
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
struct kim_data_s *kim_gdata = st_gdata->kim_data;
/*
* proceed to gather all data and distinguish read fw version response
* from other fw responses when data gathering is complete
*/
kim_int_recv(kim_gdata, data, count);
return;
}
/*
* to signal completion of line discipline installation
* called from ST Core, upon tty_open
*/
void st_kim_complete(void *kim_data)
{
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
complete(&kim_gdata->ldisc_installed);
}
/*
* st_kim_start - called from ST Core upon 1st registration
* This involves toggling the chip enable gpio, reading
* the firmware version from chip, forming the fw file name
* based on the chip version, requesting the fw, parsing it
* and perform download(send/recv).
*/
long st_kim_start(void *kim_data)
{
long err = 0;
long retry = POR_RETRY_COUNT;
struct ti_st_plat_data *pdata;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
pdata = kim_gdata->kim_pdev->dev.platform_data;
do {
/* platform specific enabling code here */
if (pdata->chip_enable)
pdata->chip_enable(kim_gdata);
/* Configure BT nShutdown to HIGH state */
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
mdelay(5); /* FIXME: a proper toggle */
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(100);
/* re-initialize the completion */
reinit_completion(&kim_gdata->ldisc_installed);
/* send notification to UIM */
kim_gdata->ldisc_install = 1;
pr_info("ldisc_install = 1");
sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
NULL, "install");
/* wait for ldisc to be installed */
err = wait_for_completion_interruptible_timeout(
&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME));
if (!err) {
/*
* ldisc installation timeout,
* flush uart, power cycle BT_EN
*/
pr_err("ldisc installation timeout");
err = st_kim_stop(kim_gdata);
continue;
} else {
/* ldisc installed now */
pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
/*
* ldisc installed but fw download failed,
* flush uart & power cycle BT_EN
*/
pr_err("download firmware failed");
err = st_kim_stop(kim_gdata);
continue;
} else { /* on success don't retry */
break;
}
}
} while (retry--);
return err;
}
/*
* st_kim_stop - stop communication with chip.
* This can be called from ST Core/KIM, on the-
* (a) last un-register when chip need not be powered there-after,
* (b) upon failure to either install ldisc or download firmware.
* The function is responsible to (a) notify UIM about un-installation,
* (b) flush UART if the ldisc was installed.
* (c) reset BT_EN - pull down nshutdown at the end.
* (d) invoke platform's chip disabling routine.
*/
long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
struct ti_st_plat_data *pdata =
kim_gdata->kim_pdev->dev.platform_data;
struct tty_struct *tty = kim_gdata->core_data->tty;
reinit_completion(&kim_gdata->ldisc_installed);
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
}
/* send uninstall notification to UIM */
pr_info("ldisc_install = 0");
kim_gdata->ldisc_install = 0;
sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install");
/* wait for ldisc to be un-installed */
err = wait_for_completion_interruptible_timeout(
&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME));
if (!err) { /* timeout */
pr_err(" timed out waiting for ldisc to be un-installed");
err = -ETIMEDOUT;
}
/* By default configure BT nShutdown to LOW state */
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
mdelay(1);
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(1);
gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
/* platform specific disable */
if (pdata->chip_disable)
pdata->chip_disable(kim_gdata);
return err;
}
/**********************************************************************/
/* functions called from subsystems */
/* called when debugfs entry is read from */
static int version_show(struct seq_file *s, void *unused)
{
struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full,
kim_gdata->version.chip, kim_gdata->version.maj_ver,
kim_gdata->version.min_ver);
return 0;
}
static int list_show(struct seq_file *s, void *unused)
{
struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
kim_st_list_protocols(kim_gdata->core_data, s);
return 0;
}
static ssize_t show_install(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", kim_data->ldisc_install);
}
#ifdef DEBUG
static ssize_t store_dev_name(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
pr_debug("storing dev name >%s<", buf);
strscpy(kim_data->dev_name, buf, UART_DEV_NAME_LEN);
pr_debug("stored dev name >%s<", kim_data->dev_name);
return count;
}
static ssize_t store_baud_rate(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
pr_debug("storing baud rate >%s<", buf);
sscanf(buf, "%ld", &kim_data->baud_rate);
pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
return count;
}
#endif /* if DEBUG */
static ssize_t show_dev_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", kim_data->dev_name);
}
static ssize_t show_baud_rate(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", kim_data->baud_rate);
}
static ssize_t show_flow_cntrl(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kim_data_s *kim_data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", kim_data->flow_cntrl);
}
/* structures specific for sysfs entries */
static struct kobj_attribute ldisc_install =
__ATTR(install, 0444, (void *)show_install, NULL);
static struct kobj_attribute uart_dev_name =
#ifdef DEBUG /* TODO: move this to debug-fs if possible */
__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
#else
__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
#endif
static struct kobj_attribute uart_baud_rate =
#ifdef DEBUG /* TODO: move to debugfs */
__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
#else
__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
#endif
static struct kobj_attribute uart_flow_cntrl =
__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
static struct attribute *uim_attrs[] = {
&ldisc_install.attr,
&uart_dev_name.attr,
&uart_baud_rate.attr,
&uart_flow_cntrl.attr,
NULL,
};
static const struct attribute_group uim_attr_grp = {
.attrs = uim_attrs,
};
/*
* st_kim_ref - reference the core's data
* This references the per-ST platform device in the arch/xx/
* board-xx.c file.
* This would enable multiple such platform devices to exist
* on a given platform
*/
void st_kim_ref(struct st_data_s **core_data, int id)
{
struct platform_device *pdev;
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
pdev = st_get_plat_device(id);
if (!pdev)
goto err;
kim_gdata = platform_get_drvdata(pdev);
if (!kim_gdata)
goto err;
*core_data = kim_gdata->core_data;
return;
err:
*core_data = NULL;
}
DEFINE_SHOW_ATTRIBUTE(version);
DEFINE_SHOW_ATTRIBUTE(list);
/**********************************************************************/
/* functions called from platform device driver subsystem
* need to have a relevant platform device entry in the platform's
* board-*.c file
*/
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
struct kim_data_s *kim_gdata;
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
int err;
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
st_kim_devices[pdev->id] = pdev;
} else {
/* platform's sure about existence of 1 device */
st_kim_devices[0] = pdev;
}
kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_KERNEL);
if (!kim_gdata) {
pr_err("no mem to allocate");
return -ENOMEM;
}
platform_set_drvdata(pdev, kim_gdata);
err = st_core_init(&kim_gdata->core_data);
if (err != 0) {
pr_err(" ST core init failed");
err = -EIO;
goto err_core_init;
}
/* refer to itself */
kim_gdata->core_data->kim_data = kim_gdata;
/* Claim the chip enable nShutdown gpio from the system */
kim_gdata->nshutdown = pdata->nshutdown_gpio;
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
goto err_sysfs_group;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
goto err_sysfs_group;
}
/* get reference of pdev for request_firmware */
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp);
if (err) {
pr_err("failed to create sysfs entries");
goto err_sysfs_group;
}
/* copying platform data */
strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN);
kim_gdata->flow_cntrl = pdata->flow_cntrl;
kim_gdata->baud_rate = pdata->baud_rate;
pr_info("sysfs entries created\n");
kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
kim_gdata, &version_fops);
debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
kim_gdata, &list_fops);
return 0;
err_sysfs_group:
st_core_exit(kim_gdata->core_data);
err_core_init:
kfree(kim_gdata);
return err;
}
static int kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
kim_gdata = platform_get_drvdata(pdev);
/*
* Free the Bluetooth/FM/GPIO
* nShutdown gpio from the system
*/
gpio_free(pdata->nshutdown_gpio);
pr_info("nshutdown GPIO Freed");
debugfs_remove_recursive(kim_debugfs_dir);
sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
pr_info("sysfs entries removed");
kim_gdata->kim_pdev = NULL;
st_core_exit(kim_gdata->core_data);
kfree(kim_gdata);
kim_gdata = NULL;
return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
if (pdata->suspend)
return pdata->suspend(pdev, state);
return 0;
}
static int kim_resume(struct platform_device *pdev)
{
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
if (pdata->resume)
return pdata->resume(pdev);
return 0;
}
/**********************************************************************/
/* entry point for ST KIM module, called in from ST Core */
static struct platform_driver kim_platform_driver = {
.probe = kim_probe,
.remove = kim_remove,
.suspend = kim_suspend,
.resume = kim_resume,
.driver = {
.name = "kim",
},
};
module_platform_driver(kim_platform_driver);
MODULE_AUTHOR("Pavan Savoy <[email protected]>");
MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips ");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/ti-st/st_kim.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Shared Transport Line discipline driver Core
* This hooks up ST KIM driver and ST LL driver
* Copyright (C) 2009-2010 Texas Instruments
* Author: Pavan Savoy <[email protected]>
*/
#define pr_fmt(fmt) "(stc): " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
/*
* function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
*/
static void (*st_recv)(void *disc_data, const u8 *ptr, size_t count);
/********************************************************************/
static void add_channel_to_table(struct st_data_s *st_gdata,
struct st_proto_s *new_proto)
{
pr_info("%s: id %d\n", __func__, new_proto->chnl_id);
/* list now has the channel id as index itself */
st_gdata->list[new_proto->chnl_id] = new_proto;
st_gdata->is_registered[new_proto->chnl_id] = true;
}
static void remove_channel_from_table(struct st_data_s *st_gdata,
struct st_proto_s *proto)
{
pr_info("%s: id %d\n", __func__, proto->chnl_id);
/* st_gdata->list[proto->chnl_id] = NULL; */
st_gdata->is_registered[proto->chnl_id] = false;
}
/*
* called from KIM during firmware download.
*
* This is a wrapper function to tty->ops->write_room.
* It returns number of free space available in
* uart tx buffer.
*/
int st_get_uart_wr_room(struct st_data_s *st_gdata)
{
if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
pr_err("tty unavailable to perform write");
return -1;
}
return tty_write_room(st_gdata->tty);
}
/*
* can be called in from
* -- KIM (during fw download)
* -- ST Core (during st_write)
*
* This is the internal write function - a wrapper
* to tty->ops->write
*/
int st_int_write(struct st_data_s *st_gdata,
const unsigned char *data, int count)
{
struct tty_struct *tty;
if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
pr_err("tty unavailable to perform write");
return -EINVAL;
}
tty = st_gdata->tty;
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE,
16, 1, data, count, 0);
#endif
return tty->ops->write(tty, data, count);
}
/*
* push the skb received to relevant
* protocol stacks
*/
static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
{
pr_debug(" %s(prot:%d) ", __func__, chnl_id);
if (unlikely
(st_gdata == NULL || st_gdata->rx_skb == NULL
|| st_gdata->is_registered[chnl_id] == false)) {
pr_err("chnl_id %d not registered, no data to send?",
chnl_id);
kfree_skb(st_gdata->rx_skb);
return;
}
/*
* this cannot fail
* this shouldn't take long
* - should be just skb_queue_tail for the
* protocol stack driver
*/
if (likely(st_gdata->list[chnl_id]->recv != NULL)) {
if (unlikely
(st_gdata->list[chnl_id]->recv
(st_gdata->list[chnl_id]->priv_data, st_gdata->rx_skb)
!= 0)) {
pr_err(" proto stack %d's ->recv failed", chnl_id);
kfree_skb(st_gdata->rx_skb);
return;
}
} else {
pr_err(" proto stack %d's ->recv null", chnl_id);
kfree_skb(st_gdata->rx_skb);
}
return;
}
/*
* st_reg_complete - to call registration complete callbacks
* of all protocol stack drivers
* This function is called with the spin lock held; protocol drivers are
* only expected to complete their waits and do nothing more than that.
*/
static void st_reg_complete(struct st_data_s *st_gdata, int err)
{
unsigned char i = 0;
pr_info(" %s ", __func__);
for (i = 0; i < ST_MAX_CHANNELS; i++) {
if (likely(st_gdata != NULL &&
st_gdata->is_registered[i] == true &&
st_gdata->list[i]->reg_complete_cb != NULL)) {
st_gdata->list[i]->reg_complete_cb
(st_gdata->list[i]->priv_data, err);
pr_info("protocol %d's cb sent %d\n", i, err);
if (err) { /* cleanup registered protocol */
st_gdata->is_registered[i] = false;
if (st_gdata->protos_registered)
st_gdata->protos_registered--;
}
}
}
}
static inline int st_check_data_len(struct st_data_s *st_gdata,
unsigned char chnl_id, int len)
{
int room = skb_tailroom(st_gdata->rx_skb);
pr_debug("len %d room %d", len, room);
if (!len) {
/*
* Received packet has only packet header and
* has zero length payload. So, ask ST CORE to
* forward the packet to protocol driver (BT/FM/GPS)
*/
st_send_frame(chnl_id, st_gdata);
} else if (len > room) {
/*
* Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(st_gdata->rx_skb);
} else {
/*
* Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
st_gdata->rx_state = ST_W4_DATA;
st_gdata->rx_count = len;
return len;
}
/* Change ST state to continue to process next packet */
st_gdata->rx_state = ST_W4_PACKET_TYPE;
st_gdata->rx_skb = NULL;
st_gdata->rx_count = 0;
st_gdata->rx_chnl = 0;
return 0;
}
/*
* st_wakeup_ack - internal function for action when wake-up ack
* received
*/
static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned char cmd)
{
struct sk_buff *waiting_skb;
unsigned long flags = 0;
spin_lock_irqsave(&st_gdata->lock, flags);
/*
* de-Q from waitQ and Q in txQ now that the
* chip is awake
*/
while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
skb_queue_tail(&st_gdata->txq, waiting_skb);
/* state forwarded to ST LL */
st_ll_sleep_state(st_gdata, (unsigned long)cmd);
spin_unlock_irqrestore(&st_gdata->lock, flags);
/* wake up to send the recently copied skbs from waitQ */
st_tx_wakeup(st_gdata);
}
/*
* st_int_recv - ST's internal receive function.
* Decodes received RAW data and forwards to corresponding
* client drivers (Bluetooth,FM,GPS..etc).
* This can receive various types of packets,
* HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets
* CH-8 packets from FM, CH-9 packets from GPS cores.
*/
static void st_int_recv(void *disc_data, const u8 *ptr, size_t count)
{
struct st_proto_s *proto;
unsigned short payload_len = 0;
int len = 0;
unsigned char type = 0;
unsigned char *plen;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;
if (st_gdata == NULL) {
pr_err(" received null from TTY ");
return;
}
pr_debug("count %zu rx_state %ld"
"rx_count %ld", count, st_gdata->rx_state,
st_gdata->rx_count);
spin_lock_irqsave(&st_gdata->lock, flags);
/* Decode received bytes here */
while (count) {
if (st_gdata->rx_count) {
len = min_t(unsigned int, st_gdata->rx_count, count);
skb_put_data(st_gdata->rx_skb, ptr, len);
st_gdata->rx_count -= len;
count -= len;
ptr += len;
if (st_gdata->rx_count)
continue;
/* Check ST RX state machine , where are we? */
switch (st_gdata->rx_state) {
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
/*
* Ask ST CORE to forward
* the packet to protocol driver
*/
st_send_frame(st_gdata->rx_chnl, st_gdata);
st_gdata->rx_state = ST_W4_PACKET_TYPE;
st_gdata->rx_skb = NULL;
continue;
/* parse the header to know details */
case ST_W4_HEADER:
proto = st_gdata->list[st_gdata->rx_chnl];
plen =
&st_gdata->rx_skb->data
[proto->offset_len_in_hdr];
pr_debug("plen pointing to %x\n", *plen);
if (proto->len_size == 1) /* 1 byte len field */
payload_len = *(unsigned char *)plen;
else if (proto->len_size == 2)
payload_len =
__le16_to_cpu(*(unsigned short *)plen);
else
pr_info("%s: invalid length "
"for id %d\n",
__func__, proto->chnl_id);
st_check_data_len(st_gdata, proto->chnl_id,
payload_len);
pr_debug("off %d, pay len %d\n",
proto->offset_len_in_hdr, payload_len);
continue;
} /* end of switch rx_state */
}
/* end of if rx_count */
/*
* Check first byte of packet and identify module
* owner (BT/FM/GPS)
*/
switch (*ptr) {
case LL_SLEEP_IND:
case LL_SLEEP_ACK:
case LL_WAKE_UP_IND:
pr_debug("PM packet");
/*
* this takes appropriate action based on
* sleep state received --
*/
st_ll_sleep_state(st_gdata, *ptr);
/*
* if WAKEUP_IND collides copy from waitq to txq
* and assume chip awake
*/
spin_unlock_irqrestore(&st_gdata->lock, flags);
if (st_ll_getstate(st_gdata) == ST_LL_AWAKE)
st_wakeup_ack(st_gdata, LL_WAKE_UP_ACK);
spin_lock_irqsave(&st_gdata->lock, flags);
ptr++;
count--;
continue;
case LL_WAKE_UP_ACK:
pr_debug("PM packet");
spin_unlock_irqrestore(&st_gdata->lock, flags);
/* wake up ack received */
st_wakeup_ack(st_gdata, *ptr);
spin_lock_irqsave(&st_gdata->lock, flags);
ptr++;
count--;
continue;
/* Unknown packet? */
default:
type = *ptr;
/*
* Default case means non-HCILL packets,
* i.e. packets destined for:
* (a) a valid protocol - a channel ID within
* ST_MAX_CHANNELS, and
* (b) a registered protocol - checked via
* "st_gdata->list[type] == NULL".
* This rules out any invalid protocol and any
* unregistered protocol with channel ID < 16.
*/
if ((type >= ST_MAX_CHANNELS) ||
(st_gdata->list[type] == NULL)) {
pr_err("chip/interface misbehavior: "
"dropping frame starting "
"with 0x%02x\n", type);
goto done;
}
st_gdata->rx_skb = alloc_skb(
st_gdata->list[type]->max_frame_size,
GFP_ATOMIC);
if (st_gdata->rx_skb == NULL) {
pr_err("out of memory: dropping\n");
goto done;
}
skb_reserve(st_gdata->rx_skb,
st_gdata->list[type]->reserve);
/* next 2 required for BT only */
st_gdata->rx_skb->cb[0] = type; /*pkt_type*/
st_gdata->rx_skb->cb[1] = 0; /*incoming*/
st_gdata->rx_chnl = *ptr;
st_gdata->rx_state = ST_W4_HEADER;
st_gdata->rx_count = st_gdata->list[type]->hdr_len;
pr_debug("rx_count %ld\n", st_gdata->rx_count);
}
ptr++;
count--;
}
done:
spin_unlock_irqrestore(&st_gdata->lock, flags);
pr_debug("done %s", __func__);
return;
}
/*
* st_int_dequeue - internal de-Q function.
* If the previous data set was not written
* completely, return that skb which has the pending data.
* In normal cases, return top of txq.
*/
static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
{
struct sk_buff *returning_skb;
pr_debug("%s", __func__);
if (st_gdata->tx_skb != NULL) {
returning_skb = st_gdata->tx_skb;
st_gdata->tx_skb = NULL;
return returning_skb;
}
return skb_dequeue(&st_gdata->txq);
}
/*
* st_int_enqueue - internal Q-ing function.
* Will either Q the skb to txq or the tx_waitq
* depending on the ST LL state.
* If the chip is asleep, then Q it onto waitq and
* wakeup the chip.
* txq and waitq needs protection since the other contexts
* may be sending data, waking up chip.
*/
static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
{
unsigned long flags = 0;
pr_debug("%s", __func__);
spin_lock_irqsave(&st_gdata->lock, flags);
switch (st_ll_getstate(st_gdata)) {
case ST_LL_AWAKE:
pr_debug("ST LL is AWAKE, sending normally");
skb_queue_tail(&st_gdata->txq, skb);
break;
case ST_LL_ASLEEP_TO_AWAKE:
skb_queue_tail(&st_gdata->tx_waitq, skb);
break;
case ST_LL_AWAKE_TO_ASLEEP:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
kfree_skb(skb);
break;
case ST_LL_ASLEEP:
skb_queue_tail(&st_gdata->tx_waitq, skb);
st_ll_wakeup(st_gdata);
break;
default:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
kfree_skb(skb);
break;
}
spin_unlock_irqrestore(&st_gdata->lock, flags);
pr_debug("done %s", __func__);
return;
}
/*
* internal wakeup function
* called from either
* - TTY layer when write's finished
* - st_write (in context of the protocol stack)
*/
static void work_fn_write_wakeup(struct work_struct *work)
{
struct st_data_s *st_gdata = container_of(work, struct st_data_s,
work_write_wakeup);
st_tx_wakeup((void *)st_gdata);
}
void st_tx_wakeup(struct st_data_s *st_data)
{
struct sk_buff *skb;
unsigned long flags; /* for irq save flags */
pr_debug("%s", __func__);
/* check for sending & set flag sending here */
if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) {
pr_debug("ST already sending");
/* keep sending */
set_bit(ST_TX_WAKEUP, &st_data->tx_state);
return;
/* TX_WAKEUP will be checked in another
* context
*/
}
do { /* come back if st_tx_wakeup is set */
/* woke-up to write */
clear_bit(ST_TX_WAKEUP, &st_data->tx_state);
while ((skb = st_int_dequeue(st_data))) {
int len;
spin_lock_irqsave(&st_data->lock, flags);
/* enable wake-up from TTY */
set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags);
len = st_int_write(st_data, skb->data, skb->len);
skb_pull(skb, len);
/* if the whole skb was written (len == skb->len), skb->len is now 0 */
if (skb->len) {
/* would be the next skb to be sent */
st_data->tx_skb = skb;
spin_unlock_irqrestore(&st_data->lock, flags);
break;
}
kfree_skb(skb);
spin_unlock_irqrestore(&st_data->lock, flags);
}
/* if wake-up is set in another context- restart sending */
} while (test_bit(ST_TX_WAKEUP, &st_data->tx_state));
/* clear flag sending */
clear_bit(ST_TX_SENDING, &st_data->tx_state);
}
/********************************************************************/
/* functions called from ST KIM
*/
void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf)
{
seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n",
st_gdata->protos_registered,
st_gdata->is_registered[0x04] == true ? 'R' : 'U',
st_gdata->is_registered[0x08] == true ? 'R' : 'U',
st_gdata->is_registered[0x09] == true ? 'R' : 'U');
}
/********************************************************************/
/*
* functions called from protocol stack drivers
* to be EXPORT-ed
*/
long st_register(struct st_proto_s *new_proto)
{
struct st_data_s *st_gdata;
long err = 0;
unsigned long flags = 0;
st_kim_ref(&st_gdata, 0);
if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
|| new_proto->reg_complete_cb == NULL) {
pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
return -EINVAL;
}
if (new_proto->chnl_id >= ST_MAX_CHANNELS) {
pr_err("chnl_id %d not supported", new_proto->chnl_id);
return -EPROTONOSUPPORT;
}
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
pr_err("chnl_id %d already registered", new_proto->chnl_id);
return -EALREADY;
}
/* can be from process context only */
spin_lock_irqsave(&st_gdata->lock, flags);
if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) {
pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id);
/* fw download in progress */
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
set_bit(ST_REG_PENDING, &st_gdata->st_state);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EINPROGRESS;
} else if (st_gdata->protos_registered == ST_EMPTY) {
pr_info(" chnl_id list empty :%d ", new_proto->chnl_id);
set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_kim_recv;
/* enable the ST LL - to set default chip state */
st_ll_enable(st_gdata);
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
/*
* this may take a while to complete
* since it involves BT fw download
*/
err = st_kim_start(st_gdata->kim_data);
if (err != 0) {
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
spin_lock_irqsave(&st_gdata->lock, flags);
st_reg_complete(st_gdata, err);
spin_unlock_irqrestore(&st_gdata->lock, flags);
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
}
spin_lock_irqsave(&st_gdata->lock, flags);
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
/*
* this is where all pending registrations
* are signalled to be complete by calling callback functions
*/
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_debug(" call reg complete callback ");
st_reg_complete(st_gdata, 0);
}
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
/*
* check for already registered once more,
* since the above check is old
*/
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
pr_err(" proto %d already registered ",
new_proto->chnl_id);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EALREADY;
}
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
}
/* if fw is already downloaded & new stack registers protocol */
else {
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
/* lock already held before entering else */
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
}
}
EXPORT_SYMBOL_GPL(st_register);
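/*
 * st_register() summary (as implemented above):
 * - fw download in progress: the channel is queued and -EINPROGRESS is
 *   returned; completion is signalled later via reg_complete_cb.
 * - first registration: the fw download is kicked off via st_kim_start()
 *   and pending registrations are completed afterwards.
 * - fw already downloaded: the channel is simply added to the table.
 *
 * Illustrative caller sketch (hypothetical names, not part of this file;
 * see the drivers including ti_wilink_st.h for the real users):
 *
 * static struct st_proto_s my_proto = {
 *     .chnl_id = 0x04,            // BT channel
 *     .recv = my_recv,
 *     .reg_complete_cb = my_reg_complete,
 * };
 * long err = st_register(&my_proto);
 * if (err == -EINPROGRESS)
 *     // wait for my_reg_complete() before writing
 * else if (!err)
 *     my_proto.write(skb);        // write is installed by st_register()
 */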
/*
* to unregister a protocol -
* to be called from protocol stack driver
*/
long st_unregister(struct st_proto_s *proto)
{
long err = 0;
unsigned long flags = 0;
struct st_data_s *st_gdata;
pr_debug("%s: %d ", __func__, proto->chnl_id);
st_kim_ref(&st_gdata, 0);
if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
pr_err(" chnl_id %d not supported", proto->chnl_id);
return -EPROTONOSUPPORT;
}
spin_lock_irqsave(&st_gdata->lock, flags);
if (st_gdata->is_registered[proto->chnl_id] == false) {
pr_err(" chnl_id %d not registered", proto->chnl_id);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EPROTONOSUPPORT;
}
if (st_gdata->protos_registered)
st_gdata->protos_registered--;
remove_channel_from_table(st_gdata, proto);
spin_unlock_irqrestore(&st_gdata->lock, flags);
if ((st_gdata->protos_registered == ST_EMPTY) &&
(!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_info(" all chnl_ids unregistered ");
/* stop traffic on tty */
if (st_gdata->tty) {
tty_ldisc_flush(st_gdata->tty);
stop_tty(st_gdata->tty);
}
/* all chnl_ids now unregistered */
st_kim_stop(st_gdata->kim_data);
/* disable ST LL */
st_ll_disable(st_gdata);
}
return err;
}
/*
* called in protocol stack drivers
* via the write function pointer
*/
long st_write(struct sk_buff *skb)
{
struct st_data_s *st_gdata;
long len;
st_kim_ref(&st_gdata, 0);
if (unlikely(skb == NULL || st_gdata == NULL
|| st_gdata->tty == NULL)) {
pr_err("data/tty unavailable to perform write");
return -EINVAL;
}
pr_debug("%d to be written", skb->len);
len = skb->len;
/* st_ll to decide where to enqueue the skb */
st_int_enqueue(st_gdata, skb);
/* wake up */
st_tx_wakeup(st_gdata);
/* return number of bytes written */
return len;
}
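/*
 * st_write() is what st_register() installs as new_proto->write for the
 * protocol drivers. It only enqueues the skb (st_int_enqueue) and kicks
 * the transmitter (st_tx_wakeup); the skb->len returned here therefore
 * means "accepted for transmission", not "already sent on the UART".
 */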
/* for protocols making use of shared transport */
EXPORT_SYMBOL_GPL(st_unregister);
/********************************************************************/
/*
* functions called from TTY layer
*/
static int st_tty_open(struct tty_struct *tty)
{
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
st_kim_ref(&st_gdata, 0);
st_gdata->tty = tty;
tty->disc_data = st_gdata;
/* don't do a wakeup for now */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/* mem already allocated */
tty->receive_room = 65536;
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
/*
* signal to UIM via KIM that -
* installation of N_TI_WL ldisc is complete
*/
st_kim_complete(st_gdata->kim_data);
pr_debug("done %s", __func__);
return 0;
}
static void st_tty_close(struct tty_struct *tty)
{
unsigned char i;
unsigned long flags;
struct st_data_s *st_gdata = tty->disc_data;
pr_info("%s ", __func__);
/*
* TODO:
* if a protocol has been registered & line discipline
* un-installed for some reason - what should be done ?
*/
spin_lock_irqsave(&st_gdata->lock, flags);
for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
if (st_gdata->is_registered[i] == true)
pr_err("%d not un-registered", i);
st_gdata->list[i] = NULL;
st_gdata->is_registered[i] = false;
}
st_gdata->protos_registered = 0;
spin_unlock_irqrestore(&st_gdata->lock, flags);
/*
* signal to UIM via KIM that -
* N_TI_WL ldisc is un-installed
*/
st_kim_complete(st_gdata->kim_data);
st_gdata->tty = NULL;
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
spin_lock_irqsave(&st_gdata->lock, flags);
/* empty out txq and tx_waitq */
skb_queue_purge(&st_gdata->txq);
skb_queue_purge(&st_gdata->tx_waitq);
/* reset the TTY Rx states of ST */
st_gdata->rx_count = 0;
st_gdata->rx_state = ST_W4_PACKET_TYPE;
kfree_skb(st_gdata->rx_skb);
st_gdata->rx_skb = NULL;
spin_unlock_irqrestore(&st_gdata->lock, flags);
pr_debug("%s: done ", __func__);
}
static void st_tty_receive(struct tty_struct *tty, const u8 *data,
const u8 *tty_flags, size_t count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
16, 1, data, count, 0);
#endif
/*
* if fw download is in progress then route incoming data
* to KIM for validation
*/
st_recv(tty->disc_data, data, count);
pr_debug("done %s", __func__);
}
/*
* wake-up function called from the TTY layer;
* it schedules the internal wakeup function via a work item
*/
static void st_tty_wakeup(struct tty_struct *tty)
{
struct st_data_s *st_gdata = tty->disc_data;
pr_debug("%s ", __func__);
/* don't do a wakeup for now */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/*
* schedule the internal wakeup instead of calling it directly to
* avoid a lockup (port->lock, needed in tty->ops->write, is
* already taken here)
*/
schedule_work(&st_gdata->work_write_wakeup);
}
static void st_tty_flush_buffer(struct tty_struct *tty)
{
struct st_data_s *st_gdata = tty->disc_data;
pr_debug("%s ", __func__);
kfree_skb(st_gdata->tx_skb);
st_gdata->tx_skb = NULL;
tty_driver_flush_buffer(tty);
return;
}
static struct tty_ldisc_ops st_ldisc_ops = {
.num = N_TI_WL,
.name = "n_st",
.open = st_tty_open,
.close = st_tty_close,
.receive_buf = st_tty_receive,
.write_wakeup = st_tty_wakeup,
.flush_buffer = st_tty_flush_buffer,
.owner = THIS_MODULE
};
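/*
 * st_ldisc_ops is registered for line discipline N_TI_WL in
 * st_core_init() below. Typically (an assumption about the usual setup,
 * not something enforced in this file) the user-space UIM daemon opens
 * the UART and switches it to N_TI_WL via ioctl(TIOCSETD), which makes
 * the TTY core call st_tty_open() and lets st_kim_complete() signal
 * that the ldisc installation finished.
 */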
/********************************************************************/
int st_core_init(struct st_data_s **core_data)
{
struct st_data_s *st_gdata;
long err;
err = tty_register_ldisc(&st_ldisc_ops);
if (err) {
pr_err("error registering %d line discipline %ld",
N_TI_WL, err);
return err;
}
pr_debug("registered n_shared line discipline");
st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
if (!st_gdata) {
pr_err("memory allocation failed");
err = -ENOMEM;
goto err_unreg_ldisc;
}
/* Initialize ST TxQ and Tx waitQ queue heads. All BT/FM/GPS module skbs
 * will be pushed into these queues for actual transmission.
*/
skb_queue_head_init(&st_gdata->txq);
skb_queue_head_init(&st_gdata->tx_waitq);
/* Locking used in st_int_enqueue() to avoid multiple execution */
spin_lock_init(&st_gdata->lock);
err = st_ll_init(st_gdata);
if (err) {
pr_err("error during st_ll initialization(%ld)", err);
goto err_free_gdata;
}
INIT_WORK(&st_gdata->work_write_wakeup, work_fn_write_wakeup);
*core_data = st_gdata;
return 0;
err_free_gdata:
kfree(st_gdata);
err_unreg_ldisc:
tty_unregister_ldisc(&st_ldisc_ops);
return err;
}
void st_core_exit(struct st_data_s *st_gdata)
{
long err;
/* internal module cleanup */
err = st_ll_deinit(st_gdata);
if (err)
pr_err("error during deinit of ST LL %ld", err);
if (st_gdata != NULL) {
/* Free ST Tx Qs and skbs */
skb_queue_purge(&st_gdata->txq);
skb_queue_purge(&st_gdata->tx_waitq);
kfree_skb(st_gdata->rx_skb);
kfree_skb(st_gdata->tx_skb);
/* TTY ldisc cleanup */
tty_unregister_ldisc(&st_ldisc_ops);
/* free the global data pointer */
kfree(st_gdata);
}
}
| linux-master | drivers/misc/ti-st/st_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Shared Transport driver
* HCI-LL module responsible for TI proprietary HCI_LL protocol
* Copyright (C) 2009-2010 Texas Instruments
* Author: Pavan Savoy <[email protected]>
*/
#define pr_fmt(fmt) "(stll) :" fmt
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ti_wilink_st.h>
/**********************************************************************/
/* internal functions */
static void send_ll_cmd(struct st_data_s *st_data,
unsigned char cmd)
{
pr_debug("%s: writing %x", __func__, cmd);
st_int_write(st_data, &cmd, 1);
return;
}
static void ll_device_want_to_sleep(struct st_data_s *st_data)
{
struct kim_data_s *kim_data;
struct ti_st_plat_data *pdata;
pr_debug("%s", __func__);
/* sanity check */
if (st_data->ll_state != ST_LL_AWAKE)
pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND"
"in state %ld", st_data->ll_state);
send_ll_cmd(st_data, LL_SLEEP_ACK);
/* update state */
st_data->ll_state = ST_LL_ASLEEP;
/* communicate to platform about chip asleep */
kim_data = st_data->kim_data;
pdata = kim_data->kim_pdev->dev.platform_data;
if (pdata->chip_asleep)
pdata->chip_asleep(NULL);
}
static void ll_device_want_to_wakeup(struct st_data_s *st_data)
{
struct kim_data_s *kim_data;
struct ti_st_plat_data *pdata;
/* diff actions in diff states */
switch (st_data->ll_state) {
case ST_LL_ASLEEP:
send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */
break;
case ST_LL_ASLEEP_TO_AWAKE:
/* duplicate wake_ind */
pr_err("duplicate wake_ind while waiting for Wake ack");
break;
case ST_LL_AWAKE:
/* duplicate wake_ind */
pr_err("duplicate wake_ind already AWAKE");
break;
case ST_LL_AWAKE_TO_ASLEEP:
/* duplicate wake_ind */
pr_err("duplicate wake_ind");
break;
}
/* update state */
st_data->ll_state = ST_LL_AWAKE;
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
pdata = kim_data->kim_pdev->dev.platform_data;
if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
/**********************************************************************/
/* functions invoked by ST Core */
/* called when ST Core wants to
* enable ST LL */
void st_ll_enable(struct st_data_s *ll)
{
ll->ll_state = ST_LL_AWAKE;
}
/* called when ST Core /local module wants to
* disable ST LL */
void st_ll_disable(struct st_data_s *ll)
{
ll->ll_state = ST_LL_INVALID;
}
/* called when ST Core wants to update the state */
void st_ll_wakeup(struct st_data_s *ll)
{
if (likely(ll->ll_state != ST_LL_AWAKE)) {
send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */
ll->ll_state = ST_LL_ASLEEP_TO_AWAKE;
} else {
/* don't send the duplicate wake_indication */
pr_err(" Chip already AWAKE ");
}
}
/* called when ST Core wants the state */
unsigned long st_ll_getstate(struct st_data_s *ll)
{
pr_debug(" returning state %ld", ll->ll_state);
return ll->ll_state;
}
/* called from ST Core, when a PM related packet arrives */
unsigned long st_ll_sleep_state(struct st_data_s *st_data,
unsigned char cmd)
{
switch (cmd) {
case LL_SLEEP_IND: /* sleep ind */
pr_debug("sleep indication recvd");
ll_device_want_to_sleep(st_data);
break;
case LL_SLEEP_ACK: /* sleep ack */
pr_err("sleep ack rcvd: host shouldn't");
break;
case LL_WAKE_UP_IND: /* wake ind */
pr_debug("wake indication recvd");
ll_device_want_to_wakeup(st_data);
break;
case LL_WAKE_UP_ACK: /* wake ack */
pr_debug("wake ack rcvd");
st_data->ll_state = ST_LL_AWAKE;
break;
default:
pr_err(" unknown input/state ");
return -EINVAL;
}
return 0;
}
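/*
 * Summary of the HCI-LL transitions implemented above:
 *  - LL_SLEEP_IND:   send LL_SLEEP_ACK, state -> ST_LL_ASLEEP
 *  - LL_WAKE_UP_IND: send LL_WAKE_UP_ACK if we were ST_LL_ASLEEP,
 *                    state -> ST_LL_AWAKE
 *  - LL_WAKE_UP_ACK: state -> ST_LL_AWAKE
 *  - st_ll_wakeup(): if not already AWAKE, send LL_WAKE_UP_IND,
 *                    state -> ST_LL_ASLEEP_TO_AWAKE
 *  - st_ll_enable()/st_ll_disable(): force ST_LL_AWAKE / ST_LL_INVALID
 */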
/* Called from ST CORE to initialize ST LL */
long st_ll_init(struct st_data_s *ll)
{
/* set state to invalid */
ll->ll_state = ST_LL_INVALID;
return 0;
}
/* Called from ST CORE to de-initialize ST LL */
long st_ll_deinit(struct st_data_s *ll)
{
return 0;
}
| linux-master | drivers/misc/ti-st/st_ll.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Module initialization and PCIe setup. Card health monitoring and
* recovery functionality. Character device creation and deletion are
* controlled from here.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/log2.h>
#include "card_base.h"
#include "card_ddcb.h"
MODULE_AUTHOR("Frank Haverkamp <[email protected]>");
MODULE_AUTHOR("Michael Ruettger <[email protected]>");
MODULE_AUTHOR("Joerg-Stephan Vogt <[email protected]>");
MODULE_AUTHOR("Michael Jung <[email protected]>");
MODULE_DESCRIPTION("GenWQE Card");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static char genwqe_driver_name[] = GENWQE_DEVNAME;
static struct dentry *debugfs_genwqe;
static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
/* PCI structure for identifying device by PCI vendor and device ID */
static const struct pci_device_id genwqe_device_table[] = {
{ .vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_GENWQE,
.subvendor = PCI_SUBVENDOR_ID_IBM,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
.class = (PCI_CLASSCODE_GENWQE5 << 8),
.class_mask = ~0,
.driver_data = 0 },
/* Initial SR-IOV bring-up image */
{ .vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_GENWQE,
.subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
.class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
.class_mask = ~0,
.driver_data = 0 },
{ .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
.device = 0x0000, /* VF Device ID */
.subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
.class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
.class_mask = ~0,
.driver_data = 0 },
/* Fixed up image */
{ .vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_GENWQE,
.subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
.class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
.class_mask = ~0,
.driver_data = 0 },
{ .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
.device = 0x0000, /* VF Device ID */
.subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
.class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
.class_mask = ~0,
.driver_data = 0 },
/* Even one more ... */
{ .vendor = PCI_VENDOR_ID_IBM,
.device = PCI_DEVICE_GENWQE,
.subvendor = PCI_SUBVENDOR_ID_IBM,
.subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
.class = (PCI_CLASSCODE_GENWQE5 << 8),
.class_mask = ~0,
.driver_data = 0 },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, genwqe_device_table);
/**
* genwqe_devnode() - Set default access mode for genwqe devices.
* @dev: Pointer to device (unused)
* @mode: Carrier to pass-back given mode (permissions)
*
* Default mode should be rw for everybody. Do not change default
* device name.
*/
static char *genwqe_devnode(const struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
return NULL;
}
static const struct class class_genwqe = {
.name = GENWQE_DEVNAME,
.devnode = genwqe_devnode,
};
/**
* genwqe_dev_alloc() - Create and prepare a new card descriptor
*
* Return: Pointer to card descriptor, or ERR_PTR(err) on error
*/
static struct genwqe_dev *genwqe_dev_alloc(void)
{
unsigned int i = 0, j;
struct genwqe_dev *cd;
for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
if (genwqe_devices[i] == NULL)
break;
}
if (i >= GENWQE_CARD_NO_MAX)
return ERR_PTR(-ENODEV);
cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
if (!cd)
return ERR_PTR(-ENOMEM);
cd->card_idx = i;
cd->class_genwqe = &class_genwqe;
cd->debugfs_genwqe = debugfs_genwqe;
/*
* This comes from the kernel config option and can be overridden via
* debugfs.
*/
cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;
init_waitqueue_head(&cd->queue_waitq);
spin_lock_init(&cd->file_lock);
INIT_LIST_HEAD(&cd->file_list);
cd->card_state = GENWQE_CARD_UNUSED;
spin_lock_init(&cd->print_lock);
cd->ddcb_software_timeout = GENWQE_DDCB_SOFTWARE_TIMEOUT;
cd->kill_timeout = GENWQE_KILL_TIMEOUT;
for (j = 0; j < GENWQE_MAX_VFS; j++)
cd->vf_jobtimeout_msec[j] = GENWQE_VF_JOBTIMEOUT_MSEC;
genwqe_devices[i] = cd;
return cd;
}
static void genwqe_dev_free(struct genwqe_dev *cd)
{
if (!cd)
return;
genwqe_devices[cd->card_idx] = NULL;
kfree(cd);
}
/**
* genwqe_bus_reset() - Card recovery
* @cd: GenWQE device information
*
* pci_reset_function() will recover the device and ensure that the
* registers are accessible again when it completes with success. If
* not, the card will stay dead and registers will be inaccessible
* still.
*/
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
void __iomem *mmio;
if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
return -EIO;
mmio = cd->mmio;
cd->mmio = NULL;
pci_iounmap(pci_dev, mmio);
pci_release_mem_regions(pci_dev);
/*
* Firmware/BIOS might change memory mapping during bus reset.
* Settings like bus mastering, ... are backed up and
* restored by pci_reset_function().
*/
dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
rc = pci_reset_function(pci_dev);
if (rc) {
dev_err(&pci_dev->dev,
"[%s] err: failed reset func (rc %d)\n", __func__, rc);
return rc;
}
dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
/*
* Here is the right spot to clear the register read
* failure. pci_bus_reset() does this job in real systems.
*/
cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
GENWQE_INJECT_GFIR_FATAL |
GENWQE_INJECT_GFIR_INFO);
rc = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (rc) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, rc);
return -EIO;
}
cd->mmio = pci_iomap(pci_dev, 0, 0);
if (cd->mmio == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: mapping BAR0 failed\n", __func__);
return -ENOMEM;
}
return 0;
}
/*
* Hardware circumvention section. Certain bitstreams in our test-lab
* had different kinds of problems. Here is where we adjust those
* bitstreams to function well with this version of our device driver.
*
* These circumventions are applied to the physical function only.
* The magic numbers below identify development/manufacturing
* versions of the bitstream used on the card.
*
* Turn off error reporting for old/manufacturing images.
*/
bool genwqe_need_err_masking(struct genwqe_dev *cd)
{
return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}
static void genwqe_tweak_hardware(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
/* Mask FIRs for development images */
if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
dev_warn(&pci_dev->dev,
"FIRs masked due to bitstream %016llx.%016llx\n",
cd->slu_unitcfg, cd->app_unitcfg);
__genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
0xFFFFFFFFFFFFFFFFull);
__genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
0x0000000000000000ull);
}
}
/**
* genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
* @cd: GenWQE device information
*
* Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
* be ignored. This is e.g. true for the bitstream we gave to the card
* manufacturer, but also for some old bitstreams we released to our
* test-lab.
*/
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
{
return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
}
int genwqe_flash_readback_fails(struct genwqe_dev *cd)
{
return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}
/**
* genwqe_T_psec() - Calculate PF/VF timeout register content
* @cd: GenWQE device information
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specify the frequency/speed values. An old
* driver cannot understand new codes and is therefore always a
* problem. It is better to measure the value or to put the
* speed/frequency directly into a register which is always a valid
* value for old as well as for new software.
*/
/* T = 1/f */
static int genwqe_T_psec(struct genwqe_dev *cd)
{
u16 speed; /* 1/f -> 250, 200, 166, 175 */
static const int T[] = { 4000, 5000, 6000, 5714 };
speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
if (speed >= ARRAY_SIZE(T))
return -1; /* illegal value */
return T[speed];
}
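/*
 * Mapping of the 4-bit speed code in SLU_UNITCFG[31:28] to the clock
 * period returned above (derived from the T[] table and the comment on
 * 'speed'): 0 -> 4000 ps (250 MHz), 1 -> 5000 ps (200 MHz),
 * 2 -> 6000 ps (~166 MHz), 3 -> 5714 ps (175 MHz); any other code is
 * rejected with -1.
 */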
/**
* genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
* @cd: GenWQE device information
*
* Do this _after_ card_reset() is called. Otherwise the values will
* vanish. The settings need to be done when the queues are inactive.
*
* The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
* The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
*/
static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
{
u32 T = genwqe_T_psec(cd);
u64 x;
if (GENWQE_PF_JOBTIMEOUT_MSEC == 0)
return false;
/* PF: large value needed, flash update 2sec per block */
x = ilog2(GENWQE_PF_JOBTIMEOUT_MSEC *
16000000000uL/(T * 15)) - 10;
genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
0xff00 | (x & 0xff), 0);
return true;
}
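/*
 * Worked example of the timeout formula above (illustrative numbers
 * only; the real GENWQE_PF_JOBTIMEOUT_MSEC is defined elsewhere):
 * with T = 6000 ps (166 MHz) and a requested PF job timeout of 8000 ms,
 *   x = ilog2(8000 * 16e9 / (6000 * 15)) - 10 = ilog2(~1.42e9) - 10
 *     = 30 - 10 = 20,
 * so the register is written with 0xff00 | 20 and the resulting
 * hardware window is 2^(10+20) * 6 ns * 15/16 ~= 6.0 s.
 * Since ilog2() rounds down, the programmed window never exceeds the
 * requested value.
 */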
/**
* genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
* @cd: GenWQE device information
*/
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
unsigned int vf;
u32 T = genwqe_T_psec(cd);
u64 x;
int totalvfs;
totalvfs = pci_sriov_get_totalvfs(pci_dev);
if (totalvfs <= 0)
return false;
for (vf = 0; vf < totalvfs; vf++) {
if (cd->vf_jobtimeout_msec[vf] == 0)
continue;
x = ilog2(cd->vf_jobtimeout_msec[vf] *
16000000000uL/(T * 15)) - 10;
genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
0xff00 | (x & 0xff), vf + 1);
}
return true;
}
static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
{
unsigned int type, e = 0;
for (type = 0; type < GENWQE_DBG_UNITS; type++) {
switch (type) {
case GENWQE_DBG_UNIT0:
e = genwqe_ffdc_buff_size(cd, 0);
break;
case GENWQE_DBG_UNIT1:
e = genwqe_ffdc_buff_size(cd, 1);
break;
case GENWQE_DBG_UNIT2:
e = genwqe_ffdc_buff_size(cd, 2);
break;
case GENWQE_DBG_REGS:
e = GENWQE_FFDC_REGS;
break;
}
/* currently support only the debug units mentioned here */
cd->ffdc[type].entries = e;
cd->ffdc[type].regs =
kmalloc_array(e, sizeof(struct genwqe_reg),
GFP_KERNEL);
/*
* regs == NULL is ok; the code using it treats this as having no regs.
* Printing a warning is ok in this case.
*/
}
return 0;
}
static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
{
unsigned int type;
for (type = 0; type < GENWQE_DBG_UNITS; type++) {
kfree(cd->ffdc[type].regs);
cd->ffdc[type].regs = NULL;
}
}
static int genwqe_read_ids(struct genwqe_dev *cd)
{
int err = 0;
int slu_id;
struct pci_dev *pci_dev = cd->pci_dev;
cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"err: SLUID=%016llx\n", cd->slu_unitcfg);
err = -EIO;
goto out_err;
}
slu_id = genwqe_get_slu_id(cd);
if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
dev_err(&pci_dev->dev,
"err: incompatible SLU Architecture %u\n", slu_id);
err = -ENOENT;
goto out_err;
}
cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"err: APPID=%016llx\n", cd->app_unitcfg);
err = -EIO;
goto out_err;
}
genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));
/*
* Is access to all registers possible? If we are a VF the
* answer is obvious. If we run fully virtualized, we need to
* check if we can access all registers. If we do not have
* full access we will cause an UR and some informational FIRs
* in the PF, but that should not harm.
*/
if (pci_dev->is_virtfn)
cd->is_privileged = 0;
else
cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
!= IO_ILLEGAL_VALUE);
out_err:
return err;
}
static int genwqe_start(struct genwqe_dev *cd)
{
int err;
struct pci_dev *pci_dev = cd->pci_dev;
err = genwqe_read_ids(cd);
if (err)
return err;
if (genwqe_is_privileged(cd)) {
/* do this after the tweaks. alloc fail is acceptable */
genwqe_ffdc_buffs_alloc(cd);
genwqe_stop_traps(cd);
/* Collect registers e.g. FIRs, UNITIDs, traces ... */
genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
cd->ffdc[GENWQE_DBG_REGS].entries, 0);
genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
cd->ffdc[GENWQE_DBG_UNIT0].regs,
cd->ffdc[GENWQE_DBG_UNIT0].entries);
genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
cd->ffdc[GENWQE_DBG_UNIT1].regs,
cd->ffdc[GENWQE_DBG_UNIT1].entries);
genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
cd->ffdc[GENWQE_DBG_UNIT2].regs,
cd->ffdc[GENWQE_DBG_UNIT2].entries);
genwqe_start_traps(cd);
if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
dev_warn(&pci_dev->dev,
"[%s] chip reload/recovery!\n", __func__);
/*
* Stealth Mode: Reload chip on either hot
* reset or PERST.
*/
cd->softreset = 0x7Cull;
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
cd->softreset);
err = genwqe_bus_reset(cd);
if (err != 0) {
dev_err(&pci_dev->dev,
"[%s] err: bus reset failed!\n",
__func__);
goto out;
}
/*
* Re-read the IDs because
* it could happen that the bitstream load
* failed!
*/
err = genwqe_read_ids(cd);
if (err)
goto out;
}
}
err = genwqe_setup_service_layer(cd); /* does a reset to the card */
if (err != 0) {
dev_err(&pci_dev->dev,
"[%s] err: could not setup servicelayer!\n", __func__);
err = -ENODEV;
goto out;
}
if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */
genwqe_tweak_hardware(cd);
genwqe_setup_pf_jtimer(cd);
genwqe_setup_vf_jtimer(cd);
}
err = genwqe_device_create(cd);
if (err < 0) {
dev_err(&pci_dev->dev,
"err: chdev init failed! (err=%d)\n", err);
goto out_release_service_layer;
}
return 0;
out_release_service_layer:
genwqe_release_service_layer(cd);
out:
if (genwqe_is_privileged(cd))
genwqe_ffdc_buffs_free(cd);
return -EIO;
}
/**
* genwqe_stop() - Stop card operation
* @cd: GenWQE device information
*
* Recovery notes:
* As long as genwqe_thread runs we might access registers during
* error data capture. The same holds for the genwqe_health_thread.
* When genwqe_bus_reset() fails this function might be called twice:
* first by the genwqe_health_thread() and later by genwqe_remove() to
* unbind the device. We must be able to survive that.
*
* This function must be robust enough to be called twice.
*/
static int genwqe_stop(struct genwqe_dev *cd)
{
genwqe_finish_queue(cd); /* no register access */
genwqe_device_remove(cd); /* device removed, procs killed */
genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */
if (genwqe_is_privileged(cd)) {
pci_disable_sriov(cd->pci_dev); /* access pci config space */
genwqe_ffdc_buffs_free(cd);
}
return 0;
}
/**
* genwqe_recover_card() - Try to recover the card if it is possible
* @cd: GenWQE device information
* @fatal_err: Indicate whether to attempt soft reset
*
* If fatal_err is set no register access is possible anymore. It is
* likely that genwqe_start fails in that situation. Proper error
* handling is required in this case.
*
* genwqe_bus_reset() will cause the pci code to call genwqe_remove()
* and later genwqe_probe() for all virtual functions.
*/
static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
genwqe_stop(cd);
/*
* Make sure chip is not reloaded to maintain FFDC. Write SLU
* Reset Register, CPLDReset field to 0.
*/
if (!fatal_err) {
cd->softreset = 0x70ull;
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
}
rc = genwqe_bus_reset(cd);
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] err: card recovery impossible!\n", __func__);
return rc;
}
rc = genwqe_start(cd);
if (rc < 0) {
dev_err(&pci_dev->dev,
"[%s] err: failed to launch device!\n", __func__);
return rc;
}
return 0;
}
static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
{
*gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
return (*gfir & GFIR_ERR_TRIGGER) &&
genwqe_recovery_on_fatal_gfir_required(cd);
}
/**
* genwqe_fir_checking() - Check the fault isolation registers of the card
* @cd: GenWQE device information
*
* If this code works ok, it can be tried out with the help of the genwqe_poke tool:
* sudo ./tools/genwqe_poke 0x8 0xfefefefefef
*
* Now the relevant FIRs/sFIRs should be printed out and the driver should
* invoke recovery (devices are removed and re-added).
*/
static u64 genwqe_fir_checking(struct genwqe_dev *cd)
{
int j, iterations = 0;
u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
struct pci_dev *pci_dev = cd->pci_dev;
healthMonitor:
iterations++;
if (iterations > 16) {
dev_err(&pci_dev->dev, "* exit looping after %d times\n",
iterations);
goto fatal_error;
}
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (gfir != 0x0)
dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
IO_SLC_CFGREG_GFIR, gfir);
if (gfir == IO_ILLEGAL_VALUE)
goto fatal_error;
/*
* Avoiding the printout when no GFIR bit is on prevents continuous
* output, e.g. for the following bug:
* a FIR is set without a 2ndary FIR / the FIR cannot be cleared.
* Comment out the following if to get the prints:
*/
if (gfir == 0)
return 0;
gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */
for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
/* read the primary FIR (pfir) */
fir_addr = (uid << 24) + 0x08;
fir = __genwqe_readq(cd, fir_addr);
if (fir == 0x0)
continue; /* no error in this unit */
dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
if (fir == IO_ILLEGAL_VALUE)
goto fatal_error;
/* read primary FEC */
fec_addr = (uid << 24) + 0x18;
fec = __genwqe_readq(cd, fec_addr);
dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
if (fec == IO_ILLEGAL_VALUE)
goto fatal_error;
for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
/* secondary fir empty, skip it */
if ((fir & mask) == 0x0)
continue;
sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
sfir = __genwqe_readq(cd, sfir_addr);
if (sfir == IO_ILLEGAL_VALUE)
goto fatal_error;
dev_err(&pci_dev->dev,
"* 0x%08x 0x%016llx\n", sfir_addr, sfir);
sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
sfec = __genwqe_readq(cd, sfec_addr);
if (sfec == IO_ILLEGAL_VALUE)
goto fatal_error;
dev_err(&pci_dev->dev,
"* 0x%08x 0x%016llx\n", sfec_addr, sfec);
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (gfir == IO_ILLEGAL_VALUE)
goto fatal_error;
/* gfir turned on during routine! get out and start over. */
if ((gfir_masked == 0x0) &&
(gfir & GFIR_ERR_TRIGGER)) {
goto healthMonitor;
}
/* do not clear if we entered with a fatal gfir */
if (gfir_masked == 0x0) {
/* NEW clear by mask the logged bits */
sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
__genwqe_writeq(cd, sfir_addr, sfir);
dev_dbg(&pci_dev->dev,
"[HM] Clearing 2ndary FIR 0x%08x with 0x%016llx\n",
sfir_addr, sfir);
/*
* note, these cannot be error-Firs
* since gfir_masked is 0 after sfir
* was read. Also, it is safe to do
* this write if sfir=0. Still need to
* clear the primary. This just means
* there is no secondary FIR.
*/
/* clear by mask the logged bit. */
fir_clr_addr = (uid << 24) + 0x10;
__genwqe_writeq(cd, fir_clr_addr, mask);
dev_dbg(&pci_dev->dev,
"[HM] Clearing primary FIR 0x%08x with 0x%016llx\n",
fir_clr_addr, mask);
}
}
}
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (gfir == IO_ILLEGAL_VALUE)
goto fatal_error;
if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
/*
* Check once more that it didn't go on after all the
* FIRS were cleared.
*/
dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
iterations);
goto healthMonitor;
}
return gfir_masked;
fatal_error:
return IO_ILLEGAL_VALUE;
}
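/*
 * Per-unit FIR register layout used by genwqe_fir_checking() above,
 * with uid being the unit number (0..GENWQE_MAX_UNITS-1):
 *   (uid << 24) + 0x08          primary FIR
 *   (uid << 24) + 0x10          primary FIR clear (write mask of bits)
 *   (uid << 24) + 0x18          primary FEC
 *   (uid << 24) + 0x100 + 8*j   secondary FIR for primary FIR bit j
 *   (uid << 24) + 0x300 + 8*j   secondary FEC for primary FIR bit j
 */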
/**
* genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
* @pci_dev: PCI device information struct
*
* Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
* reset method will not work in all cases.
*
* Return: 0 on success or error code from pci_set_pcie_reset_state()
*/
static int genwqe_pci_fundamental_reset(struct pci_dev *pci_dev)
{
int rc;
/*
* lock pci config space access from userspace,
* save state and issue PCIe fundamental reset
*/
pci_cfg_access_lock(pci_dev);
pci_save_state(pci_dev);
rc = pci_set_pcie_reset_state(pci_dev, pcie_warm_reset);
if (!rc) {
/* keep PCIe reset asserted for 250ms */
msleep(250);
pci_set_pcie_reset_state(pci_dev, pcie_deassert_reset);
/* Wait for 2s to reload flash and train the link */
msleep(2000);
}
pci_restore_state(pci_dev);
pci_cfg_access_unlock(pci_dev);
return rc;
}
static int genwqe_platform_recovery(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
int rc;
dev_info(&pci_dev->dev,
"[%s] resetting card for error recovery\n", __func__);
/* Clear out error injection flags */
cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
GENWQE_INJECT_GFIR_FATAL |
GENWQE_INJECT_GFIR_INFO);
genwqe_stop(cd);
/* Try recovering the card with a fundamental reset */
rc = genwqe_pci_fundamental_reset(pci_dev);
if (!rc) {
rc = genwqe_start(cd);
if (!rc)
dev_info(&pci_dev->dev,
"[%s] card recovered\n", __func__);
else
dev_err(&pci_dev->dev,
"[%s] err: cannot start card services! (err=%d)\n",
__func__, rc);
} else {
dev_err(&pci_dev->dev,
"[%s] card reset failed\n", __func__);
}
return rc;
}
/**
* genwqe_reload_bistream() - reload card bitstream
* @cd: GenWQE device information
*
* Set the appropriate register and call a fundamental reset to reload the card
* bitstream.
*
* Return: 0 on success, error code otherwise
*/
static int genwqe_reload_bistream(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
int rc;
dev_info(&pci_dev->dev,
"[%s] resetting card for bitstream reload\n",
__func__);
genwqe_stop(cd);
/*
* Cause a CPLD reprogram with the 'next_bitstream'
* partition on PCIe hot or fundamental reset
*/
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
(cd->softreset & 0xcull) | 0x70ull);
rc = genwqe_pci_fundamental_reset(pci_dev);
if (rc) {
/*
* A fundamental reset failure can be caused
* by lack of support on the arch, so we just
* log the error and try to start the card
* again.
*/
dev_err(&pci_dev->dev,
"[%s] err: failed to reset card for bitstream reload\n",
__func__);
}
rc = genwqe_start(cd);
if (rc) {
dev_err(&pci_dev->dev,
"[%s] err: cannot start card services! (err=%d)\n",
__func__, rc);
return rc;
}
dev_info(&pci_dev->dev,
"[%s] card reloaded\n", __func__);
return 0;
}
/**
* genwqe_health_thread() - Health checking thread
* @data: GenWQE device information
*
* This thread is only started for the PF of the card.
*
* This thread monitors the health of the card. A critical situation
* is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
* this case the card needs to be recovered from outside. Writing to
* registers will very likely not work either.
*
* This thread must only exit if kthread_should_stop() becomes true.
*
* Condition for the health-thread to trigger:
* a) when a kthread_stop() request comes in or
* b) a critical GFIR occurred
*
* Informational GFIRs are checked and potentially printed every
* GENWQE_HEALTH_CHECK_INTERVAL seconds.
*/
static int genwqe_health_thread(void *data)
{
int rc, should_stop = 0;
struct genwqe_dev *cd = data;
struct pci_dev *pci_dev = cd->pci_dev;
u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
health_thread_begin:
while (!kthread_should_stop()) {
rc = wait_event_interruptible_timeout(cd->health_waitq,
(genwqe_health_check_cond(cd, &gfir) ||
(should_stop = kthread_should_stop())),
GENWQE_HEALTH_CHECK_INTERVAL * HZ);
if (should_stop)
break;
if (gfir == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"[%s] GFIR=%016llx\n", __func__, gfir);
goto fatal_error;
}
slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
if (slu_unitcfg == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"[%s] SLU_UNITCFG=%016llx\n",
__func__, slu_unitcfg);
goto fatal_error;
}
app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
if (app_unitcfg == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"[%s] APP_UNITCFG=%016llx\n",
__func__, app_unitcfg);
goto fatal_error;
}
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (gfir == IO_ILLEGAL_VALUE) {
dev_err(&pci_dev->dev,
"[%s] %s: GFIR=%016llx\n", __func__,
(gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
gfir);
goto fatal_error;
}
gfir_masked = genwqe_fir_checking(cd);
if (gfir_masked == IO_ILLEGAL_VALUE)
goto fatal_error;
/*
* GFIR ErrorTrigger bits set => reset the card!
* Never do this for old/manufacturing images!
*/
if ((gfir_masked) && !cd->skip_recovery &&
genwqe_recovery_on_fatal_gfir_required(cd)) {
cd->card_state = GENWQE_CARD_FATAL_ERROR;
rc = genwqe_recover_card(cd, 0);
if (rc < 0) {
/* FIXME Card is unusable and needs unbind! */
goto fatal_error;
}
}
if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) {
/* Userspace requested card bitstream reload */
rc = genwqe_reload_bistream(cd);
if (rc)
goto fatal_error;
}
cd->last_gfir = gfir;
cond_resched();
}
return 0;
fatal_error:
if (cd->use_platform_recovery) {
/*
* Since we use raw accessors, EEH errors won't be detected
* by the platform until we do a non-raw MMIO or config space
* read
*/
readq(cd->mmio + IO_SLC_CFGREG_GFIR);
/* We do nothing if the card is going through PCI recovery */
if (pci_channel_offline(pci_dev))
return -EIO;
/*
* If it's supported by the platform, we try a fundamental reset
* to recover from a fatal error. Otherwise, we continue to wait
* for an external recovery procedure to take care of it.
*/
rc = genwqe_platform_recovery(cd);
if (!rc)
goto health_thread_begin;
}
dev_err(&pci_dev->dev,
"[%s] card unusable. Please trigger unbind!\n", __func__);
/* Bring down logical devices to inform user space via udev remove. */
cd->card_state = GENWQE_CARD_FATAL_ERROR;
genwqe_stop(cd);
/* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
while (!kthread_should_stop())
cond_resched();
return -EIO;
}
static int genwqe_health_check_start(struct genwqe_dev *cd)
{
int rc;
if (GENWQE_HEALTH_CHECK_INTERVAL <= 0)
return 0; /* valid for disabling the service */
/* moved before request_irq() */
/* init_waitqueue_head(&cd->health_waitq); */
cd->health_thread = kthread_run(genwqe_health_thread, cd,
GENWQE_DEVNAME "%d_health",
cd->card_idx);
if (IS_ERR(cd->health_thread)) {
rc = PTR_ERR(cd->health_thread);
cd->health_thread = NULL;
return rc;
}
return 0;
}
static int genwqe_health_thread_running(struct genwqe_dev *cd)
{
return cd->health_thread != NULL;
}
static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
if (!genwqe_health_thread_running(cd))
return -EIO;
kthread_stop(cd->health_thread);
cd->health_thread = NULL;
return 0;
}
/**
* genwqe_pci_setup() - Allocate PCIe related resources for our card
* @cd: GenWQE device information
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
int err;
struct pci_dev *pci_dev = cd->pci_dev;
err = pci_enable_device_mem(pci_dev);
if (err) {
dev_err(&pci_dev->dev,
"err: failed to enable pci memory (err=%d)\n", err);
goto err_out;
}
/* Reserve PCI I/O and memory resources */
err = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (err) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, err);
err = -EIO;
goto err_disable_device;
}
/* check for 64-bit DMA address supported (DAC) */
/* check for 32-bit DMA address supported (SAC) */
if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
dev_err(&pci_dev->dev,
"err: neither DMA32 nor DMA64 supported\n");
err = -EIO;
goto out_release_resources;
}
pci_set_master(pci_dev);
/* EEH recovery requires PCIe fundamental reset */
pci_dev->needs_freset = 1;
/* request complete BAR-0 space (length = 0) */
cd->mmio_len = pci_resource_len(pci_dev, 0);
cd->mmio = pci_iomap(pci_dev, 0, 0);
if (cd->mmio == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: mapping BAR0 failed\n", __func__);
err = -ENOMEM;
goto out_release_resources;
}
cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
if (cd->num_vfs < 0)
cd->num_vfs = 0;
err = genwqe_read_ids(cd);
if (err)
goto out_iounmap;
return 0;
out_iounmap:
pci_iounmap(pci_dev, cd->mmio);
out_release_resources:
pci_release_mem_regions(pci_dev);
err_disable_device:
pci_disable_device(pci_dev);
err_out:
return err;
}
/**
* genwqe_pci_remove() - Free PCIe related resources for our card
* @cd: GenWQE device information
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (cd->mmio)
pci_iounmap(pci_dev, cd->mmio);
pci_release_mem_regions(pci_dev);
pci_disable_device(pci_dev);
}
/**
* genwqe_probe() - Device initialization
* @pci_dev: PCI device information struct
* @id: PCI device ID
*
* Callable for multiple cards. This function is called on bind.
*
* Return: 0 if succeeded, < 0 when failed
*/
static int genwqe_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int err;
struct genwqe_dev *cd;
genwqe_init_crc32();
cd = genwqe_dev_alloc();
if (IS_ERR(cd)) {
dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
(int)PTR_ERR(cd));
return PTR_ERR(cd);
}
dev_set_drvdata(&pci_dev->dev, cd);
cd->pci_dev = pci_dev;
err = genwqe_pci_setup(cd);
if (err < 0) {
dev_err(&pci_dev->dev,
"err: problems with PCI setup (err=%d)\n", err);
goto out_free_dev;
}
err = genwqe_start(cd);
if (err < 0) {
dev_err(&pci_dev->dev,
"err: cannot start card services! (err=%d)\n", err);
goto out_pci_remove;
}
if (genwqe_is_privileged(cd)) {
err = genwqe_health_check_start(cd);
if (err < 0) {
dev_err(&pci_dev->dev,
"err: cannot start health checking! (err=%d)\n",
err);
goto out_stop_services;
}
}
return 0;
out_stop_services:
genwqe_stop(cd);
out_pci_remove:
genwqe_pci_remove(cd);
out_free_dev:
genwqe_dev_free(cd);
return err;
}
/**
* genwqe_remove() - Called when device is removed (hot-plugable)
* @pci_dev: PCI device information struct
*
* Or when the driver is unloaded, respectively when unbind is done.
*/
static void genwqe_remove(struct pci_dev *pci_dev)
{
struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
genwqe_health_check_stop(cd);
/*
* genwqe_stop() must survive if it is called twice
* sequentially. This happens when the health thread calls it
* and fails on genwqe_bus_reset().
*/
genwqe_stop(cd);
genwqe_pci_remove(cd);
genwqe_dev_free(cd);
}
/**
* genwqe_err_error_detected() - Error detection callback
* @pci_dev: PCI device information struct
* @state: PCI channel state
*
* This callback is called by the PCI subsystem whenever a PCI bus
* error is detected.
*/
static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
pci_channel_state_t state)
{
struct genwqe_dev *cd;
dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
cd = dev_get_drvdata(&pci_dev->dev);
if (cd == NULL)
return PCI_ERS_RESULT_DISCONNECT;
/* Stop the card */
genwqe_health_check_stop(cd);
genwqe_stop(cd);
/*
* On permanent failure, the PCI code will call device remove
* after the return of this function.
* genwqe_stop() can be called twice.
*/
if (state == pci_channel_io_perm_failure) {
return PCI_ERS_RESULT_DISCONNECT;
} else {
genwqe_pci_remove(cd);
return PCI_ERS_RESULT_NEED_RESET;
}
}
static pci_ers_result_t genwqe_err_slot_reset(struct pci_dev *pci_dev)
{
int rc;
struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
rc = genwqe_pci_setup(cd);
if (!rc) {
return PCI_ERS_RESULT_RECOVERED;
} else {
dev_err(&pci_dev->dev,
"err: problems with PCI setup (err=%d)\n", rc);
return PCI_ERS_RESULT_DISCONNECT;
}
}
static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
{
return PCI_ERS_RESULT_NONE;
}
static void genwqe_err_resume(struct pci_dev *pci_dev)
{
int rc;
struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
rc = genwqe_start(cd);
if (!rc) {
rc = genwqe_health_check_start(cd);
if (rc)
dev_err(&pci_dev->dev,
"err: cannot start health checking! (err=%d)\n",
rc);
} else {
dev_err(&pci_dev->dev,
"err: cannot start card services! (err=%d)\n", rc);
}
}
static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
{
int rc;
struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
if (numvfs > 0) {
genwqe_setup_vf_jtimer(cd);
rc = pci_enable_sriov(dev, numvfs);
if (rc < 0)
return rc;
return numvfs;
}
if (numvfs == 0) {
pci_disable_sriov(dev);
return 0;
}
return 0;
}
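/*
 * genwqe_sriov_configure() is normally invoked by the PCI core when user
 * space writes to the sriov_numvfs sysfs attribute of the PF, e.g.
 * (illustrative shell commands, the device address is an example):
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs   # enable VFs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs   # disable VFs
 * The VF job timeouts are programmed before pci_enable_sriov() so that
 * the values are already in place when the VFs come up.
 */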
static const struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
.resume = genwqe_err_resume,
};
static struct pci_driver genwqe_driver = {
.name = genwqe_driver_name,
.id_table = genwqe_device_table,
.probe = genwqe_probe,
.remove = genwqe_remove,
.sriov_configure = genwqe_sriov_configure,
.err_handler = &genwqe_err_handler,
};
/**
* genwqe_init_module() - Driver registration and initialization
*/
static int __init genwqe_init_module(void)
{
int rc;
rc = class_register(&class_genwqe);
if (rc) {
pr_err("[%s] create class failed\n", __func__);
return -ENOMEM;
}
debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
rc = pci_register_driver(&genwqe_driver);
if (rc != 0) {
pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc);
goto err_out0;
}
return rc;
err_out0:
debugfs_remove(debugfs_genwqe);
class_unregister(&class_genwqe);
return rc;
}
/**
* genwqe_exit_module() - Driver exit
*/
static void __exit genwqe_exit_module(void)
{
pci_unregister_driver(&genwqe_driver);
debugfs_remove(debugfs_genwqe);
class_unregister(&class_genwqe);
}
module_init(genwqe_init_module);
module_exit(genwqe_exit_module);
| linux-master | drivers/misc/genwqe/card_base.c |